LCOV - code coverage report
Current view: top level - kernel - workqueue.c (source / functions)
Test: coverage.info
Date: 2014-02-18

                  Hit    Total   Coverage
Lines:            385     1234     31.2 %
Functions:         47      112     42.0 %
Branches:         294     1222     24.1 %

           Branch data     Line data    Source code
       1                 :            : /*
       2                 :            :  * kernel/workqueue.c - generic async execution with shared worker pool
       3                 :            :  *
       4                 :            :  * Copyright (C) 2002           Ingo Molnar
       5                 :            :  *
       6                 :            :  *   Derived from the taskqueue/keventd code by:
       7                 :            :  *     David Woodhouse <dwmw2@infradead.org>
       8                 :            :  *     Andrew Morton
       9                 :            :  *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
      10                 :            :  *     Theodore Ts'o <tytso@mit.edu>
      11                 :            :  *
      12                 :            :  * Made to use alloc_percpu by Christoph Lameter.
      13                 :            :  *
      14                 :            :  * Copyright (C) 2010           SUSE Linux Products GmbH
      15                 :            :  * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
      16                 :            :  *
       17                 :            :  * This is the generic async execution mechanism.  Work items are
      18                 :            :  * executed in process context.  The worker pool is shared and
      19                 :            :  * automatically managed.  There are two worker pools for each CPU (one for
      20                 :            :  * normal work items and the other for high priority ones) and some extra
      21                 :            :  * pools for workqueues which are not bound to any specific CPU - the
      22                 :            :  * number of these backing pools is dynamic.
      23                 :            :  *
      24                 :            :  * Please read Documentation/workqueue.txt for details.
      25                 :            :  */
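For orientation, a minimal client-side sketch of how a driver hands work to these shared pools is shown below. It is not part of workqueue.c; the names my_work_fn, my_work and my_example_trigger are made up, while DECLARE_WORK(), schedule_work() and flush_work() are the real interfaces from <linux/workqueue.h>.

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        /* runs in process context on one of the shared worker pools */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_example_trigger(void)
{
        schedule_work(&my_work);        /* queue onto system_wq */
        flush_work(&my_work);           /* wait until my_work_fn() has finished */
}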
      26                 :            : 
      27                 :            : #include <linux/export.h>
      28                 :            : #include <linux/kernel.h>
      29                 :            : #include <linux/sched.h>
      30                 :            : #include <linux/init.h>
      31                 :            : #include <linux/signal.h>
      32                 :            : #include <linux/completion.h>
      33                 :            : #include <linux/workqueue.h>
      34                 :            : #include <linux/slab.h>
      35                 :            : #include <linux/cpu.h>
      36                 :            : #include <linux/notifier.h>
      37                 :            : #include <linux/kthread.h>
      38                 :            : #include <linux/hardirq.h>
      39                 :            : #include <linux/mempolicy.h>
      40                 :            : #include <linux/freezer.h>
      41                 :            : #include <linux/kallsyms.h>
      42                 :            : #include <linux/debug_locks.h>
      43                 :            : #include <linux/lockdep.h>
      44                 :            : #include <linux/idr.h>
      45                 :            : #include <linux/jhash.h>
      46                 :            : #include <linux/hashtable.h>
      47                 :            : #include <linux/rculist.h>
      48                 :            : #include <linux/nodemask.h>
      49                 :            : #include <linux/moduleparam.h>
      50                 :            : #include <linux/uaccess.h>
      51                 :            : 
      52                 :            : #include "workqueue_internal.h"
      53                 :            : 
      54                 :            : enum {
      55                 :            :         /*
      56                 :            :          * worker_pool flags
      57                 :            :          *
       58                 :            :          * A bound pool is either associated with or disassociated from its CPU.
      59                 :            :          * While associated (!DISASSOCIATED), all workers are bound to the
      60                 :            :          * CPU and none has %WORKER_UNBOUND set and concurrency management
      61                 :            :          * is in effect.
      62                 :            :          *
      63                 :            :          * While DISASSOCIATED, the cpu may be offline and all workers have
      64                 :            :          * %WORKER_UNBOUND set and concurrency management disabled, and may
      65                 :            :          * be executing on any CPU.  The pool behaves as an unbound one.
      66                 :            :          *
      67                 :            :          * Note that DISASSOCIATED should be flipped only while holding
      68                 :            :          * manager_mutex to avoid changing binding state while
      69                 :            :          * create_worker() is in progress.
      70                 :            :          */
      71                 :            :         POOL_MANAGE_WORKERS     = 1 << 0, /* need to manage workers */
      72                 :            :         POOL_DISASSOCIATED      = 1 << 2, /* cpu can't serve workers */
      73                 :            :         POOL_FREEZING           = 1 << 3, /* freeze in progress */
      74                 :            : 
      75                 :            :         /* worker flags */
      76                 :            :         WORKER_STARTED          = 1 << 0, /* started */
      77                 :            :         WORKER_DIE              = 1 << 1, /* die die die */
      78                 :            :         WORKER_IDLE             = 1 << 2, /* is idle */
      79                 :            :         WORKER_PREP             = 1 << 3, /* preparing to run works */
      80                 :            :         WORKER_CPU_INTENSIVE    = 1 << 6, /* cpu intensive */
      81                 :            :         WORKER_UNBOUND          = 1 << 7, /* worker is unbound */
      82                 :            :         WORKER_REBOUND          = 1 << 8, /* worker was rebound */
      83                 :            : 
      84                 :            :         WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
      85                 :            :                                   WORKER_UNBOUND | WORKER_REBOUND,
      86                 :            : 
      87                 :            :         NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
      88                 :            : 
      89                 :            :         UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
      90                 :            :         BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
      91                 :            : 
      92                 :            :         MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
      93                 :            :         IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
      94                 :            : 
      95                 :            :         MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
      96                 :            :                                                 /* call for help after 10ms
      97                 :            :                                                    (min two ticks) */
      98                 :            :         MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
       99                 :            :         CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */
     100                 :            : 
     101                 :            :         /*
      102                 :            :          * Rescue workers are used only in emergencies and shared by
      103                 :            :          * all cpus.  Give them nice level -20.
     104                 :            :          */
     105                 :            :         RESCUER_NICE_LEVEL      = -20,
     106                 :            :         HIGHPRI_NICE_LEVEL      = -20,
     107                 :            : 
     108                 :            :         WQ_NAME_LEN             = 24,
     109                 :            : };
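As a worked illustration of the MAYDAY_INITIAL_TIMEOUT expression above, the following plain user-space sketch (not kernel code; the helper name and the sample HZ values are arbitrary) evaluates it for a few common tick rates.

#include <stdio.h>

/* same expression as MAYDAY_INITIAL_TIMEOUT: nominally 10ms, at least 2 ticks */
static long mayday_initial_timeout(long hz)
{
        return hz / 100 >= 2 ? hz / 100 : 2;
}

int main(void)
{
        long hz_values[] = { 100, 250, 1000 };  /* common CONFIG_HZ choices */
        unsigned int i;

        for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
                long hz = hz_values[i];
                long ticks = mayday_initial_timeout(hz);

                /* HZ=100 -> 2 ticks (20ms floor), HZ=1000 -> 10 ticks (10ms) */
                printf("HZ=%ld: %ld ticks (~%ld ms)\n", hz, ticks, ticks * 1000 / hz);
        }
        return 0;
}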
     110                 :            : 
     111                 :            : /*
     112                 :            :  * Structure fields follow one of the following exclusion rules.
     113                 :            :  *
     114                 :            :  * I: Modifiable by initialization/destruction paths and read-only for
     115                 :            :  *    everyone else.
     116                 :            :  *
     117                 :            :  * P: Preemption protected.  Disabling preemption is enough and should
     118                 :            :  *    only be modified and accessed from the local cpu.
     119                 :            :  *
     120                 :            :  * L: pool->lock protected.  Access with pool->lock held.
     121                 :            :  *
     122                 :            :  * X: During normal operation, modification requires pool->lock and should
     123                 :            :  *    be done only from local cpu.  Either disabling preemption on local
     124                 :            :  *    cpu or grabbing pool->lock is enough for read access.  If
     125                 :            :  *    POOL_DISASSOCIATED is set, it's identical to L.
     126                 :            :  *
     127                 :            :  * MG: pool->manager_mutex and pool->lock protected.  Writes require both
     128                 :            :  *     locks.  Reads can happen under either lock.
     129                 :            :  *
     130                 :            :  * PL: wq_pool_mutex protected.
     131                 :            :  *
     132                 :            :  * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
     133                 :            :  *
     134                 :            :  * WQ: wq->mutex protected.
     135                 :            :  *
     136                 :            :  * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
     137                 :            :  *
     138                 :            :  * MD: wq_mayday_lock protected.
     139                 :            :  */
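An illustrative sketch of how the annotations above are applied in practice. The helper example_pool_has_work() is hypothetical; spin_lock_irq(), spin_unlock_irq() and list_empty() are the primitives this file itself uses.

/*
 * pool->worklist is marked "L:", so it is read with pool->lock held;
 * pool->id is "I:", so it needs no locking once the pool is initialized.
 */
static bool example_pool_has_work(struct worker_pool *pool)
{
        bool busy;

        spin_lock_irq(&pool->lock);
        busy = !list_empty(&pool->worklist);    /* L: field, lock held */
        spin_unlock_irq(&pool->lock);

        return busy;
}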
     140                 :            : 
     141                 :            : /* struct worker is defined in workqueue_internal.h */
     142                 :            : 
     143                 :            : struct worker_pool {
     144                 :            :         spinlock_t              lock;           /* the pool lock */
     145                 :            :         int                     cpu;            /* I: the associated cpu */
     146                 :            :         int                     node;           /* I: the associated node ID */
     147                 :            :         int                     id;             /* I: pool ID */
     148                 :            :         unsigned int            flags;          /* X: flags */
     149                 :            : 
     150                 :            :         struct list_head        worklist;       /* L: list of pending works */
     151                 :            :         int                     nr_workers;     /* L: total number of workers */
     152                 :            : 
     153                 :            :         /* nr_idle includes the ones off idle_list for rebinding */
     154                 :            :         int                     nr_idle;        /* L: currently idle ones */
     155                 :            : 
     156                 :            :         struct list_head        idle_list;      /* X: list of idle workers */
     157                 :            :         struct timer_list       idle_timer;     /* L: worker idle timeout */
     158                 :            :         struct timer_list       mayday_timer;   /* L: SOS timer for workers */
     159                 :            : 
      160                 :            :         /* a worker is either on busy_hash or idle_list, or is the manager */
     161                 :            :         DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
     162                 :            :                                                 /* L: hash of busy workers */
     163                 :            : 
     164                 :            :         /* see manage_workers() for details on the two manager mutexes */
     165                 :            :         struct mutex            manager_arb;    /* manager arbitration */
     166                 :            :         struct mutex            manager_mutex;  /* manager exclusion */
     167                 :            :         struct idr              worker_idr;     /* MG: worker IDs and iteration */
     168                 :            : 
     169                 :            :         struct workqueue_attrs  *attrs;         /* I: worker attributes */
     170                 :            :         struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
     171                 :            :         int                     refcnt;         /* PL: refcnt for unbound pools */
     172                 :            : 
     173                 :            :         /*
     174                 :            :          * The current concurrency level.  As it's likely to be accessed
     175                 :            :          * from other CPUs during try_to_wake_up(), put it in a separate
     176                 :            :          * cacheline.
     177                 :            :          */
     178                 :            :         atomic_t                nr_running ____cacheline_aligned_in_smp;
     179                 :            : 
     180                 :            :         /*
     181                 :            :          * Destruction of pool is sched-RCU protected to allow dereferences
     182                 :            :          * from get_work_pool().
     183                 :            :          */
     184                 :            :         struct rcu_head         rcu;
     185                 :            : } ____cacheline_aligned_in_smp;
     186                 :            : 
     187                 :            : /*
     188                 :            :  * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
     189                 :            :  * of work_struct->data are used for flags and the remaining high bits
     190                 :            :  * point to the pwq; thus, pwqs need to be aligned at two's power of the
     191                 :            :  * number of flag bits.
     192                 :            :  */
     193                 :            : struct pool_workqueue {
     194                 :            :         struct worker_pool      *pool;          /* I: the associated pool */
     195                 :            :         struct workqueue_struct *wq;            /* I: the owning workqueue */
     196                 :            :         int                     work_color;     /* L: current color */
     197                 :            :         int                     flush_color;    /* L: flushing color */
     198                 :            :         int                     refcnt;         /* L: reference count */
     199                 :            :         int                     nr_in_flight[WORK_NR_COLORS];
     200                 :            :                                                 /* L: nr of in_flight works */
     201                 :            :         int                     nr_active;      /* L: nr of active works */
     202                 :            :         int                     max_active;     /* L: max active works */
     203                 :            :         struct list_head        delayed_works;  /* L: delayed works */
     204                 :            :         struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
     205                 :            :         struct list_head        mayday_node;    /* MD: node on wq->maydays */
     206                 :            : 
     207                 :            :         /*
     208                 :            :          * Release of unbound pwq is punted to system_wq.  See put_pwq()
     209                 :            :          * and pwq_unbound_release_workfn() for details.  pool_workqueue
     210                 :            :          * itself is also sched-RCU protected so that the first pwq can be
     211                 :            :          * determined without grabbing wq->mutex.
     212                 :            :          */
     213                 :            :         struct work_struct      unbound_release_work;
     214                 :            :         struct rcu_head         rcu;
     215                 :            : } __aligned(1 << WORK_STRUCT_FLAG_BITS);
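The alignment trick described in the comment above can be demonstrated with a small stand-alone user-space program. The FLAG_BITS value and all names below are stand-ins, not the kernel's actual constants.

#define _POSIX_C_SOURCE 200112L
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_BITS       8                       /* stand-in for WORK_STRUCT_FLAG_BITS */
#define FLAG_MASK       ((1UL << FLAG_BITS) - 1)
#define PENDING_FLAG    0x1UL                   /* stand-in for WORK_STRUCT_PENDING */

struct fake_pwq { int dummy; };

int main(void)
{
        struct fake_pwq *pwq;
        uintptr_t data;

        /* 2^FLAG_BITS alignment guarantees the low FLAG_BITS address bits are zero */
        if (posix_memalign((void **)&pwq, 1UL << FLAG_BITS, sizeof(*pwq)))
                return 1;

        data = (uintptr_t)pwq | PENDING_FLAG;                   /* pack pointer + flag */
        assert((struct fake_pwq *)(data & ~FLAG_MASK) == pwq);  /* recover pointer */
        printf("flag bits: %#lx\n", (unsigned long)(data & FLAG_MASK));

        free(pwq);
        return 0;
}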
     216                 :            : 
     217                 :            : /*
     218                 :            :  * Structure used to wait for workqueue flush.
     219                 :            :  */
     220                 :            : struct wq_flusher {
     221                 :            :         struct list_head        list;           /* WQ: list of flushers */
     222                 :            :         int                     flush_color;    /* WQ: flush color waiting for */
     223                 :            :         struct completion       done;           /* flush completion */
     224                 :            : };
     225                 :            : 
     226                 :            : struct wq_device;
     227                 :            : 
     228                 :            : /*
     229                 :            :  * The externally visible workqueue.  It relays the issued work items to
     230                 :            :  * the appropriate worker_pool through its pool_workqueues.
     231                 :            :  */
     232                 :            : struct workqueue_struct {
     233                 :            :         struct list_head        pwqs;           /* WR: all pwqs of this wq */
     234                 :            :         struct list_head        list;           /* PL: list of all workqueues */
     235                 :            : 
     236                 :            :         struct mutex            mutex;          /* protects this wq */
     237                 :            :         int                     work_color;     /* WQ: current work color */
     238                 :            :         int                     flush_color;    /* WQ: current flush color */
     239                 :            :         atomic_t                nr_pwqs_to_flush; /* flush in progress */
     240                 :            :         struct wq_flusher       *first_flusher; /* WQ: first flusher */
     241                 :            :         struct list_head        flusher_queue;  /* WQ: flush waiters */
     242                 :            :         struct list_head        flusher_overflow; /* WQ: flush overflow list */
     243                 :            : 
     244                 :            :         struct list_head        maydays;        /* MD: pwqs requesting rescue */
     245                 :            :         struct worker           *rescuer;       /* I: rescue worker */
     246                 :            : 
     247                 :            :         int                     nr_drainers;    /* WQ: drain in progress */
     248                 :            :         int                     saved_max_active; /* WQ: saved pwq max_active */
     249                 :            : 
     250                 :            :         struct workqueue_attrs  *unbound_attrs; /* WQ: only for unbound wqs */
     251                 :            :         struct pool_workqueue   *dfl_pwq;       /* WQ: only for unbound wqs */
     252                 :            : 
     253                 :            : #ifdef CONFIG_SYSFS
     254                 :            :         struct wq_device        *wq_dev;        /* I: for sysfs interface */
     255                 :            : #endif
     256                 :            : #ifdef CONFIG_LOCKDEP
     257                 :            :         struct lockdep_map      lockdep_map;
     258                 :            : #endif
     259                 :            :         char                    name[WQ_NAME_LEN]; /* I: workqueue name */
     260                 :            : 
     261                 :            :         /* hot fields used during command issue, aligned to cacheline */
     262                 :            :         unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
     263                 :            :         struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
     264                 :            :         struct pool_workqueue __rcu *numa_pwq_tbl[]; /* FR: unbound pwqs indexed by node */
     265                 :            : };
     266                 :            : 
     267                 :            : static struct kmem_cache *pwq_cache;
     268                 :            : 
     269                 :            : static int wq_numa_tbl_len;             /* highest possible NUMA node id + 1 */
     270                 :            : static cpumask_var_t *wq_numa_possible_cpumask;
     271                 :            :                                         /* possible CPUs of each node */
     272                 :            : 
     273                 :            : static bool wq_disable_numa;
     274                 :            : module_param_named(disable_numa, wq_disable_numa, bool, 0444);
     275                 :            : 
     276                 :            : /* see the comment above the definition of WQ_POWER_EFFICIENT */
     277                 :            : #ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
     278                 :            : static bool wq_power_efficient = true;
     279                 :            : #else
     280                 :            : static bool wq_power_efficient;
     281                 :            : #endif
     282                 :            : 
     283                 :            : module_param_named(power_efficient, wq_power_efficient, bool, 0444);
     284                 :            : 
     285                 :            : static bool wq_numa_enabled;            /* unbound NUMA affinity enabled */
     286                 :            : 
     287                 :            : /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
     288                 :            : static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
     289                 :            : 
     290                 :            : static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
     291                 :            : static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
     292                 :            : 
     293                 :            : static LIST_HEAD(workqueues);           /* PL: list of all workqueues */
     294                 :            : static bool workqueue_freezing;         /* PL: have wqs started freezing? */
     295                 :            : 
     296                 :            : /* the per-cpu worker pools */
     297                 :            : static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
     298                 :            :                                      cpu_worker_pools);
     299                 :            : 
     300                 :            : static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */
     301                 :            : 
     302                 :            : /* PL: hash of all unbound pools keyed by pool->attrs */
     303                 :            : static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
     304                 :            : 
     305                 :            : /* I: attributes used when instantiating standard unbound pools on demand */
     306                 :            : static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
     307                 :            : 
     308                 :            : /* I: attributes used when instantiating ordered pools on demand */
     309                 :            : static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
     310                 :            : 
     311                 :            : struct workqueue_struct *system_wq __read_mostly;
     312                 :            : EXPORT_SYMBOL(system_wq);
     313                 :            : struct workqueue_struct *system_highpri_wq __read_mostly;
     314                 :            : EXPORT_SYMBOL_GPL(system_highpri_wq);
     315                 :            : struct workqueue_struct *system_long_wq __read_mostly;
     316                 :            : EXPORT_SYMBOL_GPL(system_long_wq);
     317                 :            : struct workqueue_struct *system_unbound_wq __read_mostly;
     318                 :            : EXPORT_SYMBOL_GPL(system_unbound_wq);
     319                 :            : struct workqueue_struct *system_freezable_wq __read_mostly;
     320                 :            : EXPORT_SYMBOL_GPL(system_freezable_wq);
     321                 :            : struct workqueue_struct *system_power_efficient_wq __read_mostly;
     322                 :            : EXPORT_SYMBOL_GPL(system_power_efficient_wq);
     323                 :            : struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
     324                 :            : EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
     325                 :            : 
     326                 :            : static int worker_thread(void *__worker);
     327                 :            : static void copy_workqueue_attrs(struct workqueue_attrs *to,
     328                 :            :                                  const struct workqueue_attrs *from);
     329                 :            : 
     330                 :            : #define CREATE_TRACE_POINTS
     331                 :            : #include <trace/events/workqueue.h>
     332                 :            : 
     333                 :            : #define assert_rcu_or_pool_mutex()                                      \
     334                 :            :         rcu_lockdep_assert(rcu_read_lock_sched_held() ||                \
     335                 :            :                            lockdep_is_held(&wq_pool_mutex),         \
     336                 :            :                            "sched RCU or wq_pool_mutex should be held")
     337                 :            : 
     338                 :            : #define assert_rcu_or_wq_mutex(wq)                                      \
     339                 :            :         rcu_lockdep_assert(rcu_read_lock_sched_held() ||                \
     340                 :            :                            lockdep_is_held(&wq->mutex),                  \
     341                 :            :                            "sched RCU or wq->mutex should be held")
     342                 :            : 
     343                 :            : #ifdef CONFIG_LOCKDEP
     344                 :            : #define assert_manager_or_pool_lock(pool)                               \
     345                 :            :         WARN_ONCE(debug_locks &&                                        \
     346                 :            :                   !lockdep_is_held(&(pool)->manager_mutex) &&            \
     347                 :            :                   !lockdep_is_held(&(pool)->lock),                       \
     348                 :            :                   "pool->manager_mutex or ->lock should be held")
     349                 :            : #else
     350                 :            : #define assert_manager_or_pool_lock(pool)       do { } while (0)
     351                 :            : #endif
     352                 :            : 
     353                 :            : #define for_each_cpu_worker_pool(pool, cpu)                             \
     354                 :            :         for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];           \
     355                 :            :              (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
     356                 :            :              (pool)++)
     357                 :            : 
     358                 :            : /**
     359                 :            :  * for_each_pool - iterate through all worker_pools in the system
     360                 :            :  * @pool: iteration cursor
     361                 :            :  * @pi: integer used for iteration
     362                 :            :  *
     363                 :            :  * This must be called either with wq_pool_mutex held or sched RCU read
     364                 :            :  * locked.  If the pool needs to be used beyond the locking in effect, the
     365                 :            :  * caller is responsible for guaranteeing that the pool stays online.
     366                 :            :  *
     367                 :            :  * The if/else clause exists only for the lockdep assertion and can be
     368                 :            :  * ignored.
     369                 :            :  */
     370                 :            : #define for_each_pool(pool, pi)                                         \
     371                 :            :         idr_for_each_entry(&worker_pool_idr, pool, pi)                      \
     372                 :            :                 if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
     373                 :            :                 else
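The "if (({ ...; false; })) { } else" construct used here (and by for_each_pool_worker() and for_each_pwq() below) can be seen in isolation in the following user-space sketch: the statement expression runs the check on every iteration, always yields false, and the caller's statement becomes the else branch. It relies on GCC/Clang statement expressions, and every name in it is made up.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int lock_held = 1;                       /* pretend lock state for the demo */

#define assert_lock_held()      assert(lock_held)

#define for_each_checked(i, n)                                  \
        for ((i) = 0; (i) < (n); (i)++)                         \
                if (({ assert_lock_held(); false; })) { }       \
                else

int main(void)
{
        int i;

        for_each_checked(i, 3)
                printf("iteration %d\n", i);    /* becomes the else body */
        return 0;
}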
     374                 :            : 
     375                 :            : /**
     376                 :            :  * for_each_pool_worker - iterate through all workers of a worker_pool
     377                 :            :  * @worker: iteration cursor
     378                 :            :  * @wi: integer used for iteration
     379                 :            :  * @pool: worker_pool to iterate workers of
     380                 :            :  *
     381                 :            :  * This must be called with either @pool->manager_mutex or ->lock held.
     382                 :            :  *
     383                 :            :  * The if/else clause exists only for the lockdep assertion and can be
     384                 :            :  * ignored.
     385                 :            :  */
     386                 :            : #define for_each_pool_worker(worker, wi, pool)                          \
     387                 :            :         idr_for_each_entry(&(pool)->worker_idr, (worker), (wi))          \
     388                 :            :                 if (({ assert_manager_or_pool_lock((pool)); false; })) { } \
     389                 :            :                 else
     390                 :            : 
     391                 :            : /**
     392                 :            :  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
     393                 :            :  * @pwq: iteration cursor
     394                 :            :  * @wq: the target workqueue
     395                 :            :  *
     396                 :            :  * This must be called either with wq->mutex held or sched RCU read locked.
     397                 :            :  * If the pwq needs to be used beyond the locking in effect, the caller is
     398                 :            :  * responsible for guaranteeing that the pwq stays online.
     399                 :            :  *
     400                 :            :  * The if/else clause exists only for the lockdep assertion and can be
     401                 :            :  * ignored.
     402                 :            :  */
     403                 :            : #define for_each_pwq(pwq, wq)                                           \
     404                 :            :         list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)           \
     405                 :            :                 if (({ assert_rcu_or_wq_mutex(wq); false; })) { }       \
     406                 :            :                 else
     407                 :            : 
     408                 :            : #ifdef CONFIG_DEBUG_OBJECTS_WORK
     409                 :            : 
     410                 :            : static struct debug_obj_descr work_debug_descr;
     411                 :            : 
     412                 :            : static void *work_debug_hint(void *addr)
     413                 :            : {
     414                 :            :         return ((struct work_struct *) addr)->func;
     415                 :            : }
     416                 :            : 
     417                 :            : /*
     418                 :            :  * fixup_init is called when:
     419                 :            :  * - an active object is initialized
     420                 :            :  */
     421                 :            : static int work_fixup_init(void *addr, enum debug_obj_state state)
     422                 :            : {
     423                 :            :         struct work_struct *work = addr;
     424                 :            : 
     425                 :            :         switch (state) {
     426                 :            :         case ODEBUG_STATE_ACTIVE:
     427                 :            :                 cancel_work_sync(work);
     428                 :            :                 debug_object_init(work, &work_debug_descr);
     429                 :            :                 return 1;
     430                 :            :         default:
     431                 :            :                 return 0;
     432                 :            :         }
     433                 :            : }
     434                 :            : 
     435                 :            : /*
     436                 :            :  * fixup_activate is called when:
     437                 :            :  * - an active object is activated
     438                 :            :  * - an unknown object is activated (might be a statically initialized object)
     439                 :            :  */
     440                 :            : static int work_fixup_activate(void *addr, enum debug_obj_state state)
     441                 :            : {
     442                 :            :         struct work_struct *work = addr;
     443                 :            : 
     444                 :            :         switch (state) {
     445                 :            : 
     446                 :            :         case ODEBUG_STATE_NOTAVAILABLE:
     447                 :            :                 /*
     448                 :            :                  * This is not really a fixup. The work struct was
     449                 :            :                  * statically initialized. We just make sure that it
     450                 :            :                  * is tracked in the object tracker.
     451                 :            :                  */
     452                 :            :                 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
     453                 :            :                         debug_object_init(work, &work_debug_descr);
     454                 :            :                         debug_object_activate(work, &work_debug_descr);
     455                 :            :                         return 0;
     456                 :            :                 }
     457                 :            :                 WARN_ON_ONCE(1);
     458                 :            :                 return 0;
     459                 :            : 
     460                 :            :         case ODEBUG_STATE_ACTIVE:
     461                 :            :                 WARN_ON(1);
     462                 :            : 
     463                 :            :         default:
     464                 :            :                 return 0;
     465                 :            :         }
     466                 :            : }
     467                 :            : 
     468                 :            : /*
     469                 :            :  * fixup_free is called when:
     470                 :            :  * - an active object is freed
     471                 :            :  */
     472                 :            : static int work_fixup_free(void *addr, enum debug_obj_state state)
     473                 :            : {
     474                 :            :         struct work_struct *work = addr;
     475                 :            : 
     476                 :            :         switch (state) {
     477                 :            :         case ODEBUG_STATE_ACTIVE:
     478                 :            :                 cancel_work_sync(work);
     479                 :            :                 debug_object_free(work, &work_debug_descr);
     480                 :            :                 return 1;
     481                 :            :         default:
     482                 :            :                 return 0;
     483                 :            :         }
     484                 :            : }
     485                 :            : 
     486                 :            : static struct debug_obj_descr work_debug_descr = {
     487                 :            :         .name           = "work_struct",
     488                 :            :         .debug_hint     = work_debug_hint,
     489                 :            :         .fixup_init     = work_fixup_init,
     490                 :            :         .fixup_activate = work_fixup_activate,
     491                 :            :         .fixup_free     = work_fixup_free,
     492                 :            : };
     493                 :            : 
     494                 :            : static inline void debug_work_activate(struct work_struct *work)
     495                 :            : {
     496                 :            :         debug_object_activate(work, &work_debug_descr);
     497                 :            : }
     498                 :            : 
     499                 :            : static inline void debug_work_deactivate(struct work_struct *work)
     500                 :            : {
     501                 :            :         debug_object_deactivate(work, &work_debug_descr);
     502                 :            : }
     503                 :            : 
     504                 :            : void __init_work(struct work_struct *work, int onstack)
     505                 :            : {
     506                 :            :         if (onstack)
     507                 :            :                 debug_object_init_on_stack(work, &work_debug_descr);
     508                 :            :         else
     509                 :            :                 debug_object_init(work, &work_debug_descr);
     510                 :            : }
     511                 :            : EXPORT_SYMBOL_GPL(__init_work);
     512                 :            : 
     513                 :            : void destroy_work_on_stack(struct work_struct *work)
     514                 :            : {
     515                 :            :         debug_object_free(work, &work_debug_descr);
     516                 :            : }
     517                 :            : EXPORT_SYMBOL_GPL(destroy_work_on_stack);
     518                 :            : 
     519                 :            : #else
     520                 :            : static inline void debug_work_activate(struct work_struct *work) { }
     521                 :            : static inline void debug_work_deactivate(struct work_struct *work) { }
     522                 :            : #endif
     523                 :            : 
     524                 :            : /**
      525                 :            :  * worker_pool_assign_id - allocate ID and assign it to @pool
     526                 :            :  * @pool: the pool pointer of interest
     527                 :            :  *
     528                 :            :  * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
     529                 :            :  * successfully, -errno on failure.
     530                 :            :  */
     531                 :          0 : static int worker_pool_assign_id(struct worker_pool *pool)
     532                 :            : {
     533                 :            :         int ret;
     534                 :            : 
     535                 :            :         lockdep_assert_held(&wq_pool_mutex);
     536                 :            : 
     537                 :          0 :         ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
     538                 :            :                         GFP_KERNEL);
     539         [ #  # ]:          0 :         if (ret >= 0) {
     540                 :          0 :                 pool->id = ret;
     541                 :          0 :                 return 0;
     542                 :            :         }
     543                 :            :         return ret;
     544                 :            : }
     545                 :            : 
     546                 :            : /**
     547                 :            :  * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
     548                 :            :  * @wq: the target workqueue
     549                 :            :  * @node: the node ID
     550                 :            :  *
      551                 :            :  * This must be called either with wq->mutex held or sched RCU read locked.
     552                 :            :  * If the pwq needs to be used beyond the locking in effect, the caller is
     553                 :            :  * responsible for guaranteeing that the pwq stays online.
     554                 :            :  *
     555                 :            :  * Return: The unbound pool_workqueue for @node.
     556                 :            :  */
     557                 :            : static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
     558                 :            :                                                   int node)
     559                 :            : {
     560                 :            :         assert_rcu_or_wq_mutex(wq);
     561                 :      95397 :         return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
     562                 :            : }
     563                 :            : 
     564                 :            : static unsigned int work_color_to_flags(int color)
     565                 :            : {
     566                 :     966932 :         return color << WORK_STRUCT_COLOR_SHIFT;
     567                 :            : }
     568                 :            : 
     569                 :            : static int get_work_color(struct work_struct *work)
     570                 :            : {
     571                 :     967037 :         return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
     572                 :            :                 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
     573                 :            : }
     574                 :            : 
     575                 :            : static int work_next_color(int color)
     576                 :            : {
     577                 :      11832 :         return (color + 1) % WORK_NR_COLORS;
     578                 :            : }
     579                 :            : 
     580                 :            : /*
      581                 :            :  * While queued, %WORK_STRUCT_PWQ is set and non-flag bits of a work's data
     582                 :            :  * contain the pointer to the queued pwq.  Once execution starts, the flag
     583                 :            :  * is cleared and the high bits contain OFFQ flags and pool ID.
     584                 :            :  *
     585                 :            :  * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
     586                 :            :  * and clear_work_data() can be used to set the pwq, pool or clear
     587                 :            :  * work->data.  These functions should only be called while the work is
     588                 :            :  * owned - ie. while the PENDING bit is set.
     589                 :            :  *
     590                 :            :  * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
     591                 :            :  * corresponding to a work.  Pool is available once the work has been
     592                 :            :  * queued anywhere after initialization until it is sync canceled.  pwq is
     593                 :            :  * available only while the work item is queued.
     594                 :            :  *
     595                 :            :  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
     596                 :            :  * canceled.  While being canceled, a work item may have its PENDING set
     597                 :            :  * but stay off timer and worklist for arbitrarily long and nobody should
     598                 :            :  * try to steal the PENDING bit.
     599                 :            :  */
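A user-space sketch of the off-queue encoding described above, using stand-in constants rather than the kernel's real WORK_* values: the pool ID lives in the bits above the shift, the OFFQ flags below it.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define STRUCT_PWQ      (1UL << 2)      /* stand-in for WORK_STRUCT_PWQ */
#define OFFQ_CANCELING  (1UL << 4)      /* stand-in for WORK_OFFQ_CANCELING */
#define OFFQ_POOL_SHIFT 5               /* stand-in for WORK_OFFQ_POOL_SHIFT */

int main(void)
{
        /* off queue: pool ID 42 above OFFQ_POOL_SHIFT, canceling flag below it */
        uintptr_t data = ((uintptr_t)42 << OFFQ_POOL_SHIFT) | OFFQ_CANCELING;

        assert(!(data & STRUCT_PWQ));           /* not currently queued */
        assert(data & OFFQ_CANCELING);          /* cancel in progress */
        printf("pool id = %lu\n", (unsigned long)(data >> OFFQ_POOL_SHIFT));
        return 0;
}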
     600                 :            : static inline void set_work_data(struct work_struct *work, unsigned long data,
     601                 :            :                                  unsigned long flags)
     602                 :            : {
     603 [ -  + ][ #  # ]:    1935599 :         WARN_ON_ONCE(!work_pending(work));
         [ #  # ][ -  + ]
         [ #  # ][ #  # ]
         [ -  + ][ #  # ]
         [ #  # ][ -  + ]
         [ #  # ][ #  # ]
         [ -  + ][ #  # ]
                 [ #  # ]
     604                 :    1934750 :         atomic_long_set(&work->data, data | flags | work_static(work));
     605                 :            : }
     606                 :            : 
     607                 :          0 : static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
     608                 :            :                          unsigned long extra_flags)
     609                 :            : {
     610                 :     966967 :         set_work_data(work, (unsigned long)pwq,
     611                 :            :                       WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
     612                 :     966967 : }
     613                 :            : 
     614                 :          0 : static void set_work_pool_and_keep_pending(struct work_struct *work,
     615                 :            :                                            int pool_id)
     616                 :            : {
     617                 :       3603 :         set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
     618                 :            :                       WORK_STRUCT_PENDING);
     619                 :       3603 : }
     620                 :            : 
     621                 :          0 : static void set_work_pool_and_clear_pending(struct work_struct *work,
     622                 :            :                                             int pool_id)
     623                 :            : {
     624                 :            :         /*
     625                 :            :          * The following wmb is paired with the implied mb in
     626                 :            :          * test_and_set_bit(PENDING) and ensures all updates to @work made
     627                 :            :          * here are visible to and precede any updates by the next PENDING
     628                 :            :          * owner.
     629                 :            :          */
     630                 :     963425 :         smp_wmb();
     631                 :     963331 :         set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
     632                 :     963331 : }
     633                 :            : 
     634                 :          0 : static void clear_work_data(struct work_struct *work)
     635                 :            : {
     636                 :        849 :         smp_wmb();      /* see set_work_pool_and_clear_pending() */
     637                 :            :         set_work_data(work, WORK_STRUCT_NO_POOL, 0);
     638                 :        849 : }
     639                 :            : 
     640                 :            : static struct pool_workqueue *get_work_pwq(struct work_struct *work)
     641                 :            : {
     642                 :     973253 :         unsigned long data = atomic_long_read(&work->data);
     643                 :            : 
     644 [ +  + ][ #  # ]:     973253 :         if (data & WORK_STRUCT_PWQ)
                 [ +  + ]
           [ #  #  +  + ]
         [ +  - ][ #  # ]
     645                 :     972864 :                 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
     646                 :            :         else
     647                 :            :                 return NULL;
     648                 :            : }
     649                 :            : 
     650                 :            : /**
     651                 :            :  * get_work_pool - return the worker_pool a given work was associated with
     652                 :            :  * @work: the work item of interest
     653                 :            :  *
      654                 :            :  * Pools are created and destroyed under wq_pool_mutex, and they allow read
     655                 :            :  * access under sched-RCU read lock.  As such, this function should be
     656                 :            :  * called under wq_pool_mutex or with preemption disabled.
     657                 :            :  *
     658                 :            :  * All fields of the returned pool are accessible as long as the above
     659                 :            :  * mentioned locking is in effect.  If the returned pool needs to be used
     660                 :            :  * beyond the critical section, the caller is responsible for ensuring the
     661                 :            :  * returned pool is and stays online.
     662                 :            :  *
     663                 :            :  * Return: The worker_pool @work was last associated with.  %NULL if none.
     664                 :            :  */
     665                 :          0 : static struct worker_pool *get_work_pool(struct work_struct *work)
     666                 :            : {
     667                 :     972534 :         unsigned long data = atomic_long_read(&work->data);
     668                 :            :         int pool_id;
     669                 :            : 
     670                 :            :         assert_rcu_or_pool_mutex();
     671                 :            : 
     672         [ +  + ]:     972534 :         if (data & WORK_STRUCT_PWQ)
     673                 :       3633 :                 return ((struct pool_workqueue *)
     674                 :       3633 :                         (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
     675                 :            : 
     676                 :     968901 :         pool_id = data >> WORK_OFFQ_POOL_SHIFT;
     677         [ +  + ]:     968901 :         if (pool_id == WORK_OFFQ_POOL_NONE)
     678                 :            :                 return NULL;
     679                 :            : 
     680                 :     959542 :         return idr_find(&worker_pool_idr, pool_id);
     681                 :            : }
     682                 :            : 
     683                 :            : /**
     684                 :            :  * get_work_pool_id - return the worker pool ID a given work is associated with
     685                 :            :  * @work: the work item of interest
     686                 :            :  *
     687                 :            :  * Return: The worker_pool ID @work was last associated with.
     688                 :            :  * %WORK_OFFQ_POOL_NONE if none.
     689                 :            :  */
     690                 :            : static int get_work_pool_id(struct work_struct *work)
     691                 :            : {
     692                 :        849 :         unsigned long data = atomic_long_read(&work->data);
     693                 :            : 
     694 [ #  # ][ -  + ]:        849 :         if (data & WORK_STRUCT_PWQ)
     695                 :          0 :                 return ((struct pool_workqueue *)
     696                 :          0 :                         (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
     697                 :            : 
     698                 :        849 :         return data >> WORK_OFFQ_POOL_SHIFT;
     699                 :            : }
     700                 :            : 
     701                 :          0 : static void mark_work_canceling(struct work_struct *work)
     702                 :            : {
     703                 :        849 :         unsigned long pool_id = get_work_pool_id(work);
     704                 :            : 
     705                 :        849 :         pool_id <<= WORK_OFFQ_POOL_SHIFT;
     706                 :            :         set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
     707                 :        849 : }
     708                 :            : 
     709                 :            : static bool work_is_canceling(struct work_struct *work)
     710                 :            : {
     711                 :         59 :         unsigned long data = atomic_long_read(&work->data);
     712                 :            : 
     713                 :         59 :         return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
     714                 :            : }
     715                 :            : 
     716                 :            : /*
     717                 :            :  * Policy functions.  These define the policies on how the global worker
     718                 :            :  * pools are managed.  Unless noted otherwise, these functions assume that
     719                 :            :  * they're being called with pool->lock held.
     720                 :            :  */
     721                 :            : 
     722                 :            : static bool __need_more_worker(struct worker_pool *pool)
     723                 :            : {
     724                 :    1702244 :         return !atomic_read(&pool->nr_running);
     725                 :            : }
     726                 :            : 
     727                 :            : /*
     728                 :            :  * Need to wake up a worker?  Called from anything but currently
     729                 :            :  * running workers.
     730                 :            :  *
     731                 :            :  * Note that, because unbound workers never contribute to nr_running, this
     732                 :            :  * function will always return %true for unbound pools as long as the
     733                 :            :  * worklist isn't empty.
     734                 :            :  */
     735                 :            : static bool need_more_worker(struct worker_pool *pool)
     736                 :            : {
     737 [ +  + ][ +  + ]:    1564380 :         return !list_empty(&pool->worklist) && __need_more_worker(pool);
         [ +  + ][ -  + ]
         [ +  + ][ -  + ]
         [ +  + ][ +  + ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ +  + ][ +  - ]
     738                 :            : }
     739                 :            : 
     740                 :            : /* Can I start working?  Called from busy but !running workers. */
     741                 :            : static bool may_start_working(struct worker_pool *pool)
     742                 :            : {
     743                 :            :         return pool->nr_idle;
     744                 :            : }
     745                 :            : 
     746                 :            : /* Do I need to keep working?  Called from currently running workers. */
     747                 :            : static bool keep_working(struct worker_pool *pool)
     748                 :            : {
     749 [ #  # ][ #  # ]:     964713 :         return !list_empty(&pool->worklist) &&
         [ +  + ][ +  + ]
     750                 :     230253 :                 atomic_read(&pool->nr_running) <= 1;
     751                 :            : }
     752                 :            : 
     753                 :            : /* Do we need a new worker?  Called from manager. */
     754                 :        603 : static bool need_to_create_worker(struct worker_pool *pool)
     755                 :            : {
     756 [ +  + ][ -  + ]:     736647 :         return need_more_worker(pool) && !may_start_working(pool);
         [ +  + ][ +  - ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ -  + ][ #  # ]
     757                 :            : }
     758                 :            : 
     759                 :            : /* Do I need to be the manager? */
     760                 :          0 : static bool need_to_manage_workers(struct worker_pool *pool)
     761                 :            : {
     762 [ #  # ][ +  + ]:     735610 :         return need_to_create_worker(pool) ||
     763                 :     735587 :                 (pool->flags & POOL_MANAGE_WORKERS);
     764                 :            : }
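
/*
 * Informal quick reference for the policy predicates above (summary only,
 * mirroring each function's own comment):
 *
 *   __need_more_worker()     - no worker in the pool is currently running
 *   need_more_worker()       - work is pending and nobody is running it
 *   may_start_working()      - an idle worker is available to pick it up
 *   keep_working()           - work is pending and at most one worker is
 *                              running, so the current worker keeps going
 *   need_to_create_worker()  - work is pending but no idle worker is left
 *   need_to_manage_workers() - a new worker is needed, or the pool has
 *                              POOL_MANAGE_WORKERS set
 */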
     765                 :            : 
     766                 :            : /* Do we have too many workers and should some go away? */
     767                 :            : static bool too_many_workers(struct worker_pool *pool)
     768                 :            : {
     769                 :            :         bool managing = mutex_is_locked(&pool->manager_arb);
     770                 :     737003 :         int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
     771                 :     737003 :         int nr_busy = pool->nr_workers - nr_idle;
     772                 :            : 
     773                 :            :         /*
     774                 :            :          * nr_idle and idle_list may disagree if idle rebinding is in
     775                 :            :          * progress.  Never return %true if idle_list is empty.
     776                 :            :          */
     777   [ +  +  +  - ]:     737695 :         if (list_empty(&pool->idle_list))
                 [ +  + ]
     778                 :            :                 return false;
     779                 :            : 
     780 [ +  + ][ -  + ]:     736627 :         return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
         [ +  + ][ -  + ]
         [ +  + ][ +  + ]
     781                 :            : }
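
/*
 * A worked example of the trimming check above, assuming
 * MAX_IDLE_WORKERS_RATIO is 4 as defined earlier in this file.  The helper
 * below is a stand-alone model for illustration only, not part of
 * workqueue.c.
 */
static bool model_too_many_workers(int nr_workers, int nr_idle)
{
        int nr_busy = nr_workers - nr_idle;

        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

/*
 * With 10 workers of which 4 are idle: nr_busy = 6 and (4 - 2) * 4 = 8,
 * so 8 >= 6 and the pool is considered over-provisioned.  With only 3
 * idle workers: (3 - 2) * 4 = 4 < 7, so no idle worker is reaped.
 */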
     782                 :            : 
     783                 :            : /*
     784                 :            :  * Wake up functions.
     785                 :            :  */
     786                 :            : 
     787                 :            : /* Return the first worker.  Safe with preemption disabled */
     788                 :            : static struct worker *first_worker(struct worker_pool *pool)
     789                 :            : {
     790 [ #  # ][ +  + ]:     969080 :         if (unlikely(list_empty(&pool->idle_list)))
                 [ +  + ]
     791                 :            :                 return NULL;
     792                 :            : 
     793                 :            :         return list_first_entry(&pool->idle_list, struct worker, entry);
     794                 :            : }
     795                 :            : 
     796                 :            : /**
     797                 :            :  * wake_up_worker - wake up an idle worker
     798                 :            :  * @pool: worker pool to wake worker from
     799                 :            :  *
     800                 :            :  * Wake up the first idle worker of @pool.
     801                 :            :  *
     802                 :            :  * CONTEXT:
     803                 :            :  * spin_lock_irq(pool->lock).
     804                 :            :  */
     805                 :          0 : static void wake_up_worker(struct worker_pool *pool)
     806                 :            : {
     807                 :            :         struct worker *worker = first_worker(pool);
     808                 :            : 
     809         [ +  + ]:     967224 :         if (likely(worker))
     810                 :     967182 :                 wake_up_process(worker->task);
     811                 :        140 : }
     812                 :            : 
     813                 :            : /**
     814                 :            :  * wq_worker_waking_up - a worker is waking up
     815                 :            :  * @task: task waking up
     816                 :            :  * @cpu: CPU @task is waking up to
     817                 :            :  *
     818                 :            :  * This function is called during try_to_wake_up() when a worker is
     819                 :            :  * being awoken.
     820                 :            :  *
     821                 :            :  * CONTEXT:
     822                 :            :  * spin_lock_irq(rq->lock)
     823                 :            :  */
     824                 :          0 : void wq_worker_waking_up(struct task_struct *task, int cpu)
     825                 :            : {
     826                 :     790403 :         struct worker *worker = kthread_data(task);
     827                 :            : 
     828         [ +  + ]:     789879 :         if (!(worker->flags & WORKER_NOT_RUNNING)) {
     829 [ -  + ][ #  # ]:      50300 :                 WARN_ON_ONCE(worker->pool->cpu != cpu);
                 [ #  # ]
     830                 :      50300 :                 atomic_inc(&worker->pool->nr_running);
     831                 :            :         }
     832                 :     789879 : }
     833                 :            : 
     834                 :            : /**
     835                 :            :  * wq_worker_sleeping - a worker is going to sleep
     836                 :            :  * @task: task going to sleep
     837                 :            :  * @cpu: CPU in question, must be the current CPU number
     838                 :            :  *
     839                 :            :  * This function is called during schedule() when a busy worker is
      840                 :            :  * going to sleep.  A worker on the same cpu can be woken up by
      841                 :            :  * returning a pointer to its task.
     842                 :            :  *
     843                 :            :  * CONTEXT:
     844                 :            :  * spin_lock_irq(rq->lock)
     845                 :            :  *
     846                 :            :  * Return:
     847                 :            :  * Worker task on @cpu to wake up, %NULL if none.
     848                 :            :  */
     849                 :          0 : struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
     850                 :            : {
     851                 :     790404 :         struct worker *worker = kthread_data(task), *to_wakeup = NULL;
     852                 :            :         struct worker_pool *pool;
     853                 :            : 
     854                 :            :         /*
     855                 :            :          * Rescuers, which may not have all the fields set up like normal
      856                 :            :  * workers, also reach here; let's not access anything before
     857                 :            :          * checking NOT_RUNNING.
     858                 :            :          */
     859         [ +  + ]:     790404 :         if (worker->flags & WORKER_NOT_RUNNING)
     860                 :            :                 return NULL;
     861                 :            : 
     862                 :      50297 :         pool = worker->pool;
     863                 :            : 
     864                 :            :         /* this can only happen on the local cpu */
     865 [ -  + ][ #  # ]:      50297 :         if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
         [ -  + ][ +  - ]
     866                 :            :                 return NULL;
     867                 :            : 
     868                 :            :         /*
     869                 :            :          * The counterpart of the following dec_and_test, implied mb,
     870                 :            :          * worklist not empty test sequence is in insert_work().
     871                 :            :          * Please read comment there.
     872                 :            :          *
     873                 :            :          * NOT_RUNNING is clear.  This means that we're bound to and
     874                 :            :          * running on the local cpu w/ rq lock held and preemption
      875                 :            :  * disabled, which in turn means that nobody else could be
     876                 :            :          * manipulating idle_list, so dereferencing idle_list without pool
     877                 :            :          * lock is safe.
     878                 :            :          */
     879 [ +  + ][ +  + ]:     840704 :         if (atomic_dec_and_test(&pool->nr_running) &&
     880                 :      50296 :             !list_empty(&pool->worklist))
     881                 :            :                 to_wakeup = first_worker(pool);
     882         [ +  + ]:      50300 :         return to_wakeup ? to_wakeup->task : NULL;
     883                 :            : }
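
/*
 * Sketch of the scheduler side that drives the two hooks above, loosely
 * based on __schedule()/try_to_wake_up() in kernel/sched/core.c of this
 * era (details differ between versions; orientation only):
 *
 *      if (prev->flags & PF_WQ_WORKER) {
 *              struct task_struct *to_wakeup;
 *
 *              to_wakeup = wq_worker_sleeping(prev, cpu);
 *              if (to_wakeup)
 *                      try_to_wake_up_local(to_wakeup);
 *      }
 *
 * wq_worker_waking_up() is called from the wakeup path with the rq lock
 * held, keeping pool->nr_running in sync with how many workers are
 * actually consuming CPU.
 */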
     884                 :            : 
     885                 :            : /**
     886                 :            :  * worker_set_flags - set worker flags and adjust nr_running accordingly
     887                 :            :  * @worker: self
     888                 :            :  * @flags: flags to set
     889                 :            :  * @wakeup: wakeup an idle worker if necessary
     890                 :            :  *
     891                 :            :  * Set @flags in @worker->flags and adjust nr_running accordingly.  If
     892                 :            :  * nr_running becomes zero and @wakeup is %true, an idle worker is
     893                 :            :  * woken up.
     894                 :            :  *
     895                 :            :  * CONTEXT:
     896                 :            :  * spin_lock_irq(pool->lock)
     897                 :            :  */
     898                 :            : static inline void worker_set_flags(struct worker *worker, unsigned int flags,
     899                 :            :                                     bool wakeup)
     900                 :            : {
     901                 :     734514 :         struct worker_pool *pool = worker->pool;
     902                 :            : 
     903 [ -  + ][ #  # ]:     734514 :         WARN_ON_ONCE(worker->task != current);
         [ -  + ][ #  # ]
         [ #  # ][ #  # ]
     904                 :            : 
     905                 :            :         /*
     906                 :            :          * If transitioning into NOT_RUNNING, adjust nr_running and
     907                 :            :          * wake up an idle worker as necessary if requested by
     908                 :            :          * @wakeup.
     909                 :            :          */
     910 [ +  + ][ #  # ]:     734526 :         if ((flags & WORKER_NOT_RUNNING) &&
     911                 :     734526 :             !(worker->flags & WORKER_NOT_RUNNING)) {
     912                 :            :                 if (wakeup) {
     913 [ #  # ][ #  # ]:          0 :                         if (atomic_dec_and_test(&pool->nr_running) &&
     914                 :          0 :                             !list_empty(&pool->worklist))
     915                 :          0 :                                 wake_up_worker(pool);
     916                 :            :                 } else
     917                 :     641454 :                         atomic_dec(&pool->nr_running);
     918                 :            :         }
     919                 :            : 
     920                 :     734511 :         worker->flags |= flags;
     921                 :            : }
     922                 :            : 
     923                 :            : /**
     924                 :            :  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
     925                 :            :  * @worker: self
     926                 :            :  * @flags: flags to clear
     927                 :            :  *
     928                 :            :  * Clear @flags in @worker->flags and adjust nr_running accordingly.
     929                 :            :  *
     930                 :            :  * CONTEXT:
     931                 :            :  * spin_lock_irq(pool->lock)
     932                 :            :  */
     933                 :            : static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
     934                 :            : {
     935                 :     734526 :         struct worker_pool *pool = worker->pool;
     936                 :     734526 :         unsigned int oflags = worker->flags;
     937                 :            : 
     938 [ -  + ][ #  # ]:    1469788 :         WARN_ON_ONCE(worker->task != current);
         [ #  # ][ #  # ]
            [ #  # ][ - ]
         [ -  + ][ #  # ]
                 [ -  + ]
     939                 :            : 
     940                 :    1469787 :         worker->flags &= ~flags;
     941                 :            : 
     942                 :            :         /*
     943                 :            :          * If transitioning out of NOT_RUNNING, increment nr_running.  Note
     944                 :            :          * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
     945                 :            :          * of multiple flags, not a single flag.
     946                 :            :          */
     947         [ +  - ]:     734524 :         if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
     948 [ +  + ][ #  # ]:     734527 :                 if (!(worker->flags & WORKER_NOT_RUNNING))
     949                 :     641427 :                         atomic_inc(&pool->nr_running);
     950                 :            : }
     951                 :            : 
     952                 :            : /**
     953                 :            :  * find_worker_executing_work - find worker which is executing a work
     954                 :            :  * @pool: pool of interest
     955                 :            :  * @work: work to find worker for
     956                 :            :  *
     957                 :            :  * Find a worker which is executing @work on @pool by searching
     958                 :            :  * @pool->busy_hash which is keyed by the address of @work.  For a worker
     959                 :            :  * to match, its current execution should match the address of @work and
     960                 :            :  * its work function.  This is to avoid unwanted dependency between
     961                 :            :  * unrelated work executions through a work item being recycled while still
     962                 :            :  * being executed.
     963                 :            :  *
     964                 :            :  * This is a bit tricky.  A work item may be freed once its execution
     965                 :            :  * starts and nothing prevents the freed area from being recycled for
     966                 :            :  * another work item.  If the same work item address ends up being reused
     967                 :            :  * before the original execution finishes, workqueue will identify the
     968                 :            :  * recycled work item as currently executing and make it wait until the
     969                 :            :  * current execution finishes, introducing an unwanted dependency.
     970                 :            :  *
     971                 :            :  * This function checks the work item address and work function to avoid
     972                 :            :  * false positives.  Note that this isn't complete as one may construct a
     973                 :            :  * work function which can introduce dependency onto itself through a
      974                 :            :  * recycled work item.  Well, if somebody wants to shoot themselves in the
     975                 :            :  * foot that badly, there's only so much we can do, and if such deadlock
     976                 :            :  * actually occurs, it should be easy to locate the culprit work function.
     977                 :            :  *
     978                 :            :  * CONTEXT:
     979                 :            :  * spin_lock_irq(pool->lock).
     980                 :            :  *
     981                 :            :  * Return:
     982                 :            :  * Pointer to worker which is executing @work if found, %NULL
     983                 :            :  * otherwise.
     984                 :            :  */
     985                 :          0 : static struct worker *find_worker_executing_work(struct worker_pool *pool,
     986                 :            :                                                  struct work_struct *work)
     987                 :            : {
     988                 :            :         struct worker *worker;
     989                 :            : 
     990 [ +  + ][ #  # ]:    1963898 :         hash_for_each_possible(pool->busy_hash, worker, hentry,
                 [ +  + ]
     991                 :            :                                (unsigned long)work)
     992    [ +  - ][ + ]:       2249 :                 if (worker->current_work == work &&
     993                 :       2249 :                     worker->current_func == work->func)
     994                 :            :                         return worker;
     995                 :            : 
     996                 :            :         return NULL;
     997                 :            : }
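
/*
 * Illustration of the recycling hazard described above, using a
 * hypothetical caller (my_req and my_req_fn are not part of workqueue.c):
 */
struct my_req {
        struct work_struct work;
        /* ... request payload ... */
};

static void my_req_fn(struct work_struct *work)
{
        struct my_req *req = container_of(work, struct my_req, work);

        /* ... service the request ... */
        kfree(req);     /* the work_struct's memory may now be recycled */
}

/*
 * If the allocator hands the same address to an unrelated user who queues
 * a different function there, matching busy_hash entries by address alone
 * would wrongly serialize the two items.  Comparing worker->current_func
 * as well limits false matches to the unlikely case of the same address
 * being reused with the same function.
 */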
     998                 :            : 
     999                 :            : /**
    1000                 :            :  * move_linked_works - move linked works to a list
    1001                 :            :  * @work: start of series of works to be scheduled
    1002                 :            :  * @head: target list to append @work to
     1003                 :            :  * @nextp: out parameter for nested worklist walking
    1004                 :            :  *
    1005                 :            :  * Schedule linked works starting from @work to @head.  Work series to
    1006                 :            :  * be scheduled starts at @work and includes any consecutive work with
    1007                 :            :  * WORK_STRUCT_LINKED set in its predecessor.
    1008                 :            :  *
    1009                 :            :  * If @nextp is not NULL, it's updated to point to the next work of
    1010                 :            :  * the last scheduled work.  This allows move_linked_works() to be
    1011                 :            :  * nested inside outer list_for_each_entry_safe().
    1012                 :            :  *
    1013                 :            :  * CONTEXT:
    1014                 :            :  * spin_lock_irq(pool->lock).
    1015                 :            :  */
    1016                 :          0 : static void move_linked_works(struct work_struct *work, struct list_head *head,
    1017                 :            :                               struct work_struct **nextp)
    1018                 :            : {
    1019                 :            :         struct work_struct *n;
    1020                 :            : 
    1021                 :            :         /*
     1022                 :            :  * A linked worklist will always end before the end of the list;
     1023                 :            :  * use NULL for the list head.
    1024                 :            :          */
    1025         [ +  - ]:       2279 :         list_for_each_entry_safe_from(work, n, NULL, entry) {
    1026                 :            :                 list_move_tail(&work->entry, head);
    1027         [ +  + ]:       2279 :                 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
    1028                 :            :                         break;
    1029                 :            :         }
    1030                 :            : 
    1031                 :            :         /*
    1032                 :            :          * If we're already inside safe list traversal and have moved
    1033                 :            :          * multiple works to the scheduled queue, the next position
    1034                 :            :          * needs to be updated.
    1035                 :            :          */
    1036         [ -  - ]:       2248 :         if (nextp)
    1037                 :          0 :                 *nextp = n;
    1038                 :          0 : }
    1039                 :            : 
    1040                 :            : /**
    1041                 :            :  * get_pwq - get an extra reference on the specified pool_workqueue
    1042                 :            :  * @pwq: pool_workqueue to get
    1043                 :            :  *
    1044                 :            :  * Obtain an extra reference on @pwq.  The caller should guarantee that
    1045                 :            :  * @pwq has positive refcnt and be holding the matching pool->lock.
    1046                 :            :  */
    1047                 :          0 : static void get_pwq(struct pool_workqueue *pwq)
    1048                 :            : {
    1049                 :            :         lockdep_assert_held(&pwq->pool->lock);
    1050 [ -  + ][ #  # ]:     966729 :         WARN_ON_ONCE(pwq->refcnt <= 0);
                 [ #  # ]
    1051                 :     966729 :         pwq->refcnt++;
    1052                 :     966729 : }
    1053                 :            : 
    1054                 :            : /**
    1055                 :            :  * put_pwq - put a pool_workqueue reference
    1056                 :            :  * @pwq: pool_workqueue to put
    1057                 :            :  *
    1058                 :            :  * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
    1059                 :            :  * destruction.  The caller should be holding the matching pool->lock.
    1060                 :            :  */
    1061                 :          0 : static void put_pwq(struct pool_workqueue *pwq)
    1062                 :            : {
    1063                 :            :         lockdep_assert_held(&pwq->pool->lock);
    1064         [ -  + ]:     967047 :         if (likely(--pwq->refcnt))
    1065                 :            :                 return;
    1066 [ #  # ][ #  # ]:          0 :         if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
         [ #  # ][ #  # ]
    1067                 :            :                 return;
    1068                 :            :         /*
    1069                 :            :          * @pwq can't be released under pool->lock, bounce to
    1070                 :            :          * pwq_unbound_release_workfn().  This never recurses on the same
    1071                 :            :          * pool->lock as this path is taken only for unbound workqueues and
    1072                 :            :          * the release work item is scheduled on a per-cpu workqueue.  To
    1073                 :            :          * avoid lockdep warning, unbound pool->locks are given lockdep
    1074                 :            :          * subclass of 1 in get_unbound_pool().
    1075                 :            :          */
    1076                 :          0 :         schedule_work(&pwq->unbound_release_work);
    1077                 :            : }
    1078                 :            : 
    1079                 :            : /**
    1080                 :            :  * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
    1081                 :            :  * @pwq: pool_workqueue to put (can be %NULL)
    1082                 :            :  *
    1083                 :            :  * put_pwq() with locking.  This function also allows %NULL @pwq.
    1084                 :            :  */
    1085                 :          0 : static void put_pwq_unlocked(struct pool_workqueue *pwq)
    1086                 :            : {
    1087         [ #  # ]:          0 :         if (pwq) {
    1088                 :            :                 /*
    1089                 :            :                  * As both pwqs and pools are sched-RCU protected, the
    1090                 :            :                  * following lock operations are safe.
    1091                 :            :                  */
    1092                 :          0 :                 spin_lock_irq(&pwq->pool->lock);
    1093                 :          0 :                 put_pwq(pwq);
    1094                 :          0 :                 spin_unlock_irq(&pwq->pool->lock);
    1095                 :            :         }
    1096                 :          0 : }
    1097                 :            : 
    1098                 :          0 : static void pwq_activate_delayed_work(struct work_struct *work)
    1099                 :            : {
    1100                 :            :         struct pool_workqueue *pwq = get_work_pwq(work);
    1101                 :            : 
    1102                 :            :         trace_workqueue_activate_work(work);
    1103                 :          0 :         move_linked_works(work, &pwq->pool->worklist, NULL);
    1104                 :            :         __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
    1105                 :          0 :         pwq->nr_active++;
    1106                 :          0 : }
    1107                 :            : 
    1108                 :            : static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
    1109                 :            : {
    1110                 :          0 :         struct work_struct *work = list_first_entry(&pwq->delayed_works,
    1111                 :            :                                                     struct work_struct, entry);
    1112                 :            : 
    1113                 :          0 :         pwq_activate_delayed_work(work);
    1114                 :            : }
    1115                 :            : 
    1116                 :            : /**
    1117                 :            :  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
    1118                 :            :  * @pwq: pwq of interest
    1119                 :            :  * @color: color of work which left the queue
    1120                 :            :  *
    1121                 :            :  * A work either has completed or is removed from pending queue,
    1122                 :            :  * decrement nr_in_flight of its pwq and handle workqueue flushing.
    1123                 :            :  *
    1124                 :            :  * CONTEXT:
    1125                 :            :  * spin_lock_irq(pool->lock).
    1126                 :            :  */
    1127                 :          0 : static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
    1128                 :            : {
    1129                 :            :         /* uncolored work items don't participate in flushing or nr_active */
    1130         [ +  + ]:     967048 :         if (color == WORK_NO_COLOR)
    1131                 :            :                 goto out_put;
    1132                 :            : 
    1133                 :     967014 :         pwq->nr_in_flight[color]--;
    1134                 :            : 
    1135                 :     967014 :         pwq->nr_active--;
    1136         [ -  + ]:     967014 :         if (!list_empty(&pwq->delayed_works)) {
    1137                 :            :                 /* one down, submit a delayed one */
    1138         [ #  # ]:          0 :                 if (pwq->nr_active < pwq->max_active)
    1139                 :            :                         pwq_activate_first_delayed(pwq);
    1140                 :            :         }
    1141                 :            : 
    1142                 :            :         /* is flush in progress and are we at the flushing tip? */
    1143         [ -  + ]:     967010 :         if (likely(pwq->flush_color != color))
    1144                 :            :                 goto out_put;
    1145                 :            : 
    1146                 :            :         /* are there still in-flight works? */
    1147         [ #  # ]:          0 :         if (pwq->nr_in_flight[color])
    1148                 :            :                 goto out_put;
    1149                 :            : 
    1150                 :            :         /* this pwq is done, clear flush_color */
    1151                 :          0 :         pwq->flush_color = -1;
    1152                 :            : 
    1153                 :            :         /*
    1154                 :            :          * If this was the last pwq, wake up the first flusher.  It
    1155                 :            :          * will handle the rest.
    1156                 :            :          */
    1157         [ #  # ]:          0 :         if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
    1158                 :          0 :                 complete(&pwq->wq->first_flusher->done);
    1159                 :            : out_put:
    1160                 :     967044 :         put_pwq(pwq);
    1161                 :     967040 : }
    1162                 :            : 
    1163                 :            : /**
    1164                 :            :  * try_to_grab_pending - steal work item from worklist and disable irq
    1165                 :            :  * @work: work item to steal
    1166                 :            :  * @is_dwork: @work is a delayed_work
    1167                 :            :  * @flags: place to store irq state
    1168                 :            :  *
    1169                 :            :  * Try to grab PENDING bit of @work.  This function can handle @work in any
    1170                 :            :  * stable state - idle, on timer or on worklist.
    1171                 :            :  *
    1172                 :            :  * Return:
    1173                 :            :  *  1           if @work was pending and we successfully stole PENDING
    1174                 :            :  *  0           if @work was idle and we claimed PENDING
    1175                 :            :  *  -EAGAIN     if PENDING couldn't be grabbed at the moment, safe to busy-retry
    1176                 :            :  *  -ENOENT     if someone else is canceling @work, this state may persist
    1177                 :            :  *              for arbitrarily long
    1178                 :            :  *
    1179                 :            :  * Note:
    1180                 :            :  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
    1181                 :            :  * interrupted while holding PENDING and @work off queue, irq must be
    1182                 :            :  * disabled on entry.  This, combined with delayed_work->timer being
     1183                 :            :  * irqsafe, ensures that we return -EAGAIN for a finite short period of time.
    1184                 :            :  *
     1185                 :            :  * On a successful (>= 0) return, irq is disabled and the caller is
    1186                 :            :  * responsible for releasing it using local_irq_restore(*@flags).
    1187                 :            :  *
    1188                 :            :  * This function is safe to call from any context including IRQ handler.
    1189                 :            :  */
    1190                 :          0 : static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
    1191                 :            :                                unsigned long *flags)
    1192                 :            : {
    1193                 :            :         struct worker_pool *pool;
    1194                 :            :         struct pool_workqueue *pwq;
    1195                 :            : 
    1196                 :     170210 :         local_irq_save(*flags);
    1197                 :            : 
    1198                 :            :         /* try to steal the timer if it exists */
    1199         [ +  + ]:     170210 :         if (is_dwork) {
    1200                 :            :                 struct delayed_work *dwork = to_delayed_work(work);
    1201                 :            : 
    1202                 :            :                 /*
    1203                 :            :                  * dwork->timer is irqsafe.  If del_timer() fails, it's
    1204                 :            :                  * guaranteed that the timer is not queued anywhere and not
    1205                 :            :                  * running on the local CPU.
    1206                 :            :                  */
    1207         [ +  + ]:     169638 :                 if (likely(del_timer(&dwork->timer)))
    1208                 :            :                         return 1;
    1209                 :            :         }
    1210                 :            : 
    1211                 :            :         /* try to claim PENDING the normal way */
    1212         [ +  + ]:     168124 :         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
    1213                 :            :                 return 0;
    1214                 :            : 
    1215                 :            :         /*
    1216                 :            :          * The queueing is in progress, or it is already queued. Try to
    1217                 :            :          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
    1218                 :            :          */
    1219                 :       3662 :         pool = get_work_pool(work);
    1220         [ +  - ]:       3662 :         if (!pool)
    1221                 :            :                 goto fail;
    1222                 :            : 
    1223                 :            :         spin_lock(&pool->lock);
    1224                 :            :         /*
    1225                 :            :          * work->data is guaranteed to point to pwq only while the work
    1226                 :            :          * item is queued on pwq->wq, and both updating work->data to point
    1227                 :            :          * to pwq on queueing and to pool on dequeueing are done under
    1228                 :            :          * pwq->pool->lock.  This in turn guarantees that, if work->data
    1229                 :            :          * points to pwq which is associated with a locked pool, the work
    1230                 :            :          * item is currently queued on that pool.
    1231                 :            :          */
    1232                 :            :         pwq = get_work_pwq(work);
    1233 [ +  + ][ +  - ]:       3662 :         if (pwq && pwq->pool == pool) {
    1234                 :            :                 debug_work_deactivate(work);
    1235                 :            : 
    1236                 :            :                 /*
    1237                 :            :                  * A delayed work item cannot be grabbed directly because
    1238                 :            :                  * it might have linked NO_COLOR work items which, if left
    1239                 :            :                  * on the delayed_list, will confuse pwq->nr_active
     1240                 :            :  * management later on and cause a stall.  Make sure the work
    1241                 :            :                  * item is activated before grabbing.
    1242                 :            :                  */
    1243         [ -  + ]:       3603 :                 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
    1244                 :          0 :                         pwq_activate_delayed_work(work);
    1245                 :            : 
    1246                 :       3603 :                 list_del_init(&work->entry);
    1247                 :       3603 :                 pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
    1248                 :            : 
    1249                 :            :                 /* work->data points to pwq iff queued, point to pool */
    1250                 :       3603 :                 set_work_pool_and_keep_pending(work, pool->id);
    1251                 :            : 
    1252                 :            :                 spin_unlock(&pool->lock);
    1253                 :       3603 :                 return 1;
    1254                 :            :         }
    1255                 :            :         spin_unlock(&pool->lock);
    1256                 :            : fail:
    1257         [ +  + ]:         59 :         local_irq_restore(*flags);
    1258         [ +  - ]:         59 :         if (work_is_canceling(work))
    1259                 :            :                 return -ENOENT;
    1260                 :         59 :         cpu_relax();
    1261                 :         59 :         return -EAGAIN;
    1262                 :            : }
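
/*
 * Typical caller pattern for the return codes above, modeled on the cancel
 * path later in this file (example_cancel() is illustrative, not part of
 * workqueue.c):
 */
static bool example_cancel(struct work_struct *work, bool is_dwork)
{
        unsigned long flags;
        int ret;

        do {
                ret = try_to_grab_pending(work, is_dwork, &flags);
                /*
                 * -ENOENT means someone else is canceling; wait for them
                 * by flushing, then retry.  -EAGAIN is safe to busy-retry
                 * directly.
                 */
                if (unlikely(ret == -ENOENT))
                        flush_work(work);
        } while (unlikely(ret < 0));

        /* PENDING is ours and irqs are disabled */
        local_irq_restore(flags);
        return ret;     /* %true if the work item had been pending */
}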
    1263                 :            : 
    1264                 :            : /**
    1265                 :            :  * insert_work - insert a work into a pool
    1266                 :            :  * @pwq: pwq @work belongs to
    1267                 :            :  * @work: work to insert
    1268                 :            :  * @head: insertion point
    1269                 :            :  * @extra_flags: extra WORK_STRUCT_* flags to set
    1270                 :            :  *
    1271                 :            :  * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
    1272                 :            :  * work_struct flags.
    1273                 :            :  *
    1274                 :            :  * CONTEXT:
    1275                 :            :  * spin_lock_irq(pool->lock).
    1276                 :            :  */
    1277                 :          0 : static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
    1278                 :            :                         struct list_head *head, unsigned int extra_flags)
    1279                 :            : {
    1280                 :     966665 :         struct worker_pool *pool = pwq->pool;
    1281                 :            : 
    1282                 :            :         /* we own @work, set data and link */
    1283                 :     966665 :         set_work_pwq(work, pwq, extra_flags);
    1284                 :     966818 :         list_add_tail(&work->entry, head);
    1285                 :     966818 :         get_pwq(pwq);
    1286                 :            : 
    1287                 :            :         /*
    1288                 :            :          * Ensure either wq_worker_sleeping() sees the above
    1289                 :            :          * list_add_tail() or we see zero nr_running to avoid workers lying
    1290                 :            :          * around lazily while there are works to be processed.
    1291                 :            :          */
    1292                 :     966800 :         smp_mb();
    1293                 :            : 
    1294         [ +  + ]:     966632 :         if (__need_more_worker(pool))
    1295                 :     966619 :                 wake_up_worker(pool);
    1296                 :        287 : }
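
/*
 * How the barrier above pairs with wq_worker_sleeping(), informally:
 *
 *      queueing CPU (insert_work)          sleeping worker (wq_worker_sleeping)
 *      --------------------------          ------------------------------------
 *      list_add_tail(&work->entry, head)   atomic_dec_and_test(&pool->nr_running)
 *      smp_mb()                            (implied full barrier)
 *      atomic_read(&pool->nr_running)?     list_empty(&pool->worklist)?
 *          -> wake_up_worker(pool)             -> wake first_worker(pool)
 *
 * Either the queueing side observes nr_running dropping to zero and wakes
 * an idle worker, or the sleeping side observes the newly added work item
 * and wakes one itself; the item cannot be missed by both.
 */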
    1297                 :            : 
    1298                 :            : /*
    1299                 :            :  * Test whether @work is being queued from another work executing on the
    1300                 :            :  * same workqueue.
    1301                 :            :  */
    1302                 :          0 : static bool is_chained_work(struct workqueue_struct *wq)
    1303                 :            : {
    1304                 :            :         struct worker *worker;
    1305                 :            : 
    1306                 :            :         worker = current_wq_worker();
    1307                 :            :         /*
     1308                 :            :  * Return %true iff I'm a worker executing a work item on @wq.  If
    1309                 :            :          * I'm @worker, it's safe to dereference it without locking.
    1310                 :            :          */
    1311 [ #  # ][ #  # ]:          0 :         return worker && worker->current_pwq->wq == wq;
    1312                 :            : }
    1313                 :            : 
    1314                 :          0 : static void __queue_work(int cpu, struct workqueue_struct *wq,
    1315                 :            :                          struct work_struct *work)
    1316                 :            : {
    1317                 :            :         struct pool_workqueue *pwq;
    1318                 :            :         struct worker_pool *last_pool;
    1319                 :            :         struct list_head *worklist;
    1320                 :            :         unsigned int work_flags;
    1321                 :     966980 :         unsigned int req_cpu = cpu;
    1322                 :            : 
    1323                 :            :         /*
    1324                 :            :          * While a work item is PENDING && off queue, a task trying to
    1325                 :            :          * steal the PENDING will busy-loop waiting for it to either get
    1326                 :            :          * queued or lose PENDING.  Grabbing PENDING and queueing should
    1327                 :            :          * happen with IRQ disabled.
    1328                 :            :          */
    1329 [ -  + ][ #  # ]:     966945 :         WARN_ON_ONCE(!irqs_disabled());
                 [ -  + ]
    1330                 :            : 
    1331                 :            :         debug_work_activate(work);
    1332                 :            : 
    1333                 :            :         /* if draining, only works from the same workqueue are allowed */
    1334 [ +  - ][ #  # ]:     966966 :         if (unlikely(wq->flags & __WQ_DRAINING) &&
    1335 [ #  # ][ #  # ]:          0 :             WARN_ON_ONCE(!is_chained_work(wq)))
                 [ #  # ]
    1336                 :            :                 return;
    1337                 :            : retry:
    1338         [ +  + ]:     966966 :         if (req_cpu == WORK_CPU_UNBOUND)
    1339                 :     806535 :                 cpu = raw_smp_processor_id();
    1340                 :            : 
    1341                 :            :         /* pwq which will be used unless @work is executing elsewhere */
    1342         [ +  + ]:     966966 :         if (!(wq->flags & WQ_UNBOUND))
    1343                 :     871569 :                 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
    1344                 :            :         else
    1345                 :            :                 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
    1346                 :            : 
    1347                 :            :         /*
    1348                 :            :          * If @work was previously on a different pool, it might still be
    1349                 :            :          * running there, in which case the work needs to be queued on that
    1350                 :            :          * pool to guarantee non-reentrancy.
    1351                 :            :          */
    1352                 :     966966 :         last_pool = get_work_pool(work);
    1353 [ +  + ][ +  + ]:     966777 :         if (last_pool && last_pool != pwq->pool) {
    1354                 :            :                 struct worker *worker;
    1355                 :            : 
    1356                 :            :                 spin_lock(&last_pool->lock);
    1357                 :            : 
    1358                 :      16038 :                 worker = find_worker_executing_work(last_pool, work);
    1359                 :            : 
    1360    [ +  + ][ + ]:      16038 :                 if (worker && worker->current_pwq->wq == wq) {
    1361                 :            :                         pwq = worker->current_pwq;
    1362                 :            :                 } else {
    1363                 :            :                         /* meh... not running there, queue here */
    1364                 :            :                         spin_unlock(&last_pool->lock);
    1365                 :      16021 :                         spin_lock(&pwq->pool->lock);
    1366                 :            :                 }
    1367                 :            :         } else {
    1368                 :     950739 :                 spin_lock(&pwq->pool->lock);
    1369                 :            :         }
    1370                 :            : 
    1371                 :            :         /*
    1372                 :            :          * pwq is determined and locked.  For unbound pools, we could have
    1373                 :            :          * raced with pwq release and it could already be dead.  If its
    1374                 :            :          * refcnt is zero, repeat pwq selection.  Note that pwqs never die
    1375                 :            :          * without another pwq replacing it in the numa_pwq_tbl or while
    1376                 :            :          * work items are executing on it, so the retrying is guaranteed to
    1377                 :            :          * make forward-progress.
    1378                 :            :          */
    1379         [ -  + ]:    1933912 :         if (unlikely(!pwq->refcnt)) {
    1380         [ #  # ]:          0 :                 if (wq->flags & WQ_UNBOUND) {
    1381                 :          0 :                         spin_unlock(&pwq->pool->lock);
    1382                 :          0 :                         cpu_relax();
    1383                 :          0 :                         goto retry;
    1384                 :            :                 }
    1385                 :            :                 /* oops */
    1386 [ #  # ][ #  # ]:          0 :                 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
    1387                 :            :                           wq->name, cpu);
    1388                 :            :         }
    1389                 :            : 
    1390                 :            :         /* pwq determined, queue */
    1391                 :            :         trace_workqueue_queue_work(req_cpu, pwq, work);
    1392                 :            : 
    1393 [ -  + ][ -  + ]:     966932 :         if (WARN_ON(!list_empty(&work->entry))) {
    1394                 :          0 :                 spin_unlock(&pwq->pool->lock);
    1395                 :            :                 return;
    1396                 :            :         }
    1397                 :            : 
    1398                 :     966932 :         pwq->nr_in_flight[pwq->work_color]++;
    1399                 :            :         work_flags = work_color_to_flags(pwq->work_color);
    1400                 :            : 
    1401         [ +  - ]:     966932 :         if (likely(pwq->nr_active < pwq->max_active)) {
    1402                 :            :                 trace_workqueue_activate_work(work);
    1403                 :     966983 :                 pwq->nr_active++;
    1404                 :     966983 :                 worklist = &pwq->pool->worklist;
    1405                 :            :         } else {
    1406                 :          0 :                 work_flags |= WORK_STRUCT_DELAYED;
    1407                 :          0 :                 worklist = &pwq->delayed_works;
    1408                 :            :         }
    1409                 :            : 
    1410                 :     966983 :         insert_work(pwq, work, worklist, work_flags);
    1411                 :            : 
    1412                 :     966975 :         spin_unlock(&pwq->pool->lock);
    1413                 :            : }
    1414                 :            : 
    1415                 :            : /**
    1416                 :            :  * queue_work_on - queue work on specific cpu
    1417                 :            :  * @cpu: CPU number to execute work on
    1418                 :            :  * @wq: workqueue to use
    1419                 :            :  * @work: work to queue
    1420                 :            :  *
     1421                 :            :  * We queue the work to a specific CPU; the caller must ensure it
    1422                 :            :  * can't go away.
    1423                 :            :  *
    1424                 :            :  * Return: %false if @work was already on a queue, %true otherwise.
    1425                 :            :  */
    1426                 :          0 : bool queue_work_on(int cpu, struct workqueue_struct *wq,
    1427                 :            :                    struct work_struct *work)
    1428                 :            : {
    1429                 :            :         bool ret = false;
    1430                 :            :         unsigned long flags;
    1431                 :            : 
    1432                 :            :         local_irq_save(flags);
    1433                 :            : 
    1434         [ +  + ]:     447575 :         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
    1435                 :     337048 :                 __queue_work(cpu, wq, work);
    1436                 :            :                 ret = true;
    1437                 :            :         }
    1438                 :            : 
    1439         [ +  + ]:     447575 :         local_irq_restore(flags);
    1440                 :     447575 :         return ret;
    1441                 :            : }
    1442                 :            : EXPORT_SYMBOL(queue_work_on);
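
/*
 * Minimal usage sketch (my_work_fn and kick_my_work are hypothetical
 * caller code, not part of workqueue.c): pin a work item to CPU 0 on the
 * system workqueue.  queue_work()/schedule_work() are the common wrappers
 * that pass WORK_CPU_UNBOUND instead of a fixed CPU.
 */
static void my_work_fn(struct work_struct *work)
{
        pr_info("my_work running on cpu %d\n", smp_processor_id());
}

static DECLARE_WORK(my_work, my_work_fn);

static void kick_my_work(void)
{
        if (!queue_work_on(0, system_wq, &my_work))
                pr_debug("my_work was already pending\n");
}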
    1443                 :            : 
    1444                 :          0 : void delayed_work_timer_fn(unsigned long __data)
    1445                 :            : {
    1446                 :     622471 :         struct delayed_work *dwork = (struct delayed_work *)__data;
    1447                 :            : 
    1448                 :            :         /* should have been called from irqsafe timer with irq already off */
    1449                 :     622471 :         __queue_work(dwork->cpu, dwork->wq, &dwork->work);
    1450                 :     622541 : }
    1451                 :            : EXPORT_SYMBOL(delayed_work_timer_fn);
    1452                 :            : 
    1453                 :          0 : static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
    1454                 :            :                                 struct delayed_work *dwork, unsigned long delay)
    1455                 :            : {
    1456                 :    1264091 :         struct timer_list *timer = &dwork->timer;
    1457                 :            :         struct work_struct *work = &dwork->work;
    1458                 :            : 
    1459    [ +  + ][ + ]:     632045 :         WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
         [ -  + ][ #  # ]
                 [ -  + ]
    1460                 :            :                      timer->data != (unsigned long)dwork);
    1461 [ -  + ][ #  # ]:    1264091 :         WARN_ON_ONCE(timer_pending(timer));
                 [ #  # ]
    1462 [ -  + ][ #  # ]:     632046 :         WARN_ON_ONCE(!list_empty(&work->entry));
                 [ #  # ]
    1463                 :            : 
    1464                 :            :         /*
    1465                 :            :          * If @delay is 0, queue @dwork->work immediately.  This is for
    1466                 :            :          * both optimization and correctness.  The earliest @timer can
    1467                 :            :          * expire is on the closest next tick and delayed_work users depend
    1468                 :            :          * on that there's no such delay when @delay is 0.
    1469                 :            :          */
    1470         [ +  + ]:     632046 :         if (!delay) {
    1471                 :       7388 :                 __queue_work(cpu, wq, &dwork->work);
    1472                 :       7388 :                 return;
    1473                 :            :         }
    1474                 :            : 
    1475                 :            :         timer_stats_timer_set_start_info(&dwork->timer);
    1476                 :            : 
    1477                 :     624660 :         dwork->wq = wq;
    1478                 :     624660 :         dwork->cpu = cpu;
    1479                 :     624660 :         timer->expires = jiffies + delay;
    1480                 :            : 
    1481         [ +  + ]:     624660 :         if (unlikely(cpu != WORK_CPU_UNBOUND))
    1482                 :     160414 :                 add_timer_on(timer, cpu);
    1483                 :            :         else
    1484                 :     464246 :                 add_timer(timer);
    1485                 :            : }
    1486                 :            : 
    1487                 :            : /**
    1488                 :            :  * queue_delayed_work_on - queue work on specific CPU after delay
    1489                 :            :  * @cpu: CPU number to execute work on
    1490                 :            :  * @wq: workqueue to use
    1491                 :            :  * @dwork: work to queue
    1492                 :            :  * @delay: number of jiffies to wait before queueing
    1493                 :            :  *
    1494                 :            :  * Return: %false if @dwork was already on a queue, %true otherwise.  If
    1495                 :            :  * @delay is zero and @dwork is idle, it will be scheduled for immediate
    1496                 :            :  * execution.
    1497                 :            :  */
    1498                 :          0 : bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
    1499                 :            :                            struct delayed_work *dwork, unsigned long delay)
    1500                 :            : {
    1501                 :            :         struct work_struct *work = &dwork->work;
    1502                 :            :         bool ret = false;
    1503                 :            :         unsigned long flags;
    1504                 :            : 
    1505                 :            :         /* read the comment in __queue_work() */
    1506                 :            :         local_irq_save(flags);
    1507                 :            : 
    1508         [ +  + ]:     462779 :         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
    1509                 :     462701 :                 __queue_delayed_work(cpu, wq, dwork, delay);
    1510                 :            :                 ret = true;
    1511                 :            :         }
    1512                 :            : 
    1513         [ +  + ]:     462782 :         local_irq_restore(flags);
    1514                 :     462782 :         return ret;
    1515                 :            : }
    1516                 :            : EXPORT_SYMBOL(queue_delayed_work_on);
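
As a usage illustration (not part of workqueue.c), here is a driver-style sketch of queueing a delayed work item on a specific CPU; the workqueue, work item and callback names (my_wq, my_dwork, my_work_fn) are hypothetical.  INIT_DELAYED_WORK() wires the embedded timer to delayed_work_timer_fn(), which is what the WARN_ON_ONCE() checks in __queue_delayed_work() above rely on.

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_wq;     /* hypothetical workqueue */
    static struct delayed_work my_dwork;       /* hypothetical delayed work */

    static void my_work_fn(struct work_struct *work)
    {
            /* runs in process context out of a per-cpu worker pool */
    }

    static int my_init(void)
    {
            my_wq = alloc_workqueue("my_wq", 0, 0);
            if (!my_wq)
                    return -ENOMEM;

            /* sets up the timer with delayed_work_timer_fn() as its handler */
            INIT_DELAYED_WORK(&my_dwork, my_work_fn);

            /* run my_work_fn() on CPU 1 about a second from now;
             * a zero delay would queue the work immediately instead */
            queue_delayed_work_on(1, my_wq, &my_dwork, msecs_to_jiffies(1000));
            return 0;
    }
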
    1517                 :            : 
    1518                 :            : /**
    1519                 :            :  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
    1520                 :            :  * @cpu: CPU number to execute work on
    1521                 :            :  * @wq: workqueue to use
    1522                 :            :  * @dwork: work to queue
    1523                 :            :  * @delay: number of jiffies to wait before queueing
    1524                 :            :  *
    1525                 :            :  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
    1526                 :            :  * modify @dwork's timer so that it expires after @delay.  If @delay is
    1527                 :            :  * zero, @dwork is guaranteed to be scheduled immediately regardless of its
    1528                 :            :  * current state.
    1529                 :            :  *
    1530                 :            :  * Return: %false if @dwork was idle and queued, %true if @dwork was
    1531                 :            :  * pending and its timer was modified.
    1532                 :            :  *
    1533                 :            :  * This function is safe to call from any context, including IRQ handlers.
    1534                 :            :  * See try_to_grab_pending() for details.
    1535                 :            :  */
    1536                 :     169308 : bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
    1537                 :            :                          struct delayed_work *dwork, unsigned long delay)
    1538                 :            : {
    1539                 :            :         unsigned long flags;
    1540                 :            :         int ret;
    1541                 :            : 
    1542                 :            :         do {
    1543                 :     169363 :                 ret = try_to_grab_pending(&dwork->work, true, &flags);
    1544         [ +  + ]:     169362 :         } while (unlikely(ret == -EAGAIN));
    1545                 :            : 
    1546         [ +  + ]:     169307 :         if (likely(ret >= 0)) {
    1547                 :     169298 :                 __queue_delayed_work(cpu, wq, dwork, delay);
    1548         [ +  + ]:     169297 :                 local_irq_restore(flags);
    1549                 :            :         }
    1550                 :            : 
    1551                 :            :         /* -ENOENT from try_to_grab_pending() becomes %true */
    1552                 :     169347 :         return ret;
    1553                 :            : }
    1554                 :            : EXPORT_SYMBOL_GPL(mod_delayed_work_on);
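
Because it re-arms the timer whether or not the work is already pending, mod_delayed_work_on() suits debounce and inactivity-timeout patterns.  A minimal sketch under hypothetical names (idle_timeout_work is assumed to have been set up with INIT_DELAYED_WORK() elsewhere):

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static struct delayed_work idle_timeout_work;   /* hypothetical, initialized elsewhere */

    /* Push the inactivity timeout out by another five seconds on every
     * event.  If the work was idle it gets queued (returns false); if it
     * was already pending only its timer moves (returns true).  Per the
     * comment above, this is safe even from an IRQ handler. */
    static void touch_idle_timeout(struct workqueue_struct *wq)
    {
            mod_delayed_work_on(WORK_CPU_UNBOUND, wq, &idle_timeout_work, 5 * HZ);
    }
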
    1555                 :            : 
    1556                 :            : /**
    1557                 :            :  * worker_enter_idle - enter idle state
    1558                 :            :  * @worker: worker which is entering idle state
    1559                 :            :  *
    1560                 :            :  * @worker is entering idle state.  Update stats and idle timer if
    1561                 :            :  * necessary.
    1562                 :            :  *
    1563                 :            :  * LOCKING:
    1564                 :            :  * spin_lock_irq(pool->lock).
    1565                 :            :  */
    1566                 :          0 : static void worker_enter_idle(struct worker *worker)
    1567                 :            : {
    1568                 :     735615 :         struct worker_pool *pool = worker->pool;
    1569                 :            : 
    1570 [ -  + ][ #  # ]:    1471229 :         if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
         [ -  + ][ +  - ]
                 [ +  - ]
    1571 [ +  + ][ +  + ]:     735616 :             WARN_ON_ONCE(!list_empty(&worker->entry) &&
         [ +  + ][ -  + ]
            [ #  # ][ - ]
    1572                 :            :                          (worker->hentry.next || worker->hentry.pprev)))
    1573                 :     735591 :                 return;
    1574                 :            : 
    1575                 :            :         /* can't use worker_set_flags(), also called from start_worker() */
    1576                 :     735614 :         worker->flags |= WORKER_IDLE;
    1577                 :     735614 :         pool->nr_idle++;
    1578                 :     735614 :         worker->last_active = jiffies;
    1579                 :            : 
    1580                 :            :         /* idle_list is LIFO */
    1581                 :     735614 :         list_add(&worker->entry, &pool->idle_list);
    1582                 :            : 
    1583    [ + ][ +  + ]:     735614 :         if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
    1584                 :        349 :                 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
    1585                 :            : 
    1586                 :            :         /*
    1587                 :            :          * Sanity check nr_running.  Because wq_unbind_fn() releases
    1588                 :            :          * pool->lock between setting %WORKER_UNBOUND and zapping
    1589                 :            :          * nr_running, the warning may trigger spuriously.  Check only
    1590                 :            :          * when unbind is not in progress.
    1591                 :            :          */
    1592 [ +  + ][ +  + ]:     735591 :         WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
         [ +  + ][ -  + ]
         [ #  # ][ #  # ]
    1593                 :            :                      pool->nr_workers == pool->nr_idle &&
    1594                 :            :                      atomic_read(&pool->nr_running));
    1595                 :            : }
    1596                 :            : 
    1597                 :            : /**
    1598                 :            :  * worker_leave_idle - leave idle state
    1599                 :            :  * @worker: worker which is leaving idle state
    1600                 :            :  *
    1601                 :            :  * @worker is leaving idle state.  Update stats.
    1602                 :            :  *
    1603                 :            :  * LOCKING:
    1604                 :            :  * spin_lock_irq(pool->lock).
    1605                 :            :  */
    1606                 :          0 : static void worker_leave_idle(struct worker *worker)
    1607                 :            : {
    1608                 :     735203 :         struct worker_pool *pool = worker->pool;
    1609                 :            : 
    1610 [ -  + ][ #  # ]:     735203 :         if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
         [ -  + ][ +  - ]
    1611                 :     735263 :                 return;
    1612                 :            :         worker_clr_flags(worker, WORKER_IDLE);
    1613                 :     735263 :         pool->nr_idle--;
    1614                 :     735263 :         list_del_init(&worker->entry);
    1615                 :            : }
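
The idle bookkeeping above depends on the LIFO discipline noted at the list_add() call: because worker_enter_idle() always pushes at the head, the tail of idle_list (idle_list.prev) is the worker that has been idle longest, which is exactly what idle_worker_timeout() and maybe_destroy_workers() below inspect.  A self-contained sketch of the same discipline with the generic list API, using a hypothetical struct item in place of struct worker:

    #include <linux/jiffies.h>
    #include <linux/list.h>

    struct item {                                   /* hypothetical stand-in for struct worker */
            struct list_head entry;
            unsigned long last_active;
    };

    static LIST_HEAD(idle_items);

    static void push_idle(struct item *it)
    {
            it->last_active = jiffies;
            list_add(&it->entry, &idle_items);      /* LIFO: newest at the head */
    }

    static struct item *oldest_idle(void)
    {
            if (list_empty(&idle_items))
                    return NULL;
            /* tail of a head-inserted list == least recently active */
            return list_entry(idle_items.prev, struct item, entry);
    }
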
    1616                 :            : 
    1617                 :            : /**
    1618                 :            :  * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
    1619                 :            :  * @pool: target worker_pool
    1620                 :            :  *
    1621                 :            :  * Bind %current to the cpu of @pool if it is associated and lock @pool.
    1622                 :            :  *
    1623                 :            :  * Works which are scheduled while the cpu is online must at least be
    1624                 :            :  * scheduled to a worker which is bound to the cpu so that if they are
    1625                 :            :  * flushed from cpu callbacks while cpu is going down, they are
    1626                 :            :  * guaranteed to execute on the cpu.
    1627                 :            :  *
    1628                 :            :  * This function is to be used by unbound workers and rescuers to bind
    1629                 :            :  * themselves to the target cpu and may race with cpu going down or
    1630                 :            :  * coming online.  kthread_bind() can't be used because it may put the
    1631                 :            :  * worker on an already dead cpu, and set_cpus_allowed_ptr() can't be used
    1632                 :            :  * verbatim because it is best-effort, may block, and the pool may be
    1633                 :            :  * [dis]associated in the meantime.
    1634                 :            :  *
    1635                 :            :  * This function tries set_cpus_allowed_ptr(), locks the pool and verifies
    1636                 :            :  * the binding against %POOL_DISASSOCIATED, which is set during
    1637                 :            :  * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
    1638                 :            :  * enters idle state or fetches works without dropping lock, it can
    1639                 :            :  * guarantee the scheduling requirement described in the first paragraph.
    1640                 :            :  *
    1641                 :            :  * CONTEXT:
    1642                 :            :  * Might sleep.  Called without any lock but returns with pool->lock
    1643                 :            :  * held.
    1644                 :            :  *
    1645                 :            :  * Return:
    1646                 :            :  * %true if the associated pool is online (@worker is successfully
    1647                 :            :  * bound), %false if offline.
    1648                 :            :  */
    1649                 :          0 : static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
    1650                 :            : __acquires(&pool->lock)
    1651                 :            : {
    1652                 :            :         while (true) {
    1653                 :            :                 /*
    1654                 :            :                  * The following call may fail, succeed or succeed
    1655                 :            :                  * without actually migrating the task to the cpu if
    1656                 :            :                  * it races with cpu hotunplug operation.  Verify
    1657                 :            :                  * against POOL_DISASSOCIATED.
    1658                 :            :                  */
    1659         [ #  # ]:          0 :                 if (!(pool->flags & POOL_DISASSOCIATED))
    1660                 :          0 :                         set_cpus_allowed_ptr(current, pool->attrs->cpumask);
    1661                 :            : 
    1662                 :            :                 spin_lock_irq(&pool->lock);
    1663         [ #  # ]:          0 :                 if (pool->flags & POOL_DISASSOCIATED)
    1664                 :            :                         return false;
    1665 [ #  # ][ #  # ]:          0 :                 if (task_cpu(current) == pool->cpu &&
    1666                 :          0 :                     cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask))
    1667                 :            :                         return true;
    1668                 :            :                 spin_unlock_irq(&pool->lock);
    1669                 :            : 
    1670                 :            :                 /*
    1671                 :            :                  * We've raced with CPU hot[un]plug.  Give it a breather
    1672                 :            :                  * and retry migration.  cond_resched() is required here;
    1673                 :            :                  * otherwise, we might deadlock against cpu_stop trying to
    1674                 :            :                  * bring down the CPU on non-preemptive kernel.
    1675                 :            :                  */
    1676                 :          0 :                 cpu_relax();
    1677                 :          0 :                 cond_resched();
    1678                 :          0 :         }
    1679                 :            : }
    1680                 :            : 
    1681                 :          0 : static struct worker *alloc_worker(void)
    1682                 :            : {
    1683                 :            :         struct worker *worker;
    1684                 :            : 
    1685                 :            :         worker = kzalloc(sizeof(*worker), GFP_KERNEL);
    1686         [ +  - ]:        345 :         if (worker) {
    1687                 :        345 :                 INIT_LIST_HEAD(&worker->entry);
    1688                 :        345 :                 INIT_LIST_HEAD(&worker->scheduled);
    1689                 :            :                 /* on creation a worker is in !idle && prep state */
    1690                 :        345 :                 worker->flags = WORKER_PREP;
    1691                 :            :         }
    1692                 :          0 :         return worker;
    1693                 :            : }
    1694                 :            : 
    1695                 :            : /**
    1696                 :            :  * create_worker - create a new workqueue worker
    1697                 :            :  * @pool: pool the new worker will belong to
    1698                 :            :  *
    1699                 :            :  * Create a new worker which is bound to @pool.  The returned worker
    1700                 :            :  * can be started by calling start_worker() or destroyed using
    1701                 :            :  * destroy_worker().
    1702                 :            :  *
    1703                 :            :  * CONTEXT:
    1704                 :            :  * Might sleep.  Does GFP_KERNEL allocations.
    1705                 :            :  *
    1706                 :            :  * Return:
    1707                 :            :  * Pointer to the newly created worker.
    1708                 :            :  */
    1709                 :          0 : static struct worker *create_worker(struct worker_pool *pool)
    1710                 :            : {
    1711                 :            :         struct worker *worker = NULL;
    1712                 :            :         int id = -1;
    1713                 :            :         char id_buf[16];
    1714                 :            : 
    1715                 :            :         lockdep_assert_held(&pool->manager_mutex);
    1716                 :            : 
    1717                 :            :         /*
    1718                 :            :          * ID is needed to determine kthread name.  Allocate ID first
    1719                 :            :          * without installing the pointer.
    1720                 :            :          */
    1721                 :        345 :         idr_preload(GFP_KERNEL);
    1722                 :            :         spin_lock_irq(&pool->lock);
    1723                 :            : 
    1724                 :        345 :         id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT);
    1725                 :            : 
    1726                 :            :         spin_unlock_irq(&pool->lock);
    1727                 :            :         idr_preload_end();
    1728         [ +  - ]:        345 :         if (id < 0)
    1729                 :            :                 goto fail;
    1730                 :            : 
    1731                 :        345 :         worker = alloc_worker();
    1732         [ +  - ]:        345 :         if (!worker)
    1733                 :            :                 goto fail;
    1734                 :            : 
    1735                 :        345 :         worker->pool = pool;
    1736                 :        345 :         worker->id = id;
    1737                 :            : 
    1738         [ +  + ]:        345 :         if (pool->cpu >= 0)
    1739         [ +  - ]:        320 :                 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
    1740                 :        320 :                          pool->attrs->nice < 0  ? "H" : "");
    1741                 :            :         else
    1742                 :         25 :                 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
    1743                 :            : 
    1744                 :        345 :         worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
    1745                 :            :                                               "kworker/%s", id_buf);
    1746         [ +  - ]:        345 :         if (IS_ERR(worker->task))
    1747                 :            :                 goto fail;
    1748                 :            : 
    1749                 :        345 :         set_user_nice(worker->task, pool->attrs->nice);
    1750                 :            : 
    1751                 :            :         /* prevent userland from meddling with cpumask of workqueue workers */
    1752                 :        345 :         worker->task->flags |= PF_NO_SETAFFINITY;
    1753                 :            : 
    1754                 :            :         /*
    1755                 :            :          * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
    1756                 :            :          * online CPUs.  It'll be re-applied when any of the CPUs come up.
    1757                 :            :          */
    1758                 :        345 :         set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
    1759                 :            : 
    1760                 :            :         /*
    1761                 :            :          * The caller is responsible for ensuring %POOL_DISASSOCIATED
    1762                 :            :          * remains stable across this function.  See the comments above the
    1763                 :            :          * flag definition for details.
    1764                 :            :          */
    1765         [ +  + ]:        345 :         if (pool->flags & POOL_DISASSOCIATED)
    1766                 :         25 :                 worker->flags |= WORKER_UNBOUND;
    1767                 :            : 
    1768                 :            :         /* successful, commit the pointer to idr */
    1769                 :            :         spin_lock_irq(&pool->lock);
    1770                 :        345 :         idr_replace(&pool->worker_idr, worker, worker->id);
    1771                 :            :         spin_unlock_irq(&pool->lock);
    1772                 :            : 
    1773                 :        345 :         return worker;
    1774                 :            : 
    1775                 :            : fail:
    1776            [ - ]:          0 :         if (id >= 0) {
    1777                 :            :                 spin_lock_irq(&pool->lock);
    1778                 :          0 :                 idr_remove(&pool->worker_idr, id);
    1779                 :            :                 spin_unlock_irq(&pool->lock);
    1780                 :            :         }
    1781                 :          0 :         kfree(worker);
    1782                 :          0 :         return NULL;
    1783                 :            : }
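
create_worker() uses a create-then-start split: kthread_create_on_node() leaves the new thread stopped so its nice level, cpumask and flags can be set before start_worker() wakes it.  Below is a generic sketch of that pattern under hypothetical names (workqueue.c itself uses set_cpus_allowed_ptr() rather than kthread_bind() because its workers may need to be rebound later):

    #include <linux/err.h>
    #include <linux/jiffies.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int my_thread_fn(void *data)             /* hypothetical thread body */
    {
            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);
            return 0;
    }

    static struct task_struct *start_my_thread(int node, int cpu)
    {
            struct task_struct *task;

            /* created stopped, so it can be configured before it runs */
            task = kthread_create_on_node(my_thread_fn, NULL, node,
                                          "my/%d", cpu);
            if (IS_ERR(task))
                    return task;

            set_user_nice(task, -5);                /* arbitrary example nice level */
            kthread_bind(task, cpu);                /* bind before the first wakeup */
            wake_up_process(task);                  /* only now does it start running */
            return task;
    }
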
    1784                 :            : 
    1785                 :            : /**
    1786                 :            :  * start_worker - start a newly created worker
    1787                 :            :  * @worker: worker to start
    1788                 :            :  *
    1789                 :            :  * Make the pool aware of @worker and start it.
    1790                 :            :  *
    1791                 :            :  * CONTEXT:
    1792                 :            :  * spin_lock_irq(pool->lock).
    1793                 :            :  */
    1794                 :          0 : static void start_worker(struct worker *worker)
    1795                 :            : {
    1796                 :        345 :         worker->flags |= WORKER_STARTED;
    1797                 :        345 :         worker->pool->nr_workers++;
    1798                 :        345 :         worker_enter_idle(worker);
    1799                 :        345 :         wake_up_process(worker->task);
    1800                 :        345 : }
    1801                 :            : 
    1802                 :            : /**
    1803                 :            :  * create_and_start_worker - create and start a worker for a pool
    1804                 :            :  * @pool: the target pool
    1805                 :            :  *
    1806                 :            :  * Grab the managership of @pool and create and start a new worker for it.
    1807                 :            :  *
    1808                 :            :  * Return: 0 on success. A negative error code otherwise.
    1809                 :            :  */
    1810                 :          0 : static int create_and_start_worker(struct worker_pool *pool)
    1811                 :            : {
    1812                 :            :         struct worker *worker;
    1813                 :            : 
    1814                 :          0 :         mutex_lock(&pool->manager_mutex);
    1815                 :            : 
    1816                 :          0 :         worker = create_worker(pool);
    1817         [ #  # ]:          0 :         if (worker) {
    1818                 :            :                 spin_lock_irq(&pool->lock);
    1819                 :          0 :                 start_worker(worker);
    1820                 :            :                 spin_unlock_irq(&pool->lock);
    1821                 :            :         }
    1822                 :            : 
    1823                 :          0 :         mutex_unlock(&pool->manager_mutex);
    1824                 :            : 
    1825         [ #  # ]:          0 :         return worker ? 0 : -ENOMEM;
    1826                 :            : }
    1827                 :            : 
    1828                 :            : /**
    1829                 :            :  * destroy_worker - destroy a workqueue worker
    1830                 :            :  * @worker: worker to be destroyed
    1831                 :            :  *
    1832                 :            :  * Destroy @worker and adjust @pool stats accordingly.
    1833                 :            :  *
    1834                 :            :  * CONTEXT:
    1835                 :            :  * spin_lock_irq(pool->lock) which is released and regrabbed.
    1836                 :            :  */
    1837                 :          0 : static void destroy_worker(struct worker *worker)
    1838                 :            : {
    1839                 :        345 :         struct worker_pool *pool = worker->pool;
    1840                 :            : 
    1841                 :            :         lockdep_assert_held(&pool->manager_mutex);
    1842                 :            :         lockdep_assert_held(&pool->lock);
    1843                 :            : 
    1844                 :            :         /* sanity check frenzy */
    1845 [ -  + ][ +  - ]:        690 :         if (WARN_ON(worker->current_work) ||
                 [ +  - ]
    1846         [ -  + ]:        345 :             WARN_ON(!list_empty(&worker->scheduled)))
    1847                 :        345 :                 return;
    1848                 :            : 
    1849         [ +  - ]:        345 :         if (worker->flags & WORKER_STARTED)
    1850                 :        345 :                 pool->nr_workers--;
    1851         [ +  - ]:        345 :         if (worker->flags & WORKER_IDLE)
    1852                 :        345 :                 pool->nr_idle--;
    1853                 :            : 
    1854                 :          0 :         list_del_init(&worker->entry);
    1855                 :          0 :         worker->flags |= WORKER_DIE;
    1856                 :            : 
    1857                 :        345 :         idr_remove(&pool->worker_idr, worker->id);
    1858                 :            : 
    1859                 :            :         spin_unlock_irq(&pool->lock);
    1860                 :            : 
    1861                 :        345 :         kthread_stop(worker->task);
    1862                 :        345 :         kfree(worker);
    1863                 :            : 
    1864                 :            :         spin_lock_irq(&pool->lock);
    1865                 :            : }
    1866                 :            : 
    1867                 :          0 : static void idle_worker_timeout(unsigned long __pool)
    1868                 :            : {
    1869                 :        352 :         struct worker_pool *pool = (void *)__pool;
    1870                 :            : 
    1871                 :            :         spin_lock_irq(&pool->lock);
    1872                 :            : 
    1873         [ #  # ]:        352 :         if (too_many_workers(pool)) {
    1874                 :            :                 struct worker *worker;
    1875                 :            :                 unsigned long expires;
    1876                 :            : 
    1877                 :            :                 /* idle_list is kept in LIFO order, check the last one */
    1878                 :        350 :                 worker = list_entry(pool->idle_list.prev, struct worker, entry);
    1879                 :        350 :                 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
    1880                 :            : 
    1881         [ +  + ]:        350 :                 if (time_before(jiffies, expires))
    1882                 :          3 :                         mod_timer(&pool->idle_timer, expires);
    1883                 :            :                 else {
    1884                 :            :                         /* it's been idle for too long, wake up manager */
    1885                 :        347 :                         pool->flags |= POOL_MANAGE_WORKERS;
    1886                 :        347 :                         wake_up_worker(pool);
    1887                 :            :                 }
    1888                 :            :         }
    1889                 :            : 
    1890                 :            :         spin_unlock_irq(&pool->lock);
    1891                 :        352 : }
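
idle_worker_timeout() shows a lazy re-arm idiom: instead of polling on a fixed period, the timer is pushed out to the exact moment the oldest idle worker will cross IDLE_WORKER_TIMEOUT.  A hedged sketch of the same idiom against the timer API used in this file (pre-timer_setup(); my_timer, MY_TIMEOUT and oldest_last_active are hypothetical):

    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/timer.h>

    #define MY_TIMEOUT      (300 * HZ)              /* hypothetical aging threshold */

    static struct timer_list my_timer;              /* hypothetical reaper timer */
    static unsigned long oldest_last_active;        /* updated elsewhere (hypothetical) */

    static void my_timeout(unsigned long data)
    {
            unsigned long expires = oldest_last_active + MY_TIMEOUT;

            if (time_before(jiffies, expires))
                    mod_timer(&my_timer, expires);  /* not old enough yet: re-arm lazily */
            else
                    pr_info("oldest entry aged out\n");
    }

    static void my_timer_init(void)
    {
            setup_timer(&my_timer, my_timeout, 0);
            mod_timer(&my_timer, jiffies + MY_TIMEOUT);
    }
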
    1892                 :            : 
    1893                 :          0 : static void send_mayday(struct work_struct *work)
    1894                 :            : {
    1895                 :            :         struct pool_workqueue *pwq = get_work_pwq(work);
    1896                 :          0 :         struct workqueue_struct *wq = pwq->wq;
    1897                 :            : 
    1898                 :            :         lockdep_assert_held(&wq_mayday_lock);
    1899                 :            : 
    1900         [ #  # ]:          0 :         if (!wq->rescuer)
    1901                 :          0 :                 return;
    1902                 :            : 
    1903                 :            :         /* mayday mayday mayday */
    1904         [ #  # ]:          0 :         if (list_empty(&pwq->mayday_node)) {
    1905                 :          0 :                 list_add_tail(&pwq->mayday_node, &wq->maydays);
    1906                 :          0 :                 wake_up_process(wq->rescuer->task);
    1907                 :            :         }
    1908                 :            : }
    1909                 :            : 
    1910                 :          0 : static void pool_mayday_timeout(unsigned long __pool)
    1911                 :            : {
    1912                 :          0 :         struct worker_pool *pool = (void *)__pool;
    1913                 :            :         struct work_struct *work;
    1914                 :            : 
    1915                 :            :         spin_lock_irq(&wq_mayday_lock);             /* for wq->maydays */
    1916                 :            :         spin_lock(&pool->lock);
    1917                 :            : 
    1918         [ #  # ]:          0 :         if (need_to_create_worker(pool)) {
    1919                 :            :                 /*
    1920                 :            :                  * We've been trying to create a new worker but
    1921                 :            :                  * haven't been successful.  We might be hitting an
    1922                 :            :                  * allocation deadlock.  Send distress signals to
    1923                 :            :                  * rescuers.
    1924                 :            :                  */
    1925         [ #  # ]:          0 :                 list_for_each_entry(work, &pool->worklist, entry)
    1926                 :          0 :                         send_mayday(work);
    1927                 :            :         }
    1928                 :            : 
    1929                 :            :         spin_unlock(&pool->lock);
    1930                 :            :         spin_unlock_irq(&wq_mayday_lock);
    1931                 :            : 
    1932                 :          0 :         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
    1933                 :          0 : }
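
The mayday path above only matters for workqueues that actually have a rescuer, and a rescuer is created only for workqueues allocated with WQ_MEM_RECLAIM.  A minimal sketch of allocating such a workqueue (my_reclaim_wq is a hypothetical name):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_reclaim_wq;  /* hypothetical */

    static int my_setup(void)
    {
            /* WQ_MEM_RECLAIM guarantees a rescuer thread, so work queued
             * here can make forward progress even when worker creation
             * stalls under memory pressure and send_mayday() fires */
            my_reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
            return my_reclaim_wq ? 0 : -ENOMEM;
    }
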
    1934                 :            : 
    1935                 :            : /**
    1936                 :            :  * maybe_create_worker - create a new worker if necessary
    1937                 :            :  * @pool: pool to create a new worker for
    1938                 :            :  *
    1939                 :            :  * Create a new worker for @pool if necessary.  @pool is guaranteed to
    1940                 :            :  * have at least one idle worker on return from this function.  If
    1941                 :            :  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
    1942                 :            :  * sent to all rescuers with works scheduled on @pool to resolve
    1943                 :            :  * possible allocation deadlock.
    1944                 :            :  *
    1945                 :            :  * On return, need_to_create_worker() is guaranteed to be %false and
    1946                 :            :  * may_start_working() %true.
    1947                 :            :  *
    1948                 :            :  * LOCKING:
    1949                 :            :  * spin_lock_irq(pool->lock) which may be released and regrabbed
    1950                 :            :  * multiple times.  Does GFP_KERNEL allocations.  Called only from
    1951                 :            :  * manager.
    1952                 :            :  *
    1953                 :            :  * Return:
    1954                 :            :  * %false if no action was taken and pool->lock stayed locked, %true
    1955                 :            :  * otherwise.
    1956                 :            :  */
    1957                 :          0 : static bool maybe_create_worker(struct worker_pool *pool)
    1958                 :            : __releases(&pool->lock)
    1959                 :            : __acquires(&pool->lock)
    1960                 :            : {
    1961         [ +  + ]:        692 :         if (!need_to_create_worker(pool))
    1962                 :            :                 return false;
    1963                 :            : restart:
    1964                 :            :         spin_unlock_irq(&pool->lock);
    1965                 :            : 
    1966                 :            :         /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
    1967                 :        345 :         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
    1968                 :            : 
    1969                 :            :         while (true) {
    1970                 :            :                 struct worker *worker;
    1971                 :            : 
    1972                 :        345 :                 worker = create_worker(pool);
    1973         [ +  - ]:        345 :                 if (worker) {
    1974                 :        345 :                         del_timer_sync(&pool->mayday_timer);
    1975                 :            :                         spin_lock_irq(&pool->lock);
    1976                 :        345 :                         start_worker(worker);
    1977 [ -  + ][ #  # ]:        345 :                         if (WARN_ON_ONCE(need_to_create_worker(pool)))
         [ #  # ][ -  + ]
    1978                 :            :                                 goto restart;
    1979                 :            :                         return true;
    1980                 :            :                 }
    1981                 :            : 
    1982         [ #  # ]:          0 :                 if (!need_to_create_worker(pool))
    1983                 :            :                         break;
    1984                 :            : 
    1985                 :          0 :                 __set_current_state(TASK_INTERRUPTIBLE);
    1986                 :          0 :                 schedule_timeout(CREATE_COOLDOWN);
    1987                 :            : 
    1988         [ #  # ]:          0 :                 if (!need_to_create_worker(pool))
    1989                 :            :                         break;
    1990                 :            :         }
    1991                 :            : 
    1992                 :          0 :         del_timer_sync(&pool->mayday_timer);
    1993                 :            :         spin_lock_irq(&pool->lock);
    1994         [ #  # ]:          0 :         if (need_to_create_worker(pool))
    1995                 :            :                 goto restart;
    1996                 :            :         return true;
    1997                 :            : }
    1998                 :            : 
    1999                 :            : /**
    2000                 :            :  * maybe_destroy_workers - destroy workers which have been idle for a while
    2001                 :            :  * @pool: pool to destroy workers for
    2002                 :            :  *
    2003                 :            :  * Destroy @pool workers which have been idle for longer than
    2004                 :            :  * IDLE_WORKER_TIMEOUT.
    2005                 :            :  *
    2006                 :            :  * LOCKING:
    2007                 :            :  * spin_lock_irq(pool->lock) which may be released and regrabbed
    2008                 :            :  * multiple times.  Called only from manager.
    2009                 :            :  *
    2010                 :            :  * Return:
    2011                 :            :  * %false if no action was taken and pool->lock stayed locked, %true
    2012                 :            :  * otherwise.
    2013                 :            :  */
    2014                 :          0 : static bool maybe_destroy_workers(struct worker_pool *pool)
    2015                 :            : {
    2016                 :            :         bool ret = false;
    2017                 :            : 
    2018         [ +  + ]:       1729 :         while (too_many_workers(pool)) {
    2019                 :            :                 struct worker *worker;
    2020                 :            :                 unsigned long expires;
    2021                 :            : 
    2022                 :        346 :                 worker = list_entry(pool->idle_list.prev, struct worker, entry);
    2023                 :        346 :                 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
    2024                 :            : 
    2025         [ +  + ]:        346 :                 if (time_before(jiffies, expires)) {
    2026                 :          1 :                         mod_timer(&pool->idle_timer, expires);
    2027                 :          1 :                         break;
    2028                 :            :                 }
    2029                 :            : 
    2030                 :        345 :                 destroy_worker(worker);
    2031                 :            :                 ret = true;
    2032                 :            :         }
    2033                 :            : 
    2034                 :        692 :         return ret;
    2035                 :            : }
    2036                 :            : 
    2037                 :            : /**
    2038                 :            :  * manage_workers - manage worker pool
    2039                 :            :  * @worker: self
    2040                 :            :  *
    2041                 :            :  * Assume the manager role and manage the worker pool @worker belongs
    2042                 :            :  * to.  At any given time, there can be only zero or one manager per
    2043                 :            :  * pool.  The exclusion is handled automatically by this function.
    2044                 :            :  *
    2045                 :            :  * The caller can safely start processing works on false return.  On
    2046                 :            :  * true return, it's guaranteed that need_to_create_worker() is false
    2047                 :            :  * and may_start_working() is true.
    2048                 :            :  *
    2049                 :            :  * CONTEXT:
    2050                 :            :  * spin_lock_irq(pool->lock) which may be released and regrabbed
    2051                 :            :  * multiple times.  Does GFP_KERNEL allocations.
    2052                 :            :  *
    2053                 :            :  * Return:
    2054                 :            :  * %false if the pool doesn't need management and the caller can safely
    2055                 :            :  * start processing works.  %true indicates that the function released
    2056                 :            :  * pool->lock and reacquired it to perform some management function, and
    2057                 :            :  * that the conditions the caller verified while holding the lock before
    2058                 :            :  * calling the function might no longer be true.
    2059                 :            :  */
    2060                 :          0 : static bool manage_workers(struct worker *worker)
    2061                 :            : {
    2062                 :        921 :         struct worker_pool *pool = worker->pool;
    2063                 :            :         bool ret = false;
    2064                 :            : 
    2065                 :            :         /*
    2066                 :            :          * Managership is governed by two mutexes - manager_arb and
    2067                 :            :          * manager_mutex.  manager_arb handles arbitration of manager role.
    2068                 :            :          * Anyone who successfully grabs manager_arb wins the arbitration
    2069                 :            :          * and becomes the manager.  mutex_trylock() on pool->manager_arb
    2070                 :            :          * failure while holding pool->lock reliably indicates that someone
    2071                 :            :          * else is managing the pool and the worker which failed trylock
    2072                 :            :          * can proceed to executing work items.  This means that anyone
    2073                 :            :          * grabbing manager_arb is responsible for actually performing
    2074                 :            :          * manager duties.  If manager_arb is grabbed and released without
    2075                 :            :          * actual management, the pool may stall indefinitely.
    2076                 :            :          *
    2077                 :            :          * manager_mutex is used for exclusion of actual management
    2078                 :            :          * operations.  The holder of manager_mutex can be sure that no
    2079                 :            :          * management operation, including creation and destruction of
    2080                 :            :          * workers, will take place until the mutex is released.  Because
    2081                 :            :          * manager_mutex doesn't interfere with manager role arbitration,
    2082                 :            :          * it is guaranteed that the pool's management, while it may be
    2083                 :            :          * delayed, won't be disturbed by someone else grabbing
    2084                 :            :          * manager_mutex.
    2085                 :            :          */
    2086         [ +  + ]:        921 :         if (!mutex_trylock(&pool->manager_arb))
    2087                 :            :                 return ret;
    2088                 :            : 
    2089                 :            :         /*
    2090                 :            :          * With manager arbitration won, manager_mutex would be free in
    2091                 :            :          * most cases.  trylock first without dropping @pool->lock.
    2092                 :            :          */
    2093         [ -  + ]:        692 :         if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
    2094                 :            :                 spin_unlock_irq(&pool->lock);
    2095                 :          0 :                 mutex_lock(&pool->manager_mutex);
    2096                 :            :                 spin_lock_irq(&pool->lock);
    2097                 :            :                 ret = true;
    2098                 :            :         }
    2099                 :            : 
    2100                 :        692 :         pool->flags &= ~POOL_MANAGE_WORKERS;
    2101                 :            : 
    2102                 :            :         /*
    2103                 :            :          * Destroy and then create so that may_start_working() is true
    2104                 :            :          * on return.
    2105                 :            :          */
    2106                 :        692 :         ret |= maybe_destroy_workers(pool);
    2107                 :        692 :         ret |= maybe_create_worker(pool);
    2108                 :            : 
    2109                 :        692 :         mutex_unlock(&pool->manager_mutex);
    2110                 :        692 :         mutex_unlock(&pool->manager_arb);
    2111                 :            :         return ret;
    2112                 :            : }
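
The arbitration scheme described in the comment above -- a trylock-only mutex to decide who manages, plus a second mutex that excludes the actual management operations, with the hot-path spinlock dropped only if that second mutex is contended -- can be seen in isolation in the following generic sketch (not workqueue.c code; arb, ops and lock are hypothetical):

    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    static DEFINE_MUTEX(arb);       /* decides who gets to manage (hypothetical) */
    static DEFINE_MUTEX(ops);       /* excludes actual management operations     */
    static DEFINE_SPINLOCK(lock);   /* hot-path lock, held by the caller         */

    /* Returns true if "lock" was dropped and re-taken along the way. */
    static bool manage(void)
    {
            bool dropped = false;

            if (!mutex_trylock(&arb))
                    return false;   /* somebody else is already managing */

            if (!mutex_trylock(&ops)) {
                    /* slow path: can't sleep under the spinlock */
                    spin_unlock_irq(&lock);
                    mutex_lock(&ops);
                    spin_lock_irq(&lock);
                    dropped = true;
            }

            /* ... perform the management work here ... */

            mutex_unlock(&ops);
            mutex_unlock(&arb);
            return dropped;
    }
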
    2113                 :            : 
    2114                 :            : /**
    2115                 :            :  * process_one_work - process single work
    2116                 :            :  * @worker: self
    2117                 :            :  * @work: work to process
    2118                 :            :  *
    2119                 :            :  * Process @work.  This function contains all the logic necessary to
    2120                 :            :  * process a single work including synchronization against and
    2121                 :            :  * interaction with other workers on the same cpu, queueing and
    2122                 :            :  * flushing.  As long as the context requirement is met, any worker can
    2123                 :            :  * call this function to process a work.
    2124                 :            :  *
    2125                 :            :  * CONTEXT:
    2126                 :            :  * spin_lock_irq(pool->lock) which is released and regrabbed.
    2127                 :            :  */
    2128                 :          0 : static void process_one_work(struct worker *worker, struct work_struct *work)
    2129                 :            : __releases(&pool->lock)
    2130                 :            : __acquires(&pool->lock)
    2131                 :            : {
    2132                 :            :         struct pool_workqueue *pwq = get_work_pwq(work);
    2133                 :     965663 :         struct worker_pool *pool = worker->pool;
    2134                 :     965663 :         bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
    2135                 :            :         int work_color;
    2136                 :            :         struct worker *collision;
    2137                 :            : #ifdef CONFIG_LOCKDEP
    2138                 :            :         /*
    2139                 :            :          * It is permissible to free the struct work_struct from
    2140                 :            :          * inside the function that is called from it, and we need to
    2141                 :            :          * take that into account for lockdep too.  To avoid bogus "held
    2142                 :            :          * lock freed" warnings as well as problems when looking into
    2143                 :            :          * work->lockdep_map, make a copy and use that here.
    2144                 :            :          */
    2145                 :            :         struct lockdep_map lockdep_map;
    2146                 :            : 
    2147                 :            :         lockdep_copy_map(&lockdep_map, &work->lockdep_map);
    2148                 :            : #endif
    2149                 :            :         /*
    2150                 :            :          * Ensure we're on the correct CPU.  DISASSOCIATED test is
    2151                 :            :          * necessary to avoid spurious warnings from rescuers servicing the
    2152                 :            :          * unbound or a disassociated pool.
    2153                 :            :          */
    2154 [ +  + ][ +  + ]:     965663 :         WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
         [ +  + ][ -  + ]
         [ #  # ][ #  # ]
    2155                 :            :                      !(pool->flags & POOL_DISASSOCIATED) &&
    2156                 :            :                      raw_smp_processor_id() != pool->cpu);
    2157                 :            : 
    2158                 :            :         /*
    2159                 :            :          * A single work shouldn't be executed concurrently by
    2160                 :            :          * multiple workers on a single cpu.  Check whether anyone is
    2161                 :            :          * already processing the work.  If so, defer the work to the
    2162                 :            :          * currently executing one.
    2163                 :            :          */
    2164                 :     965663 :         collision = find_worker_executing_work(pool, work);
    2165         [ +  + ]:     965651 :         if (unlikely(collision)) {
    2166                 :       2217 :                 move_linked_works(work, &collision->scheduled, NULL);
    2167                 :       2217 :                 return;
    2168                 :            :         }
    2169                 :            : 
    2170                 :            :         /* claim and dequeue */
    2171                 :            :         debug_work_deactivate(work);
    2172                 :    1926868 :         hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
    2173                 :     963434 :         worker->current_work = work;
    2174                 :     963434 :         worker->current_func = work->func;
    2175                 :     963434 :         worker->current_pwq = pwq;
    2176                 :            :         work_color = get_work_color(work);
    2177                 :            : 
    2178                 :     963434 :         list_del_init(&work->entry);
    2179                 :            : 
    2180                 :            :         /*
    2181                 :            :          * CPU intensive works don't participate in concurrency
    2182                 :            :          * management.  They're the scheduler's responsibility.
    2183                 :            :          */
    2184         [ -  + ]:     963434 :         if (unlikely(cpu_intensive))
    2185                 :            :                 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
    2186                 :            : 
    2187                 :            :         /*
    2188                 :            :          * Unbound pool isn't concurrency managed and work items should be
    2189                 :            :          * executed ASAP.  Wake up another worker if necessary.
    2190                 :            :          */
    2191 [ +  + ][ +  + ]:    1055230 :         if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
    2192                 :         26 :                 wake_up_worker(pool);
    2193                 :            : 
    2194                 :            :         /*
    2195                 :            :          * Record the last pool and clear PENDING which should be the last
    2196                 :            :          * update to @work.  Also, do this inside @pool->lock so that
    2197                 :            :          * PENDING and queued state changes happen together while IRQ is
    2198                 :            :          * disabled.
    2199                 :            :          */
    2200                 :     963434 :         set_work_pool_and_clear_pending(work, pool->id);
    2201                 :            : 
    2202                 :            :         spin_unlock_irq(&pool->lock);
    2203                 :            : 
    2204                 :            :         lock_map_acquire_read(&pwq->wq->lockdep_map);
    2205                 :            :         lock_map_acquire(&lockdep_map);
    2206                 :            :         trace_workqueue_execute_start(work);
    2207                 :     963407 :         worker->current_func(work);
    2208                 :            :         /*
    2209                 :            :          * While we must be careful to not use "work" after this, the trace
    2210                 :            :          * point will only record its address.
    2211                 :            :          */
    2212                 :            :         trace_workqueue_execute_end(work);
    2213                 :            :         lock_map_release(&lockdep_map);
    2214                 :            :         lock_map_release(&pwq->wq->lockdep_map);
    2215                 :            : 
    2216         [ -  + ]:     963448 :         if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
    2217                 :          0 :                 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
    2218                 :            :                        "     last function: %pf\n",
    2219                 :            :                        current->comm, preempt_count(), task_pid_nr(current),
    2220                 :            :                        worker->current_func);
    2221                 :            :                 debug_show_held_locks(current);
    2222                 :          0 :                 dump_stack();
    2223                 :            :         }
    2224                 :            : 
    2225                 :            :         /*
    2226                 :            :          * The following prevents a kworker from hogging CPU on !PREEMPT
    2227                 :            :          * kernels, where a requeueing work item waiting for something to
    2228                 :            :          * happen could deadlock with stop_machine as such work item could
    2229                 :            :          * indefinitely requeue itself while all other CPUs are trapped in
    2230                 :            :          * stop_machine.
    2231                 :            :          */
    2232                 :     963448 :         cond_resched();
    2233                 :            : 
    2234                 :            :         spin_lock_irq(&pool->lock);
    2235                 :            : 
    2236                 :            :         /* clear cpu intensive status */
    2237         [ -  + ]:     963446 :         if (unlikely(cpu_intensive))
    2238                 :            :                 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
    2239                 :            : 
    2240                 :            :         /* we're done with it, release */
    2241                 :            :         hash_del(&worker->hentry);
    2242                 :     963444 :         worker->current_work = NULL;
    2243                 :     963444 :         worker->current_func = NULL;
    2244                 :     963444 :         worker->current_pwq = NULL;
    2245                 :     963444 :         worker->desc_valid = false;
    2246                 :     963444 :         pwq_dec_nr_in_flight(pwq, work_color);
    2247                 :            : }
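
The lockdep_copy_map() dance at the top of process_one_work() exists because a work function is allowed to free its own work_struct: after the callback returns, the core only touches its private copy of the lockdep map, never *work.  A sketch of that self-freeing pattern with a hypothetical request type:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_request {                     /* hypothetical */
            struct work_struct work;
            int payload;
    };

    static void my_request_fn(struct work_struct *work)
    {
            struct my_request *req = container_of(work, struct my_request, work);

            /* ... consume req->payload ... */

            kfree(req);     /* legal: nothing dereferences *work afterwards */
    }

    static int submit_request(int payload)
    {
            struct my_request *req = kzalloc(sizeof(*req), GFP_KERNEL);

            if (!req)
                    return -ENOMEM;
            req->payload = payload;
            INIT_WORK(&req->work, my_request_fn);
            schedule_work(&req->work);
            return 0;
    }
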
    2248                 :            : 
    2249                 :            : /**
    2250                 :            :  * process_scheduled_works - process scheduled works
    2251                 :            :  * @worker: self
    2252                 :            :  *
    2253                 :            :  * Process all scheduled works.  Please note that the scheduled list
    2254                 :            :  * may change while processing a work, so this function repeatedly
    2255                 :            :  * fetches a work from the top and executes it.
    2256                 :            :  *
    2257                 :            :  * CONTEXT:
    2258                 :            :  * spin_lock_irq(pool->lock) which may be released and regrabbed
    2259                 :            :  * multiple times.
    2260                 :            :  */
    2261                 :            : static void process_scheduled_works(struct worker *worker)
    2262                 :            : {
    2263 [ #  # ][ +  + ]:       2200 :         while (!list_empty(&worker->scheduled)) {
                 [ +  + ]
    2264                 :        952 :                 struct work_struct *work = list_first_entry(&worker->scheduled,
    2265                 :            :                                                 struct work_struct, entry);
    2266                 :        952 :                 process_one_work(worker, work);
    2267                 :            :         }
    2268                 :            : }
    2269                 :            : 
    2270                 :            : /**
    2271                 :            :  * worker_thread - the worker thread function
    2272                 :            :  * @__worker: self
    2273                 :            :  *
    2274                 :            :  * The worker thread function.  All workers belong to a worker_pool -
     2275                 :            :  * either a per-cpu one or a dynamic unbound one.  These workers process all
    2276                 :            :  * work items regardless of their specific target workqueue.  The only
    2277                 :            :  * exception is work items which belong to workqueues with a rescuer which
    2278                 :            :  * will be explained in rescuer_thread().
    2279                 :            :  *
    2280                 :            :  * Return: 0
    2281                 :            :  */
    2282                 :          0 : static int worker_thread(void *__worker)
    2283                 :            : {
    2284                 :        921 :         struct worker *worker = __worker;
    2285                 :     735192 :         struct worker_pool *pool = worker->pool;
    2286                 :            : 
    2287                 :            :         /* tell the scheduler that this is a workqueue worker */
    2288                 :        345 :         worker->task->flags |= PF_WQ_WORKER;
    2289                 :            : woke_up:
    2290                 :            :         spin_lock_irq(&pool->lock);
    2291                 :            : 
    2292                 :            :         /* am I supposed to die? */
    2293         [ +  + ]:     735617 :         if (unlikely(worker->flags & WORKER_DIE)) {
    2294                 :            :                 spin_unlock_irq(&pool->lock);
    2295 [ -  + ][ #  # ]:        345 :                 WARN_ON_ONCE(!list_empty(&worker->entry));
                 [ #  # ]
    2296                 :        345 :                 worker->task->flags &= ~PF_WQ_WORKER;
    2297                 :        345 :                 return 0;
    2298                 :            :         }
    2299                 :            : 
    2300                 :     735961 :         worker_leave_idle(worker);
    2301                 :            : recheck:
    2302                 :            :         /* no more worker necessary? */
    2303         [ +  + ]:     735937 :         if (!need_more_worker(pool))
    2304                 :            :                 goto sleep;
    2305                 :            : 
    2306                 :            :         /* do we need to manage? */
    2307 [ +  + ][ +  + ]:     734847 :         if (unlikely(!may_start_working(pool)) && manage_workers(worker))
    2308                 :            :                 goto recheck;
    2309                 :            : 
    2310                 :            :         /*
    2311                 :            :          * ->scheduled list can only be filled while a worker is
    2312                 :            :          * preparing to process a work or actually processing it.
    2313                 :            :          * Make sure nobody diddled with it while I was sleeping.
    2314                 :            :          */
    2315 [ -  + ][ #  # ]:     734502 :         WARN_ON_ONCE(!list_empty(&worker->scheduled));
                 [ -  + ]
    2316                 :            : 
    2317                 :            :         /*
    2318                 :            :          * Finish PREP stage.  We're guaranteed to have at least one idle
    2319                 :            :          * worker or that someone else has already assumed the manager
    2320                 :            :          * role.  This is where @worker starts participating in concurrency
    2321                 :            :          * management if applicable and concurrency management is restored
    2322                 :            :          * after being rebound.  See rebind_workers() for details.
    2323                 :            :          */
    2324                 :            :         worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
    2325                 :            : 
    2326                 :            :         do {
    2327                 :     964697 :                 struct work_struct *work =
    2328                 :     964697 :                         list_first_entry(&pool->worklist,
    2329                 :            :                                          struct work_struct, entry);
    2330                 :            : 
    2331         [ +  + ]:     964697 :                 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
    2332                 :            :                         /* optimization path, not strictly necessary */
    2333                 :     964666 :                         process_one_work(worker, work);
    2334         [ +  + ]:     964700 :                         if (unlikely(!list_empty(&worker->scheduled)))
    2335                 :            :                                 process_scheduled_works(worker);
    2336                 :            :                 } else {
    2337                 :         31 :                         move_linked_works(work, &worker->scheduled, NULL);
    2338                 :            :                         process_scheduled_works(worker);
    2339                 :            :                 }
    2340         [ +  + ]:     964713 :         } while (keep_working(pool));
    2341                 :            : 
    2342                 :            :         worker_set_flags(worker, WORKER_PREP, false);
    2343                 :            : sleep:
    2344 [ +  + ][ +  + ]:     735601 :         if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
    2345                 :            :                 goto recheck;
    2346                 :            : 
    2347                 :            :         /*
    2348                 :            :          * pool->lock is held and there's no work to process and no need to
    2349                 :            :          * manage, sleep.  Workers are woken up only while holding
    2350                 :            :          * pool->lock or from local cpu, so setting the current state
    2351                 :            :          * before releasing pool->lock is enough to prevent losing any
    2352                 :            :          * event.
    2353                 :            :          */
    2354                 :     735253 :         worker_enter_idle(worker);
    2355                 :     735252 :         __set_current_state(TASK_INTERRUPTIBLE);
    2356                 :            :         spin_unlock_irq(&pool->lock);
    2357                 :     735274 :         schedule();
    2358                 :     735272 :         goto woke_up;
    2359                 :            : }
    2360                 :            : 
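/*
 * A minimal usage sketch, not part of the file being measured: it shows the
 * caller side of the worker loop above.  A work item queued on system_wq
 * lands on a per-cpu pool's worklist, where worker_thread() picks it up and
 * runs it via process_one_work().  The names my_work and my_work_fn are
 * hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/printk.h>

static void my_work_fn(struct work_struct *work)
{
        pr_info("executed by a kworker in process context\n");
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_submit_example(void)
{
        /* returns false if @my_work was already pending, true otherwise */
        queue_work(system_wq, &my_work);
}
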
    2361                 :            : /**
    2362                 :            :  * rescuer_thread - the rescuer thread function
    2363                 :            :  * @__rescuer: self
    2364                 :            :  *
    2365                 :            :  * Workqueue rescuer thread function.  There's one rescuer for each
    2366                 :            :  * workqueue which has WQ_MEM_RECLAIM set.
    2367                 :            :  *
    2368                 :            :  * Regular work processing on a pool may block trying to create a new
     2369                 :            :  * worker, which uses a GFP_KERNEL allocation that has a slight chance of
     2370                 :            :  * developing into a deadlock if some works currently on the same queue
     2371                 :            :  * need to be processed to satisfy that GFP_KERNEL allocation.  This is
     2372                 :            :  * the problem the rescuer solves.
    2373                 :            :  *
     2374                 :            :  * When such a condition is possible, the pool summons the rescuers of all
     2375                 :            :  * workqueues which have works queued on the pool and lets them process
    2376                 :            :  * those works so that forward progress can be guaranteed.
    2377                 :            :  *
    2378                 :            :  * This should happen rarely.
    2379                 :            :  *
    2380                 :            :  * Return: 0
    2381                 :            :  */
    2382                 :          0 : static int rescuer_thread(void *__rescuer)
    2383                 :            : {
    2384                 :            :         struct worker *rescuer = __rescuer;
    2385                 :          0 :         struct workqueue_struct *wq = rescuer->rescue_wq;
    2386                 :          0 :         struct list_head *scheduled = &rescuer->scheduled;
    2387                 :            : 
    2388                 :          0 :         set_user_nice(current, RESCUER_NICE_LEVEL);
    2389                 :            : 
    2390                 :            :         /*
    2391                 :            :          * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
    2392                 :            :          * doesn't participate in concurrency management.
    2393                 :            :          */
    2394                 :          0 :         rescuer->task->flags |= PF_WQ_WORKER;
    2395                 :            : repeat:
    2396                 :          0 :         set_current_state(TASK_INTERRUPTIBLE);
    2397                 :            : 
    2398         [ #  # ]:          0 :         if (kthread_should_stop()) {
    2399                 :          0 :                 __set_current_state(TASK_RUNNING);
    2400                 :          0 :                 rescuer->task->flags &= ~PF_WQ_WORKER;
    2401                 :          0 :                 return 0;
    2402                 :            :         }
    2403                 :            : 
    2404                 :            :         /* see whether any pwq is asking for help */
    2405                 :            :         spin_lock_irq(&wq_mayday_lock);
    2406                 :            : 
    2407         [ #  # ]:          0 :         while (!list_empty(&wq->maydays)) {
    2408                 :          0 :                 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
    2409                 :            :                                         struct pool_workqueue, mayday_node);
    2410                 :          0 :                 struct worker_pool *pool = pwq->pool;
    2411                 :            :                 struct work_struct *work, *n;
    2412                 :            : 
    2413                 :          0 :                 __set_current_state(TASK_RUNNING);
    2414                 :          0 :                 list_del_init(&pwq->mayday_node);
    2415                 :            : 
    2416                 :            :                 spin_unlock_irq(&wq_mayday_lock);
    2417                 :            : 
    2418                 :            :                 /* migrate to the target cpu if possible */
    2419                 :          0 :                 worker_maybe_bind_and_lock(pool);
    2420                 :          0 :                 rescuer->pool = pool;
    2421                 :            : 
    2422                 :            :                 /*
    2423                 :            :                  * Slurp in all works issued via this workqueue and
    2424                 :            :                  * process'em.
    2425                 :            :                  */
    2426 [ #  # ][ #  # ]:          0 :                 WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
                 [ #  # ]
    2427         [ #  # ]:          0 :                 list_for_each_entry_safe(work, n, &pool->worklist, entry)
    2428         [ #  # ]:          0 :                         if (get_work_pwq(work) == pwq)
    2429                 :          0 :                                 move_linked_works(work, scheduled, &n);
    2430                 :            : 
    2431                 :            :                 process_scheduled_works(rescuer);
    2432                 :            : 
    2433                 :            :                 /*
    2434                 :            :                  * Leave this pool.  If keep_working() is %true, notify a
    2435                 :            :                  * regular worker; otherwise, we end up with 0 concurrency
    2436                 :            :                  * and stalling the execution.
    2437                 :            :                  */
    2438         [ #  # ]:          0 :                 if (keep_working(pool))
    2439                 :          0 :                         wake_up_worker(pool);
    2440                 :            : 
    2441                 :          0 :                 rescuer->pool = NULL;
    2442                 :            :                 spin_unlock(&pool->lock);
    2443                 :            :                 spin_lock(&wq_mayday_lock);
    2444                 :            :         }
    2445                 :            : 
    2446                 :            :         spin_unlock_irq(&wq_mayday_lock);
    2447                 :            : 
    2448                 :            :         /* rescuers should never participate in concurrency management */
    2449 [ #  # ][ #  # ]:          0 :         WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
                 [ #  # ]
    2450                 :          0 :         schedule();
    2451                 :          0 :         goto repeat;
    2452                 :            : }
    2453                 :            : 
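/*
 * A minimal allocation sketch, not part of the file being measured: a
 * rescuer like the one above is only created for workqueues allocated with
 * WQ_MEM_RECLAIM, so a workqueue used on the memory-reclaim path would be
 * set up roughly as below.  The names my_reclaim_wq and my_reclaim_init are
 * hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *my_reclaim_wq;

static int my_reclaim_init(void)
{
        /* WQ_MEM_RECLAIM guarantees a rescuer thread; max_active of 1 */
        my_reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 1);
        if (!my_reclaim_wq)
                return -ENOMEM;
        return 0;
}
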
    2454                 :            : struct wq_barrier {
    2455                 :            :         struct work_struct      work;
    2456                 :            :         struct completion       done;
    2457                 :            : };
    2458                 :            : 
    2459                 :          0 : static void wq_barrier_func(struct work_struct *work)
    2460                 :            : {
    2461                 :            :         struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
    2462                 :         33 :         complete(&barr->done);
    2463                 :         33 : }
    2464                 :            : 
    2465                 :            : /**
    2466                 :            :  * insert_wq_barrier - insert a barrier work
    2467                 :            :  * @pwq: pwq to insert barrier into
    2468                 :            :  * @barr: wq_barrier to insert
    2469                 :            :  * @target: target work to attach @barr to
    2470                 :            :  * @worker: worker currently executing @target, NULL if @target is not executing
    2471                 :            :  *
    2472                 :            :  * @barr is linked to @target such that @barr is completed only after
    2473                 :            :  * @target finishes execution.  Please note that the ordering
    2474                 :            :  * guarantee is observed only with respect to @target and on the local
    2475                 :            :  * cpu.
    2476                 :            :  *
    2477                 :            :  * Currently, a queued barrier can't be canceled.  This is because
    2478                 :            :  * try_to_grab_pending() can't determine whether the work to be
     2479                 :            :  * grabbed is at the head of the queue and thus can't clear the LINKED
     2480                 :            :  * flag of the previous work, while there must be a valid next work
     2481                 :            :  * after a work with the LINKED flag set.
    2482                 :            :  *
    2483                 :            :  * Note that when @worker is non-NULL, @target may be modified
    2484                 :            :  * underneath us, so we can't reliably determine pwq from @target.
    2485                 :            :  *
    2486                 :            :  * CONTEXT:
    2487                 :            :  * spin_lock_irq(pool->lock).
    2488                 :            :  */
    2489                 :          0 : static void insert_wq_barrier(struct pool_workqueue *pwq,
    2490                 :            :                               struct wq_barrier *barr,
    2491                 :            :                               struct work_struct *target, struct worker *worker)
    2492                 :            : {
    2493                 :            :         struct list_head *head;
    2494                 :            :         unsigned int linked = 0;
    2495                 :            : 
    2496                 :            :         /*
    2497                 :            :          * debugobject calls are safe here even with pool->lock locked
    2498                 :            :          * as we know for sure that this will not trigger any of the
    2499                 :            :          * checks and call back into the fixup functions where we
    2500                 :            :          * might deadlock.
    2501                 :            :          */
    2502                 :         66 :         INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
    2503                 :            :         __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
    2504                 :            :         init_completion(&barr->done);
    2505                 :            : 
    2506                 :            :         /*
    2507                 :            :          * If @target is currently being executed, schedule the
    2508                 :            :          * barrier to the worker; otherwise, put it after @target.
    2509                 :            :          */
    2510         [ +  + ]:         66 :         if (worker)
    2511                 :          2 :                 head = worker->scheduled.next;
    2512                 :            :         else {
    2513                 :            :                 unsigned long *bits = work_data_bits(target);
    2514                 :            : 
    2515                 :         31 :                 head = target->entry.next;
    2516                 :            :                 /* there can already be other linked works, inherit and set */
    2517                 :         31 :                 linked = *bits & WORK_STRUCT_LINKED;
    2518                 :            :                 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
    2519                 :            :         }
    2520                 :            : 
    2521                 :            :         debug_work_activate(&barr->work);
    2522                 :         33 :         insert_work(pwq, &barr->work, head,
    2523                 :            :                     work_color_to_flags(WORK_NO_COLOR) | linked);
    2524                 :         33 : }
    2525                 :            : 
    2526                 :            : /**
    2527                 :            :  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
    2528                 :            :  * @wq: workqueue being flushed
    2529                 :            :  * @flush_color: new flush color, < 0 for no-op
    2530                 :            :  * @work_color: new work color, < 0 for no-op
    2531                 :            :  *
    2532                 :            :  * Prepare pwqs for workqueue flushing.
    2533                 :            :  *
    2534                 :            :  * If @flush_color is non-negative, flush_color on all pwqs should be
    2535                 :            :  * -1.  If no pwq has in-flight commands at the specified color, all
     2536                 :            :  * pwq->flush_colors stay at -1 and %false is returned.  If any pwq
     2537                 :            :  * has in-flight commands, its pwq->flush_color is set to
    2538                 :            :  * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
    2539                 :            :  * wakeup logic is armed and %true is returned.
    2540                 :            :  *
    2541                 :            :  * The caller should have initialized @wq->first_flusher prior to
    2542                 :            :  * calling this function with non-negative @flush_color.  If
    2543                 :            :  * @flush_color is negative, no flush color update is done and %false
    2544                 :            :  * is returned.
    2545                 :            :  *
    2546                 :            :  * If @work_color is non-negative, all pwqs should have the same
     2547                 :            :  * work_color, which immediately precedes @work_color, and all will be
    2548                 :            :  * advanced to @work_color.
    2549                 :            :  *
    2550                 :            :  * CONTEXT:
    2551                 :            :  * mutex_lock(wq->mutex).
    2552                 :            :  *
    2553                 :            :  * Return:
    2554                 :            :  * %true if @flush_color >= 0 and there's something to flush.  %false
    2555                 :            :  * otherwise.
    2556                 :            :  */
    2557                 :          0 : static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
    2558                 :            :                                       int flush_color, int work_color)
    2559                 :            : {
    2560                 :            :         bool wait = false;
    2561                 :            :         struct pool_workqueue *pwq;
    2562                 :            : 
    2563         [ +  - ]:       5916 :         if (flush_color >= 0) {
    2564 [ -  + ][ #  # ]:       5916 :                 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
                 [ #  # ]
    2565                 :       5916 :                 atomic_set(&wq->nr_pwqs_to_flush, 1);
    2566                 :            :         }
    2567                 :            : 
    2568         [ +  + ]:      11832 :         for_each_pwq(pwq, wq) {
    2569                 :       5916 :                 struct worker_pool *pool = pwq->pool;
    2570                 :            : 
    2571                 :            :                 spin_lock_irq(&pool->lock);
    2572                 :            : 
    2573         [ +  - ]:       5916 :                 if (flush_color >= 0) {
    2574 [ -  + ][ #  # ]:       5916 :                         WARN_ON_ONCE(pwq->flush_color != -1);
                 [ #  # ]
    2575                 :            : 
    2576         [ -  + ]:       5916 :                         if (pwq->nr_in_flight[flush_color]) {
    2577                 :          0 :                                 pwq->flush_color = flush_color;
    2578                 :          0 :                                 atomic_inc(&wq->nr_pwqs_to_flush);
    2579                 :            :                                 wait = true;
    2580                 :            :                         }
    2581                 :            :                 }
    2582                 :            : 
    2583         [ +  - ]:       5916 :                 if (work_color >= 0) {
    2584 [ -  + ][ #  # ]:       5916 :                         WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
                 [ #  # ]
    2585                 :       5916 :                         pwq->work_color = work_color;
    2586                 :            :                 }
    2587                 :            : 
    2588                 :            :                 spin_unlock_irq(&pool->lock);
    2589                 :            :         }
    2590                 :            : 
    2591   [ +  -  +  - ]:      11832 :         if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
    2592                 :       5916 :                 complete(&wq->first_flusher->done);
    2593                 :            : 
    2594                 :       5916 :         return wait;
    2595                 :            : }
    2596                 :            : 
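/*
 * A standalone user-space demonstration, not part of the file being
 * measured, of the flush-color arithmetic the comment above relies on:
 * colors advance modulo WORK_NR_COLORS and eventually wrap, which is why
 * flush_workqueue() below needs an overflow queue when the color space is
 * full.  The constant values mirror include/linux/workqueue.h for this
 * kernel and should be treated as an assumption.
 */
#include <stdio.h>

#define WORK_STRUCT_COLOR_BITS  4
#define WORK_NR_COLORS          ((1 << WORK_STRUCT_COLOR_BITS) - 1)    /* 15 usable colors */
#define WORK_NO_COLOR           WORK_NR_COLORS                         /* reserved for barriers */

/* same arithmetic as work_next_color() in workqueue.c */
static int work_next_color(int color)
{
        return (color + 1) % WORK_NR_COLORS;
}

int main(void)
{
        int color = 0;
        int step;

        /* after WORK_NR_COLORS steps the color wraps back to 0 */
        for (step = 0; step <= WORK_NR_COLORS; step++) {
                printf("step %2d: work color %d\n", step, color);
                color = work_next_color(color);
        }
        return 0;
}
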
    2597                 :            : /**
    2598                 :            :  * flush_workqueue - ensure that any scheduled work has run to completion.
    2599                 :            :  * @wq: workqueue to flush
    2600                 :            :  *
    2601                 :            :  * This function sleeps until all work items which were queued on entry
    2602                 :            :  * have finished execution, but it is not livelocked by new incoming ones.
    2603                 :            :  */
    2604                 :          0 : void flush_workqueue(struct workqueue_struct *wq)
    2605                 :            : {
    2606                 :      11785 :         struct wq_flusher this_flusher = {
    2607                 :            :                 .list = LIST_HEAD_INIT(this_flusher.list),
    2608                 :            :                 .flush_color = -1,
    2609                 :       5894 :                 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
    2610                 :            :         };
    2611                 :            :         int next_color;
    2612                 :            : 
    2613                 :            :         lock_map_acquire(&wq->lockdep_map);
    2614                 :            :         lock_map_release(&wq->lockdep_map);
    2615                 :            : 
    2616                 :       5894 :         mutex_lock(&wq->mutex);
    2617                 :            : 
    2618                 :            :         /*
    2619                 :            :          * Start-to-wait phase
    2620                 :            :          */
    2621                 :       5916 :         next_color = work_next_color(wq->work_color);
    2622                 :            : 
    2623         [ +  - ]:       5916 :         if (next_color != wq->flush_color) {
    2624                 :            :                 /*
    2625                 :            :                  * Color space is not full.  The current work_color
    2626                 :            :                  * becomes our flush_color and work_color is advanced
    2627                 :            :                  * by one.
    2628                 :            :                  */
    2629 [ -  + ][ #  # ]:       5916 :                 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
                 [ #  # ]
    2630                 :       5916 :                 this_flusher.flush_color = wq->work_color;
    2631                 :       5916 :                 wq->work_color = next_color;
    2632                 :            : 
    2633         [ +  - ]:       5916 :                 if (!wq->first_flusher) {
    2634                 :            :                         /* no flush in progress, become the first flusher */
    2635 [ -  + ][ #  # ]:       5916 :                         WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
                 [ #  # ]
    2636                 :            : 
    2637                 :       5916 :                         wq->first_flusher = &this_flusher;
    2638                 :            : 
    2639         [ +  - ]:       5916 :                         if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
    2640                 :            :                                                        wq->work_color)) {
    2641                 :            :                                 /* nothing to flush, done */
    2642                 :       5916 :                                 wq->flush_color = next_color;
    2643                 :       5916 :                                 wq->first_flusher = NULL;
    2644                 :       5916 :                                 goto out_unlock;
    2645                 :            :                         }
    2646                 :            :                 } else {
    2647                 :            :                         /* wait in queue */
    2648 [ #  # ][ #  # ]:          0 :                         WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
                 [ #  # ]
    2649                 :          0 :                         list_add_tail(&this_flusher.list, &wq->flusher_queue);
    2650                 :          0 :                         flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
    2651                 :            :                 }
    2652                 :            :         } else {
    2653                 :            :                 /*
    2654                 :            :                  * Oops, color space is full, wait on overflow queue.
    2655                 :            :                  * The next flush completion will assign us
    2656                 :            :                  * flush_color and transfer to flusher_queue.
    2657                 :            :                  */
    2658                 :          0 :                 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
    2659                 :            :         }
    2660                 :            : 
    2661                 :          0 :         mutex_unlock(&wq->mutex);
    2662                 :            : 
    2663                 :          0 :         wait_for_completion(&this_flusher.done);
    2664                 :            : 
    2665                 :            :         /*
    2666                 :            :          * Wake-up-and-cascade phase
    2667                 :            :          *
    2668                 :            :          * First flushers are responsible for cascading flushes and
    2669                 :            :          * handling overflow.  Non-first flushers can simply return.
    2670                 :            :          */
    2671         [ #  # ]:          0 :         if (wq->first_flusher != &this_flusher)
    2672                 :          0 :                 return;
    2673                 :            : 
    2674                 :          0 :         mutex_lock(&wq->mutex);
    2675                 :            : 
    2676                 :            :         /* we might have raced, check again with mutex held */
    2677         [ #  # ]:          0 :         if (wq->first_flusher != &this_flusher)
    2678                 :            :                 goto out_unlock;
    2679                 :            : 
    2680                 :          0 :         wq->first_flusher = NULL;
    2681                 :            : 
    2682 [ #  # ][ #  # ]:          0 :         WARN_ON_ONCE(!list_empty(&this_flusher.list));
                 [ #  # ]
    2683 [ #  # ][ #  # ]:          0 :         WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
                 [ #  # ]
    2684                 :            : 
    2685                 :            :         while (true) {
    2686                 :            :                 struct wq_flusher *next, *tmp;
    2687                 :            : 
    2688                 :            :                 /* complete all the flushers sharing the current flush color */
    2689         [ #  # ]:          0 :                 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
    2690         [ #  # ]:          0 :                         if (next->flush_color != wq->flush_color)
    2691                 :            :                                 break;
    2692                 :            :                         list_del_init(&next->list);
    2693                 :          0 :                         complete(&next->done);
    2694                 :            :                 }
    2695                 :            : 
    2696 [ #  # ][ #  # ]:          0 :                 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
         [ #  # ][ #  # ]
                 [ #  # ]
    2697                 :            :                              wq->flush_color != work_next_color(wq->work_color));
    2698                 :            : 
    2699                 :            :                 /* this flush_color is finished, advance by one */
    2700                 :          0 :                 wq->flush_color = work_next_color(wq->flush_color);
    2701                 :            : 
    2702                 :            :                 /* one color has been freed, handle overflow queue */
    2703         [ #  # ]:          0 :                 if (!list_empty(&wq->flusher_overflow)) {
    2704                 :            :                         /*
    2705                 :            :                          * Assign the same color to all overflowed
    2706                 :            :                          * flushers, advance work_color and append to
    2707                 :            :                          * flusher_queue.  This is the start-to-wait
    2708                 :            :                          * phase for these overflowed flushers.
    2709                 :            :                          */
    2710         [ #  # ]:          0 :                         list_for_each_entry(tmp, &wq->flusher_overflow, list)
    2711                 :          0 :                                 tmp->flush_color = wq->work_color;
    2712                 :            : 
    2713                 :          0 :                         wq->work_color = work_next_color(wq->work_color);
    2714                 :            : 
    2715                 :            :                         list_splice_tail_init(&wq->flusher_overflow,
    2716                 :            :                                               &wq->flusher_queue);
    2717                 :          0 :                         flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
    2718                 :            :                 }
    2719                 :            : 
    2720         [ #  # ]:          0 :                 if (list_empty(&wq->flusher_queue)) {
    2721 [ #  # ][ #  # ]:          0 :                         WARN_ON_ONCE(wq->flush_color != wq->work_color);
                 [ #  # ]
    2722                 :            :                         break;
    2723                 :            :                 }
    2724                 :            : 
    2725                 :            :                 /*
    2726                 :            :                  * Need to flush more colors.  Make the next flusher
    2727                 :            :                  * the new first flusher and arm pwqs.
    2728                 :            :                  */
    2729 [ #  # ][ #  # ]:          0 :                 WARN_ON_ONCE(wq->flush_color == wq->work_color);
                 [ #  # ]
    2730 [ #  # ][ #  # ]:          0 :                 WARN_ON_ONCE(wq->flush_color != next->flush_color);
                 [ #  # ]
    2731                 :            : 
    2732                 :            :                 list_del_init(&next->list);
    2733                 :          0 :                 wq->first_flusher = next;
    2734                 :            : 
    2735         [ #  # ]:          0 :                 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
    2736                 :            :                         break;
    2737                 :            : 
    2738                 :            :                 /*
    2739                 :            :                  * Meh... this color is already done, clear first
    2740                 :            :                  * flusher and repeat cascading.
    2741                 :            :                  */
    2742                 :          0 :                 wq->first_flusher = NULL;
    2743                 :          0 :         }
    2744                 :            : 
    2745                 :            : out_unlock:
    2746                 :       5916 :         mutex_unlock(&wq->mutex);
    2747                 :            : }
    2748                 :            : EXPORT_SYMBOL_GPL(flush_workqueue);
    2749                 :            : 
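/*
 * A minimal usage sketch, not part of the file being measured: queue a
 * handful of items and use flush_workqueue() to wait for everything that
 * was queued up to that point.  The names my_wq, my_items, my_item_fn and
 * my_queue_and_flush are hypothetical; my_wq is assumed to have been
 * created elsewhere with alloc_workqueue().
 */
#include <linux/workqueue.h>
#include <linux/printk.h>

#define MY_NR_ITEMS     4

static struct workqueue_struct *my_wq;          /* allocated elsewhere */
static struct work_struct my_items[MY_NR_ITEMS];

static void my_item_fn(struct work_struct *work)
{
        pr_info("item %d done\n", (int)(work - my_items));
}

static void my_queue_and_flush(void)
{
        int i;

        for (i = 0; i < MY_NR_ITEMS; i++) {
                INIT_WORK(&my_items[i], my_item_fn);
                queue_work(my_wq, &my_items[i]);
        }

        /* sleeps until every item queued so far has finished executing */
        flush_workqueue(my_wq);
}
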
    2750                 :            : /**
    2751                 :            :  * drain_workqueue - drain a workqueue
    2752                 :            :  * @wq: workqueue to drain
    2753                 :            :  *
    2754                 :            :  * Wait until the workqueue becomes empty.  While draining is in progress,
    2755                 :            :  * only chain queueing is allowed.  IOW, only currently pending or running
    2756                 :            :  * work items on @wq can queue further work items on it.  @wq is flushed
     2757                 :            :  * repeatedly until it becomes empty.  The number of flushes is determined
    2758                 :            :  * by the depth of chaining and should be relatively short.  Whine if it
    2759                 :            :  * takes too long.
    2760                 :            :  */
    2761                 :          0 : void drain_workqueue(struct workqueue_struct *wq)
    2762                 :            : {
    2763                 :            :         unsigned int flush_cnt = 0;
    2764                 :            :         struct pool_workqueue *pwq;
    2765                 :            : 
    2766                 :            :         /*
     2767                 :            :  * __queue_work() needs to test whether there are drainers; it is much
    2768                 :            :          * hotter than drain_workqueue() and already looks at @wq->flags.
    2769                 :            :          * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
    2770                 :            :          */
    2771                 :          0 :         mutex_lock(&wq->mutex);
    2772         [ #  # ]:          0 :         if (!wq->nr_drainers++)
    2773                 :          0 :                 wq->flags |= __WQ_DRAINING;
    2774                 :          0 :         mutex_unlock(&wq->mutex);
    2775                 :            : reflush:
    2776                 :          0 :         flush_workqueue(wq);
    2777                 :            : 
    2778                 :          0 :         mutex_lock(&wq->mutex);
    2779                 :            : 
    2780         [ #  # ]:          0 :         for_each_pwq(pwq, wq) {
    2781                 :            :                 bool drained;
    2782                 :            : 
    2783                 :          0 :                 spin_lock_irq(&pwq->pool->lock);
    2784 [ #  # ][ #  # ]:          0 :                 drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
    2785                 :          0 :                 spin_unlock_irq(&pwq->pool->lock);
    2786                 :            : 
    2787         [ #  # ]:          0 :                 if (drained)
    2788                 :          0 :                         continue;
    2789                 :            : 
    2790 [ #  # ][ #  # ]:          0 :                 if (++flush_cnt == 10 ||
    2791         [ #  # ]:          0 :                     (flush_cnt % 100 == 0 && flush_cnt <= 1000))
    2792                 :          0 :                         pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
    2793                 :            :                                 wq->name, flush_cnt);
    2794                 :            : 
    2795                 :          0 :                 mutex_unlock(&wq->mutex);
    2796                 :          0 :                 goto reflush;
    2797                 :            :         }
    2798                 :            : 
    2799         [ #  # ]:          0 :         if (!--wq->nr_drainers)
    2800                 :          0 :                 wq->flags &= ~__WQ_DRAINING;
    2801                 :          0 :         mutex_unlock(&wq->mutex);
    2802                 :          0 : }
    2803                 :            : EXPORT_SYMBOL_GPL(drain_workqueue);
    2804                 :            : 
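/*
 * A minimal usage sketch, not part of the file being measured: the
 * chain-queueing case drain_workqueue() is documented to handle, where the
 * running work item re-queues itself a bounded number of times.  The names
 * my_wq, my_chain, my_chain_fn, my_hops, my_setup and my_teardown are
 * hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/errno.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_chain;
static atomic_t my_hops = ATOMIC_INIT(3);

static void my_chain_fn(struct work_struct *work)
{
        /* chain queueing: only a pending/running item may queue more work */
        if (atomic_dec_return(&my_hops) > 0)
                queue_work(my_wq, work);
}

static int my_setup(void)
{
        my_wq = alloc_workqueue("my_wq", 0, 0);
        if (!my_wq)
                return -ENOMEM;
        INIT_WORK(&my_chain, my_chain_fn);
        queue_work(my_wq, &my_chain);
        return 0;
}

static void my_teardown(void)
{
        /* waits until the chain has fully unwound and @my_wq is empty */
        drain_workqueue(my_wq);
        destroy_workqueue(my_wq);
}
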
    2805                 :          0 : static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
    2806                 :            : {
    2807                 :            :         struct worker *worker = NULL;
    2808                 :            :         struct worker_pool *pool;
    2809                 :            :         struct pool_workqueue *pwq;
    2810                 :            : 
    2811                 :            :         might_sleep();
    2812                 :            : 
    2813                 :            :         local_irq_disable();
    2814                 :       1944 :         pool = get_work_pool(work);
    2815         [ +  + ]:       1944 :         if (!pool) {
    2816                 :            :                 local_irq_enable();
    2817                 :       1619 :                 return false;
    2818                 :            :         }
    2819                 :            : 
    2820                 :            :         spin_lock(&pool->lock);
    2821                 :            :         /* see the comment in try_to_grab_pending() with the same code */
    2822                 :            :         pwq = get_work_pwq(work);
    2823         [ +  + ]:        325 :         if (pwq) {
    2824         [ +  - ]:         31 :                 if (unlikely(pwq->pool != pool))
    2825                 :            :                         goto already_gone;
    2826                 :            :         } else {
    2827                 :        294 :                 worker = find_worker_executing_work(pool, work);
    2828         [ +  + ]:        294 :                 if (!worker)
    2829                 :            :                         goto already_gone;
    2830                 :          2 :                 pwq = worker->current_pwq;
    2831                 :            :         }
    2832                 :            : 
    2833                 :         33 :         insert_wq_barrier(pwq, barr, work, worker);
    2834                 :            :         spin_unlock_irq(&pool->lock);
    2835                 :            : 
    2836                 :            :         /*
    2837                 :            :          * If @max_active is 1 or rescuer is in use, flushing another work
    2838                 :            :          * item on the same workqueue may lead to deadlock.  Make sure the
    2839                 :            :          * flusher is not running on the same workqueue by verifying write
    2840                 :            :          * access.
    2841                 :            :          */
    2842                 :            :         if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
    2843                 :            :                 lock_map_acquire(&pwq->wq->lockdep_map);
    2844                 :            :         else
    2845                 :            :                 lock_map_acquire_read(&pwq->wq->lockdep_map);
    2846                 :            :         lock_map_release(&pwq->wq->lockdep_map);
    2847                 :            : 
    2848                 :         33 :         return true;
    2849                 :            : already_gone:
    2850                 :            :         spin_unlock_irq(&pool->lock);
    2851                 :        292 :         return false;
    2852                 :            : }
    2853                 :            : 
    2854                 :            : /**
    2855                 :            :  * flush_work - wait for a work to finish executing the last queueing instance
    2856                 :            :  * @work: the work to flush
    2857                 :            :  *
    2858                 :            :  * Wait until @work has finished execution.  @work is guaranteed to be idle
    2859                 :            :  * on return if it hasn't been requeued since flush started.
    2860                 :            :  *
    2861                 :            :  * Return:
    2862                 :            :  * %true if flush_work() waited for the work to finish execution,
    2863                 :            :  * %false if it was already idle.
    2864                 :            :  */
    2865                 :          0 : bool flush_work(struct work_struct *work)
    2866                 :            : {
    2867                 :            :         struct wq_barrier barr;
    2868                 :            : 
    2869                 :            :         lock_map_acquire(&work->lockdep_map);
    2870                 :            :         lock_map_release(&work->lockdep_map);
    2871                 :            : 
    2872         [ +  + ]:       1944 :         if (start_flush_work(work, &barr)) {
    2873                 :         33 :                 wait_for_completion(&barr.done);
    2874                 :            :                 destroy_work_on_stack(&barr.work);
    2875                 :         33 :                 return true;
    2876                 :            :         } else {
    2877                 :            :                 return false;
    2878                 :            :         }
    2879                 :            : }
    2880                 :            : EXPORT_SYMBOL_GPL(flush_work);
    2881                 :            : 
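/*
 * A minimal usage sketch, not part of the file being measured: flush_work()
 * waits only for the last queueing instance of one specific item, which is
 * usually preferable to flushing a whole workqueue.  The struct my_dev and
 * my_dev_quiesce() are hypothetical.
 */
#include <linux/workqueue.h>

struct my_dev {
        struct work_struct      update_work;
        /* ... other driver state ... */
};

static void my_dev_quiesce(struct my_dev *dev)
{
        /*
         * flush_work() returns true if it had to wait and false if the
         * item was already idle; either way update_work is not executing
         * when this returns, unless someone re-queues it concurrently.
         */
        flush_work(&dev->update_work);
}
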
    2882                 :        849 : static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
    2883                 :            : {
    2884                 :            :         unsigned long flags;
    2885                 :            :         int ret;
    2886                 :            : 
    2887                 :            :         do {
    2888                 :        849 :                 ret = try_to_grab_pending(work, is_dwork, &flags);
    2889                 :            :                 /*
    2890                 :            :                  * If someone else is canceling, wait for the same event it
    2891                 :            :                  * would be waiting for before retrying.
    2892                 :            :                  */
    2893         [ -  + ]:        849 :                 if (unlikely(ret == -ENOENT))
    2894                 :          0 :                         flush_work(work);
    2895         [ -  + ]:        849 :         } while (unlikely(ret < 0));
    2896                 :            : 
    2897                 :            :         /* tell other tasks trying to grab @work to back off */
    2898                 :        849 :         mark_work_canceling(work);
    2899         [ -  + ]:        849 :         local_irq_restore(flags);
    2900                 :            : 
    2901                 :        849 :         flush_work(work);
    2902                 :        849 :         clear_work_data(work);
    2903                 :        849 :         return ret;
    2904                 :            : }
    2905                 :            : 
    2906                 :            : /**
    2907                 :            :  * cancel_work_sync - cancel a work and wait for it to finish
    2908                 :            :  * @work: the work to cancel
    2909                 :            :  *
    2910                 :            :  * Cancel @work and wait for its execution to finish.  This function
    2911                 :            :  * can be used even if the work re-queues itself or migrates to
    2912                 :            :  * another workqueue.  On return from this function, @work is
    2913                 :            :  * guaranteed to be not pending or executing on any CPU.
    2914                 :            :  *
    2915                 :            :  * cancel_work_sync(&delayed_work->work) must not be used for
     2916                 :            :  * delayed_work items.  Use cancel_delayed_work_sync() instead.
    2917                 :            :  *
    2918                 :            :  * The caller must ensure that the workqueue on which @work was last
    2919                 :            :  * queued can't be destroyed before this function returns.
    2920                 :            :  *
    2921                 :            :  * Return:
    2922                 :            :  * %true if @work was pending, %false otherwise.
    2923                 :            :  */
    2924                 :          0 : bool cancel_work_sync(struct work_struct *work)
    2925                 :            : {
    2926                 :        573 :         return __cancel_work_timer(work, false);
    2927                 :            : }
    2928                 :            : EXPORT_SYMBOL_GPL(cancel_work_sync);
    2929                 :            : 
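/*
 * A minimal usage sketch, not part of the file being measured: the typical
 * spot for cancel_work_sync() is teardown, where the work item must be
 * neither pending nor running before its backing data is freed.  The struct
 * my_dev and my_dev_remove() are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_dev {
        struct work_struct      irq_work;
        /* ... other driver state ... */
};

static void my_dev_remove(struct my_dev *dev)
{
        /* afterwards irq_work is guaranteed neither pending nor running */
        cancel_work_sync(&dev->irq_work);
        kfree(dev);
}
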
    2930                 :            : /**
    2931                 :            :  * flush_delayed_work - wait for a dwork to finish executing the last queueing
    2932                 :            :  * @dwork: the delayed work to flush
    2933                 :            :  *
    2934                 :            :  * Delayed timer is cancelled and the pending work is queued for
    2935                 :            :  * immediate execution.  Like flush_work(), this function only
    2936                 :            :  * considers the last queueing instance of @dwork.
    2937                 :            :  *
    2938                 :            :  * Return:
    2939                 :            :  * %true if flush_work() waited for the work to finish execution,
    2940                 :            :  * %false if it was already idle.
    2941                 :            :  */
    2942                 :          0 : bool flush_delayed_work(struct delayed_work *dwork)
    2943                 :            : {
    2944                 :            :         local_irq_disable();
    2945         [ #  # ]:          0 :         if (del_timer_sync(&dwork->timer))
    2946                 :          0 :                 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
    2947                 :            :         local_irq_enable();
    2948                 :          0 :         return flush_work(&dwork->work);
    2949                 :            : }
    2950                 :            : EXPORT_SYMBOL(flush_delayed_work);
    2951                 :            : 
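/*
 * A minimal usage sketch, not part of the file being measured: a delayed
 * item whose timer has not fired yet is pulled forward by
 * flush_delayed_work(), which cancels the timer, queues the work
 * immediately and waits for it to finish.  The names my_dwork, my_poll_fn
 * and my_force_poll_now are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void my_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_dwork, my_poll_fn);

static void my_poll_fn(struct work_struct *work)
{
        /* periodic poll: re-arm for one second from now */
        schedule_delayed_work(&my_dwork, msecs_to_jiffies(1000));
}

static void my_force_poll_now(void)
{
        /* run any pending poll right away and wait for it to complete */
        flush_delayed_work(&my_dwork);
}
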
    2952                 :            : /**
    2953                 :            :  * cancel_delayed_work - cancel a delayed work
    2954                 :            :  * @dwork: delayed_work to cancel
    2955                 :            :  *
    2956                 :            :  * Kill off a pending delayed_work.
    2957                 :            :  *
    2958                 :            :  * Return: %true if @dwork was pending and canceled; %false if it wasn't
    2959                 :            :  * pending.
    2960                 :            :  *
    2961                 :            :  * Note:
    2962                 :            :  * The work callback function may still be running on return, unless
    2963                 :            :  * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
    2964                 :            :  * use cancel_delayed_work_sync() to wait on it.
    2965                 :            :  *
    2966                 :            :  * This function is safe to call from any context including IRQ handler.
    2967                 :            :  */
    2968                 :          0 : bool cancel_delayed_work(struct delayed_work *dwork)
    2969                 :            : {
    2970                 :            :         unsigned long flags;
    2971                 :            :         int ret;
    2972                 :            : 
    2973                 :            :         do {
    2974                 :          0 :                 ret = try_to_grab_pending(&dwork->work, true, &flags);
    2975         [ #  # ]:          0 :         } while (unlikely(ret == -EAGAIN));
    2976                 :            : 
    2977         [ #  # ]:          0 :         if (unlikely(ret < 0))
    2978                 :            :                 return false;
    2979                 :            : 
    2980                 :          0 :         set_work_pool_and_clear_pending(&dwork->work,
    2981                 :            :                                         get_work_pool_id(&dwork->work));
    2982         [ #  # ]:          0 :         local_irq_restore(flags);
    2983                 :          0 :         return ret;
    2984                 :            : }
    2985                 :            : EXPORT_SYMBOL(cancel_delayed_work);
    2986                 :            : 
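/*
 * A minimal usage sketch, not part of the file being measured: because
 * cancel_delayed_work() can be called from any context, including IRQ
 * handlers, it suits paths that cannot sleep; as noted above, the callback
 * may still be running when it returns.  The names my_dwork and
 * my_irq_handler are hypothetical; my_dwork is assumed to have been
 * initialized elsewhere with INIT_DELAYED_WORK() and the handler registered
 * elsewhere with request_irq().
 */
#include <linux/workqueue.h>
#include <linux/interrupt.h>

static struct delayed_work my_dwork;

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
        /* non-sleeping cancel of a pending timeout; does not wait */
        cancel_delayed_work(&my_dwork);
        return IRQ_HANDLED;
}
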
    2987                 :            : /**
    2988                 :            :  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
     2989                 :            :  * @dwork: the delayed work to cancel
    2990                 :            :  *
    2991                 :            :  * This is cancel_work_sync() for delayed works.
    2992                 :            :  *
    2993                 :            :  * Return:
    2994                 :            :  * %true if @dwork was pending, %false otherwise.
    2995                 :            :  */
    2996                 :          0 : bool cancel_delayed_work_sync(struct delayed_work *dwork)
    2997                 :            : {
    2998                 :        276 :         return __cancel_work_timer(&dwork->work, true);
    2999                 :            : }
    3000                 :            : EXPORT_SYMBOL(cancel_delayed_work_sync);
    3001                 :            : 
    3002                 :            : /**
    3003                 :            :  * schedule_on_each_cpu - execute a function synchronously on each online CPU
    3004                 :            :  * @func: the function to call
    3005                 :            :  *
    3006                 :            :  * schedule_on_each_cpu() executes @func on each online CPU using the
    3007                 :            :  * system workqueue and blocks until all CPUs have completed.
    3008                 :            :  * schedule_on_each_cpu() is very slow.
    3009                 :            :  *
    3010                 :            :  * Return:
    3011                 :            :  * 0 on success, -errno on failure.
    3012                 :            :  */
    3013                 :          0 : int schedule_on_each_cpu(work_func_t func)
    3014                 :            : {
    3015                 :            :         int cpu;
    3016                 :            :         struct work_struct __percpu *works;
    3017                 :            : 
    3018                 :          0 :         works = alloc_percpu(struct work_struct);
    3019         [ #  # ]:          0 :         if (!works)
    3020                 :            :                 return -ENOMEM;
    3021                 :            : 
    3022                 :          0 :         get_online_cpus();
    3023                 :            : 
    3024         [ #  # ]:          0 :         for_each_online_cpu(cpu) {
    3025                 :          0 :                 struct work_struct *work = per_cpu_ptr(works, cpu);
    3026                 :            : 
    3027                 :          0 :                 INIT_WORK(work, func);
    3028                 :            :                 schedule_work_on(cpu, work);
    3029                 :            :         }
    3030                 :            : 
    3031         [ #  # ]:          0 :         for_each_online_cpu(cpu)
    3032                 :          0 :                 flush_work(per_cpu_ptr(works, cpu));
    3033                 :            : 
    3034                 :          0 :         put_online_cpus();
    3035                 :          0 :         free_percpu(works);
    3036                 :          0 :         return 0;
    3037                 :            : }
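
A hedged usage sketch (hypothetical function names): run a short callback once on every
online CPU and block until all of them have finished, e.g. to drain per-cpu state.

    #include <linux/workqueue.h>
    #include <linux/printk.h>
    #include <linux/smp.h>

    static void drain_local_state(struct work_struct *unused)
    {
            /* runs in process context, bound to one online CPU */
            pr_info("draining on CPU %d\n", raw_smp_processor_id());
    }

    static int drain_all_cpus(void)
    {
            /* may sleep; returns 0 or -ENOMEM from the percpu allocation */
            return schedule_on_each_cpu(drain_local_state);
    }
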
    3038                 :            : 
    3039                 :            : /**
    3040                 :            :  * flush_scheduled_work - ensure that any scheduled work has run to completion.
    3041                 :            :  *
    3042                 :            :  * Forces execution of the kernel-global workqueue and blocks until its
    3043                 :            :  * completion.
    3044                 :            :  *
    3045                 :            :  * Think twice before calling this function!  It's very easy to get into
    3046                 :            :  * trouble if you don't take great care.  Either of the following situations
    3047                 :            :  * will lead to deadlock:
    3048                 :            :  *
    3049                 :            :  *      One of the work items currently on the workqueue needs to acquire
    3050                 :            :  *      a lock held by your code or its caller.
    3051                 :            :  *
    3052                 :            :  *      Your code is running in the context of a work routine.
    3053                 :            :  *
    3054                 :            :  * They will be detected by lockdep when they occur, but the first might not
    3055                 :            :  * occur very often.  It depends on what work items are on the workqueue and
    3056                 :            :  * what locks they need, which you have no control over.
    3057                 :            :  *
    3058                 :            :  * In most situations flushing the entire workqueue is overkill; you merely
    3059                 :            :  * need to know that a particular work item isn't queued and isn't running.
    3060                 :            :  * In such cases you should use cancel_delayed_work_sync() or
    3061                 :            :  * cancel_work_sync() instead.
    3062                 :            :  */
    3063                 :          0 : void flush_scheduled_work(void)
    3064                 :            : {
    3065                 :          0 :         flush_workqueue(system_wq);
    3066                 :          0 : }
    3067                 :            : EXPORT_SYMBOL(flush_scheduled_work);
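
Given the warning above, a sketch of the usually preferable pattern (hypothetical names):
wait on the specific work item you own instead of flushing the whole system workqueue.

    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
            /* ... */
    }
    static DECLARE_WORK(my_work, my_work_fn);

    static void my_driver_remove(void)
    {
            /* only this item needs to be idle, not all of system_wq */
            cancel_work_sync(&my_work);
    }
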
    3068                 :            : 
    3069                 :            : /**
    3070                 :            :  * execute_in_process_context - reliably execute the routine with user context
    3071                 :            :  * @fn:         the function to execute
    3072                 :            :  * @ew:         guaranteed storage for the execute work structure (must
    3073                 :            :  *              be available when the work executes)
    3074                 :            :  *
    3075                 :            :  * Executes the function immediately if process context is available,
    3076                 :            :  * otherwise schedules the function for delayed execution.
    3077                 :            :  *
    3078                 :            :  * Return:      0 - function was executed
    3079                 :            :  *              1 - function was scheduled for execution
    3080                 :            :  */
    3081                 :          0 : int execute_in_process_context(work_func_t fn, struct execute_work *ew)
    3082                 :            : {
    3083         [ #  # ]:          0 :         if (!in_interrupt()) {
    3084                 :          0 :                 fn(&ew->work);
    3085                 :          0 :                 return 0;
    3086                 :            :         }
    3087                 :            : 
    3088                 :          0 :         INIT_WORK(&ew->work, fn);
    3089                 :          0 :         schedule_work(&ew->work);
    3090                 :            : 
    3091                 :          0 :         return 1;
    3092                 :            : }
    3093                 :            : EXPORT_SYMBOL_GPL(execute_in_process_context);
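
A sketch of the intended usage (hypothetical struct, assuming the object may be released
from interrupt context): embed the execute_work storage in the object itself so it is
guaranteed to be available when the deferred work runs.

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct my_obj {
            struct execute_work release_work;
            /* ... payload ... */
    };

    static void my_obj_release(struct work_struct *work)
    {
            struct my_obj *obj =
                    container_of(work, struct my_obj, release_work.work);

            kfree(obj);
    }

    static void my_obj_free(struct my_obj *obj)
    {
            /* frees immediately in process context, defers from interrupts */
            execute_in_process_context(my_obj_release, &obj->release_work);
    }
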
    3094                 :            : 
    3095                 :            : #ifdef CONFIG_SYSFS
    3096                 :            : /*
     3097                 :            :  * Workqueues with the WQ_SYSFS flag set are visible to userland via
    3098                 :            :  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
    3099                 :            :  * following attributes.
    3100                 :            :  *
    3101                 :            :  *  per_cpu     RO bool : whether the workqueue is per-cpu or unbound
    3102                 :            :  *  max_active  RW int  : maximum number of in-flight work items
    3103                 :            :  *
    3104                 :            :  * Unbound workqueues have the following extra attributes.
    3105                 :            :  *
    3106                 :            :  *  id          RO int  : the associated pool ID
    3107                 :            :  *  nice        RW int  : nice value of the workers
    3108                 :            :  *  cpumask     RW mask : bitmask of allowed CPUs for the workers
    3109                 :            :  */
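
For reference, a minimal sketch of how these attributes typically come into existence
(hypothetical workqueue name): creating a workqueue with WQ_SYSFS makes it show up under
/sys/bus/workqueue/devices/.

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *my_wq;

    static int my_wq_init(void)
    {
            /*
             * Appears as /sys/bus/workqueue/devices/my_wq with per_cpu and
             * max_active; being unbound, it also gets pool_ids, nice,
             * cpumask and numa.
             */
            my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_SYSFS, 0);
            return my_wq ? 0 : -ENOMEM;
    }
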
    3110                 :            : struct wq_device {
    3111                 :            :         struct workqueue_struct         *wq;
    3112                 :            :         struct device                   dev;
    3113                 :            : };
    3114                 :            : 
    3115                 :            : static struct workqueue_struct *dev_to_wq(struct device *dev)
    3116                 :            : {
    3117                 :            :         struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
    3118                 :            : 
    3119                 :          0 :         return wq_dev->wq;
    3120                 :            : }
    3121                 :            : 
    3122                 :          0 : static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
    3123                 :            :                             char *buf)
    3124                 :            : {
    3125                 :            :         struct workqueue_struct *wq = dev_to_wq(dev);
    3126                 :            : 
    3127                 :          0 :         return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
    3128                 :            : }
    3129                 :            : static DEVICE_ATTR_RO(per_cpu);
    3130                 :            : 
    3131                 :          0 : static ssize_t max_active_show(struct device *dev,
    3132                 :            :                                struct device_attribute *attr, char *buf)
    3133                 :            : {
    3134                 :            :         struct workqueue_struct *wq = dev_to_wq(dev);
    3135                 :            : 
    3136                 :          0 :         return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
    3137                 :            : }
    3138                 :            : 
    3139                 :          0 : static ssize_t max_active_store(struct device *dev,
    3140                 :            :                                 struct device_attribute *attr, const char *buf,
    3141                 :            :                                 size_t count)
    3142                 :            : {
    3143                 :            :         struct workqueue_struct *wq = dev_to_wq(dev);
    3144                 :            :         int val;
    3145                 :            : 
    3146 [ #  # ][ #  # ]:          0 :         if (sscanf(buf, "%d", &val) != 1 || val <= 0)
    3147                 :            :                 return -EINVAL;
    3148                 :            : 
    3149                 :          0 :         workqueue_set_max_active(wq, val);
    3150                 :          0 :         return count;
    3151                 :            : }
    3152                 :            : static DEVICE_ATTR_RW(max_active);
    3153                 :            : 
    3154                 :            : static struct attribute *wq_sysfs_attrs[] = {
    3155                 :            :         &dev_attr_per_cpu.attr,
    3156                 :            :         &dev_attr_max_active.attr,
    3157                 :            :         NULL,
    3158                 :            : };
    3159                 :            : ATTRIBUTE_GROUPS(wq_sysfs);
    3160                 :            : 
    3161                 :          0 : static ssize_t wq_pool_ids_show(struct device *dev,
    3162                 :            :                                 struct device_attribute *attr, char *buf)
    3163                 :            : {
    3164                 :            :         struct workqueue_struct *wq = dev_to_wq(dev);
    3165                 :            :         const char *delim = "";
    3166                 :            :         int node, written = 0;
    3167                 :            : 
    3168                 :            :         rcu_read_lock_sched();
    3169         [ #  # ]:          0 :         for_each_node(node) {
    3170                 :          0 :                 written += scnprintf(buf + written, PAGE_SIZE - written,
    3171                 :            :                                      "%s%d:%d", delim, node,
    3172                 :          0 :                                      unbound_pwq_by_node(wq, node)->pool->id);
    3173                 :            :                 delim = " ";
    3174                 :            :         }
    3175                 :          0 :         written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
    3176                 :            :         rcu_read_unlock_sched();
    3177                 :            : 
    3178                 :          0 :         return written;
    3179                 :            : }
    3180                 :            : 
    3181                 :          0 : static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
    3182                 :            :                             char *buf)
    3183                 :            : {
    3184                 :            :         struct workqueue_struct *wq = dev_to_wq(dev);
    3185                 :            :         int written;
    3186                 :            : 
    3187                 :          0 :         mutex_lock(&wq->mutex);
    3188                 :          0 :         written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
    3189                 :          0 :         mutex_unlock(&wq->mutex);
    3190                 :            : 
    3191                 :          0 :         return written;
    3192                 :            : }
    3193                 :            : 
    3194                 :            : /* prepare workqueue_attrs for sysfs store operations */
    3195                 :          0 : static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
    3196                 :            : {
    3197                 :            :         struct workqueue_attrs *attrs;
    3198                 :            : 
    3199                 :          0 :         attrs = alloc_workqueue_attrs(GFP_KERNEL);
    3200         [ #  # ]:          0 :         if (!attrs)
    3201                 :            :                 return NULL;
    3202                 :            : 
    3203                 :          0 :         mutex_lock(&wq->mutex);
    3204                 :          0 :         copy_workqueue_attrs(attrs, wq->unbound_attrs);
    3205                 :          0 :         mutex_unlock(&wq->mutex);
    3206                 :          0 :         return attrs;
    3207                 :            : }
    3208                 :            : 
    3209                 :          0 : static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
    3210                 :            :                              const char *buf, size_t count)
    3211                 :            : {
    3212                 :            :         struct workqueue_struct *wq = dev_to_wq(dev);
    3213                 :            :         struct workqueue_attrs *attrs;
    3214                 :            :         int ret;
    3215                 :            : 
    3216                 :          0 :         attrs = wq_sysfs_prep_attrs(wq);
    3217         [ #  # ]:          0 :         if (!attrs)
    3218                 :            :                 return -ENOMEM;
    3219                 :            : 
    3220 [ #  # ][ #  # ]:          0 :         if (sscanf(buf, "%d", &attrs->nice) == 1 &&
    3221         [ #  # ]:          0 :             attrs->nice >= -20 && attrs->nice <= 19)
    3222                 :          0 :                 ret = apply_workqueue_attrs(wq, attrs);
    3223                 :            :         else
    3224                 :            :                 ret = -EINVAL;
    3225                 :            : 
    3226                 :            :         free_workqueue_attrs(attrs);
    3227         [ #  # ]:          0 :         return ret ?: count;
    3228                 :            : }
    3229                 :            : 
    3230                 :          0 : static ssize_t wq_cpumask_show(struct device *dev,
    3231                 :            :                                struct device_attribute *attr, char *buf)
    3232                 :            : {
    3233                 :            :         struct workqueue_struct *wq = dev_to_wq(dev);
    3234                 :            :         int written;
    3235                 :            : 
    3236                 :          0 :         mutex_lock(&wq->mutex);
    3237                 :          0 :         written = cpumask_scnprintf(buf, PAGE_SIZE, wq->unbound_attrs->cpumask);
    3238                 :          0 :         mutex_unlock(&wq->mutex);
    3239                 :            : 
    3240                 :          0 :         written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
    3241                 :          0 :         return written;
    3242                 :            : }
    3243                 :            : 
    3244                 :          0 : static ssize_t wq_cpumask_store(struct device *dev,
    3245                 :            :                                 struct device_attribute *attr,
    3246                 :            :                                 const char *buf, size_t count)
    3247                 :            : {
    3248                 :            :         struct workqueue_struct *wq = dev_to_wq(dev);
    3249                 :            :         struct workqueue_attrs *attrs;
    3250                 :            :         int ret;
    3251                 :            : 
    3252                 :          0 :         attrs = wq_sysfs_prep_attrs(wq);
    3253         [ #  # ]:          0 :         if (!attrs)
    3254                 :            :                 return -ENOMEM;
    3255                 :            : 
    3256                 :            :         ret = cpumask_parse(buf, attrs->cpumask);
    3257         [ #  # ]:          0 :         if (!ret)
    3258                 :          0 :                 ret = apply_workqueue_attrs(wq, attrs);
    3259                 :            : 
    3260                 :            :         free_workqueue_attrs(attrs);
    3261         [ #  # ]:          0 :         return ret ?: count;
    3262                 :            : }
    3263                 :            : 
    3264                 :          0 : static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
    3265                 :            :                             char *buf)
    3266                 :            : {
    3267                 :            :         struct workqueue_struct *wq = dev_to_wq(dev);
    3268                 :            :         int written;
    3269                 :            : 
    3270                 :          0 :         mutex_lock(&wq->mutex);
    3271                 :          0 :         written = scnprintf(buf, PAGE_SIZE, "%d\n",
    3272                 :          0 :                             !wq->unbound_attrs->no_numa);
    3273                 :          0 :         mutex_unlock(&wq->mutex);
    3274                 :            : 
    3275                 :          0 :         return written;
    3276                 :            : }
    3277                 :            : 
    3278                 :          0 : static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
    3279                 :            :                              const char *buf, size_t count)
    3280                 :            : {
    3281                 :            :         struct workqueue_struct *wq = dev_to_wq(dev);
    3282                 :            :         struct workqueue_attrs *attrs;
    3283                 :            :         int v, ret;
    3284                 :            : 
    3285                 :          0 :         attrs = wq_sysfs_prep_attrs(wq);
    3286         [ #  # ]:          0 :         if (!attrs)
    3287                 :            :                 return -ENOMEM;
    3288                 :            : 
    3289                 :            :         ret = -EINVAL;
    3290         [ #  # ]:          0 :         if (sscanf(buf, "%d", &v) == 1) {
    3291                 :          0 :                 attrs->no_numa = !v;
    3292                 :          0 :                 ret = apply_workqueue_attrs(wq, attrs);
    3293                 :            :         }
    3294                 :            : 
    3295                 :            :         free_workqueue_attrs(attrs);
    3296         [ #  # ]:          0 :         return ret ?: count;
    3297                 :            : }
    3298                 :            : 
    3299                 :            : static struct device_attribute wq_sysfs_unbound_attrs[] = {
    3300                 :            :         __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
    3301                 :            :         __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
    3302                 :            :         __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
    3303                 :            :         __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
    3304                 :            :         __ATTR_NULL,
    3305                 :            : };
    3306                 :            : 
    3307                 :            : static struct bus_type wq_subsys = {
    3308                 :            :         .name                           = "workqueue",
    3309                 :            :         .dev_groups                     = wq_sysfs_groups,
    3310                 :            : };
    3311                 :            : 
    3312                 :          0 : static int __init wq_sysfs_init(void)
    3313                 :            : {
    3314                 :          0 :         return subsys_virtual_register(&wq_subsys, NULL);
    3315                 :            : }
    3316                 :            : core_initcall(wq_sysfs_init);
    3317                 :            : 
    3318                 :          0 : static void wq_device_release(struct device *dev)
    3319                 :            : {
    3320                 :          0 :         struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
    3321                 :            : 
    3322                 :          0 :         kfree(wq_dev);
    3323                 :          0 : }
    3324                 :            : 
    3325                 :            : /**
    3326                 :            :  * workqueue_sysfs_register - make a workqueue visible in sysfs
    3327                 :            :  * @wq: the workqueue to register
    3328                 :            :  *
    3329                 :            :  * Expose @wq in sysfs under /sys/bus/workqueue/devices.
    3330                 :            :  * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
    3331                 :            :  * which is the preferred method.
    3332                 :            :  *
     3333                 :            :  * A workqueue user should use this function directly iff it wants to apply
    3334                 :            :  * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
    3335                 :            :  * apply_workqueue_attrs() may race against userland updating the
    3336                 :            :  * attributes.
    3337                 :            :  *
    3338                 :            :  * Return: 0 on success, -errno on failure.
    3339                 :            :  */
    3340                 :          0 : int workqueue_sysfs_register(struct workqueue_struct *wq)
    3341                 :            : {
    3342                 :            :         struct wq_device *wq_dev;
    3343                 :            :         int ret;
    3344                 :            : 
    3345                 :            :         /*
     3346                 :            :          * Adjusting max_active or creating new pwqs by applying
     3347                 :            :          * attributes breaks the ordering guarantee.  Disallow exposing ordered
    3348                 :            :          * workqueues.
    3349                 :            :          */
    3350 [ #  # ][ #  # ]:          0 :         if (WARN_ON(wq->flags & __WQ_ORDERED))
    3351                 :            :                 return -EINVAL;
    3352                 :            : 
    3353                 :          0 :         wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
    3354         [ #  # ]:          0 :         if (!wq_dev)
    3355                 :            :                 return -ENOMEM;
    3356                 :            : 
    3357                 :          0 :         wq_dev->wq = wq;
    3358                 :          0 :         wq_dev->dev.bus = &wq_subsys;
    3359                 :          0 :         wq_dev->dev.init_name = wq->name;
    3360                 :          0 :         wq_dev->dev.release = wq_device_release;
    3361                 :            : 
    3362                 :            :         /*
    3363                 :            :          * unbound_attrs are created separately.  Suppress uevent until
    3364                 :            :          * everything is ready.
    3365                 :            :          */
    3366                 :            :         dev_set_uevent_suppress(&wq_dev->dev, true);
    3367                 :            : 
    3368                 :          0 :         ret = device_register(&wq_dev->dev);
    3369         [ #  # ]:          0 :         if (ret) {
    3370                 :          0 :                 kfree(wq_dev);
    3371                 :          0 :                 wq->wq_dev = NULL;
    3372                 :          0 :                 return ret;
    3373                 :            :         }
    3374                 :            : 
    3375         [ #  # ]:          0 :         if (wq->flags & WQ_UNBOUND) {
    3376                 :            :                 struct device_attribute *attr;
    3377                 :            : 
    3378         [ #  # ]:          0 :                 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
    3379                 :          0 :                         ret = device_create_file(&wq_dev->dev, attr);
    3380         [ #  # ]:          0 :                         if (ret) {
    3381                 :          0 :                                 device_unregister(&wq_dev->dev);
    3382                 :          0 :                                 wq->wq_dev = NULL;
    3383                 :          0 :                                 return ret;
    3384                 :            :                         }
    3385                 :            :                 }
    3386                 :            :         }
    3387                 :            : 
    3388                 :          0 :         kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
    3389                 :          0 :         return 0;
    3390                 :            : }
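
A hedged sketch of the ordering this comment asks for (built-in, non-modular code with
hypothetical names; apply_workqueue_attrs() and workqueue_sysfs_register() may not be
exported to modules in this kernel): apply the attributes first, then expose the workqueue.

    #include <linux/workqueue.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static int my_wq_setup(void)
    {
            struct workqueue_struct *wq;
            struct workqueue_attrs *attrs;
            int ret;

            /* no WQ_SYSFS yet, so userland cannot race with us */
            wq = alloc_workqueue("my_unbound_wq", WQ_UNBOUND, 0);
            if (!wq)
                    return -ENOMEM;

            attrs = alloc_workqueue_attrs(GFP_KERNEL);
            if (!attrs) {
                    ret = -ENOMEM;
                    goto err;
            }
            attrs->nice = -10;
            ret = apply_workqueue_attrs(wq, attrs);
            free_workqueue_attrs(attrs);
            if (ret)
                    goto err;

            /* only now make it visible under /sys/bus/workqueue/devices/ */
            ret = workqueue_sysfs_register(wq);
            if (ret)
                    goto err;
            return 0;
    err:
            destroy_workqueue(wq);
            return ret;
    }
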
    3391                 :            : 
    3392                 :            : /**
    3393                 :            :  * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
    3394                 :            :  * @wq: the workqueue to unregister
    3395                 :            :  *
    3396                 :            :  * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
    3397                 :            :  */
    3398                 :            : static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
    3399                 :            : {
    3400                 :          0 :         struct wq_device *wq_dev = wq->wq_dev;
    3401                 :            : 
    3402         [ #  # ]:          0 :         if (!wq->wq_dev)
    3403                 :            :                 return;
    3404                 :            : 
    3405                 :          0 :         wq->wq_dev = NULL;
    3406                 :          0 :         device_unregister(&wq_dev->dev);
    3407                 :            : }
    3408                 :            : #else   /* CONFIG_SYSFS */
    3409                 :            : static void workqueue_sysfs_unregister(struct workqueue_struct *wq)     { }
    3410                 :            : #endif  /* CONFIG_SYSFS */
    3411                 :            : 
    3412                 :            : /**
    3413                 :            :  * free_workqueue_attrs - free a workqueue_attrs
    3414                 :            :  * @attrs: workqueue_attrs to free
    3415                 :            :  *
    3416                 :            :  * Undo alloc_workqueue_attrs().
    3417                 :            :  */
    3418                 :          0 : void free_workqueue_attrs(struct workqueue_attrs *attrs)
    3419                 :            : {
    3420 [ #  # ][ #  # ]:          0 :         if (attrs) {
                 [ #  # ]
           [ #  #  #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3421                 :            :                 free_cpumask_var(attrs->cpumask);
    3422                 :          0 :                 kfree(attrs);
    3423                 :            :         }
    3424                 :          0 : }
    3425                 :            : 
    3426                 :            : /**
    3427                 :            :  * alloc_workqueue_attrs - allocate a workqueue_attrs
    3428                 :            :  * @gfp_mask: allocation mask to use
    3429                 :            :  *
    3430                 :            :  * Allocate a new workqueue_attrs, initialize with default settings and
    3431                 :            :  * return it.
    3432                 :            :  *
    3433                 :            :  * Return: The allocated new workqueue_attr on success. %NULL on failure.
    3434                 :            :  */
    3435                 :          0 : struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
    3436                 :            : {
    3437                 :            :         struct workqueue_attrs *attrs;
    3438                 :            : 
    3439                 :            :         attrs = kzalloc(sizeof(*attrs), gfp_mask);
    3440         [ #  # ]:          0 :         if (!attrs)
    3441                 :            :                 goto fail;
    3442                 :            :         if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
    3443                 :            :                 goto fail;
    3444                 :            : 
    3445                 :          0 :         cpumask_copy(attrs->cpumask, cpu_possible_mask);
    3446                 :          0 :         return attrs;
    3447                 :            : fail:
    3448                 :            :         free_workqueue_attrs(attrs);
    3449                 :            :         return NULL;
    3450                 :            : }
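
A companion sketch to the allocator above (hypothetical names; as noted earlier, these
helpers may not be exported to modules in this kernel): the usual lifecycle is allocate,
tweak the defaults, apply, free.  Here the workers of an unbound workqueue are confined
to CPU 0 purely as an illustration.

    #include <linux/workqueue.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static int my_pin_wq_to_cpu0(struct workqueue_struct *wq)
    {
            struct workqueue_attrs *attrs;
            int ret;

            attrs = alloc_workqueue_attrs(GFP_KERNEL);
            if (!attrs)
                    return -ENOMEM;

            cpumask_clear(attrs->cpumask);
            cpumask_set_cpu(0, attrs->cpumask);
            ret = apply_workqueue_attrs(wq, attrs);

            free_workqueue_attrs(attrs);
            return ret;
    }
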
    3451                 :            : 
    3452                 :            : static void copy_workqueue_attrs(struct workqueue_attrs *to,
    3453                 :            :                                  const struct workqueue_attrs *from)
    3454                 :            : {
    3455                 :          0 :         to->nice = from->nice;
    3456                 :            :         cpumask_copy(to->cpumask, from->cpumask);
    3457                 :            :         /*
    3458                 :            :          * Unlike hash and equality test, this function doesn't ignore
    3459                 :            :          * ->no_numa as it is used for both pool and wq attrs.  Instead,
    3460                 :            :          * get_unbound_pool() explicitly clears ->no_numa after copying.
    3461                 :            :          */
    3462                 :          0 :         to->no_numa = from->no_numa;
    3463                 :            : }
    3464                 :            : 
     3465                 :            : /* hash value of the content of @attrs */
    3466                 :          0 : static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
    3467                 :            : {
    3468                 :            :         u32 hash = 0;
    3469                 :            : 
    3470                 :          0 :         hash = jhash_1word(attrs->nice, hash);
    3471                 :          0 :         hash = jhash(cpumask_bits(attrs->cpumask),
    3472                 :            :                      BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
    3473                 :          0 :         return hash;
    3474                 :            : }
    3475                 :            : 
    3476                 :            : /* content equality test */
    3477                 :            : static bool wqattrs_equal(const struct workqueue_attrs *a,
    3478                 :            :                           const struct workqueue_attrs *b)
    3479                 :            : {
    3480         [ #  # ]:          0 :         if (a->nice != b->nice)
    3481                 :            :                 return false;
    3482         [ #  # ]:          0 :         if (!cpumask_equal(a->cpumask, b->cpumask))
    3483                 :            :                 return false;
    3484                 :            :         return true;
    3485                 :            : }
    3486                 :            : 
    3487                 :            : /**
    3488                 :            :  * init_worker_pool - initialize a newly zalloc'd worker_pool
    3489                 :            :  * @pool: worker_pool to initialize
    3490                 :            :  *
     3491                 :            :  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
    3492                 :            :  *
    3493                 :            :  * Return: 0 on success, -errno on failure.  Even on failure, all fields
    3494                 :            :  * inside @pool proper are initialized and put_unbound_pool() can be called
    3495                 :            :  * on @pool safely to release it.
    3496                 :            :  */
    3497                 :          0 : static int init_worker_pool(struct worker_pool *pool)
    3498                 :            : {
    3499                 :          0 :         spin_lock_init(&pool->lock);
    3500                 :          0 :         pool->id = -1;
    3501                 :          0 :         pool->cpu = -1;
    3502                 :          0 :         pool->node = NUMA_NO_NODE;
    3503                 :          0 :         pool->flags |= POOL_DISASSOCIATED;
    3504                 :          0 :         INIT_LIST_HEAD(&pool->worklist);
    3505                 :          0 :         INIT_LIST_HEAD(&pool->idle_list);
    3506                 :          0 :         hash_init(pool->busy_hash);
    3507                 :            : 
    3508                 :          0 :         init_timer_deferrable(&pool->idle_timer);
    3509                 :          0 :         pool->idle_timer.function = idle_worker_timeout;
    3510                 :          0 :         pool->idle_timer.data = (unsigned long)pool;
    3511                 :            : 
    3512                 :          0 :         setup_timer(&pool->mayday_timer, pool_mayday_timeout,
    3513                 :            :                     (unsigned long)pool);
    3514                 :            : 
    3515                 :          0 :         mutex_init(&pool->manager_arb);
    3516                 :          0 :         mutex_init(&pool->manager_mutex);
    3517                 :          0 :         idr_init(&pool->worker_idr);
    3518                 :            : 
    3519                 :            :         INIT_HLIST_NODE(&pool->hash_node);
    3520                 :          0 :         pool->refcnt = 1;
    3521                 :            : 
    3522                 :            :         /* shouldn't fail above this point */
    3523                 :          0 :         pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
    3524         [ #  # ]:          0 :         if (!pool->attrs)
    3525                 :            :                 return -ENOMEM;
    3526                 :          0 :         return 0;
    3527                 :            : }
    3528                 :            : 
    3529                 :          0 : static void rcu_free_pool(struct rcu_head *rcu)
    3530                 :            : {
    3531                 :          0 :         struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
    3532                 :            : 
    3533                 :          0 :         idr_destroy(&pool->worker_idr);
    3534                 :          0 :         free_workqueue_attrs(pool->attrs);
    3535                 :          0 :         kfree(pool);
    3536                 :          0 : }
    3537                 :            : 
    3538                 :            : /**
    3539                 :            :  * put_unbound_pool - put a worker_pool
    3540                 :            :  * @pool: worker_pool to put
    3541                 :            :  *
    3542                 :            :  * Put @pool.  If its refcnt reaches zero, it gets destroyed in sched-RCU
    3543                 :            :  * safe manner.  get_unbound_pool() calls this function on its failure path
    3544                 :            :  * and this function should be able to release pools which went through,
    3545                 :            :  * successfully or not, init_worker_pool().
    3546                 :            :  *
    3547                 :            :  * Should be called with wq_pool_mutex held.
    3548                 :            :  */
    3549                 :          0 : static void put_unbound_pool(struct worker_pool *pool)
    3550                 :            : {
    3551                 :            :         struct worker *worker;
    3552                 :            : 
    3553                 :            :         lockdep_assert_held(&wq_pool_mutex);
    3554                 :            : 
    3555         [ #  # ]:          0 :         if (--pool->refcnt)
    3556                 :            :                 return;
    3557                 :            : 
    3558                 :            :         /* sanity checks */
    3559 [ #  # ][ #  # ]:          0 :         if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
                 [ #  # ]
    3560         [ #  # ]:          0 :             WARN_ON(!list_empty(&pool->worklist)))
    3561                 :            :                 return;
    3562                 :            : 
    3563                 :            :         /* release id and unhash */
    3564         [ #  # ]:          0 :         if (pool->id >= 0)
    3565                 :          0 :                 idr_remove(&worker_pool_idr, pool->id);
    3566                 :            :         hash_del(&pool->hash_node);
    3567                 :            : 
    3568                 :            :         /*
    3569                 :            :          * Become the manager and destroy all workers.  Grabbing
    3570                 :            :          * manager_arb prevents @pool's workers from blocking on
    3571                 :            :          * manager_mutex.
    3572                 :            :          */
    3573                 :          0 :         mutex_lock(&pool->manager_arb);
    3574                 :          0 :         mutex_lock(&pool->manager_mutex);
    3575                 :            :         spin_lock_irq(&pool->lock);
    3576                 :            : 
    3577         [ #  # ]:          0 :         while ((worker = first_worker(pool)))
    3578                 :          0 :                 destroy_worker(worker);
    3579 [ #  # ][ #  # ]:          0 :         WARN_ON(pool->nr_workers || pool->nr_idle);
                 [ #  # ]
    3580                 :            : 
    3581                 :            :         spin_unlock_irq(&pool->lock);
    3582                 :          0 :         mutex_unlock(&pool->manager_mutex);
    3583                 :          0 :         mutex_unlock(&pool->manager_arb);
    3584                 :            : 
    3585                 :            :         /* shut down the timers */
    3586                 :          0 :         del_timer_sync(&pool->idle_timer);
    3587                 :          0 :         del_timer_sync(&pool->mayday_timer);
    3588                 :            : 
    3589                 :            :         /* sched-RCU protected to allow dereferences from get_work_pool() */
    3590                 :          0 :         call_rcu_sched(&pool->rcu, rcu_free_pool);
    3591                 :            : }
    3592                 :            : 
    3593                 :            : /**
    3594                 :            :  * get_unbound_pool - get a worker_pool with the specified attributes
    3595                 :            :  * @attrs: the attributes of the worker_pool to get
    3596                 :            :  *
    3597                 :            :  * Obtain a worker_pool which has the same attributes as @attrs, bump the
    3598                 :            :  * reference count and return it.  If there already is a matching
    3599                 :            :  * worker_pool, it will be used; otherwise, this function attempts to
    3600                 :            :  * create a new one.
    3601                 :            :  *
    3602                 :            :  * Should be called with wq_pool_mutex held.
    3603                 :            :  *
    3604                 :            :  * Return: On success, a worker_pool with the same attributes as @attrs.
    3605                 :            :  * On failure, %NULL.
    3606                 :            :  */
    3607                 :          0 : static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
    3608                 :            : {
    3609                 :          0 :         u32 hash = wqattrs_hash(attrs);
    3610                 :            :         struct worker_pool *pool;
    3611                 :            :         int node;
    3612                 :            : 
    3613                 :            :         lockdep_assert_held(&wq_pool_mutex);
    3614                 :            : 
    3615                 :            :         /* do we already have a matching pool? */
    3616 [ #  # ][ #  # ]:          0 :         hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
                 [ #  # ]
    3617         [ #  # ]:          0 :                 if (wqattrs_equal(pool->attrs, attrs)) {
    3618                 :          0 :                         pool->refcnt++;
    3619                 :          0 :                         goto out_unlock;
    3620                 :            :                 }
    3621                 :            :         }
    3622                 :            : 
    3623                 :            :         /* nope, create a new one */
    3624                 :            :         pool = kzalloc(sizeof(*pool), GFP_KERNEL);
    3625 [ #  # ][ #  # ]:          0 :         if (!pool || init_worker_pool(pool) < 0)
    3626                 :            :                 goto fail;
    3627                 :            : 
    3628         [ #  # ]:          0 :         if (workqueue_freezing)
    3629                 :          0 :                 pool->flags |= POOL_FREEZING;
    3630                 :            : 
    3631                 :            :         lockdep_set_subclass(&pool->lock, 1);    /* see put_pwq() */
    3632                 :          0 :         copy_workqueue_attrs(pool->attrs, attrs);
    3633                 :            : 
    3634                 :            :         /*
    3635                 :            :          * no_numa isn't a worker_pool attribute, always clear it.  See
    3636                 :            :          * 'struct workqueue_attrs' comments for detail.
    3637                 :            :          */
    3638                 :          0 :         pool->attrs->no_numa = false;
    3639                 :            : 
    3640                 :            :         /* if cpumask is contained inside a NUMA node, we belong to that node */
    3641         [ #  # ]:          0 :         if (wq_numa_enabled) {
    3642         [ #  # ]:          0 :                 for_each_node(node) {
    3643         [ #  # ]:          0 :                         if (cpumask_subset(pool->attrs->cpumask,
    3644                 :          0 :                                            wq_numa_possible_cpumask[node])) {
    3645                 :          0 :                                 pool->node = node;
    3646                 :          0 :                                 break;
    3647                 :            :                         }
    3648                 :            :                 }
    3649                 :            :         }
    3650                 :            : 
    3651         [ #  # ]:          0 :         if (worker_pool_assign_id(pool) < 0)
    3652                 :            :                 goto fail;
    3653                 :            : 
    3654                 :            :         /* create and start the initial worker */
    3655         [ #  # ]:          0 :         if (create_and_start_worker(pool) < 0)
    3656                 :            :                 goto fail;
    3657                 :            : 
    3658                 :            :         /* install */
    3659                 :          0 :         hash_add(unbound_pool_hash, &pool->hash_node, hash);
    3660                 :            : out_unlock:
    3661                 :          0 :         return pool;
    3662                 :            : fail:
    3663         [ #  # ]:          0 :         if (pool)
    3664                 :          0 :                 put_unbound_pool(pool);
    3665                 :            :         return NULL;
    3666                 :            : }
    3667                 :            : 
    3668                 :          0 : static void rcu_free_pwq(struct rcu_head *rcu)
    3669                 :            : {
    3670                 :          0 :         kmem_cache_free(pwq_cache,
    3671                 :          0 :                         container_of(rcu, struct pool_workqueue, rcu));
    3672                 :          0 : }
    3673                 :            : 
    3674                 :            : /*
    3675                 :            :  * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
    3676                 :            :  * and needs to be destroyed.
    3677                 :            :  */
    3678                 :          0 : static void pwq_unbound_release_workfn(struct work_struct *work)
    3679                 :            : {
    3680                 :            :         struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
    3681                 :            :                                                   unbound_release_work);
    3682                 :          0 :         struct workqueue_struct *wq = pwq->wq;
    3683                 :          0 :         struct worker_pool *pool = pwq->pool;
    3684                 :            :         bool is_last;
    3685                 :            : 
    3686 [ #  # ][ #  # ]:          0 :         if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
         [ #  # ][ #  # ]
    3687                 :          0 :                 return;
    3688                 :            : 
    3689                 :            :         /*
    3690                 :            :          * Unlink @pwq.  Synchronization against wq->mutex isn't strictly
    3691                 :            :          * necessary on release but do it anyway.  It's easier to verify
    3692                 :            :          * and consistent with the linking path.
    3693                 :            :          */
    3694                 :          0 :         mutex_lock(&wq->mutex);
    3695                 :            :         list_del_rcu(&pwq->pwqs_node);
    3696                 :          0 :         is_last = list_empty(&wq->pwqs);
    3697                 :          0 :         mutex_unlock(&wq->mutex);
    3698                 :            : 
    3699                 :          0 :         mutex_lock(&wq_pool_mutex);
    3700                 :          0 :         put_unbound_pool(pool);
    3701                 :          0 :         mutex_unlock(&wq_pool_mutex);
    3702                 :            : 
    3703                 :          0 :         call_rcu_sched(&pwq->rcu, rcu_free_pwq);
    3704                 :            : 
    3705                 :            :         /*
    3706                 :            :          * If we're the last pwq going away, @wq is already dead and no one
    3707                 :            :          * is gonna access it anymore.  Free it.
    3708                 :            :          */
    3709         [ #  # ]:          0 :         if (is_last) {
    3710                 :          0 :                 free_workqueue_attrs(wq->unbound_attrs);
    3711                 :          0 :                 kfree(wq);
    3712                 :            :         }
    3713                 :            : }
    3714                 :            : 
    3715                 :            : /**
    3716                 :            :  * pwq_adjust_max_active - update a pwq's max_active to the current setting
    3717                 :            :  * @pwq: target pool_workqueue
    3718                 :            :  *
    3719                 :            :  * If @pwq isn't freezing, set @pwq->max_active to the associated
    3720                 :            :  * workqueue's saved_max_active and activate delayed work items
    3721                 :            :  * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
    3722                 :            :  */
    3723                 :          0 : static void pwq_adjust_max_active(struct pool_workqueue *pwq)
    3724                 :            : {
    3725                 :          0 :         struct workqueue_struct *wq = pwq->wq;
    3726                 :          0 :         bool freezable = wq->flags & WQ_FREEZABLE;
    3727                 :            : 
    3728                 :            :         /* for @wq->saved_max_active */
    3729                 :            :         lockdep_assert_held(&wq->mutex);
    3730                 :            : 
    3731                 :            :         /* fast exit for non-freezable wqs */
    3732 [ #  # ][ #  # ]:          0 :         if (!freezable && pwq->max_active == wq->saved_max_active)
    3733                 :          0 :                 return;
    3734                 :            : 
    3735                 :          0 :         spin_lock_irq(&pwq->pool->lock);
    3736                 :            : 
    3737 [ #  # ][ #  # ]:          0 :         if (!freezable || !(pwq->pool->flags & POOL_FREEZING)) {
    3738                 :          0 :                 pwq->max_active = wq->saved_max_active;
    3739                 :            : 
    3740 [ #  # ][ #  # ]:          0 :                 while (!list_empty(&pwq->delayed_works) &&
    3741                 :          0 :                        pwq->nr_active < pwq->max_active)
    3742                 :            :                         pwq_activate_first_delayed(pwq);
    3743                 :            : 
    3744                 :            :                 /*
    3745                 :            :                  * Need to kick a worker after thawed or an unbound wq's
    3746                 :            :                  * max_active is bumped.  It's a slow path.  Do it always.
    3747                 :            :                  */
    3748                 :          0 :                 wake_up_worker(pwq->pool);
    3749                 :            :         } else {
    3750                 :          0 :                 pwq->max_active = 0;
    3751                 :            :         }
    3752                 :            : 
    3753                 :          0 :         spin_unlock_irq(&pwq->pool->lock);
    3754                 :            : }
    3755                 :            : 
    3756                 :            : /* initialize newly alloced @pwq which is associated with @wq and @pool */
    3757                 :          0 : static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
    3758                 :            :                      struct worker_pool *pool)
    3759                 :            : {
    3760         [ #  # ]:          0 :         BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
    3761                 :            : 
    3762                 :          0 :         memset(pwq, 0, sizeof(*pwq));
    3763                 :            : 
    3764                 :          0 :         pwq->pool = pool;
    3765                 :          0 :         pwq->wq = wq;
    3766                 :          0 :         pwq->flush_color = -1;
    3767                 :          0 :         pwq->refcnt = 1;
    3768                 :          0 :         INIT_LIST_HEAD(&pwq->delayed_works);
    3769                 :          0 :         INIT_LIST_HEAD(&pwq->pwqs_node);
    3770                 :          0 :         INIT_LIST_HEAD(&pwq->mayday_node);
    3771                 :          0 :         INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
    3772                 :          0 : }
    3773                 :            : 
    3774                 :            : /* sync @pwq with the current state of its associated wq and link it */
    3775                 :          0 : static void link_pwq(struct pool_workqueue *pwq)
    3776                 :            : {
    3777                 :          0 :         struct workqueue_struct *wq = pwq->wq;
    3778                 :            : 
    3779                 :            :         lockdep_assert_held(&wq->mutex);
    3780                 :            : 
    3781                 :            :         /* may be called multiple times, ignore if already linked */
    3782         [ #  # ]:          0 :         if (!list_empty(&pwq->pwqs_node))
    3783                 :          0 :                 return;
    3784                 :            : 
    3785                 :            :         /*
    3786                 :            :          * Set the matching work_color.  This is synchronized with
    3787                 :            :          * wq->mutex to avoid confusing flush_workqueue().
    3788                 :            :          */
    3789                 :          0 :         pwq->work_color = wq->work_color;
    3790                 :            : 
    3791                 :            :         /* sync max_active to the current setting */
    3792                 :          0 :         pwq_adjust_max_active(pwq);
    3793                 :            : 
    3794                 :            :         /* link in @pwq */
    3795                 :          0 :         list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
    3796                 :            : }
    3797                 :            : 
    3798                 :            : /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
    3799                 :          0 : static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
    3800                 :            :                                         const struct workqueue_attrs *attrs)
    3801                 :            : {
    3802                 :            :         struct worker_pool *pool;
    3803                 :            :         struct pool_workqueue *pwq;
    3804                 :            : 
    3805                 :            :         lockdep_assert_held(&wq_pool_mutex);
    3806                 :            : 
    3807                 :          0 :         pool = get_unbound_pool(attrs);
    3808         [ #  # ]:          0 :         if (!pool)
    3809                 :            :                 return NULL;
    3810                 :            : 
    3811                 :          0 :         pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
    3812         [ #  # ]:          0 :         if (!pwq) {
    3813                 :          0 :                 put_unbound_pool(pool);
    3814                 :          0 :                 return NULL;
    3815                 :            :         }
    3816                 :            : 
    3817                 :          0 :         init_pwq(pwq, wq, pool);
    3818                 :          0 :         return pwq;
    3819                 :            : }
    3820                 :            : 
    3821                 :            : /* undo alloc_unbound_pwq(), used only in the error path */
    3822                 :          0 : static void free_unbound_pwq(struct pool_workqueue *pwq)
    3823                 :            : {
    3824                 :            :         lockdep_assert_held(&wq_pool_mutex);
    3825                 :            : 
    3826         [ #  # ]:          0 :         if (pwq) {
    3827                 :          0 :                 put_unbound_pool(pwq->pool);
    3828                 :          0 :                 kmem_cache_free(pwq_cache, pwq);
    3829                 :            :         }
    3830                 :          0 : }
    3831                 :            : 
    3832                 :            : /**
    3833                 :            :  * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
    3834                 :            :  * @attrs: the wq_attrs of interest
    3835                 :            :  * @node: the target NUMA node
    3836                 :            :  * @cpu_going_down: if >= 0, the CPU to consider as offline
    3837                 :            :  * @cpumask: outarg, the resulting cpumask
    3838                 :            :  *
    3839                 :            :  * Calculate the cpumask a workqueue with @attrs should use on @node.  If
    3840                 :            :  * @cpu_going_down is >= 0, that cpu is considered offline during
    3841                 :            :  * calculation.  The result is stored in @cpumask.
    3842                 :            :  *
    3843                 :            :  * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
    3844                 :            :  * enabled and @node has online CPUs requested by @attrs, the returned
    3845                 :            :  * cpumask is the intersection of the possible CPUs of @node and
    3846                 :            :  * @attrs->cpumask.
    3847                 :            :  *
    3848                 :            :  * The caller is responsible for ensuring that the cpumask of @node stays
    3849                 :            :  * stable.
    3850                 :            :  *
    3851                 :            :  * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
    3852                 :            :  * %false if equal.
    3853                 :            :  */
    3854                 :          0 : static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
    3855                 :            :                                  int cpu_going_down, cpumask_t *cpumask)
    3856                 :            : {
    3857 [ #  # ][ #  # ]:          0 :         if (!wq_numa_enabled || attrs->no_numa)
    3858                 :            :                 goto use_dfl;
    3859                 :            : 
    3860                 :            :         /* does @node have any online CPUs @attrs wants? */
    3861                 :          0 :         cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
    3862         [ #  # ]:          0 :         if (cpu_going_down >= 0)
    3863                 :            :                 cpumask_clear_cpu(cpu_going_down, cpumask);
    3864                 :            : 
    3865         [ #  # ]:          0 :         if (cpumask_empty(cpumask))
    3866                 :            :                 goto use_dfl;
    3867                 :            : 
    3868                 :            :         /* yes, return the possible CPUs in @node that @attrs wants */
    3869                 :          0 :         cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
    3870                 :          0 :         return !cpumask_equal(cpumask, attrs->cpumask);
    3871                 :            : 
    3872                 :            : use_dfl:
    3873                 :            :         cpumask_copy(cpumask, attrs->cpumask);
    3874                 :          0 :         return false;
    3875                 :            : }
    3876                 :            : 
    3877                 :            : /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
    3878                 :            : static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
    3879                 :            :                                                    int node,
    3880                 :            :                                                    struct pool_workqueue *pwq)
    3881                 :            : {
    3882                 :            :         struct pool_workqueue *old_pwq;
    3883                 :            : 
    3884                 :            :         lockdep_assert_held(&wq->mutex);
    3885                 :            : 
    3886                 :            :         /* link_pwq() can handle duplicate calls */
    3887                 :          0 :         link_pwq(pwq);
    3888                 :            : 
    3889                 :          0 :         old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
    3890                 :          0 :         rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
    3891                 :            :         return old_pwq;
    3892                 :            : }
    3893                 :            : 
    3894                 :            : /**
    3895                 :            :  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
    3896                 :            :  * @wq: the target workqueue
    3897                 :            :  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
    3898                 :            :  *
    3899                 :            :  * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
    3900                 :            :  * machines, this function maps a separate pwq to each NUMA node with
    3901                 :            :  * possible CPUs in @attrs->cpumask so that work items are affine to the
    3902                 :            :  * NUMA node they were issued on.  Older pwqs are released as in-flight work
    3903                 :            :  * items finish.  Note that a work item which repeatedly requeues itself
    3904                 :            :  * back-to-back will stay on its current pwq.
    3905                 :            :  *
    3906                 :            :  * Performs GFP_KERNEL allocations.
    3907                 :            :  *
    3908                 :            :  * Return: 0 on success and -errno on failure.
    3909                 :            :  */
    3910                 :          0 : int apply_workqueue_attrs(struct workqueue_struct *wq,
    3911                 :            :                           const struct workqueue_attrs *attrs)
    3912                 :            : {
    3913                 :            :         struct workqueue_attrs *new_attrs, *tmp_attrs;
    3914                 :            :         struct pool_workqueue **pwq_tbl, *dfl_pwq;
    3915                 :            :         int node, ret;
    3916                 :            : 
    3917                 :            :         /* only unbound workqueues can change attributes */
    3918 [ #  # ][ #  # ]:          0 :         if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
    3919                 :            :                 return -EINVAL;
    3920                 :            : 
    3921                 :            :         /* creating multiple pwqs breaks ordering guarantee */
    3922 [ #  # ][ #  # ]:          0 :         if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
         [ #  # ][ #  # ]
    3923                 :            :                 return -EINVAL;
    3924                 :            : 
    3925                 :          0 :         pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
    3926                 :          0 :         new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
    3927                 :          0 :         tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
    3928 [ #  # ][ #  # ]:          0 :         if (!pwq_tbl || !new_attrs || !tmp_attrs)
    3929                 :            :                 goto enomem;
    3930                 :            : 
    3931                 :            :         /* make a copy of @attrs and sanitize it */
    3932                 :            :         copy_workqueue_attrs(new_attrs, attrs);
    3933                 :          0 :         cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
    3934                 :            : 
    3935                 :            :         /*
    3936                 :            :          * We may create multiple pwqs with differing cpumasks.  Make a
    3937                 :            :          * copy of @new_attrs which will be modified and used to obtain
    3938                 :            :          * pools.
    3939                 :            :          */
    3940                 :            :         copy_workqueue_attrs(tmp_attrs, new_attrs);
    3941                 :            : 
    3942                 :            :         /*
    3943                 :            :          * CPUs should stay stable across pwq creations and installations.
    3944                 :            :          * Pin CPUs, determine the target cpumask for each node and create
    3945                 :            :          * pwqs accordingly.
    3946                 :            :          */
    3947                 :          0 :         get_online_cpus();
    3948                 :            : 
    3949                 :          0 :         mutex_lock(&wq_pool_mutex);
    3950                 :            : 
    3951                 :            :         /*
    3952                 :            :          * If something goes wrong during CPU up/down, we'll fall back to
    3953                 :            :          * the default pwq covering the whole @attrs->cpumask.  Always create
    3954                 :            :          * it even if we don't use it immediately.
    3955                 :            :          */
    3956                 :          0 :         dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
    3957         [ #  # ]:          0 :         if (!dfl_pwq)
    3958                 :            :                 goto enomem_pwq;
    3959                 :            : 
    3960         [ #  # ]:          0 :         for_each_node(node) {
    3961         [ #  # ]:          0 :                 if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) {
    3962                 :          0 :                         pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
    3963         [ #  # ]:          0 :                         if (!pwq_tbl[node])
    3964                 :            :                                 goto enomem_pwq;
    3965                 :            :                 } else {
    3966                 :          0 :                         dfl_pwq->refcnt++;
    3967                 :          0 :                         pwq_tbl[node] = dfl_pwq;
    3968                 :            :                 }
    3969                 :            :         }
    3970                 :            : 
    3971                 :          0 :         mutex_unlock(&wq_pool_mutex);
    3972                 :            : 
    3973                 :            :         /* all pwqs have been created successfully, let's install them */
    3974                 :          0 :         mutex_lock(&wq->mutex);
    3975                 :            : 
    3976                 :          0 :         copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
    3977                 :            : 
    3978                 :            :         /* save the previous pwq and install the new one */
    3979         [ #  # ]:          0 :         for_each_node(node)
    3980                 :          0 :                 pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]);
    3981                 :            : 
    3982                 :            :         /* @dfl_pwq might not have been used, ensure it's linked */
    3983                 :          0 :         link_pwq(dfl_pwq);
    3984                 :          0 :         swap(wq->dfl_pwq, dfl_pwq);
    3985                 :            : 
    3986                 :          0 :         mutex_unlock(&wq->mutex);
    3987                 :            : 
    3988                 :            :         /* put the old pwqs */
    3989         [ #  # ]:          0 :         for_each_node(node)
    3990                 :          0 :                 put_pwq_unlocked(pwq_tbl[node]);
    3991                 :          0 :         put_pwq_unlocked(dfl_pwq);
    3992                 :            : 
    3993                 :          0 :         put_online_cpus();
    3994                 :            :         ret = 0;
    3995                 :            :         /* fall through */
    3996                 :            : out_free:
    3997                 :            :         free_workqueue_attrs(tmp_attrs);
    3998                 :            :         free_workqueue_attrs(new_attrs);
    3999                 :          0 :         kfree(pwq_tbl);
    4000                 :          0 :         return ret;
    4001                 :            : 
    4002                 :            : enomem_pwq:
    4003                 :          0 :         free_unbound_pwq(dfl_pwq);
    4004         [ #  # ]:          0 :         for_each_node(node)
    4005 [ #  # ][ #  # ]:          0 :                 if (pwq_tbl && pwq_tbl[node] != dfl_pwq)
    4006                 :          0 :                         free_unbound_pwq(pwq_tbl[node]);
    4007                 :          0 :         mutex_unlock(&wq_pool_mutex);
    4008                 :          0 :         put_online_cpus();
    4009                 :            : enomem:
    4010                 :            :         ret = -ENOMEM;
    4011                 :            :         goto out_free;
    4012                 :            : }
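
/*
 * Illustrative sketch (not part of workqueue.c): a hypothetical built-in
 * caller using apply_workqueue_attrs() above to give an unbound workqueue
 * custom attributes.  "example_tune_unbound_wq" and the chosen nice value
 * are assumptions for the example only.
 */
static int example_tune_unbound_wq(struct workqueue_struct *unbound_wq)
{
        struct workqueue_attrs *attrs;
        int ret;

        attrs = alloc_workqueue_attrs(GFP_KERNEL);
        if (!attrs)
                return -ENOMEM;

        attrs->nice = -5;                       /* slightly boost worker priority */
        cpumask_copy(attrs->cpumask, cpu_online_mask);

        ret = apply_workqueue_attrs(unbound_wq, attrs);  /* 0 or -errno */
        free_workqueue_attrs(attrs);
        return ret;
}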
    4013                 :            : 
    4014                 :            : /**
    4015                 :            :  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
    4016                 :            :  * @wq: the target workqueue
    4017                 :            :  * @cpu: the CPU coming up or going down
    4018                 :            :  * @online: whether @cpu is coming up or going down
    4019                 :            :  *
    4020                 :            :  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
    4021                 :            :  * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
    4022                 :            :  * @wq accordingly.
    4023                 :            :  *
    4024                 :            :  * If NUMA affinity can't be adjusted due to memory allocation failure, it
    4025                 :            :  * falls back to @wq->dfl_pwq which may not be optimal but is always
    4026                 :            :  * correct.
    4027                 :            :  *
    4028                 :            :  * Note that when the last allowed CPU of a NUMA node goes offline for a
    4029                 :            :  * workqueue with a cpumask spanning multiple nodes, the workers which were
    4030                 :            :  * already executing the work items for the workqueue will lose their CPU
    4031                 :            :  * affinity and may execute on any CPU.  This is similar to how per-cpu
    4032                 :            :  * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
    4033                 :            :  * affinity, it's the user's responsibility to flush the work item from
    4034                 :            :  * CPU_DOWN_PREPARE.
    4035                 :            :  */
    4036                 :          0 : static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
    4037                 :            :                                    bool online)
    4038                 :            : {
    4039                 :            :         int node = cpu_to_node(cpu);
    4040         [ #  # ]:          0 :         int cpu_off = online ? -1 : cpu;
    4041                 :            :         struct pool_workqueue *old_pwq = NULL, *pwq;
    4042                 :            :         struct workqueue_attrs *target_attrs;
    4043                 :            :         cpumask_t *cpumask;
    4044                 :            : 
    4045                 :            :         lockdep_assert_held(&wq_pool_mutex);
    4046                 :            : 
    4047 [ #  # ][ #  # ]:          0 :         if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND))
    4048                 :          0 :                 return;
    4049                 :            : 
    4050                 :            :         /*
    4051                 :            :          * We don't want to alloc/free wq_attrs for each wq for each CPU.
    4052                 :            :          * Let's use a preallocated one.  The following buf is protected by
    4053                 :            :          * CPU hotplug exclusion.
    4054                 :            :          */
    4055                 :          0 :         target_attrs = wq_update_unbound_numa_attrs_buf;
    4056                 :          0 :         cpumask = target_attrs->cpumask;
    4057                 :            : 
    4058                 :          0 :         mutex_lock(&wq->mutex);
    4059         [ #  # ]:          0 :         if (wq->unbound_attrs->no_numa)
    4060                 :            :                 goto out_unlock;
    4061                 :            : 
    4062                 :            :         copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
    4063                 :            :         pwq = unbound_pwq_by_node(wq, node);
    4064                 :            : 
    4065                 :            :         /*
    4066                 :            :          * Let's determine what needs to be done.  If the target cpumask is
    4067                 :            :          * different from wq's, we need to compare it to @pwq's and create
    4068                 :            :          * a new one if they don't match.  If the target cpumask equals
    4069                 :            :          * wq's, the default pwq should be used.  If @pwq is already the
    4070                 :            :          * default one, nothing to do; otherwise, install the default one.
    4071                 :            :          */
    4072         [ #  # ]:          0 :         if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) {
    4073         [ #  # ]:          0 :                 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
    4074                 :            :                         goto out_unlock;
    4075                 :            :         } else {
    4076         [ #  # ]:          0 :                 if (pwq == wq->dfl_pwq)
    4077                 :            :                         goto out_unlock;
    4078                 :            :                 else
    4079                 :            :                         goto use_dfl_pwq;
    4080                 :            :         }
    4081                 :            : 
    4082                 :          0 :         mutex_unlock(&wq->mutex);
    4083                 :            : 
    4084                 :            :         /* create a new pwq */
    4085                 :          0 :         pwq = alloc_unbound_pwq(wq, target_attrs);
    4086         [ #  # ]:          0 :         if (!pwq) {
    4087                 :          0 :                 pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
    4088                 :            :                            wq->name);
    4089                 :          0 :                 goto out_unlock;
    4090                 :            :         }
    4091                 :            : 
    4092                 :            :         /*
    4093                 :            :          * Install the new pwq.  As this function is called only from CPU
    4094                 :            :          * hotplug callbacks and applying new attrs is wrapped with
    4095                 :            :          * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed
    4096                 :            :          * in between.
    4097                 :            :          */
    4098                 :          0 :         mutex_lock(&wq->mutex);
    4099                 :            :         old_pwq = numa_pwq_tbl_install(wq, node, pwq);
    4100                 :          0 :         goto out_unlock;
    4101                 :            : 
    4102                 :            : use_dfl_pwq:
    4103                 :          0 :         spin_lock_irq(&wq->dfl_pwq->pool->lock);
    4104                 :          0 :         get_pwq(wq->dfl_pwq);
    4105                 :          0 :         spin_unlock_irq(&wq->dfl_pwq->pool->lock);
    4106                 :          0 :         old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
    4107                 :            : out_unlock:
    4108                 :          0 :         mutex_unlock(&wq->mutex);
    4109                 :          0 :         put_pwq_unlocked(old_pwq);
    4110                 :            : }
    4111                 :            : 
    4112                 :          0 : static int alloc_and_link_pwqs(struct workqueue_struct *wq)
    4113                 :            : {
    4114                 :          0 :         bool highpri = wq->flags & WQ_HIGHPRI;
    4115                 :            :         int cpu, ret;
    4116                 :            : 
    4117         [ #  # ]:          0 :         if (!(wq->flags & WQ_UNBOUND)) {
    4118                 :          0 :                 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
    4119         [ #  # ]:          0 :                 if (!wq->cpu_pwqs)
    4120                 :            :                         return -ENOMEM;
    4121                 :            : 
    4122         [ #  # ]:          0 :                 for_each_possible_cpu(cpu) {
    4123                 :            :                         struct pool_workqueue *pwq =
    4124                 :          0 :                                 per_cpu_ptr(wq->cpu_pwqs, cpu);
    4125                 :            :                         struct worker_pool *cpu_pools =
    4126                 :          0 :                                 per_cpu(cpu_worker_pools, cpu);
    4127                 :            : 
    4128                 :          0 :                         init_pwq(pwq, wq, &cpu_pools[highpri]);
    4129                 :            : 
    4130                 :          0 :                         mutex_lock(&wq->mutex);
    4131                 :          0 :                         link_pwq(pwq);
    4132                 :          0 :                         mutex_unlock(&wq->mutex);
    4133                 :            :                 }
    4134                 :            :                 return 0;
    4135         [ #  # ]:          0 :         } else if (wq->flags & __WQ_ORDERED) {
    4136                 :          0 :                 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
    4137                 :            :                 /* there should be only a single pwq for the ordering guarantee */
    4138 [ #  # ][ #  # ]:          0 :                 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
         [ #  # ][ #  # ]
    4139                 :            :                               wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
    4140                 :            :                      "ordering guarantee broken for workqueue %s\n", wq->name);
    4141                 :          0 :                 return ret;
    4142                 :            :         } else {
    4143                 :          0 :                 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
    4144                 :            :         }
    4145                 :            : }
    4146                 :            : 
    4147                 :          0 : static int wq_clamp_max_active(int max_active, unsigned int flags,
    4148                 :            :                                const char *name)
    4149                 :            : {
    4150         [ #  # ]:          0 :         int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
    4151                 :            : 
    4152         [ #  # ]:          0 :         if (max_active < 1 || max_active > lim)
    4153                 :          0 :                 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
    4154                 :            :                         max_active, name, 1, lim);
    4155                 :            : 
    4156                 :          0 :         return clamp_val(max_active, 1, lim);
    4157                 :            : }
    4158                 :            : 
    4159                 :          0 : struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
    4160                 :            :                                                unsigned int flags,
    4161                 :            :                                                int max_active,
    4162                 :            :                                                struct lock_class_key *key,
    4163                 :            :                                                const char *lock_name, ...)
    4164                 :            : {
    4165                 :            :         size_t tbl_size = 0;
    4166                 :            :         va_list args;
    4167                 :            :         struct workqueue_struct *wq;
    4168                 :            :         struct pool_workqueue *pwq;
    4169                 :            : 
    4170                 :            :         /* see the comment above the definition of WQ_POWER_EFFICIENT */
    4171 [ #  # ][ #  # ]:          0 :         if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
    4172                 :          0 :                 flags |= WQ_UNBOUND;
    4173                 :            : 
    4174                 :            :         /* allocate wq and format name */
    4175         [ #  # ]:          0 :         if (flags & WQ_UNBOUND)
    4176                 :          0 :                 tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
    4177                 :            : 
    4178                 :          0 :         wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
    4179         [ #  # ]:          0 :         if (!wq)
    4180                 :            :                 return NULL;
    4181                 :            : 
    4182         [ #  # ]:          0 :         if (flags & WQ_UNBOUND) {
    4183                 :          0 :                 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
    4184         [ #  # ]:          0 :                 if (!wq->unbound_attrs)
    4185                 :            :                         goto err_free_wq;
    4186                 :            :         }
    4187                 :            : 
    4188                 :          0 :         va_start(args, lock_name);
    4189                 :          0 :         vsnprintf(wq->name, sizeof(wq->name), fmt, args);
    4190                 :          0 :         va_end(args);
    4191                 :            : 
    4192         [ #  # ]:          0 :         max_active = max_active ?: WQ_DFL_ACTIVE;
    4193                 :          0 :         max_active = wq_clamp_max_active(max_active, flags, wq->name);
    4194                 :            : 
    4195                 :            :         /* init wq */
    4196                 :          0 :         wq->flags = flags;
    4197                 :          0 :         wq->saved_max_active = max_active;
    4198                 :          0 :         mutex_init(&wq->mutex);
    4199                 :          0 :         atomic_set(&wq->nr_pwqs_to_flush, 0);
    4200                 :          0 :         INIT_LIST_HEAD(&wq->pwqs);
    4201                 :          0 :         INIT_LIST_HEAD(&wq->flusher_queue);
    4202                 :          0 :         INIT_LIST_HEAD(&wq->flusher_overflow);
    4203                 :          0 :         INIT_LIST_HEAD(&wq->maydays);
    4204                 :            : 
    4205                 :            :         lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
    4206                 :          0 :         INIT_LIST_HEAD(&wq->list);
    4207                 :            : 
    4208         [ #  # ]:          0 :         if (alloc_and_link_pwqs(wq) < 0)
    4209                 :            :                 goto err_free_wq;
    4210                 :            : 
    4211                 :            :         /*
    4212                 :            :          * Workqueues which may be used during memory reclaim should
    4213                 :            :          * have a rescuer to guarantee forward progress.
    4214                 :            :          */
    4215         [ #  # ]:          0 :         if (flags & WQ_MEM_RECLAIM) {
    4216                 :            :                 struct worker *rescuer;
    4217                 :            : 
    4218                 :          0 :                 rescuer = alloc_worker();
    4219         [ #  # ]:          0 :                 if (!rescuer)
    4220                 :            :                         goto err_destroy;
    4221                 :            : 
    4222                 :          0 :                 rescuer->rescue_wq = wq;
    4223                 :          0 :                 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
    4224                 :            :                                                wq->name);
    4225         [ #  # ]:          0 :                 if (IS_ERR(rescuer->task)) {
    4226                 :          0 :                         kfree(rescuer);
    4227                 :          0 :                         goto err_destroy;
    4228                 :            :                 }
    4229                 :            : 
    4230                 :          0 :                 wq->rescuer = rescuer;
    4231                 :          0 :                 rescuer->task->flags |= PF_NO_SETAFFINITY;
    4232                 :          0 :                 wake_up_process(rescuer->task);
    4233                 :            :         }
    4234                 :            : 
    4235 [ #  # ][ #  # ]:          0 :         if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
    4236                 :            :                 goto err_destroy;
    4237                 :            : 
    4238                 :            :         /*
    4239                 :            :          * wq_pool_mutex protects global freeze state and workqueues list.
    4240                 :            :          * Grab it, adjust max_active and add the new @wq to workqueues
    4241                 :            :          * list.
    4242                 :            :          */
    4243                 :          0 :         mutex_lock(&wq_pool_mutex);
    4244                 :            : 
    4245                 :          0 :         mutex_lock(&wq->mutex);
    4246         [ #  # ]:          0 :         for_each_pwq(pwq, wq)
    4247                 :          0 :                 pwq_adjust_max_active(pwq);
    4248                 :          0 :         mutex_unlock(&wq->mutex);
    4249                 :            : 
    4250                 :            :         list_add(&wq->list, &workqueues);
    4251                 :            : 
    4252                 :          0 :         mutex_unlock(&wq_pool_mutex);
    4253                 :            : 
    4254                 :          0 :         return wq;
    4255                 :            : 
    4256                 :            : err_free_wq:
    4257                 :          0 :         free_workqueue_attrs(wq->unbound_attrs);
    4258                 :          0 :         kfree(wq);
    4259                 :          0 :         return NULL;
    4260                 :            : err_destroy:
    4261                 :          0 :         destroy_workqueue(wq);
    4262                 :          0 :         return NULL;
    4263                 :            : }
    4264                 :            : EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
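
/*
 * Illustrative sketch (not part of workqueue.c): __alloc_workqueue_key() is
 * normally reached through the alloc_workqueue() wrapper macro.  A
 * hypothetical user creating a workqueue that must make forward progress
 * during memory reclaim (and therefore gets a rescuer) could do:
 */
static struct workqueue_struct *example_wq;     /* hypothetical */

static int __init example_create_wq(void)
{
        /* max_active == 0 selects the default (WQ_DFL_ACTIVE) */
        example_wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
        return example_wq ? 0 : -ENOMEM;
}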
    4265                 :            : 
    4266                 :            : /**
    4267                 :            :  * destroy_workqueue - safely terminate a workqueue
    4268                 :            :  * @wq: target workqueue
    4269                 :            :  *
    4270                 :            :  * Safely destroy a workqueue. All work currently pending will be done first.
    4271                 :            :  */
    4272                 :          0 : void destroy_workqueue(struct workqueue_struct *wq)
    4273                 :            : {
    4274                 :            :         struct pool_workqueue *pwq;
    4275                 :            :         int node;
    4276                 :            : 
    4277                 :            :         /* drain it before proceeding with destruction */
    4278                 :          0 :         drain_workqueue(wq);
    4279                 :            : 
    4280                 :            :         /* sanity checks */
    4281                 :          0 :         mutex_lock(&wq->mutex);
    4282         [ #  # ]:          0 :         for_each_pwq(pwq, wq) {
    4283                 :            :                 int i;
    4284                 :            : 
    4285         [ #  # ]:          0 :                 for (i = 0; i < WORK_NR_COLORS; i++) {
    4286 [ #  # ][ #  # ]:          0 :                         if (WARN_ON(pwq->nr_in_flight[i])) {
    4287                 :          0 :                                 mutex_unlock(&wq->mutex);
    4288                 :          0 :                                 return;
    4289                 :            :                         }
    4290                 :            :                 }
    4291                 :            : 
    4292 [ #  # ][ #  # ]:          0 :                 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
         [ #  # ][ #  # ]
                 [ #  # ]
    4293 [ #  # ][ #  # ]:          0 :                     WARN_ON(pwq->nr_active) ||
    4294         [ #  # ]:          0 :                     WARN_ON(!list_empty(&pwq->delayed_works))) {
    4295                 :          0 :                         mutex_unlock(&wq->mutex);
    4296                 :          0 :                         return;
    4297                 :            :                 }
    4298                 :            :         }
    4299                 :          0 :         mutex_unlock(&wq->mutex);
    4300                 :            : 
    4301                 :            :         /*
    4302                 :            :          * wq list is used to freeze wq, remove from list after
    4303                 :            :          * flushing is complete in case freeze races us.
    4304                 :            :          */
    4305                 :          0 :         mutex_lock(&wq_pool_mutex);
    4306                 :          0 :         list_del_init(&wq->list);
    4307                 :          0 :         mutex_unlock(&wq_pool_mutex);
    4308                 :            : 
    4309                 :            :         workqueue_sysfs_unregister(wq);
    4310                 :            : 
    4311         [ #  # ]:          0 :         if (wq->rescuer) {
    4312                 :          0 :                 kthread_stop(wq->rescuer->task);
    4313                 :          0 :                 kfree(wq->rescuer);
    4314                 :          0 :                 wq->rescuer = NULL;
    4315                 :            :         }
    4316                 :            : 
    4317         [ #  # ]:          0 :         if (!(wq->flags & WQ_UNBOUND)) {
    4318                 :            :                 /*
    4319                 :            :                  * The base ref is never dropped on per-cpu pwqs.  Directly
    4320                 :            :                  * free the pwqs and wq.
    4321                 :            :                  */
    4322                 :          0 :                 free_percpu(wq->cpu_pwqs);
    4323                 :          0 :                 kfree(wq);
    4324                 :            :         } else {
    4325                 :            :                 /*
    4326                 :            :                  * We're the sole accessor of @wq at this point.  Directly
    4327                 :            :                  * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
    4328                 :            :                  * @wq will be freed when the last pwq is released.
    4329                 :            :                  */
    4330         [ #  # ]:          0 :                 for_each_node(node) {
    4331                 :          0 :                         pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
    4332                 :          0 :                         RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
    4333                 :          0 :                         put_pwq_unlocked(pwq);
    4334                 :            :                 }
    4335                 :            : 
    4336                 :            :                 /*
    4337                 :            :                  * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
    4338                 :            :                  * put.  Don't access it afterwards.
    4339                 :            :                  */
    4340                 :          0 :                 pwq = wq->dfl_pwq;
    4341                 :          0 :                 wq->dfl_pwq = NULL;
    4342                 :          0 :                 put_pwq_unlocked(pwq);
    4343                 :            :         }
    4344                 :            : }
    4345                 :            : EXPORT_SYMBOL_GPL(destroy_workqueue);
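
/*
 * Illustrative sketch (not part of workqueue.c): teardown matching the
 * hypothetical example_wq above.  destroy_workqueue() drains pending work
 * itself, so the caller only has to make sure nothing queues new items.
 */
static void example_destroy_wq(void)
{
        if (example_wq) {
                destroy_workqueue(example_wq);  /* drains, then frees */
                example_wq = NULL;
        }
}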
    4346                 :            : 
    4347                 :            : /**
    4348                 :            :  * workqueue_set_max_active - adjust max_active of a workqueue
    4349                 :            :  * @wq: target workqueue
    4350                 :            :  * @max_active: new max_active value.
    4351                 :            :  *
    4352                 :            :  * Set max_active of @wq to @max_active.
    4353                 :            :  *
    4354                 :            :  * CONTEXT:
    4355                 :            :  * Don't call from IRQ context.
    4356                 :            :  */
    4357                 :          0 : void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
    4358                 :            : {
    4359                 :            :         struct pool_workqueue *pwq;
    4360                 :            : 
    4361                 :            :         /* disallow meddling with max_active for ordered workqueues */
    4362 [ #  # ][ #  # ]:          0 :         if (WARN_ON(wq->flags & __WQ_ORDERED))
    4363                 :          0 :                 return;
    4364                 :            : 
    4365                 :          0 :         max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
    4366                 :            : 
    4367                 :          0 :         mutex_lock(&wq->mutex);
    4368                 :            : 
    4369                 :          0 :         wq->saved_max_active = max_active;
    4370                 :            : 
    4371         [ #  # ]:          0 :         for_each_pwq(pwq, wq)
    4372                 :          0 :                 pwq_adjust_max_active(pwq);
    4373                 :            : 
    4374                 :          0 :         mutex_unlock(&wq->mutex);
    4375                 :            : }
    4376                 :            : EXPORT_SYMBOL_GPL(workqueue_set_max_active);
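
/*
 * Illustrative sketch (not part of workqueue.c): a hypothetical throttle
 * built on workqueue_set_max_active() above.  Note that ordered
 * (__WQ_ORDERED) workqueues reject this with a WARN.
 */
static void example_throttle_wq(bool throttle)
{
        workqueue_set_max_active(example_wq, throttle ? 1 : WQ_DFL_ACTIVE);
}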
    4377                 :            : 
    4378                 :            : /**
    4379                 :            :  * current_is_workqueue_rescuer - is %current workqueue rescuer?
    4380                 :            :  *
    4381                 :            :  * Determine whether %current is a workqueue rescuer.  Can be used from
    4382                 :            :  * work functions to determine whether it's being run off the rescuer task.
    4383                 :            :  *
    4384                 :            :  * Return: %true if %current is a workqueue rescuer. %false otherwise.
    4385                 :            :  */
    4386                 :          0 : bool current_is_workqueue_rescuer(void)
    4387                 :            : {
    4388                 :            :         struct worker *worker = current_wq_worker();
    4389                 :            : 
    4390 [ +  - ][ +  - ]:       8778 :         return worker && worker->rescue_wq;
    4391                 :            : }
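
/*
 * Illustrative sketch (not part of workqueue.c): a hypothetical work
 * function that skips optional, allocation-heavy processing when it finds
 * itself running on the rescuer, i.e. under memory pressure.  The
 * example_do_*() helpers are assumptions.
 */
static void example_work_fn(struct work_struct *work)
{
        if (current_is_workqueue_rescuer()) {
                example_do_minimal_work(work);  /* hypothetical reclaim-safe path */
                return;
        }
        example_do_full_work(work);             /* hypothetical normal path */
}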
    4392                 :            : 
    4393                 :            : /**
    4394                 :            :  * workqueue_congested - test whether a workqueue is congested
    4395                 :            :  * @cpu: CPU in question
    4396                 :            :  * @wq: target workqueue
    4397                 :            :  *
    4398                 :            :  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
    4399                 :            :  * no synchronization around this function and the test result is
    4400                 :            :  * unreliable and only useful as advisory hints or for debugging.
    4401                 :            :  *
    4402                 :            :  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
    4403                 :            :  * Note that both per-cpu and unbound workqueues may be associated with
    4404                 :            :  * multiple pool_workqueues which have separate congested states.  A
    4405                 :            :  * workqueue being congested on one CPU doesn't mean the workqueue is also
    4406                 :            :  * congested on other CPUs / NUMA nodes.
    4407                 :            :  *
    4408                 :            :  * Return:
    4409                 :            :  * %true if congested, %false otherwise.
    4410                 :            :  */
    4411                 :          0 : bool workqueue_congested(int cpu, struct workqueue_struct *wq)
    4412                 :            : {
    4413                 :            :         struct pool_workqueue *pwq;
    4414                 :            :         bool ret;
    4415                 :            : 
    4416                 :            :         rcu_read_lock_sched();
    4417                 :            : 
    4418         [ #  # ]:          0 :         if (cpu == WORK_CPU_UNBOUND)
    4419                 :          0 :                 cpu = smp_processor_id();
    4420                 :            : 
    4421         [ #  # ]:          0 :         if (!(wq->flags & WQ_UNBOUND))
    4422                 :          0 :                 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
    4423                 :            :         else
    4424                 :            :                 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
    4425                 :            : 
    4426                 :          0 :         ret = !list_empty(&pwq->delayed_works);
    4427                 :            :         rcu_read_unlock_sched();
    4428                 :            : 
    4429                 :          0 :         return ret;
    4430                 :            : }
    4431                 :            : EXPORT_SYMBOL_GPL(workqueue_congested);
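
/*
 * Illustrative sketch (not part of workqueue.c): using the advisory hint
 * from workqueue_congested() to defer optional background work; example_wq
 * and example_work are assumptions.  The state may change immediately after
 * the check, so this is only a heuristic.
 */
static void example_maybe_queue(struct work_struct *example_work)
{
        if (!workqueue_congested(WORK_CPU_UNBOUND, example_wq))
                queue_work(example_wq, example_work);
}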
    4432                 :            : 
    4433                 :            : /**
    4434                 :            :  * work_busy - test whether a work is currently pending or running
    4435                 :            :  * @work: the work to be tested
    4436                 :            :  *
    4437                 :            :  * Test whether @work is currently pending or running.  There is no
    4438                 :            :  * synchronization around this function and the test result is
    4439                 :            :  * unreliable and only useful as advisory hints or for debugging.
    4440                 :            :  *
    4441                 :            :  * Return:
    4442                 :            :  * OR'd bitmask of WORK_BUSY_* bits.
    4443                 :            :  */
    4444                 :          0 : unsigned int work_busy(struct work_struct *work)
    4445                 :            : {
    4446                 :            :         struct worker_pool *pool;
    4447                 :            :         unsigned long flags;
    4448                 :            :         unsigned int ret = 0;
    4449                 :            : 
    4450         [ #  # ]:          0 :         if (work_pending(work))
    4451                 :            :                 ret |= WORK_BUSY_PENDING;
    4452                 :            : 
    4453                 :            :         local_irq_save(flags);
    4454                 :          0 :         pool = get_work_pool(work);
    4455         [ #  # ]:          0 :         if (pool) {
    4456                 :            :                 spin_lock(&pool->lock);
    4457         [ #  # ]:          0 :                 if (find_worker_executing_work(pool, work))
    4458                 :          0 :                         ret |= WORK_BUSY_RUNNING;
    4459                 :            :                 spin_unlock(&pool->lock);
    4460                 :            :         }
    4461         [ #  # ]:          0 :         local_irq_restore(flags);
    4462                 :            : 
    4463                 :          0 :         return ret;
    4464                 :            : }
    4465                 :            : EXPORT_SYMBOL_GPL(work_busy);
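
/*
 * Illustrative sketch (not part of workqueue.c): work_busy() used purely as
 * a debugging aid, since its answer is only advisory.
 */
static void example_report_work(struct work_struct *work)
{
        unsigned int busy = work_busy(work);

        pr_info("work %p:%s%s\n", work,
                (busy & WORK_BUSY_PENDING) ? " pending" : "",
                (busy & WORK_BUSY_RUNNING) ? " running" : "");
}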
    4466                 :            : 
    4467                 :            : /**
    4468                 :            :  * set_worker_desc - set description for the current work item
    4469                 :            :  * @fmt: printf-style format string
    4470                 :            :  * @...: arguments for the format string
    4471                 :            :  *
    4472                 :            :  * This function can be called by a running work function to describe what
    4473                 :            :  * the work item is about.  If the worker task gets dumped, this
    4474                 :            :  * information will be printed out along with it to help debugging.  The
    4475                 :            :  * description can be at most WORKER_DESC_LEN including the trailing '\0'.
    4476                 :            :  */
    4477                 :          0 : void set_worker_desc(const char *fmt, ...)
    4478                 :            : {
    4479                 :            :         struct worker *worker = current_wq_worker();
    4480                 :            :         va_list args;
    4481                 :            : 
    4482         [ +  - ]:       8778 :         if (worker) {
    4483                 :       4389 :                 va_start(args, fmt);
    4484                 :       4389 :                 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
    4485                 :       4389 :                 va_end(args);
    4486                 :       4389 :                 worker->desc_valid = true;
    4487                 :            :         }
    4488                 :       4389 : }
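
/*
 * Illustrative sketch (not part of workqueue.c): a work function labelling
 * itself with the object it is processing so that a later task dump (see
 * print_worker_info() below) is more informative.  struct example_item and
 * its fields are assumptions.
 */
static void example_item_work_fn(struct work_struct *work)
{
        struct example_item *item = container_of(work, struct example_item, work);

        set_worker_desc("example_item %d", item->id);
        /* ... process item ... */
}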
    4489                 :            : 
    4490                 :            : /**
    4491                 :            :  * print_worker_info - print out worker information and description
    4492                 :            :  * @log_lvl: the log level to use when printing
    4493                 :            :  * @task: target task
    4494                 :            :  *
    4495                 :            :  * If @task is a worker and currently executing a work item, print out the
    4496                 :            :  * name of the workqueue being serviced and worker description set with
    4497                 :            :  * set_worker_desc() by the currently executing work item.
    4498                 :            :  *
    4499                 :            :  * This function can be safely called on any task as long as the
    4500                 :            :  * task_struct itself is accessible.  While safe, this function isn't
    4501                 :            :  * synchronized and may print out mixups or garbage of limited length.
    4502                 :            :  */
    4503                 :          0 : void print_worker_info(const char *log_lvl, struct task_struct *task)
    4504                 :            : {
    4505                 :          3 :         work_func_t *fn = NULL;
    4506                 :          3 :         char name[WQ_NAME_LEN] = { };
    4507                 :          3 :         char desc[WORKER_DESC_LEN] = { };
    4508                 :          3 :         struct pool_workqueue *pwq = NULL;
    4509                 :          3 :         struct workqueue_struct *wq = NULL;
    4510                 :          3 :         bool desc_valid = false;
    4511                 :            :         struct worker *worker;
    4512                 :            : 
    4513         [ -  + ]:          3 :         if (!(task->flags & PF_WQ_WORKER))
    4514                 :          3 :                 return;
    4515                 :            : 
    4516                 :            :         /*
    4517                 :            :          * This function is called without any synchronization and @task
    4518                 :            :          * could be in any state.  Be careful with dereferences.
    4519                 :            :          */
    4520                 :          0 :         worker = probe_kthread_data(task);
    4521                 :            : 
    4522                 :            :         /*
    4523                 :            :          * Carefully copy the associated workqueue's workfn and name.  Keep
    4524                 :            :          * the original last '\0' in case the original contains garbage.
    4525                 :            :          */
    4526                 :          0 :         probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
    4527                 :          0 :         probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
    4528                 :          0 :         probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
    4529                 :          0 :         probe_kernel_read(name, wq->name, sizeof(name) - 1);
    4530                 :            : 
    4531                 :            :         /* copy worker description */
    4532                 :          0 :         probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
    4533         [ #  # ]:          0 :         if (desc_valid)
    4534                 :          0 :                 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
    4535                 :            : 
    4536 [ #  # ][ #  # ]:          0 :         if (fn || name[0] || desc[0]) {
                 [ #  # ]
    4537                 :          0 :                 printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
    4538         [ #  # ]:          0 :                 if (desc[0])
    4539                 :          0 :                         pr_cont(" (%s)", desc);
    4540                 :          0 :                 pr_cont("\n");
    4541                 :            :         }
    4542                 :            : }
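
/*
 * Illustrative sketch (not part of workqueue.c): a hypothetical watchdog
 * dumping workqueue details for a task it suspects is stuck.  For tasks
 * without PF_WQ_WORKER this is a harmless no-op.
 */
static void example_dump_stuck_task(struct task_struct *task)
{
        print_worker_info(KERN_WARNING, task);
}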
    4543                 :            : 
    4544                 :            : /*
    4545                 :            :  * CPU hotplug.
    4546                 :            :  *
    4547                 :            :  * There are two challenges in supporting CPU hotplug.  Firstly, there
    4548                 :            :  * are a lot of assumptions on strong associations among work, pwq and
    4549                 :            :  * pool which make migrating pending and scheduled works very
    4550                 :            :  * difficult to implement without impacting hot paths.  Secondly,
    4551                 :            :  * worker pools serve a mix of short, long and very long running work items,
    4552                 :            :  * making blocked draining impractical.
    4553                 :            :  *
    4554                 :            :  * This is solved by allowing the pools to be disassociated from the CPU
    4555                 :            :  * and run as unbound ones, and by allowing them to be reattached later if
    4556                 :            :  * the CPU comes back online.
    4557                 :            :  */
    4558                 :            : 
    4559                 :          0 : static void wq_unbind_fn(struct work_struct *work)
    4560                 :            : {
    4561                 :          0 :         int cpu = smp_processor_id();
    4562                 :            :         struct worker_pool *pool;
    4563                 :            :         struct worker *worker;
    4564                 :            :         int wi;
    4565                 :            : 
    4566         [ #  # ]:          0 :         for_each_cpu_worker_pool(pool, cpu) {
    4567 [ #  # ][ #  # ]:          0 :                 WARN_ON_ONCE(cpu != smp_processor_id());
                 [ #  # ]
    4568                 :            : 
    4569                 :          0 :                 mutex_lock(&pool->manager_mutex);
    4570                 :            :                 spin_lock_irq(&pool->lock);
    4571                 :            : 
    4572                 :            :                 /*
    4573                 :            :                  * We've blocked all manager operations.  Make all workers
    4574                 :            :                  * unbound and set DISASSOCIATED.  Before this, all workers
    4575                 :            :                  * except for the ones which are still executing works from
    4576                 :            :                  * before the last CPU down must be on the cpu.  After
    4577                 :            :                  * this, they may become diasporas.
    4578                 :            :                  */
    4579         [ #  # ]:          0 :                 for_each_pool_worker(worker, wi, pool)
    4580                 :          0 :                         worker->flags |= WORKER_UNBOUND;
    4581                 :            : 
    4582                 :          0 :                 pool->flags |= POOL_DISASSOCIATED;
    4583                 :            : 
    4584                 :            :                 spin_unlock_irq(&pool->lock);
    4585                 :          0 :                 mutex_unlock(&pool->manager_mutex);
    4586                 :            : 
    4587                 :            :                 /*
    4588                 :            :                  * Call schedule() so that we cross rq->lock and thus can
    4589                 :            :                  * guarantee sched callbacks see the %WORKER_UNBOUND flag.
    4590                 :            :                  * This is necessary as scheduler callbacks may be invoked
    4591                 :            :                  * from other cpus.
    4592                 :            :                  */
    4593                 :          0 :                 schedule();
    4594                 :            : 
    4595                 :            :                 /*
    4596                 :            :                  * Sched callbacks are disabled now.  Zap nr_running.
    4597                 :            :                  * After this, nr_running stays zero and need_more_worker()
    4598                 :            :                  * and keep_working() are always true as long as the
    4599                 :            :                  * worklist is not empty.  This pool now behaves as an
    4600                 :            :                  * unbound (in terms of concurrency management) pool which
     4601                 :            :  * is served by workers tied to the pool.
    4602                 :            :                  */
    4603                 :          0 :                 atomic_set(&pool->nr_running, 0);
    4604                 :            : 
    4605                 :            :                 /*
    4606                 :            :                  * With concurrency management just turned off, a busy
    4607                 :            :                  * worker blocking could lead to lengthy stalls.  Kick off
    4608                 :            :                  * unbound chain execution of currently pending work items.
    4609                 :            :                  */
    4610                 :            :                 spin_lock_irq(&pool->lock);
    4611                 :          0 :                 wake_up_worker(pool);
    4612                 :            :                 spin_unlock_irq(&pool->lock);
    4613                 :            :         }
    4614                 :          0 : }
    4615                 :            : 
    4616                 :            : /**
    4617                 :            :  * rebind_workers - rebind all workers of a pool to the associated CPU
    4618                 :            :  * @pool: pool of interest
    4619                 :            :  *
    4620                 :            :  * @pool->cpu is coming online.  Rebind all workers to the CPU.
    4621                 :            :  */
    4622                 :          0 : static void rebind_workers(struct worker_pool *pool)
    4623                 :            : {
    4624                 :            :         struct worker *worker;
    4625                 :            :         int wi;
    4626                 :            : 
    4627                 :            :         lockdep_assert_held(&pool->manager_mutex);
    4628                 :            : 
    4629                 :            :         /*
    4630                 :            :          * Restore CPU affinity of all workers.  As all idle workers should
    4631                 :            :          * be on the run-queue of the associated CPU before any local
     4632                 :            :  * wake-ups for concurrency management happen, restore CPU affinity
    4633                 :            :          * of all workers first and then clear UNBOUND.  As we're called
    4634                 :            :          * from CPU_ONLINE, the following shouldn't fail.
    4635                 :            :          */
    4636         [ #  # ]:          0 :         for_each_pool_worker(worker, wi, pool)
    4637 [ #  # ][ #  # ]:          0 :                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
                 [ #  # ]
    4638                 :            :                                                   pool->attrs->cpumask) < 0);
    4639                 :            : 
    4640                 :            :         spin_lock_irq(&pool->lock);
    4641                 :            : 
    4642         [ #  # ]:          0 :         for_each_pool_worker(worker, wi, pool) {
    4643                 :          0 :                 unsigned int worker_flags = worker->flags;
    4644                 :            : 
    4645                 :            :                 /*
    4646                 :            :                  * A bound idle worker should actually be on the runqueue
    4647                 :            :                  * of the associated CPU for local wake-ups targeting it to
    4648                 :            :                  * work.  Kick all idle workers so that they migrate to the
    4649                 :            :                  * associated CPU.  Doing this in the same loop as
    4650                 :            :                  * replacing UNBOUND with REBOUND is safe as no worker will
    4651                 :            :                  * be bound before @pool->lock is released.
    4652                 :            :                  */
    4653         [ #  # ]:          0 :                 if (worker_flags & WORKER_IDLE)
    4654                 :          0 :                         wake_up_process(worker->task);
    4655                 :            : 
    4656                 :            :                 /*
    4657                 :            :                  * We want to clear UNBOUND but can't directly call
    4658                 :            :                  * worker_clr_flags() or adjust nr_running.  Atomically
    4659                 :            :                  * replace UNBOUND with another NOT_RUNNING flag REBOUND.
    4660                 :            :                  * @worker will clear REBOUND using worker_clr_flags() when
    4661                 :            :                  * it initiates the next execution cycle thus restoring
    4662                 :            :                  * concurrency management.  Note that when or whether
    4663                 :            :                  * @worker clears REBOUND doesn't affect correctness.
    4664                 :            :                  *
    4665                 :            :                  * ACCESS_ONCE() is necessary because @worker->flags may be
    4666                 :            :                  * tested without holding any lock in
    4667                 :            :                  * wq_worker_waking_up().  Without it, NOT_RUNNING test may
    4668                 :            :                  * fail incorrectly leading to premature concurrency
    4669                 :            :                  * management operations.
    4670                 :            :                  */
    4671 [ #  # ][ #  # ]:          0 :                 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
                 [ #  # ]
    4672                 :            :                 worker_flags |= WORKER_REBOUND;
    4673                 :          0 :                 worker_flags &= ~WORKER_UNBOUND;
    4674                 :          0 :                 ACCESS_ONCE(worker->flags) = worker_flags;
    4675                 :            :         }
    4676                 :            : 
    4677                 :            :         spin_unlock_irq(&pool->lock);
    4678                 :          0 : }
    4679                 :            : 
    4680                 :            : /**
    4681                 :            :  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
    4682                 :            :  * @pool: unbound pool of interest
    4683                 :            :  * @cpu: the CPU which is coming up
    4684                 :            :  *
    4685                 :            :  * An unbound pool may end up with a cpumask which doesn't have any online
     4686                 :            :  * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
    4687                 :            :  * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
    4688                 :            :  * online CPU before, cpus_allowed of all its workers should be restored.
    4689                 :            :  */
    4690                 :          0 : static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
    4691                 :            : {
    4692                 :            :         static cpumask_t cpumask;
    4693                 :            :         struct worker *worker;
    4694                 :            :         int wi;
    4695                 :            : 
    4696                 :            :         lockdep_assert_held(&pool->manager_mutex);
    4697                 :            : 
    4698                 :            :         /* is @cpu allowed for @pool? */
    4699         [ #  # ]:          0 :         if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
    4700                 :          0 :                 return;
    4701                 :            : 
    4702                 :            :         /* is @cpu the only online CPU? */
    4703                 :          0 :         cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
    4704         [ #  # ]:          0 :         if (cpumask_weight(&cpumask) != 1)
    4705                 :            :                 return;
    4706                 :            : 
    4707                 :            :         /* as we're called from CPU_ONLINE, the following shouldn't fail */
    4708         [ #  # ]:          0 :         for_each_pool_worker(worker, wi, pool)
    4709 [ #  # ][ #  # ]:          0 :                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
                 [ #  # ]
    4710                 :            :                                                   pool->attrs->cpumask) < 0);
    4711                 :            : }
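
A minimal sketch, outside of workqueue.c, of the same "is @cpu the first online CPU of this mask?" test used above. The helper name pool_cpu_is_first_online() is hypothetical, and the on-stack cpumask_t is only acceptable for illustration (large NR_CPUS configurations would want a cpumask_var_t instead).

        #include <linux/cpumask.h>

        /* Hypothetical helper mirroring the check in restore_unbound_workers_cpumask(). */
        static bool pool_cpu_is_first_online(const struct cpumask *pool_mask, int cpu)
        {
                cpumask_t tmp;

                if (!cpumask_test_cpu(cpu, pool_mask))
                        return false;                   /* @cpu not allowed for the pool */

                cpumask_and(&tmp, pool_mask, cpu_online_mask);
                return cpumask_weight(&tmp) == 1;       /* @cpu is the only online CPU */
        }
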
    4712                 :            : 
    4713                 :            : /*
    4714                 :            :  * Workqueues should be brought up before normal priority CPU notifiers.
     4715                 :            :  * This will be registered as a high priority CPU notifier.
    4716                 :            :  */
    4717                 :          0 : static int workqueue_cpu_up_callback(struct notifier_block *nfb,
    4718                 :            :                                                unsigned long action,
    4719                 :            :                                                void *hcpu)
    4720                 :            : {
    4721                 :          0 :         int cpu = (unsigned long)hcpu;
    4722                 :            :         struct worker_pool *pool;
    4723                 :            :         struct workqueue_struct *wq;
    4724                 :            :         int pi;
    4725                 :            : 
    4726      [ #  #  # ]:          0 :         switch (action & ~CPU_TASKS_FROZEN) {
    4727                 :            :         case CPU_UP_PREPARE:
    4728         [ #  # ]:          0 :                 for_each_cpu_worker_pool(pool, cpu) {
    4729         [ #  # ]:          0 :                         if (pool->nr_workers)
    4730                 :          0 :                                 continue;
    4731         [ #  # ]:          0 :                         if (create_and_start_worker(pool) < 0)
    4732                 :            :                                 return NOTIFY_BAD;
    4733                 :            :                 }
    4734                 :            :                 break;
    4735                 :            : 
    4736                 :            :         case CPU_DOWN_FAILED:
    4737                 :            :         case CPU_ONLINE:
    4738                 :          0 :                 mutex_lock(&wq_pool_mutex);
    4739                 :            : 
    4740         [ #  # ]:          0 :                 for_each_pool(pool, pi) {
    4741                 :          0 :                         mutex_lock(&pool->manager_mutex);
    4742                 :            : 
    4743         [ #  # ]:          0 :                         if (pool->cpu == cpu) {
    4744                 :            :                                 spin_lock_irq(&pool->lock);
    4745                 :          0 :                                 pool->flags &= ~POOL_DISASSOCIATED;
    4746                 :            :                                 spin_unlock_irq(&pool->lock);
    4747                 :            : 
    4748                 :          0 :                                 rebind_workers(pool);
    4749         [ #  # ]:          0 :                         } else if (pool->cpu < 0) {
    4750                 :          0 :                                 restore_unbound_workers_cpumask(pool, cpu);
    4751                 :            :                         }
    4752                 :            : 
    4753                 :          0 :                         mutex_unlock(&pool->manager_mutex);
    4754                 :            :                 }
    4755                 :            : 
    4756                 :            :                 /* update NUMA affinity of unbound workqueues */
    4757         [ #  # ]:          0 :                 list_for_each_entry(wq, &workqueues, list)
    4758                 :          0 :                         wq_update_unbound_numa(wq, cpu, true);
    4759                 :            : 
    4760                 :          0 :                 mutex_unlock(&wq_pool_mutex);
    4761                 :          0 :                 break;
    4762                 :            :         }
    4763                 :            :         return NOTIFY_OK;
    4764                 :            : }
    4765                 :            : 
    4766                 :            : /*
    4767                 :            :  * Workqueues should be brought down after normal priority CPU notifiers.
     4768                 :            :  * This will be registered as a low priority CPU notifier.
    4769                 :            :  */
    4770                 :          0 : static int workqueue_cpu_down_callback(struct notifier_block *nfb,
    4771                 :            :                                                  unsigned long action,
    4772                 :            :                                                  void *hcpu)
    4773                 :            : {
    4774                 :          0 :         int cpu = (unsigned long)hcpu;
    4775                 :            :         struct work_struct unbind_work;
    4776                 :            :         struct workqueue_struct *wq;
    4777                 :            : 
    4778         [ #  # ]:          0 :         switch (action & ~CPU_TASKS_FROZEN) {
    4779                 :            :         case CPU_DOWN_PREPARE:
    4780                 :            :                 /* unbinding per-cpu workers should happen on the local CPU */
    4781                 :          0 :                 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
    4782                 :          0 :                 queue_work_on(cpu, system_highpri_wq, &unbind_work);
    4783                 :            : 
    4784                 :            :                 /* update NUMA affinity of unbound workqueues */
    4785                 :          0 :                 mutex_lock(&wq_pool_mutex);
    4786         [ #  # ]:          0 :                 list_for_each_entry(wq, &workqueues, list)
    4787                 :          0 :                         wq_update_unbound_numa(wq, cpu, false);
    4788                 :          0 :                 mutex_unlock(&wq_pool_mutex);
    4789                 :            : 
    4790                 :            :                 /* wait for per-cpu unbinding to finish */
    4791                 :          0 :                 flush_work(&unbind_work);
    4792                 :          0 :                 break;
    4793                 :            :         }
    4794                 :          0 :         return NOTIFY_OK;
    4795                 :            : }
    4796                 :            : 
    4797                 :            : #ifdef CONFIG_SMP
    4798                 :            : 
    4799                 :            : struct work_for_cpu {
    4800                 :            :         struct work_struct work;
    4801                 :            :         long (*fn)(void *);
    4802                 :            :         void *arg;
    4803                 :            :         long ret;
    4804                 :            : };
    4805                 :            : 
    4806                 :          0 : static void work_for_cpu_fn(struct work_struct *work)
    4807                 :            : {
    4808                 :            :         struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
    4809                 :            : 
    4810                 :          0 :         wfc->ret = wfc->fn(wfc->arg);
    4811                 :          0 : }
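
The same embedding-plus-container_of pattern, sketched for a caller-defined work item; struct demo_work and demo_handler are hypothetical names used only for illustration.

        #include <linux/kernel.h>
        #include <linux/workqueue.h>

        struct demo_work {
                struct work_struct work;        /* must be embedded, not a pointer */
                int payload;
        };

        static void demo_handler(struct work_struct *work)
        {
                /* Recover the wrapper from the embedded work_struct. */
                struct demo_work *dw = container_of(work, struct demo_work, work);

                pr_info("demo payload=%d\n", dw->payload);
        }
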
    4812                 :            : 
    4813                 :            : /**
    4814                 :            :  * work_on_cpu - run a function in user context on a particular cpu
    4815                 :            :  * @cpu: the cpu to run on
    4816                 :            :  * @fn: the function to run
    4817                 :            :  * @arg: the function arg
    4818                 :            :  *
    4819                 :            :  * It is up to the caller to ensure that the cpu doesn't go offline.
    4820                 :            :  * The caller must not hold any locks which would prevent @fn from completing.
    4821                 :            :  *
    4822                 :            :  * Return: The value @fn returns.
    4823                 :            :  */
    4824                 :          0 : long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
    4825                 :            : {
    4826                 :          0 :         struct work_for_cpu wfc = { .fn = fn, .arg = arg };
    4827                 :            : 
    4828                 :          0 :         INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
    4829                 :            :         schedule_work_on(cpu, &wfc.work);
    4830                 :          0 :         flush_work(&wfc.work);
    4831                 :          0 :         return wfc.ret;
    4832                 :            : }
    4833                 :            : EXPORT_SYMBOL_GPL(work_on_cpu);
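
A hedged usage sketch for work_on_cpu(): the function read_node_id(), the wrapper demo_query_cpu2() and the choice of CPU 2 are assumptions. The caller is responsible for keeping that CPU online, here done with get_online_cpus()/put_online_cpus().

        #include <linux/cpu.h>
        #include <linux/topology.h>
        #include <linux/workqueue.h>

        /* Hypothetical @fn: report which NUMA node the chosen CPU sits on. */
        static long read_node_id(void *unused)
        {
                return numa_node_id();
        }

        static long demo_query_cpu2(void)
        {
                long nid;

                get_online_cpus();              /* keep CPU 2 from going offline */
                nid = work_on_cpu(2, read_node_id, NULL);
                put_online_cpus();
                return nid;
        }
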
    4834                 :            : #endif /* CONFIG_SMP */
    4835                 :            : 
    4836                 :            : #ifdef CONFIG_FREEZER
    4837                 :            : 
    4838                 :            : /**
    4839                 :            :  * freeze_workqueues_begin - begin freezing workqueues
    4840                 :            :  *
    4841                 :            :  * Start freezing workqueues.  After this function returns, all freezable
    4842                 :            :  * workqueues will queue new works to their delayed_works list instead of
    4843                 :            :  * pool->worklist.
    4844                 :            :  *
    4845                 :            :  * CONTEXT:
    4846                 :            :  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
    4847                 :            :  */
    4848                 :          0 : void freeze_workqueues_begin(void)
    4849                 :            : {
    4850                 :            :         struct worker_pool *pool;
    4851                 :            :         struct workqueue_struct *wq;
    4852                 :            :         struct pool_workqueue *pwq;
    4853                 :            :         int pi;
    4854                 :            : 
    4855                 :          0 :         mutex_lock(&wq_pool_mutex);
    4856                 :            : 
    4857 [ #  # ][ #  # ]:          0 :         WARN_ON_ONCE(workqueue_freezing);
                 [ #  # ]
    4858                 :          0 :         workqueue_freezing = true;
    4859                 :            : 
    4860                 :            :         /* set FREEZING */
    4861         [ #  # ]:          0 :         for_each_pool(pool, pi) {
    4862                 :            :                 spin_lock_irq(&pool->lock);
    4863 [ #  # ][ #  # ]:          0 :                 WARN_ON_ONCE(pool->flags & POOL_FREEZING);
                 [ #  # ]
    4864                 :          0 :                 pool->flags |= POOL_FREEZING;
    4865                 :            :                 spin_unlock_irq(&pool->lock);
    4866                 :            :         }
    4867                 :            : 
    4868         [ #  # ]:          0 :         list_for_each_entry(wq, &workqueues, list) {
    4869                 :          0 :                 mutex_lock(&wq->mutex);
    4870         [ #  # ]:          0 :                 for_each_pwq(pwq, wq)
    4871                 :          0 :                         pwq_adjust_max_active(pwq);
    4872                 :          0 :                 mutex_unlock(&wq->mutex);
    4873                 :            :         }
    4874                 :            : 
    4875                 :          0 :         mutex_unlock(&wq_pool_mutex);
    4876                 :          0 : }
    4877                 :            : 
    4878                 :            : /**
    4879                 :            :  * freeze_workqueues_busy - are freezable workqueues still busy?
    4880                 :            :  *
    4881                 :            :  * Check whether freezing is complete.  This function must be called
    4882                 :            :  * between freeze_workqueues_begin() and thaw_workqueues().
    4883                 :            :  *
    4884                 :            :  * CONTEXT:
    4885                 :            :  * Grabs and releases wq_pool_mutex.
    4886                 :            :  *
    4887                 :            :  * Return:
    4888                 :            :  * %true if some freezable workqueues are still busy.  %false if freezing
    4889                 :            :  * is complete.
    4890                 :            :  */
    4891                 :          0 : bool freeze_workqueues_busy(void)
    4892                 :            : {
    4893                 :            :         bool busy = false;
    4894                 :            :         struct workqueue_struct *wq;
    4895                 :            :         struct pool_workqueue *pwq;
    4896                 :            : 
    4897                 :          0 :         mutex_lock(&wq_pool_mutex);
    4898                 :            : 
    4899 [ #  # ][ #  # ]:          0 :         WARN_ON_ONCE(!workqueue_freezing);
                 [ #  # ]
    4900                 :            : 
    4901         [ #  # ]:          0 :         list_for_each_entry(wq, &workqueues, list) {
    4902         [ #  # ]:          0 :                 if (!(wq->flags & WQ_FREEZABLE))
    4903                 :          0 :                         continue;
    4904                 :            :                 /*
    4905                 :            :                  * nr_active is monotonically decreasing.  It's safe
    4906                 :            :                  * to peek without lock.
    4907                 :            :                  */
    4908                 :            :                 rcu_read_lock_sched();
    4909         [ #  # ]:          0 :                 for_each_pwq(pwq, wq) {
    4910 [ #  # ][ #  # ]:          0 :                         WARN_ON_ONCE(pwq->nr_active < 0);
                 [ #  # ]
    4911         [ #  # ]:          0 :                         if (pwq->nr_active) {
    4912                 :            :                                 busy = true;
    4913                 :            :                                 rcu_read_unlock_sched();
    4914                 :            :                                 goto out_unlock;
    4915                 :            :                         }
    4916                 :            :                 }
    4917                 :            :                 rcu_read_unlock_sched();
    4918                 :            :         }
    4919                 :            : out_unlock:
    4920                 :          0 :         mutex_unlock(&wq_pool_mutex);
    4921                 :          0 :         return busy;
    4922                 :            : }
    4923                 :            : 
    4924                 :            : /**
    4925                 :            :  * thaw_workqueues - thaw workqueues
    4926                 :            :  *
    4927                 :            :  * Thaw workqueues.  Normal queueing is restored and all collected
    4928                 :            :  * frozen works are transferred to their respective pool worklists.
    4929                 :            :  *
    4930                 :            :  * CONTEXT:
    4931                 :            :  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
    4932                 :            :  */
    4933                 :          0 : void thaw_workqueues(void)
    4934                 :            : {
    4935                 :            :         struct workqueue_struct *wq;
    4936                 :            :         struct pool_workqueue *pwq;
    4937                 :            :         struct worker_pool *pool;
    4938                 :            :         int pi;
    4939                 :            : 
    4940                 :          0 :         mutex_lock(&wq_pool_mutex);
    4941                 :            : 
    4942         [ #  # ]:          0 :         if (!workqueue_freezing)
    4943                 :            :                 goto out_unlock;
    4944                 :            : 
    4945                 :            :         /* clear FREEZING */
    4946         [ #  # ]:          0 :         for_each_pool(pool, pi) {
    4947                 :            :                 spin_lock_irq(&pool->lock);
    4948 [ #  # ][ #  # ]:          0 :                 WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
                 [ #  # ]
    4949                 :          0 :                 pool->flags &= ~POOL_FREEZING;
    4950                 :            :                 spin_unlock_irq(&pool->lock);
    4951                 :            :         }
    4952                 :            : 
    4953                 :            :         /* restore max_active and repopulate worklist */
    4954         [ #  # ]:          0 :         list_for_each_entry(wq, &workqueues, list) {
    4955                 :          0 :                 mutex_lock(&wq->mutex);
    4956         [ #  # ]:          0 :                 for_each_pwq(pwq, wq)
    4957                 :          0 :                         pwq_adjust_max_active(pwq);
    4958                 :          0 :                 mutex_unlock(&wq->mutex);
    4959                 :            :         }
    4960                 :            : 
    4961                 :          0 :         workqueue_freezing = false;
    4962                 :            : out_unlock:
    4963                 :          0 :         mutex_unlock(&wq_pool_mutex);
    4964                 :          0 : }
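
A hedged sketch of the intended calling sequence for the three freezer functions above (in the real kernel the caller is the suspend/hibernation freezer, not driver code); demo_freeze_cycle() and the 10 ms poll interval are assumptions.

        #include <linux/delay.h>
        #include <linux/workqueue.h>

        static void demo_freeze_cycle(void)
        {
                freeze_workqueues_begin();      /* new work goes to delayed_works */

                while (freeze_workqueues_busy())        /* wait for in-flight items */
                        msleep(10);

                /* ... system image written / devices suspended here ... */

                thaw_workqueues();              /* restore normal queueing */
        }
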
    4965                 :            : #endif /* CONFIG_FREEZER */
    4966                 :            : 
    4967                 :          0 : static void __init wq_numa_init(void)
    4968                 :            : {
    4969                 :            :         cpumask_var_t *tbl;
    4970                 :            :         int node, cpu;
    4971                 :            : 
    4972                 :            :         /* determine NUMA pwq table len - highest node id + 1 */
    4973         [ #  # ]:          0 :         for_each_node(node)
    4974                 :          0 :                 wq_numa_tbl_len = max(wq_numa_tbl_len, node + 1);
    4975                 :            : 
    4976                 :            :         if (num_possible_nodes() <= 1)
    4977                 :            :                 return;
    4978                 :            : 
    4979                 :            :         if (wq_disable_numa) {
    4980                 :            :                 pr_info("workqueue: NUMA affinity support disabled\n");
    4981                 :            :                 return;
    4982                 :            :         }
    4983                 :            : 
    4984                 :            :         wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
    4985                 :            :         BUG_ON(!wq_update_unbound_numa_attrs_buf);
    4986                 :            : 
    4987                 :            :         /*
     4988                 :            :          * We want masks of possible CPUs of each node, which aren't readily
    4989                 :            :          * available.  Build one from cpu_to_node() which should have been
    4990                 :            :          * fully initialized by now.
    4991                 :            :          */
    4992                 :            :         tbl = kzalloc(wq_numa_tbl_len * sizeof(tbl[0]), GFP_KERNEL);
    4993                 :            :         BUG_ON(!tbl);
    4994                 :            : 
    4995                 :            :         for_each_node(node)
    4996                 :            :                 BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
    4997                 :            :                                 node_online(node) ? node : NUMA_NO_NODE));
    4998                 :            : 
    4999                 :            :         for_each_possible_cpu(cpu) {
    5000                 :            :                 node = cpu_to_node(cpu);
    5001                 :            :                 if (WARN_ON(node == NUMA_NO_NODE)) {
    5002                 :            :                         pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
    5003                 :            :                         /* happens iff arch is bonkers, let's just proceed */
    5004                 :            :                         return;
    5005                 :            :                 }
    5006                 :            :                 cpumask_set_cpu(cpu, tbl[node]);
    5007                 :            :         }
    5008                 :            : 
    5009                 :            :         wq_numa_possible_cpumask = tbl;
    5010                 :            :         wq_numa_enabled = true;
    5011                 :            : }
    5012                 :            : 
    5013                 :          0 : static int __init init_workqueues(void)
    5014                 :            : {
    5015                 :          0 :         int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
    5016                 :            :         int i, cpu;
    5017                 :            : 
    5018                 :            :         WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
    5019                 :            : 
    5020                 :          0 :         pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
    5021                 :            : 
    5022                 :          0 :         cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
    5023                 :          0 :         hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
    5024                 :            : 
    5025                 :          0 :         wq_numa_init();
    5026                 :            : 
    5027                 :            :         /* initialize CPU pools */
    5028         [ #  # ]:          0 :         for_each_possible_cpu(cpu) {
    5029                 :            :                 struct worker_pool *pool;
    5030                 :            : 
    5031                 :            :                 i = 0;
    5032         [ #  # ]:          0 :                 for_each_cpu_worker_pool(pool, cpu) {
    5033         [ #  # ]:          0 :                         BUG_ON(init_worker_pool(pool));
    5034                 :          0 :                         pool->cpu = cpu;
    5035                 :          0 :                         cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
    5036                 :          0 :                         pool->attrs->nice = std_nice[i++];
    5037                 :          0 :                         pool->node = cpu_to_node(cpu);
    5038                 :            : 
    5039                 :            :                         /* alloc pool ID */
    5040                 :          0 :                         mutex_lock(&wq_pool_mutex);
    5041         [ #  # ]:          0 :                         BUG_ON(worker_pool_assign_id(pool));
    5042                 :          0 :                         mutex_unlock(&wq_pool_mutex);
    5043                 :            :                 }
    5044                 :            :         }
    5045                 :            : 
    5046                 :            :         /* create the initial worker */
    5047         [ #  # ]:          0 :         for_each_online_cpu(cpu) {
    5048                 :            :                 struct worker_pool *pool;
    5049                 :            : 
    5050         [ #  # ]:          0 :                 for_each_cpu_worker_pool(pool, cpu) {
    5051                 :          0 :                         pool->flags &= ~POOL_DISASSOCIATED;
    5052         [ #  # ]:          0 :                         BUG_ON(create_and_start_worker(pool) < 0);
    5053                 :            :                 }
    5054                 :            :         }
    5055                 :            : 
    5056                 :            :         /* create default unbound and ordered wq attrs */
    5057         [ #  # ]:          0 :         for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
    5058                 :            :                 struct workqueue_attrs *attrs;
    5059                 :            : 
    5060         [ #  # ]:          0 :                 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
    5061                 :          0 :                 attrs->nice = std_nice[i];
    5062                 :          0 :                 unbound_std_wq_attrs[i] = attrs;
    5063                 :            : 
    5064                 :            :                 /*
    5065                 :            :                  * An ordered wq should have only one pwq as ordering is
    5066                 :            :                  * guaranteed by max_active which is enforced by pwqs.
    5067                 :            :                  * Turn off NUMA so that dfl_pwq is used for all nodes.
    5068                 :            :                  */
    5069         [ #  # ]:          0 :                 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
    5070                 :          0 :                 attrs->nice = std_nice[i];
    5071                 :          0 :                 attrs->no_numa = true;
    5072                 :          0 :                 ordered_wq_attrs[i] = attrs;
    5073                 :            :         }
    5074                 :            : 
    5075                 :          0 :         system_wq = alloc_workqueue("events", 0, 0);
    5076                 :          0 :         system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
    5077                 :          0 :         system_long_wq = alloc_workqueue("events_long", 0, 0);
    5078                 :          0 :         system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
    5079                 :            :                                             WQ_UNBOUND_MAX_ACTIVE);
    5080                 :          0 :         system_freezable_wq = alloc_workqueue("events_freezable",
    5081                 :            :                                               WQ_FREEZABLE, 0);
    5082                 :          0 :         system_power_efficient_wq = alloc_workqueue("events_power_efficient",
    5083                 :            :                                               WQ_POWER_EFFICIENT, 0);
    5084                 :          0 :         system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
    5085                 :            :                                               WQ_FREEZABLE | WQ_POWER_EFFICIENT,
    5086                 :            :                                               0);
    5087 [ #  # ][ #  # ]:          0 :         BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5088                 :            :                !system_unbound_wq || !system_freezable_wq ||
    5089                 :            :                !system_power_efficient_wq ||
    5090                 :            :                !system_freezable_power_efficient_wq);
    5091                 :          0 :         return 0;
    5092                 :            : }
    5093                 :            : early_initcall(init_workqueues);
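
For completeness, a hedged sketch of how driver code typically consumes the infrastructure set up above by allocating its own workqueue with alloc_workqueue(); the name "demo" and the flag choice are assumptions for illustration, not a recommendation from this file.

        #include <linux/errno.h>
        #include <linux/module.h>
        #include <linux/workqueue.h>

        static struct workqueue_struct *demo_wq;

        static int __init demo_init(void)
        {
                /* Unbound + freezable: no CPU locality needed, pauses across suspend. */
                demo_wq = alloc_workqueue("demo", WQ_UNBOUND | WQ_FREEZABLE, 0);
                if (!demo_wq)
                        return -ENOMEM;
                return 0;
        }

        static void __exit demo_exit(void)
        {
                destroy_workqueue(demo_wq);
        }

        module_init(demo_init);
        module_exit(demo_exit);
        MODULE_LICENSE("GPL");
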

Generated by: LCOV version 1.9