Branch data Line data Source code
1 : : /*
2 : : * Generic helpers for smp ipi calls
3 : : *
4 : : * (C) Jens Axboe <jens.axboe@oracle.com> 2008
5 : : */
6 : : #include <linux/rcupdate.h>
7 : : #include <linux/rculist.h>
8 : : #include <linux/kernel.h>
9 : : #include <linux/export.h>
10 : : #include <linux/percpu.h>
11 : : #include <linux/init.h>
12 : : #include <linux/gfp.h>
13 : : #include <linux/smp.h>
14 : : #include <linux/cpu.h>
15 : :
16 : : #include "smpboot.h"
17 : :
18 : : enum {
19 : : CSD_FLAG_LOCK = 0x01,
20 : : CSD_FLAG_WAIT = 0x02,
21 : : };
22 : :
23 : : struct call_function_data {
24 : : struct call_single_data __percpu *csd;
25 : : cpumask_var_t cpumask;
26 : : };
27 : :
28 : : static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
29 : :
30 : : static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
31 : :
32 : : static int
33 : 0 : hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
34 : : {
35 : 555 : long cpu = (long)hcpu;
36 [ + + ][ + + ][ + + ]: 555 : struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
37 : :
38 : : switch (action) {
39 : : case CPU_UP_PREPARE:
40 : : case CPU_UP_PREPARE_FROZEN:
41 : : if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
42 : : cpu_to_node(cpu)))
43 : : return notifier_from_errno(-ENOMEM);
44 : 81 : cfd->csd = alloc_percpu(struct call_single_data);
45 [ + - ]: 81 : if (!cfd->csd) {
46 : : free_cpumask_var(cfd->cpumask);
47 : : return notifier_from_errno(-ENOMEM);
48 : : }
49 : : break;
50 : :
51 : : #ifdef CONFIG_HOTPLUG_CPU
52 : : case CPU_UP_CANCELED:
53 : : case CPU_UP_CANCELED_FROZEN:
54 : :
55 : : case CPU_DEAD:
56 : : case CPU_DEAD_FROZEN:
57 : : free_cpumask_var(cfd->cpumask);
58 : 78 : free_percpu(cfd->csd);
59 : 159 : break;
60 : : #endif
61 : : };
62 : :
63 : : return NOTIFY_OK;
64 : : }
65 : :
66 : : static struct notifier_block hotplug_cfd_notifier = {
67 : : .notifier_call = hotplug_cfd,
68 : : };
69 : :
70 : 0 : void __init call_function_init(void)
71 : : {
72 : 0 : void *cpu = (void *)(long)smp_processor_id();
73 : : int i;
74 : :
75 [ # # ]: 0 : for_each_possible_cpu(i)
76 : 0 : init_llist_head(&per_cpu(call_single_queue, i));
77 : :
78 : 0 : hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
79 : 0 : register_cpu_notifier(&hotplug_cfd_notifier);
80 : 0 : }
81 : :
82 : : /*
83 : : * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
84 : : *
85 : : * For non-synchronous ipi calls the csd can still be in use by the
86 : : * previous function call. For multi-CPU calls it's even more interesting
87 : : * as we'll have to ensure no other cpu is observing our csd.
88 : : */
89 : : static void csd_lock_wait(struct call_single_data *csd)
90 : : {
91 [ - + ][ + + ][ - + ][ - + ][ + + ]: 15251543 : while (csd->flags & CSD_FLAG_LOCK)
92 : 15126575 : cpu_relax();
93 : : }
94 : :
95 : : static void csd_lock(struct call_single_data *csd)
96 : : {
97 : : csd_lock_wait(csd);
98 : 124081 : csd->flags |= CSD_FLAG_LOCK;
99 : :
100 : : /*
101 : : * prevent CPU from reordering the above assignment
102 : : * to ->flags with any subsequent assignments to other
103 : : * fields of the specified call_single_data structure:
104 : : */
105 : 124081 : smp_mb();
106 : : }
107 : :
108 : 0 : static void csd_unlock(struct call_single_data *csd)
109 : : {
110 [ - + ]: 124078 : WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
111 : :
112 : : /*
113 : : * ensure we're all done before releasing data:
114 : : */
115 : 124078 : smp_mb();
116 : :
117 : 124077 : csd->flags &= ~CSD_FLAG_LOCK;
118 : 124077 : }
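The csd "lock" above is not a spinlock: it is a single flag that the requesting CPU sets before queueing the csd and that the target CPU clears after running the callback, with a full barrier on each side so the flag hand-off also orders the other csd fields. The following user-space sketch with C11 atomics is an analogy only (all names are made up, and as with the per-CPU csd only one owner ever takes the lock), not kernel code:

    #include <stdatomic.h>

    struct csd_demo {
            atomic_uint flags;              /* bit 0 plays the role of CSD_FLAG_LOCK */
            void (*func)(void *);
            void *info;
    };

    static void csd_demo_lock(struct csd_demo *csd)
    {
            /* csd_lock_wait(): spin until the previous user has released it */
            while (atomic_load_explicit(&csd->flags, memory_order_acquire) & 1)
                    ;
            atomic_store_explicit(&csd->flags, 1, memory_order_relaxed);
            /* the smp_mb() in csd_lock(): order the flag before ->func/->info */
            atomic_thread_fence(memory_order_seq_cst);
    }

    static void csd_demo_unlock(struct csd_demo *csd)
    {
            /* the smp_mb() in csd_unlock(): make the callback's work visible first */
            atomic_thread_fence(memory_order_seq_cst);
            atomic_store_explicit(&csd->flags, 0, memory_order_release);
    }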
119 : :
120 : : /*
121 : : * Insert a previously allocated call_single_data element
122 : : * for execution on the given CPU. csd must already have
123 : : * ->func, ->info, and ->flags set.
124 : : */
125 : 0 : static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
126 : : {
127 [ + + ]: 123881 : if (wait)
128 : 623 : csd->flags |= CSD_FLAG_WAIT;
129 : :
130 : : /*
131 : : * The list addition should be visible before sending the IPI
132 : : * handler locks the list to pull the entry off it because of
133 : : * normal cache coherency rules implied by spinlocks.
134 : : *
135 : : * If IPIs can go out of order to the cache coherency protocol
136 : : * in an architecture, sufficient synchronisation should be added
137 : : * to arch code to make it appear to obey cache coherency WRT
138 : : * locking and barrier primitives. Generic code isn't really
139 : : * equipped to do the right thing...
140 : : */
141 [ + + ]: 123881 : if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
142 : 123880 : arch_send_call_function_single_ipi(cpu);
143 : :
144 [ + + ]: 247762 : if (wait)
145 : : csd_lock_wait(csd);
146 : 123881 : }
147 : :
148 : : /*
149 : : * Invoked by arch to handle an IPI for call function single. Must be
150 : : * called from the arch with interrupts disabled.
151 : : */
152 : 0 : void generic_smp_call_function_single_interrupt(void)
153 : : {
154 : : struct llist_node *entry, *next;
155 : :
156 : : /*
157 : : * Shouldn't receive this interrupt on a cpu that is not yet online.
158 : : */
159 [ - + ][ # # ][ # # ]: 124072 : WARN_ON_ONCE(!cpu_online(smp_processor_id()));
160 : :
161 : 248144 : entry = llist_del_all(&__get_cpu_var(call_single_queue));
162 : 124071 : entry = llist_reverse_order(entry);
163 : :
164 [ + + ]: 248155 : while (entry) {
165 : : struct call_single_data *csd;
166 : :
167 : 124075 : next = entry->next;
168 : :
169 : : csd = llist_entry(entry, struct call_single_data, llist);
170 : 124075 : csd->func(csd->info);
171 : 124078 : csd_unlock(csd);
172 : :
173 : : entry = next;
174 : : }
175 : 124080 : }
176 : :
177 : : static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
178 : :
179 : : /*
180 : : * smp_call_function_single - Run a function on a specific CPU
181 : : * @func: The function to run. This must be fast and non-blocking.
182 : : * @info: An arbitrary pointer to pass to the function.
183 : : * @wait: If true, wait until function has completed on other CPUs.
184 : : *
185 : : * Returns 0 on success, else a negative status code.
186 : : */
187 : 0 : int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
188 : : int wait)
189 : : {
190 : 783 : struct call_single_data d = {
191 : : .flags = 0,
192 : : };
193 : : unsigned long flags;
194 : : int this_cpu;
195 : : int err = 0;
196 : :
197 : : /*
198 : : * prevent preemption and reschedule on another processor,
199 : : * as well as CPU removal
200 : : */
201 : 783 : this_cpu = get_cpu();
202 : :
203 : : /*
204 : : * Can deadlock when called with interrupts disabled.
205 : : * We allow CPUs that are not yet online though, as no one else can
206 : : * send an smp call function interrupt to this CPU and as such
207 : : * deadlocks can't happen.
208 : : */
209 [ + - - + ][ # # ][ - + ][ # # ][ # # ]: 1566 : WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
210 : : && !oops_in_progress);
211 : :
212 [ + + ]: 783 : if (cpu == this_cpu) {
213 : : local_irq_save(flags);
214 : 160 : func(info);
215 [ - + ]: 160 : local_irq_restore(flags);
216 : : } else {
217 [ + - ][ + - ]: 1246 : if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
218 : : struct call_single_data *csd = &d;
219 : :
220 [ - + ]: 623 : if (!wait)
221 : 623 : csd = &__get_cpu_var(csd_data);
222 : :
223 : : csd_lock(csd);
224 : :
225 : 623 : csd->func = func;
226 : 623 : csd->info = info;
227 : 623 : generic_exec_single(cpu, csd, wait);
228 : : } else {
229 : : err = -ENXIO; /* CPU not online */
230 : : }
231 : : }
232 : :
233 : 783 : put_cpu();
234 : :
235 : 783 : return err;
236 : : }
237 : : EXPORT_SYMBOL(smp_call_function_single);
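As a usage illustration (a sketch with made-up names, assuming the usual <linux/smp.h> and printk environment), a caller might run a short, non-blocking callback on CPU 1 and wait for it:

    /* Hypothetical callback: record which CPU actually ran it. Must be fast
     * and non-blocking, since it runs from the IPI handler with IRQs off. */
    static void record_cpu(void *info)
    {
            *(int *)info = smp_processor_id();
    }

    static int run_on_cpu1(void)
    {
            int ran_on = -1;
            int err;

            err = smp_call_function_single(1, record_cpu, &ran_on, 1 /* wait */);
            if (err)
                    pr_warn("CPU 1 is not online: %d\n", err);      /* -ENXIO */
            else
                    pr_info("record_cpu() ran on CPU %d\n", ran_on);
            return err;
    }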
238 : :
239 : : /*
240 : : * smp_call_function_any - Run a function on any of the given cpus
241 : : * @mask: The mask of cpus it can run on.
242 : : * @func: The function to run. This must be fast and non-blocking.
243 : : * @info: An arbitrary pointer to pass to the function.
244 : : * @wait: If true, wait until function has completed.
245 : : *
246 : : * Returns 0 on success, else a negative status code (if no cpus were online).
247 : : *
248 : : * Selection preference:
249 : : * 1) current cpu if in @mask
250 : : * 2) any cpu of current node if in @mask
251 : : * 3) any other online cpu in @mask
252 : : */
253 : 0 : int smp_call_function_any(const struct cpumask *mask,
254 : : smp_call_func_t func, void *info, int wait)
255 : : {
256 : : unsigned int cpu;
257 : : const struct cpumask *nodemask;
258 : : int ret;
259 : :
260 : : /* Try for same CPU (cheapest) */
261 : 0 : cpu = get_cpu();
262 [ # # ]: 0 : if (cpumask_test_cpu(cpu, mask))
263 : : goto call;
264 : :
265 : : /* Try for same node. */
266 : 0 : nodemask = cpumask_of_node(cpu_to_node(cpu));
267 [ # # ]: 0 : for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
268 : 0 : cpu = cpumask_next_and(cpu, nodemask, mask)) {
269 [ # # ]: 0 : if (cpu_online(cpu))
270 : : goto call;
271 : : }
272 : :
273 : : /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
274 : 0 : cpu = cpumask_any_and(mask, cpu_online_mask);
275 : : call:
276 : 0 : ret = smp_call_function_single(cpu, func, info, wait);
277 : 0 : put_cpu();
278 : 0 : return ret;
279 : : }
280 : : EXPORT_SYMBOL_GPL(smp_call_function_any);
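For example (the callback and its purpose are hypothetical), a caller that only needs the work done "somewhere close" can hand in a NUMA node's cpumask and let the preference order above pick the cheapest online CPU:

    static void drain_node_caches(void *unused)
    {
            /* hypothetical per-CPU work, fast and non-blocking */
    }

    static int drain_near_node0(void)
    {
            /* Prefers the current CPU, then its node, then any online CPU in the mask. */
            return smp_call_function_any(cpumask_of_node(0), drain_node_caches, NULL, 1);
    }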
281 : :
282 : : /**
283 : : * __smp_call_function_single(): Run a function on a specific CPU
284 : : * @cpu: The CPU to run on.
285 : : * @csd: Pre-allocated and set-up data structure
286 : : * @wait: If true, wait until function has completed on specified CPU.
287 : : *
288 : : * Like smp_call_function_single(), but allow caller to pass in a
289 : : * pre-allocated data structure. Useful for embedding @csd inside
290 : : * other structures, for instance.
291 : : */
292 : 0 : void __smp_call_function_single(int cpu, struct call_single_data *csd,
293 : : int wait)
294 : : {
295 : : unsigned int this_cpu;
296 : : unsigned long flags;
297 : :
298 : 123258 : this_cpu = get_cpu();
299 : : /*
300 : : * Can deadlock when called with interrupts disabled.
301 : : * We allow CPUs that are not yet online though, as no one else can
302 : : * send an smp call function interrupt to this CPU and as such
303 : : * deadlocks can't happen.
304 : : */
305 [ + - ][ - + # # ][ # # ][ - + ][ # # ][ - - ]: 123258 : WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
306 : : && !oops_in_progress);
307 : :
308 [ + - ]: 123258 : if (cpu == this_cpu) {
309 : : local_irq_save(flags);
310 : 0 : csd->func(csd->info);
311 [ # # ]: 0 : local_irq_restore(flags);
312 : : } else {
313 : : csd_lock(csd);
314 : 123258 : generic_exec_single(cpu, csd, wait);
315 : : }
316 : 123258 : put_cpu();
317 : 123258 : }
318 : : EXPORT_SYMBOL_GPL(__smp_call_function_single);
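The embedding pattern the comment refers to could look like the sketch below (struct and function names are hypothetical). With wait == 0 the caller must not reuse the csd until the callback has run; csd_lock() enforces that by spinning on CSD_FLAG_LOCK.

    struct remote_req {
            struct call_single_data csd;    /* embedded, so no allocation on this path */
            int payload;
    };

    static void handle_remote_req(void *info)
    {
            struct remote_req *req = info;

            pr_info("payload %d handled on CPU %d\n", req->payload, smp_processor_id());
    }

    static void send_remote_req(int cpu, struct remote_req *req)
    {
            req->csd.flags = 0;
            req->csd.func  = handle_remote_req;
            req->csd.info  = req;
            __smp_call_function_single(cpu, &req->csd, 0);  /* fire and forget */
    }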
319 : :
320 : : /**
321 : : * smp_call_function_many(): Run a function on a set of other CPUs.
322 : : * @mask: The set of cpus to run on (only runs on online subset).
323 : : * @func: The function to run. This must be fast and non-blocking.
324 : : * @info: An arbitrary pointer to pass to the function.
325 : : * @wait: If true, wait (atomically) until function has completed
326 : : * on other CPUs.
327 : : *
328 : : * If @wait is true, then returns once @func has returned.
329 : : *
330 : : * You must not call this function with disabled interrupts or from a
331 : : * hardware interrupt handler or from a bottom half handler. Preemption
332 : : * must be disabled when calling this function.
333 : : */
334 : 0 : void smp_call_function_many(const struct cpumask *mask,
335 : : smp_call_func_t func, void *info, bool wait)
336 : : {
337 : : struct call_function_data *cfd;
338 : 4981 : int cpu, next_cpu, this_cpu = smp_processor_id();
339 : :
340 : : /*
341 : : * Can deadlock when called with interrupts disabled.
342 : : * We allow CPUs that are not yet online though, as no one else can
343 : : * send an smp call function interrupt to this CPU and as such
344 : : * deadlocks can't happen.
345 : : */
346 [ + - - + ][ # # ][ # # ][ - + ][ # # ][ # # ]: 9962 : WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
347 : : && !oops_in_progress && !early_boot_irqs_disabled);
348 : :
349 : : /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
350 : 4981 : cpu = cpumask_first_and(mask, cpu_online_mask);
351 [ + + ]: 4981 : if (cpu == this_cpu)
352 : 208 : cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
353 : :
354 : : /* No online cpus? We're done. */
355 [ + + ]: 4981 : if (cpu >= nr_cpu_ids)
356 : : return;
357 : :
358 : : /* Do we have another CPU which isn't us? */
359 : 142 : next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
360 [ + + ]: 142 : if (next_cpu == this_cpu)
361 : 24 : next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
362 : :
363 : : /* Fastpath: do that cpu by itself. */
364 [ + + ]: 142 : if (next_cpu >= nr_cpu_ids) {
365 : 78 : smp_call_function_single(cpu, func, info, wait);
366 : 78 : return;
367 : : }
368 : :
369 : 128 : cfd = &__get_cpu_var(cfd_data);
370 : :
371 : : cpumask_and(cfd->cpumask, mask, cpu_online_mask);
372 : : cpumask_clear_cpu(this_cpu, cfd->cpumask);
373 : :
374 : : /* Some callers race with other cpus changing the passed mask */
375 [ + - ]: 64 : if (unlikely(!cpumask_weight(cfd->cpumask)))
376 : : return;
377 : :
378 [ + + ]: 264 : for_each_cpu(cpu, cfd->cpumask) {
379 : 200 : struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
380 : :
381 : : csd_lock(csd);
382 : 200 : csd->func = func;
383 : 200 : csd->info = info;
384 : 200 : llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
385 : : }
386 : :
387 : : /* Send a message to all CPUs in the map */
388 : 64 : arch_send_call_function_ipi_mask(cfd->cpumask);
389 : :
390 [ + - ]: 64 : if (wait) {
391 [ + + ]: 264 : for_each_cpu(cpu, cfd->cpumask) {
392 : : struct call_single_data *csd;
393 : :
394 : 200 : csd = per_cpu_ptr(cfd->csd, cpu);
395 : : csd_lock_wait(csd);
396 : : }
397 : : }
398 : : }
399 : : EXPORT_SYMBOL(smp_call_function_many);
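A minimal usage sketch (names are hypothetical), showing the calling convention the comment spells out: preemption disabled, interrupts enabled, and @mask possibly including the local CPU, which smp_call_function_many() itself skips:

    static void invalidate_local_cache(void *info)
    {
            /* hypothetical per-CPU work, fast and non-blocking */
    }

    static void invalidate_remote_caches(const struct cpumask *mask)
    {
            preempt_disable();
            /* Runs only on the online CPUs in @mask other than this one. */
            smp_call_function_many(mask, invalidate_local_cache, NULL, true);
            preempt_enable();
    }

Callers that also want the work done on the local CPU typically use on_each_cpu_mask() further below instead.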
400 : :
401 : : /**
402 : : * smp_call_function(): Run a function on all other CPUs.
403 : : * @func: The function to run. This must be fast and non-blocking.
404 : : * @info: An arbitrary pointer to pass to the function.
405 : : * @wait: If true, wait (atomically) until function has completed
406 : : * on other CPUs.
407 : : *
408 : : * Returns 0.
409 : : *
410 : : * If @wait is true, then returns once @func has returned; otherwise
411 : : * it returns just before the target cpu calls @func.
412 : : *
413 : : * You must not call this function with disabled interrupts or from a
414 : : * hardware interrupt handler or from a bottom half handler.
415 : : */
416 : 0 : int smp_call_function(smp_call_func_t func, void *info, int wait)
417 : : {
418 : 12 : preempt_disable();
419 : 12 : smp_call_function_many(cpu_online_mask, func, info, wait);
420 : 12 : preempt_enable();
421 : :
422 : 1 : return 0;
423 : : }
424 : : EXPORT_SYMBOL(smp_call_function);
425 : :
426 : : /* Setup configured maximum number of CPUs to activate */
427 : : unsigned int setup_max_cpus = NR_CPUS;
428 : : EXPORT_SYMBOL(setup_max_cpus);
429 : :
430 : :
431 : : /*
432 : : * Setup routine for controlling SMP activation
433 : : *
434 : : * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
435 : : * activation entirely (the MPS table probe still happens, though).
436 : : *
437 : : * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
438 : : * greater than 0, limits the maximum number of CPUs activated in
439 : : * SMP mode to <NUM>.
440 : : */
441 : :
442 : 0 : void __weak arch_disable_smp_support(void) { }
443 : :
444 : 0 : static int __init nosmp(char *str)
445 : : {
446 : 0 : setup_max_cpus = 0;
447 : 0 : arch_disable_smp_support();
448 : :
449 : 0 : return 0;
450 : : }
451 : :
452 : : early_param("nosmp", nosmp);
453 : :
454 : : /* this is hard limit */
455 : 0 : static int __init nrcpus(char *str)
456 : : {
457 : : int nr_cpus;
458 : :
459 : 0 : get_option(&str, &nr_cpus);
460 [ # # ][ # # ]: 0 : if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
461 : 0 : nr_cpu_ids = nr_cpus;
462 : :
463 : 0 : return 0;
464 : : }
465 : :
466 : : early_param("nr_cpus", nrcpus);
467 : :
468 : 0 : static int __init maxcpus(char *str)
469 : : {
470 : 0 : get_option(&str, &setup_max_cpus);
471 [ # # ]: 0 : if (setup_max_cpus == 0)
472 : 0 : arch_disable_smp_support();
473 : :
474 : 0 : return 0;
475 : : }
476 : :
477 : : early_param("maxcpus", maxcpus);
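For illustration (the values are arbitrary), booting with

    nosmp

disables SMP activation entirely, while

    maxcpus=2 nr_cpus=4

brings up at most two CPUs at boot while capping nr_cpu_ids, the hard limit, at four.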
478 : :
479 : : /* Setup number of possible processor ids */
480 : : int nr_cpu_ids __read_mostly = NR_CPUS;
481 : : EXPORT_SYMBOL(nr_cpu_ids);
482 : :
483 : : /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
484 : 0 : void __init setup_nr_cpu_ids(void)
485 : : {
486 : 0 : nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
487 : 0 : }
488 : :
489 : 0 : void __weak smp_announce(void)
490 : : {
491 : 0 : printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
492 : 0 : }
493 : :
494 : : /* Called by boot processor to activate the rest. */
495 : 0 : void __init smp_init(void)
496 : : {
497 : : unsigned int cpu;
498 : :
499 : 0 : idle_threads_init();
500 : :
501 : : /* FIXME: This should be done in userspace --RR */
502 [ # # ]: 0 : for_each_present_cpu(cpu) {
503 [ # # ]: 0 : if (num_online_cpus() >= setup_max_cpus)
504 : : break;
505 [ # # ]: 0 : if (!cpu_online(cpu))
506 : 0 : cpu_up(cpu);
507 : : }
508 : :
509 : : /* Any cleanup work */
510 : 0 : smp_announce();
511 : 0 : smp_cpus_done(setup_max_cpus);
512 : 0 : }
513 : :
514 : : /*
515 : : * Call a function on all processors. May be used during early boot while
516 : : * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
517 : : * of local_irq_disable/enable().
518 : : */
519 : 0 : int on_each_cpu(void (*func) (void *info), void *info, int wait)
520 : : {
521 : : unsigned long flags;
522 : : int ret = 0;
523 : :
524 : 11 : preempt_disable();
525 : : ret = smp_call_function(func, info, wait);
526 : : local_irq_save(flags);
527 : 11 : func(info);
528 [ - + ]: 11 : local_irq_restore(flags);
529 : 11 : preempt_enable();
530 : 11 : return ret;
531 : : }
532 : : EXPORT_SYMBOL(on_each_cpu);
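A short usage sketch (the callback and counter are hypothetical): count how many CPUs, including this one, executed the callback.

    static atomic_t visits = ATOMIC_INIT(0);

    /* Hypothetical callback: runs with interrupts disabled on every CPU. */
    static void count_visit(void *info)
    {
            atomic_t *ctr = info;

            atomic_inc(ctr);
    }

    static void count_all_cpus(void)
    {
            on_each_cpu(count_visit, &visits, 1);   /* wait for every CPU */
            pr_info("callback ran on %d CPUs\n", atomic_read(&visits));
    }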
533 : :
534 : : /**
535 : : * on_each_cpu_mask(): Run a function on processors specified by
536 : : * cpumask, which may include the local processor.
537 : : * @mask: The set of cpus to run on (only runs on online subset).
538 : : * @func: The function to run. This must be fast and non-blocking.
539 : : * @info: An arbitrary pointer to pass to the function.
540 : : * @wait: If true, wait (atomically) until function has completed
541 : : * on other CPUs.
542 : : *
543 : : * If @wait is true, then returns once @func has returned.
544 : : *
545 : : * You must not call this function with disabled interrupts or from a
546 : : * hardware interrupt handler or from a bottom half handler. The
547 : : * exception is that it may be used during early boot while
548 : : * early_boot_irqs_disabled is set.
549 : : */
550 : 0 : void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
551 : : void *info, bool wait)
552 : : {
553 : 4969 : int cpu = get_cpu();
554 : :
555 : 4969 : smp_call_function_many(mask, func, info, wait);
556 [ + + ]: 4969 : if (cpumask_test_cpu(cpu, mask)) {
557 : : unsigned long flags;
558 : : local_irq_save(flags);
559 : 255 : func(info);
560 [ - + ]: 255 : local_irq_restore(flags);
561 : : }
562 : 4969 : put_cpu();
563 : 4969 : }
564 : : EXPORT_SYMBOL(on_each_cpu_mask);
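For instance (a hedged sketch; flush_one_cpu() is hypothetical and <linux/mm_types.h> is assumed for mm_cpumask()), arch code uses this shape to run a flush on every CPU a given mm has been active on, running locally as well when the current CPU is in the mask:

    static void flush_one_cpu(void *info)
    {
            /* hypothetical per-CPU flush keyed off info */
    }

    static void flush_mm_cpus(struct mm_struct *mm)
    {
            on_each_cpu_mask(mm_cpumask(mm), flush_one_cpu, mm, true);
    }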
565 : :
566 : : /*
567 : : * on_each_cpu_cond(): Call a function on each processor for which
568 : : * the supplied function cond_func returns true, optionally waiting
569 : : * for all the required CPUs to finish. This may include the local
570 : : * processor.
571 : : * @cond_func: A callback function that is passed a cpu id and
572 : : * the info parameter. The function is called
573 : : * with preemption disabled. The function should
574 : : * return a boolean value indicating whether to IPI
575 : : * the specified CPU.
576 : : * @func: The function to run on all applicable CPUs.
577 : : * This must be fast and non-blocking.
578 : : * @info: An arbitrary pointer to pass to both functions.
579 : : * @wait: If true, wait (atomically) until function has
580 : : * completed on other CPUs.
581 : : * @gfp_flags: GFP flags to use when allocating the cpumask
582 : : * used internally by the function.
583 : : *
584 : : * The function might sleep if the GFP flags indicate a
585 : : * non-atomic allocation is allowed.
586 : : *
587 : : * Preemption is disabled to protect against CPUs going offline, but not
588 : : * against CPUs coming online; those will not be seen or sent an IPI.
589 : : *
590 : : * You must not call this function with disabled interrupts or
591 : : * from a hardware interrupt handler or from a bottom half handler.
592 : : */
593 : 0 : void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
594 : : smp_call_func_t func, void *info, bool wait,
595 : : gfp_t gfp_flags)
596 : : {
597 : : cpumask_var_t cpus;
598 : : int cpu, ret;
599 : :
600 : : might_sleep_if(gfp_flags & __GFP_WAIT);
601 : :
602 : : if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
603 : 58 : preempt_disable();
604 [ + + ]: 406 : for_each_online_cpu(cpu)
605 [ + + ]: 290 : if (cond_func(cpu, info))
606 : : cpumask_set_cpu(cpu, cpus);
607 : 58 : on_each_cpu_mask(cpus, func, info, wait);
608 : 58 : preempt_enable();
609 : : free_cpumask_var(cpus);
610 : : } else {
611 : : /*
612 : : * No free cpumask, bother. No matter, we'll
613 : : * just have to IPI them one by one.
614 : : */
615 : : preempt_disable();
616 : : for_each_online_cpu(cpu)
617 : : if (cond_func(cpu, info)) {
618 : : ret = smp_call_function_single(cpu, func,
619 : : info, wait);
620 : : WARN_ON_ONCE(ret);
621 : : }
622 : : preempt_enable();
623 : : }
624 : 58 : }
625 : : EXPORT_SYMBOL(on_each_cpu_cond);
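A usage sketch with hypothetical per-CPU state: only the CPUs whose pending counter is non-zero get an IPI, the rest are left undisturbed.

    static DEFINE_PER_CPU(int, pending);

    static bool has_pending(int cpu, void *info)
    {
            return per_cpu(pending, cpu) != 0;
    }

    static void drain_pending(void *info)
    {
            __this_cpu_write(pending, 0);   /* hypothetical drain of local work */
    }

    static void drain_all_pending(void)
    {
            on_each_cpu_cond(has_pending, drain_pending, NULL, true, GFP_KERNEL);
    }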
626 : :
627 : 0 : static void do_nothing(void *unused)
628 : : {
629 : 0 : }
630 : :
631 : : /**
632 : : * kick_all_cpus_sync - Force all cpus out of idle
633 : : *
634 : : * Used to synchronize the update of pm_idle function pointer. It's
635 : : * called after the pointer is updated and returns after the dummy
636 : : * callback function has been executed on all cpus. The execution of
637 : : * the function can only happen on the remote cpus after they have
638 : : * left the idle function which had been called via pm_idle function
639 : : * pointer. So it's guaranteed that nothing uses the previous pointer
640 : : * anymore.
641 : : */
642 : 0 : void kick_all_cpus_sync(void)
643 : : {
644 : : /* Make sure the change is visible before we kick the cpus */
645 : 0 : smp_mb();
646 : : smp_call_function(do_nothing, NULL, 1);
647 : 0 : }
648 : : EXPORT_SYMBOL_GPL(kick_all_cpus_sync);