Branch data Line data Source code
1 : : /*
2 : : * mm/percpu.c - percpu memory allocator
3 : : *
4 : : * Copyright (C) 2009 SUSE Linux Products GmbH
5 : : * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
6 : : *
7 : : * This file is released under the GPLv2.
8 : : *
9 : : * This is the percpu allocator, which can handle both static and
10 : : * dynamic areas. Percpu areas are allocated in chunks. Each chunk
11 : : * consists of a boot-time determined number of units and the first
12 : : * chunk is used for static percpu variables in the kernel image
13 : : * (special boot time alloc/init handling is necessary as these areas
14 : : * need to be brought up before allocation services are running).
15 : : * Units grow as necessary and all units grow or shrink in unison.
16 : : * When a chunk is filled up, another chunk is allocated.
17 : : *
18 : : * c0 c1 c2
19 : : * ------------------- ------------------- ------------
20 : : * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
21 : : * ------------------- ...... ------------------- .... ------------
22 : : *
23 : : * Allocation is done in offset-size areas of a single unit space,
24 : : * i.e. an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of
25 : : * c1:u0, c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly
26 : : * to cpus. On NUMA, the mapping can be non-linear and even sparse.
27 : : * Percpu access can be done by configuring percpu base registers
28 : : * according to cpu to unit mapping and pcpu_unit_size.
29 : : *
30 : : * There are usually many small percpu allocations, many of them as
31 : : * small as 4 bytes. The allocator organizes chunks into lists
32 : : * according to free size and tries to allocate from the fullest one.
33 : : * Each chunk keeps the maximum contiguous area size hint which is
34 : : * guaranteed to be equal to or larger than the maximum contiguous
35 : : * area in the chunk. This helps the allocator avoid iterating over
36 : : * the chunk maps unnecessarily.
37 : : *
38 : : * Allocation state in each chunk is kept using an array of integers
39 : : * on chunk->map. A positive value in the map represents a free
40 : : * region and a negative value an allocated one. Allocation inside a
41 : : * chunk is done by scanning this map sequentially and serving the
42 : : * first matching entry. This is mostly copied from the percpu_modalloc()
43 : : * allocator. Chunks can be determined from an address using the index
44 : : * field in the page struct, which contains a pointer to the chunk.
45 : : *
46 : : * To use this allocator, arch code should do the following.
47 : : *
48 : : * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 : : * regular address to percpu pointer and back if they need to be
50 : : * different from the default
51 : : *
52 : : * - use pcpu_setup_first_chunk() during percpu area initialization to
53 : : * setup the first chunk containing the kernel static percpu area
54 : : */
55 : :
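/*
 * As a sketch of the map encoding above (values hypothetical): a
 * chunk with a 4096-byte unit carrying one 512-byte allocation at
 * offset 1024 could have
 *
 *	chunk->map[] = { 1024, -512, 2560 };	chunk->map_used = 3;
 *
 * i.e. 1024 free bytes, 512 allocated bytes, then 2560 free bytes.
 * Offsets are implicit in the running sum of the absolute values.
 */
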
56 : : #include <linux/bitmap.h>
57 : : #include <linux/bootmem.h>
58 : : #include <linux/err.h>
59 : : #include <linux/list.h>
60 : : #include <linux/log2.h>
61 : : #include <linux/mm.h>
62 : : #include <linux/module.h>
63 : : #include <linux/mutex.h>
64 : : #include <linux/percpu.h>
65 : : #include <linux/pfn.h>
66 : : #include <linux/slab.h>
67 : : #include <linux/spinlock.h>
68 : : #include <linux/vmalloc.h>
69 : : #include <linux/workqueue.h>
70 : : #include <linux/kmemleak.h>
71 : :
72 : : #include <asm/cacheflush.h>
73 : : #include <asm/sections.h>
74 : : #include <asm/tlbflush.h>
75 : : #include <asm/io.h>
76 : :
77 : : #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 share the same slot */
78 : : #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
79 : :
80 : : #ifdef CONFIG_SMP
81 : : /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
82 : : #ifndef __addr_to_pcpu_ptr
83 : : #define __addr_to_pcpu_ptr(addr) \
84 : : (void __percpu *)((unsigned long)(addr) - \
85 : : (unsigned long)pcpu_base_addr + \
86 : : (unsigned long)__per_cpu_start)
87 : : #endif
88 : : #ifndef __pcpu_ptr_to_addr
89 : : #define __pcpu_ptr_to_addr(ptr) \
90 : : (void __force *)((unsigned long)(ptr) + \
91 : : (unsigned long)pcpu_base_addr - \
92 : : (unsigned long)__per_cpu_start)
93 : : #endif
94 : : #else /* CONFIG_SMP */
95 : : /* on UP, it's always identity mapped */
96 : : #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
97 : : #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
98 : : #endif /* CONFIG_SMP */
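
/*
 * Note that the two translations above are exact inverses: with
 * B == pcpu_base_addr and S == __per_cpu_start,
 *
 *	__addr_to_pcpu_ptr(a) == a - B + S
 *	__pcpu_ptr_to_addr(p) == p + B - S
 *
 * so a round trip through both macros always yields the original
 * value. A dereferenceable address for a particular cpu is obtained
 * by further adding that cpu's unit offset, which is in effect what
 * per_cpu_ptr() does.
 */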
99 : :
100 : : struct pcpu_chunk {
101 : : struct list_head list; /* linked to pcpu_slot lists */
102 : : int free_size; /* free bytes in the chunk */
103 : : int contig_hint; /* max contiguous size hint */
104 : : void *base_addr; /* base address of this chunk */
105 : : int map_used; /* # of map entries used */
106 : : int map_alloc; /* # of map entries allocated */
107 : : int *map; /* allocation map */
108 : : void *data; /* chunk data */
109 : : bool immutable; /* no [de]population allowed */
110 : : unsigned long populated[]; /* populated bitmap */
111 : : };
112 : :
113 : : static int pcpu_unit_pages __read_mostly;
114 : : static int pcpu_unit_size __read_mostly;
115 : : static int pcpu_nr_units __read_mostly;
116 : : static int pcpu_atom_size __read_mostly;
117 : : static int pcpu_nr_slots __read_mostly;
118 : : static size_t pcpu_chunk_struct_size __read_mostly;
119 : :
120 : : /* cpus with the lowest and highest unit addresses */
121 : : static unsigned int pcpu_low_unit_cpu __read_mostly;
122 : : static unsigned int pcpu_high_unit_cpu __read_mostly;
123 : :
124 : : /* the address of the first chunk which starts with the kernel static area */
125 : : void *pcpu_base_addr __read_mostly;
126 : : EXPORT_SYMBOL_GPL(pcpu_base_addr);
127 : :
128 : : static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
129 : : const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
130 : :
131 : : /* group information, used for vm allocation */
132 : : static int pcpu_nr_groups __read_mostly;
133 : : static const unsigned long *pcpu_group_offsets __read_mostly;
134 : : static const size_t *pcpu_group_sizes __read_mostly;
135 : :
136 : : /*
137 : : * The first chunk which always exists. Note that unlike other
138 : : * chunks, this one can be allocated and mapped in several different
139 : : * ways and thus often doesn't live in the vmalloc area.
140 : : */
141 : : static struct pcpu_chunk *pcpu_first_chunk;
142 : :
143 : : /*
144 : : * Optional reserved chunk. This chunk reserves part of the first
145 : : * chunk and serves it for reserved allocations. The end offset of the
146 : : * reserved area is kept in pcpu_reserved_chunk_limit. When the reserved
147 : : * area doesn't exist, the following variables contain NULL and 0
148 : : * respectively.
149 : : */
150 : : static struct pcpu_chunk *pcpu_reserved_chunk;
151 : : static int pcpu_reserved_chunk_limit;
152 : :
153 : : /*
154 : : * Synchronization rules.
155 : : *
156 : : * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
157 : : * protects allocation/reclaim paths, chunks, populated bitmap and
158 : : * vmalloc mapping. The latter is a spinlock and protects the index
159 : : * data structures - chunk slots, chunks and area maps in chunks.
160 : : *
161 : : * During allocation, pcpu_alloc_mutex is kept locked all the time and
162 : : * pcpu_lock is grabbed and released as necessary. All actual memory
163 : : * allocations are done using GFP_KERNEL with pcpu_lock released. In
164 : : * general, percpu memory can't be allocated with irq off but
165 : : * irqsave/restore are still used in alloc path so that it can be used
166 : : * from early init path - sched_init() specifically.
167 : : *
168 : : * Free path accesses and alters only the index data structures, so it
169 : : * can be safely called from atomic context. When memory needs to be
170 : : * returned to the system, free path schedules reclaim_work which
171 : : * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
172 : : * reclaimed, releases both locks and frees the chunks. Note that it's
173 : : * necessary to grab both locks to remove a chunk from circulation as
174 : : * allocation path might be referencing the chunk with only
175 : : * pcpu_alloc_mutex locked.
176 : : */
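/*
 * The allocation path therefore has the following shape (a sketch
 * mirroring pcpu_alloc() below, details omitted):
 *
 *	mutex_lock(&pcpu_alloc_mutex);
 *	spin_lock_irqsave(&pcpu_lock, flags);
 *	... find a chunk and mark the area allocated in its map ...
 *	spin_unlock_irqrestore(&pcpu_lock, flags);
 *	... GFP_KERNEL page population with only the mutex held ...
 *	mutex_unlock(&pcpu_alloc_mutex);
 */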
177 : : static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
178 : : static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
179 : :
180 : : static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
181 : :
182 : : /* reclaim work to release fully free chunks, scheduled from free path */
183 : : static void pcpu_reclaim(struct work_struct *work);
184 : : static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
185 : :
186 : : static bool pcpu_addr_in_first_chunk(void *addr)
187 : : {
188 : 4905 : void *first_start = pcpu_first_chunk->base_addr;
189 : :
190 [ + - ][ - + ]: 4905 : return addr >= first_start && addr < first_start + pcpu_unit_size;
191 : : }
192 : :
193 : : static bool pcpu_addr_in_reserved_chunk(void *addr)
194 : : {
195 : : void *first_start = pcpu_first_chunk->base_addr;
196 : :
197 [ + - ][ + - ]: 4905 : return addr >= first_start &&
198 : 4905 : addr < first_start + pcpu_reserved_chunk_limit;
199 : : }
200 : :
201 : 0 : static int __pcpu_size_to_slot(int size)
202 : : {
203 : : int highbit = fls(size); /* size is in bytes */
204 : 0 : return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
205 : : }
206 : :
207 : : static int pcpu_size_to_slot(int size)
208 : : {
209 [ - + ][ - + ]: 24537 : if (size == pcpu_unit_size)
210 : 0 : return pcpu_nr_slots - 1;
211 : 24537 : return __pcpu_size_to_slot(size);
212 : : }
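
/*
 * A quick sketch of the slot math: with PCPU_SLOT_BASE_SHIFT == 5, a
 * chunk with free_size == 1024 has fls(1024) == 11 and lands in slot
 * 11 - 5 + 2 == 8; sizes below 16 bytes are clamped into slot 1 and a
 * completely free chunk always goes to the last slot,
 * pcpu_nr_slots - 1.
 */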
213 : :
214 : 0 : static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
215 : : {
216 [ + - ][ + - ]: 19628 : if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
217 : : return 0;
218 : :
219 : 19628 : return pcpu_size_to_slot(chunk->free_size);
220 : : }
221 : :
222 : : /* set the pointer to a chunk in a page struct */
223 : : static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
224 : : {
225 : 0 : page->index = (unsigned long)pcpu;
226 : : }
227 : :
228 : : /* obtain pointer to a chunk from a page struct */
229 : : static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
230 : : {
231 : 0 : return (struct pcpu_chunk *)page->index;
232 : : }
233 : :
234 : : static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
235 : : {
236 : 0 : return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
237 : : }
238 : :
239 : : static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
240 : : unsigned int cpu, int page_idx)
241 : : {
242 : 24545 : return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
243 : 0 : (page_idx << PAGE_SHIFT);
244 : : }
245 : :
246 : 0 : static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
247 : : int *rs, int *re, int end)
248 : : {
249 : 0 : *rs = find_next_zero_bit(chunk->populated, end, *rs);
250 : 0 : *re = find_next_bit(chunk->populated, end, *rs + 1);
251 : 0 : }
252 : :
253 : 0 : static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
254 : : int *rs, int *re, int end)
255 : : {
256 : 4909 : *rs = find_next_bit(chunk->populated, end, *rs);
257 : 4909 : *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
258 : 4909 : }
259 : :
260 : : /*
261 : : * (Un)populated page region iterators. Iterate over (un)populated
262 : : * page regions between @start and @end in @chunk. @rs and @re should
263 : : * be integer variables and will be set to start and end page index of
264 : : * the current region.
265 : : */
266 : : #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
267 : : for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
268 : : (rs) < (re); \
269 : : (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
270 : :
271 : : #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
272 : : for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
273 : : (rs) < (re); \
274 : : (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
275 : :
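/*
 * A typical (hypothetical) use, visiting every unpopulated page
 * region of a chunk:
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		populate_range(chunk, rs, re);
 *
 * where populate_range() stands in for whatever the caller does with
 * each region and [rs, re) are the page indexes it covers.
 */
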
276 : : /**
277 : : * pcpu_mem_zalloc - allocate memory
278 : : * @size: bytes to allocate
279 : : *
280 : : * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
281 : : * kzalloc() is used; otherwise, vzalloc() is used. The returned
282 : : * memory is always zeroed.
283 : : *
284 : : * CONTEXT:
285 : : * Does GFP_KERNEL allocation.
286 : : *
287 : : * RETURNS:
288 : : * Pointer to the allocated area on success, NULL on failure.
289 : : */
290 : 0 : static void *pcpu_mem_zalloc(size_t size)
291 : : {
292 [ # # ][ # # ][ # # ][ # # ]: 0 : if (WARN_ON_ONCE(!slab_is_available()))
293 : : return NULL;
294 : :
295 [ # # ]: 0 : if (size <= PAGE_SIZE)
296 : 0 : return kzalloc(size, GFP_KERNEL);
297 : : else
298 : 0 : return vzalloc(size);
299 : : }
300 : :
301 : : /**
302 : : * pcpu_mem_free - free memory
303 : : * @ptr: memory to free
304 : : * @size: size of the area
305 : : *
306 : : * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
307 : : */
308 : : static void pcpu_mem_free(void *ptr, size_t size)
309 : : {
310 [ # # ][ # # # # ][ # # ]: 0 : if (size <= PAGE_SIZE)
311 : 0 : kfree(ptr);
312 : : else
313 : 0 : vfree(ptr);
314 : : }
315 : :
316 : : /**
317 : : * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
318 : : * @chunk: chunk of interest
319 : : * @oslot: the previous slot it was on
320 : : *
321 : : * This function is called after an allocation or free changed @chunk.
322 : : * New slot according to the changed state is determined and @chunk is
323 : : * moved to the slot. Note that the reserved chunk is never put on
324 : : * chunk slots.
325 : : *
326 : : * CONTEXT:
327 : : * pcpu_lock.
328 : : */
329 : 0 : static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
330 : : {
331 : 9814 : int nslot = pcpu_chunk_slot(chunk);
332 : :
333 [ + - ][ + ]: 9814 : if (chunk != pcpu_reserved_chunk && oslot != nslot) {
334 [ + + ]: 9977 : if (oslot < nslot)
335 : 81 : list_move(&chunk->list, &pcpu_slot[nslot]);
336 : : else
337 : 82 : list_move_tail(&chunk->list, &pcpu_slot[nslot]);
338 : : }
339 : 0 : }
340 : :
341 : : /**
342 : : * pcpu_need_to_extend - determine whether chunk area map needs to be extended
343 : : * @chunk: chunk of interest
344 : : *
345 : : * Determine whether area map of @chunk needs to be extended to
346 : : * accommodate a new allocation.
347 : : *
348 : : * CONTEXT:
349 : : * pcpu_lock.
350 : : *
351 : : * RETURNS:
352 : : * New target map allocation length if extension is necessary, 0
353 : : * otherwise.
354 : : */
355 : : static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
356 : : {
357 : : int new_alloc;
358 : :
359 [ # # ][ - + ]: 4909 : if (chunk->map_alloc >= chunk->map_used + 2)
360 : : return 0;
361 : :
362 : : new_alloc = PCPU_DFL_MAP_ALLOC;
363 [ # # ][ # # ]: 0 : while (new_alloc < chunk->map_used + 2)
364 : 0 : new_alloc *= 2;
365 : :
366 : : return new_alloc;
367 : : }
368 : :
369 : : /**
370 : : * pcpu_extend_area_map - extend area map of a chunk
371 : : * @chunk: chunk of interest
372 : : * @new_alloc: new target allocation length of the area map
373 : : *
374 : : * Extend area map of @chunk to have @new_alloc entries.
375 : : *
376 : : * CONTEXT:
377 : : * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
378 : : *
379 : : * RETURNS:
380 : : * 0 on success, -errno on failure.
381 : : */
382 : 0 : static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
383 : : {
384 : : int *old = NULL, *new = NULL;
385 : 0 : size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
386 : : unsigned long flags;
387 : :
388 : 0 : new = pcpu_mem_zalloc(new_size);
389 [ # # ]: 0 : if (!new)
390 : : return -ENOMEM;
391 : :
392 : : /* acquire pcpu_lock and switch to new area map */
393 : 0 : spin_lock_irqsave(&pcpu_lock, flags);
394 : :
395 [ # # ]: 0 : if (new_alloc <= chunk->map_alloc)
396 : : goto out_unlock;
397 : :
398 : 0 : old_size = chunk->map_alloc * sizeof(chunk->map[0]);
399 : 0 : old = chunk->map;
400 : :
401 : 0 : memcpy(new, old, old_size);
402 : :
403 : 0 : chunk->map_alloc = new_alloc;
404 : 0 : chunk->map = new;
405 : : new = NULL;
406 : :
407 : : out_unlock:
408 : : spin_unlock_irqrestore(&pcpu_lock, flags);
409 : :
410 : : /*
411 : : * pcpu_mem_free() might end up calling vfree() which uses
412 : : * IRQ-unsafe lock and thus can't be called under pcpu_lock.
413 : : */
414 : : pcpu_mem_free(old, old_size);
415 : : pcpu_mem_free(new, new_size);
416 : :
417 : : return 0;
418 : : }
419 : :
420 : : /**
421 : : * pcpu_split_block - split a map block
422 : : * @chunk: chunk of interest
423 : : * @i: index of map block to split
424 : : * @head: head size in bytes (can be 0)
425 : : * @tail: tail size in bytes (can be 0)
426 : : *
427 : : * Split the @i'th map block into two or three blocks. If @head is
428 : : * non-zero, a @head bytes block is inserted before block @i, moving
429 : : * it to @i+1 and reducing its size by @head bytes.
430 : : *
431 : : * If @tail is non-zero, the target block, which can be @i or @i+1
432 : : * depending on @head, is reduced by @tail bytes and a @tail bytes
433 : : * block is inserted after the target block.
434 : : *
435 : : * @chunk->map must have enough free slots to accommodate the split.
436 : : *
437 : : * CONTEXT:
438 : : * pcpu_lock.
439 : : */
440 : 0 : static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
441 : : int head, int tail)
442 : : {
443 : 4656 : int nr_extra = !!head + !!tail;
444 : :
445 [ - + ]: 4656 : BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
446 : :
447 : : /* insert new subblocks */
448 : 9312 : memmove(&chunk->map[i + nr_extra], &chunk->map[i],
449 : 4656 : sizeof(chunk->map[0]) * (chunk->map_used - i));
450 : 4656 : chunk->map_used += nr_extra;
451 : :
452 [ - + ]: 4656 : if (head) {
453 : 0 : chunk->map[i + 1] = chunk->map[i] - head;
454 : 0 : chunk->map[i++] = head;
455 : : }
456 [ + ]: 4656 : if (tail) {
457 : 4656 : chunk->map[i++] -= tail;
458 : 4656 : chunk->map[i] = tail;
459 : : }
460 : 0 : }
461 : :
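/*
 * Sketch with hypothetical numbers: carving a 512-byte area out of a
 * 1024-byte free block at index i with head == 64 and tail == 448
 * turns
 *
 *	..., 1024, ...			(map_used == n)
 * into
 *	..., 64, 512, 448, ...		(map_used == n + 2)
 *
 * The caller (pcpu_alloc_area()) then negates the middle entry to
 * mark it allocated.
 */
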
462 : : /**
463 : : * pcpu_alloc_area - allocate area from a pcpu_chunk
464 : : * @chunk: chunk of interest
465 : : * @size: wanted size in bytes
466 : : * @align: wanted align
467 : : *
468 : : * Try to allocate @size bytes area aligned at @align from @chunk.
469 : : * Note that this function only allocates the offset. It doesn't
470 : : * populate or map the area.
471 : : *
472 : : * @chunk->map must have at least two free slots.
473 : : *
474 : : * CONTEXT:
475 : : * pcpu_lock.
476 : : *
477 : : * RETURNS:
478 : : * Allocated offset in @chunk on success, -1 if no matching area is
479 : : * found.
480 : : */
481 : 0 : static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
482 : : {
483 : 4909 : int oslot = pcpu_chunk_slot(chunk);
484 : : int max_contig = 0;
485 : : int i, off;
486 : :
487 [ + - ]: 3088940 : for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
488 : 3088940 : bool is_last = i + 1 == chunk->map_used;
489 : : int head, tail;
490 : :
491 : : /* extra for alignment requirement */
492 : 3088940 : head = ALIGN(off, align) - off;
493 [ + ]: 3088940 : BUG_ON(i == 0 && head != 0);
494 : :
495 [ + + ]: 3093849 : if (chunk->map[i] < 0)
496 : 3083969 : continue;
497 [ + + ]: 9880 : if (chunk->map[i] < head + size) {
498 : 62 : max_contig = max(chunk->map[i], max_contig);
499 : 62 : continue;
500 : : }
501 : :
502 : : /*
503 : : * If head is small or the previous block is free,
504 : : * merge'em. Note that 'small' is defined as smaller
505 : : * than sizeof(int), which is very small but isn't too
506 : : * uncommon for percpu allocations.
507 : : */
508 [ - + ][ # # ][ # # ]: 4909 : if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
509 [ # # ]: 0 : if (chunk->map[i - 1] > 0)
510 : 0 : chunk->map[i - 1] += head;
511 : : else {
512 : 0 : chunk->map[i - 1] -= head;
513 : 0 : chunk->free_size -= head;
514 : : }
515 : 0 : chunk->map[i] -= head;
516 : : off += head;
517 : : head = 0;
518 : : }
519 : :
520 : : /* if tail is small, just keep it around */
521 : 4909 : tail = chunk->map[i] - head - size;
522 [ + + ]: 4909 : if (tail < sizeof(int))
523 : : tail = 0;
524 : :
525 : : /* split if warranted */
526 [ + + ]: 4909 : if (head || tail) {
527 : 4656 : pcpu_split_block(chunk, i, head, tail);
528 [ - + ]: 4656 : if (head) {
529 : : i++;
530 : 0 : off += head;
531 : 0 : max_contig = max(chunk->map[i - 1], max_contig);
532 : : }
533 [ + - ]: 4656 : if (tail)
534 : 4656 : max_contig = max(chunk->map[i + 1], max_contig);
535 : : }
536 : :
537 : : /* update hint and mark allocated */
538 [ + + ]: 4909 : if (is_last)
539 : 4479 : chunk->contig_hint = max_contig; /* fully scanned */
540 : : else
541 : 430 : chunk->contig_hint = max(chunk->contig_hint,
542 : : max_contig);
543 : :
544 : 4909 : chunk->free_size -= chunk->map[i];
545 : 4909 : chunk->map[i] = -chunk->map[i];
546 : :
547 : 4909 : pcpu_chunk_relocate(chunk, oslot);
548 : 4909 : return off;
549 : : }
550 : :
551 : 0 : chunk->contig_hint = max_contig; /* fully scanned */
552 : 0 : pcpu_chunk_relocate(chunk, oslot);
553 : :
554 : : /* tell the upper layer that this chunk has no matching area */
555 : 0 : return -1;
556 : : }
557 : :
558 : : /**
559 : : * pcpu_free_area - free area to a pcpu_chunk
560 : : * @chunk: chunk of interest
561 : : * @freeme: offset of area to free
562 : : *
563 : : * Free the area starting at offset @freeme in @chunk. Note that this function
564 : : * only modifies the allocation map. It doesn't depopulate or unmap
565 : : * the area.
566 : : *
567 : : * CONTEXT:
568 : : * pcpu_lock.
569 : : */
570 : 0 : static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
571 : : {
572 : 4905 : int oslot = pcpu_chunk_slot(chunk);
573 : : int i, off;
574 : :
575 [ + - ]: 3087767 : for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
576 [ + + ]: 3082862 : if (off == freeme)
577 : : break;
578 [ - + ]: 4905 : BUG_ON(off != freeme);
579 [ - + ]: 4905 : BUG_ON(chunk->map[i] > 0);
580 : :
581 : 4905 : chunk->map[i] = -chunk->map[i];
582 : 4905 : chunk->free_size += chunk->map[i];
583 : :
584 : : /* merge with previous? */
585 [ + - ][ + + ]: 4905 : if (i > 0 && chunk->map[i - 1] >= 0) {
586 : 1235 : chunk->map[i - 1] += chunk->map[i];
587 : 1235 : chunk->map_used--;
588 : 2470 : memmove(&chunk->map[i], &chunk->map[i + 1],
589 : 1235 : (chunk->map_used - i) * sizeof(chunk->map[0]));
590 : 1235 : i--;
591 : : }
592 : : /* merge with next? */
593 [ + - ][ + + ]: 4905 : if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
594 : 3418 : chunk->map[i] += chunk->map[i + 1];
595 : 3418 : chunk->map_used--;
596 : 3418 : memmove(&chunk->map[i + 1], &chunk->map[i + 2],
597 : 3418 : (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
598 : : }
599 : :
600 : 4905 : chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
601 : 4905 : pcpu_chunk_relocate(chunk, oslot);
602 : 4905 : }
603 : :
604 : 0 : static struct pcpu_chunk *pcpu_alloc_chunk(void)
605 : : {
606 : : struct pcpu_chunk *chunk;
607 : :
608 : 0 : chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
609 [ # # ]: 0 : if (!chunk)
610 : : return NULL;
611 : :
612 : 0 : chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
613 : : sizeof(chunk->map[0]));
614 [ # # ]: 0 : if (!chunk->map) {
615 : 0 : kfree(chunk);
616 : 0 : return NULL;
617 : : }
618 : :
619 : 0 : chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
620 : 0 : chunk->map[chunk->map_used++] = pcpu_unit_size;
621 : :
622 : 0 : INIT_LIST_HEAD(&chunk->list);
623 : 0 : chunk->free_size = pcpu_unit_size;
624 : 0 : chunk->contig_hint = pcpu_unit_size;
625 : :
626 : 0 : return chunk;
627 : : }
628 : :
629 : 0 : static void pcpu_free_chunk(struct pcpu_chunk *chunk)
630 : : {
631 [ # # ]: 0 : if (!chunk)
632 : 0 : return;
633 : 0 : pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
634 : 0 : pcpu_mem_free(chunk, pcpu_chunk_struct_size);
635 : : }
636 : :
637 : : /*
638 : : * Chunk management implementation.
639 : : *
640 : : * To allow different implementations, chunk alloc/free and
641 : : * [de]population are implemented in a separate file which is pulled
642 : : * into this file and compiled together. The following functions
643 : : * should be implemented.
644 : : *
645 : : * pcpu_populate_chunk - populate the specified range of a chunk
646 : : * pcpu_depopulate_chunk - depopulate the specified range of a chunk
647 : : * pcpu_create_chunk - create a new chunk
648 : : * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
649 : : * pcpu_addr_to_page - translate address to the corresponding struct page
650 : : * pcpu_verify_alloc_info - check alloc_info is acceptable during init
651 : : */
652 : : static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
653 : : static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
654 : : static struct pcpu_chunk *pcpu_create_chunk(void);
655 : : static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
656 : : static struct page *pcpu_addr_to_page(void *addr);
657 : : static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
658 : :
659 : : #ifdef CONFIG_NEED_PER_CPU_KM
660 : : #include "percpu-km.c"
661 : : #else
662 : : #include "percpu-vm.c"
663 : : #endif
664 : :
665 : : /**
666 : : * pcpu_chunk_addr_search - determine chunk containing specified address
667 : : * @addr: address for which the chunk needs to be determined.
668 : : *
669 : : * RETURNS:
670 : : * The address of the found chunk.
671 : : */
672 : 0 : static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
673 : : {
674 : : /* is it in the first chunk? */
675 [ + - ]: 4905 : if (pcpu_addr_in_first_chunk(addr)) {
676 : : /* is it in the reserved area? */
677 [ - + ]: 4905 : if (pcpu_addr_in_reserved_chunk(addr))
678 : 0 : return pcpu_reserved_chunk;
679 : : return pcpu_first_chunk;
680 : : }
681 : :
682 : : /*
683 : : * The address is relative to unit0 which might be unused and
684 : : * thus unmapped. Offset the address to the unit space of the
685 : : * current processor before looking it up in the vmalloc
686 : : * space. Note that any possible cpu id can be used here, so
687 : : * there's no need to worry about preemption or cpu hotplug.
688 : : */
689 : 0 : addr += pcpu_unit_offsets[raw_smp_processor_id()];
690 : 0 : return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
691 : : }
692 : :
693 : : /**
694 : : * pcpu_alloc - the percpu allocator
695 : : * @size: size of area to allocate in bytes
696 : : * @align: alignment of area (max PAGE_SIZE)
697 : : * @reserved: allocate from the reserved chunk if available
698 : : *
699 : : * Allocate percpu area of @size bytes aligned at @align.
700 : : *
701 : : * CONTEXT:
702 : : * Does GFP_KERNEL allocation.
703 : : *
704 : : * RETURNS:
705 : : * Percpu pointer to the allocated area on success, NULL on failure.
706 : : */
707 : 0 : static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
708 : : {
709 : : static int warn_limit = 10;
710 : : struct pcpu_chunk *chunk;
711 : : const char *err;
712 : : int slot, off, new_alloc;
713 : : unsigned long flags;
714 : : void __percpu *ptr;
715 : :
716 [ + - ][ - + ]: 4909 : if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
717 : 0 : WARN(true, "illegal size (%zu) or align (%zu) for "
718 : : "percpu allocation\n", size, align);
719 : 0 : return NULL;
720 : : }
721 : :
722 : 4909 : mutex_lock(&pcpu_alloc_mutex);
723 : 4909 : spin_lock_irqsave(&pcpu_lock, flags);
724 : :
725 : : /* serve reserved allocations from the reserved chunk if available */
726 [ - + ][ # # ]: 4909 : if (reserved && pcpu_reserved_chunk) {
727 : : chunk = pcpu_reserved_chunk;
728 : :
729 [ # # ]: 0 : if (size > chunk->contig_hint) {
730 : : err = "alloc from reserved chunk failed";
731 : : goto fail_unlock;
732 : : }
733 : :
734 [ # # ]: 0 : while ((new_alloc = pcpu_need_to_extend(chunk))) {
735 : : spin_unlock_irqrestore(&pcpu_lock, flags);
736 [ # # ]: 0 : if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
737 : : err = "failed to extend area map of reserved chunk";
738 : : goto fail_unlock_mutex;
739 : : }
740 : 0 : spin_lock_irqsave(&pcpu_lock, flags);
741 : : }
742 : :
743 : 0 : off = pcpu_alloc_area(chunk, size, align);
744 [ # # ]: 0 : if (off >= 0)
745 : : goto area_found;
746 : :
747 : : err = "alloc from reserved chunk failed";
748 : : goto fail_unlock;
749 : : }
750 : :
751 : : restart:
752 : : /* search through normal chunks */
753 [ + - ]: 44234 : for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
754 [ + + ]: 44234 : list_for_each_entry(chunk, &pcpu_slot[slot], list) {
755 [ - + ]: 4909 : if (size > chunk->contig_hint)
756 : 0 : continue;
757 : :
758 : : new_alloc = pcpu_need_to_extend(chunk);
759 [ - + ]: 4909 : if (new_alloc) {
760 : : spin_unlock_irqrestore(&pcpu_lock, flags);
761 [ # # ]: 0 : if (pcpu_extend_area_map(chunk,
762 : : new_alloc) < 0) {
763 : : err = "failed to extend area map";
764 : : goto fail_unlock_mutex;
765 : : }
766 : 0 : spin_lock_irqsave(&pcpu_lock, flags);
767 : : /*
768 : : * pcpu_lock has been dropped, need to
769 : : * restart pcpu_slot list walking.
770 : : */
771 : 0 : goto restart;
772 : : }
773 : :
774 : 4909 : off = pcpu_alloc_area(chunk, size, align);
775 [ - + ]: 4909 : if (off >= 0)
776 : : goto area_found;
777 : : }
778 : : }
779 : :
780 : : /* hmmm... no space left, create a new chunk */
781 : : spin_unlock_irqrestore(&pcpu_lock, flags);
782 : :
783 : 0 : chunk = pcpu_create_chunk();
784 [ # # ]: 0 : if (!chunk) {
785 : : err = "failed to allocate new chunk";
786 : : goto fail_unlock_mutex;
787 : : }
788 : :
789 : 0 : spin_lock_irqsave(&pcpu_lock, flags);
790 : 0 : pcpu_chunk_relocate(chunk, -1);
791 : 0 : goto restart;
792 : :
793 : : area_found:
794 : : spin_unlock_irqrestore(&pcpu_lock, flags);
795 : :
796 : : /* populate, map and clear the area */
797 [ - + ]: 4909 : if (pcpu_populate_chunk(chunk, off, size)) {
798 : 0 : spin_lock_irqsave(&pcpu_lock, flags);
799 : 0 : pcpu_free_area(chunk, off);
800 : : err = "failed to populate";
801 : 0 : goto fail_unlock;
802 : : }
803 : :
804 : 4909 : mutex_unlock(&pcpu_alloc_mutex);
805 : :
806 : : /* return address relative to base address */
807 : 4909 : ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
808 : : kmemleak_alloc_percpu(ptr, size);
809 : 4909 : return ptr;
810 : :
811 : : fail_unlock:
812 : : spin_unlock_irqrestore(&pcpu_lock, flags);
813 : : fail_unlock_mutex:
814 : 0 : mutex_unlock(&pcpu_alloc_mutex);
815 [ # # ]: 0 : if (warn_limit) {
816 : 0 : pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
817 : : "%s\n", size, align, err);
818 : 0 : dump_stack();
819 [ # # ]: 0 : if (!--warn_limit)
820 : 0 : pr_info("PERCPU: limit reached, disable warning\n");
821 : : }
822 : : return NULL;
823 : : }
824 : :
825 : : /**
826 : : * __alloc_percpu - allocate dynamic percpu area
827 : : * @size: size of area to allocate in bytes
828 : : * @align: alignment of area (max PAGE_SIZE)
829 : : *
830 : : * Allocate zero-filled percpu area of @size bytes aligned at @align.
831 : : * Might sleep. Might trigger writeouts.
832 : : *
833 : : * CONTEXT:
834 : : * Does GFP_KERNEL allocation.
835 : : *
836 : : * RETURNS:
837 : : * Percpu pointer to the allocated area on success, NULL on failure.
838 : : */
839 : 0 : void __percpu *__alloc_percpu(size_t size, size_t align)
840 : : {
841 : 4909 : return pcpu_alloc(size, align, false);
842 : : }
843 : : EXPORT_SYMBOL_GPL(__alloc_percpu);
844 : :
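/*
 * Caller-side usage, sketched with a made-up struct:
 *
 *	struct foo_stats __percpu *stats;
 *
 *	stats = alloc_percpu(struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->nr_events);
 *	...
 *	free_percpu(stats);
 *
 * alloc_percpu() is the type-safe wrapper around __alloc_percpu()
 * and this_cpu_*()/per_cpu_ptr() turn the returned percpu pointer
 * into per-cpu accesses.
 */
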
845 : : /**
846 : : * __alloc_reserved_percpu - allocate reserved percpu area
847 : : * @size: size of area to allocate in bytes
848 : : * @align: alignment of area (max PAGE_SIZE)
849 : : *
850 : : * Allocate zero-filled percpu area of @size bytes aligned at @align
851 : : * from reserved percpu area if arch has set it up; otherwise,
852 : : * allocation is served from the same dynamic area. Might sleep.
853 : : * Might trigger writeouts.
854 : : *
855 : : * CONTEXT:
856 : : * Does GFP_KERNEL allocation.
857 : : *
858 : : * RETURNS:
859 : : * Percpu pointer to the allocated area on success, NULL on failure.
860 : : */
861 : 0 : void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
862 : : {
863 : 0 : return pcpu_alloc(size, align, true);
864 : : }
865 : :
866 : : /**
867 : : * pcpu_reclaim - reclaim fully free chunks, workqueue function
868 : : * @work: unused
869 : : *
870 : : * Reclaim all fully free chunks except for the first one.
871 : : *
872 : : * CONTEXT:
873 : : * workqueue context.
874 : : */
875 : 0 : static void pcpu_reclaim(struct work_struct *work)
876 : : {
877 : 0 : LIST_HEAD(todo);
878 : 0 : struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
879 : : struct pcpu_chunk *chunk, *next;
880 : :
881 : 0 : mutex_lock(&pcpu_alloc_mutex);
882 : : spin_lock_irq(&pcpu_lock);
883 : :
884 [ # # ]: 0 : list_for_each_entry_safe(chunk, next, head, list) {
885 [ # # ]: 0 : WARN_ON(chunk->immutable);
886 : :
887 : : /* spare the first one */
888 [ # # ]: 0 : if (chunk == list_first_entry(head, struct pcpu_chunk, list))
889 : 0 : continue;
890 : :
891 : : list_move(&chunk->list, &todo);
892 : : }
893 : :
894 : : spin_unlock_irq(&pcpu_lock);
895 : :
896 [ # # ]: 0 : list_for_each_entry_safe(chunk, next, &todo, list) {
897 : 0 : pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
898 : 0 : pcpu_destroy_chunk(chunk);
899 : : }
900 : :
901 : 0 : mutex_unlock(&pcpu_alloc_mutex);
902 : 0 : }
903 : :
904 : : /**
905 : : * free_percpu - free percpu area
906 : : * @ptr: pointer to area to free
907 : : *
908 : : * Free percpu area @ptr.
909 : : *
910 : : * CONTEXT:
911 : : * Can be called from atomic context.
912 : : */
913 : 0 : void free_percpu(void __percpu *ptr)
914 : : {
915 : : void *addr;
916 : : struct pcpu_chunk *chunk;
917 : : unsigned long flags;
918 : : int off;
919 : :
920 [ + + ]: 4942 : if (!ptr)
921 : 4942 : return;
922 : :
923 : : kmemleak_free_percpu(ptr);
924 : :
925 : 4905 : addr = __pcpu_ptr_to_addr(ptr);
926 : :
927 : 4905 : spin_lock_irqsave(&pcpu_lock, flags);
928 : :
929 : 4905 : chunk = pcpu_chunk_addr_search(addr);
930 : 4905 : off = addr - chunk->base_addr;
931 : :
932 : 4905 : pcpu_free_area(chunk, off);
933 : :
934 : : /* if there is more than one fully free chunk, wake up the grim reaper */
935 [ - + ]: 4905 : if (chunk->free_size == pcpu_unit_size) {
936 : : struct pcpu_chunk *pos;
937 : :
938 [ # # ]: 0 : list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
939 [ # # ]: 0 : if (pos != chunk) {
940 : : schedule_work(&pcpu_reclaim_work);
941 : : break;
942 : : }
943 : : }
944 : :
945 : : spin_unlock_irqrestore(&pcpu_lock, flags);
946 : : }
947 : : EXPORT_SYMBOL_GPL(free_percpu);
948 : :
949 : : /**
950 : : * is_kernel_percpu_address - test whether address is from static percpu area
951 : : * @addr: address to test
952 : : *
953 : : * Test whether @addr belongs to the in-kernel static percpu area. Module
954 : : * static percpu areas are not considered. For those, use
955 : : * is_module_percpu_address().
956 : : *
957 : : * RETURNS:
958 : : * %true if @addr is from in-kernel static percpu area, %false otherwise.
959 : : */
960 : 0 : bool is_kernel_percpu_address(unsigned long addr)
961 : : {
962 : : #ifdef CONFIG_SMP
963 : 0 : const size_t static_size = __per_cpu_end - __per_cpu_start;
964 : : void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
965 : : unsigned int cpu;
966 : :
967 [ # # ]: 0 : for_each_possible_cpu(cpu) {
968 : 0 : void *start = per_cpu_ptr(base, cpu);
969 : :
970 [ # # ][ # # ]: 0 : if ((void *)addr >= start && (void *)addr < start + static_size)
971 : : return true;
972 : : }
973 : : #endif
974 : : /* on UP, can't distinguish from other static vars, always false */
975 : : return false;
976 : : }
977 : :
978 : : /**
979 : : * per_cpu_ptr_to_phys - convert translated percpu address to physical address
980 : : * @addr: the address to be converted to physical address
981 : : *
982 : : * Given @addr, which is a dereferenceable address obtained via one of
983 : : * the percpu access macros, this function translates it into its physical
984 : : * address. The caller is responsible for ensuring @addr stays valid
985 : : * until this function finishes.
986 : : *
987 : : * The percpu allocator has special setup for the first chunk, which
988 : : * currently supports either embedding in the linear address space or
989 : : * vmalloc mapping, and, from the second chunk on, the backing allocator
990 : : * (currently either vm or km) provides the translation.
991 : : *
992 : : * The addr could be translated simply without checking whether it falls
993 : : * into the first chunk. But the current code better reflects how the
994 : : * percpu allocator actually works, and the verification can discover
995 : : * bugs both in the percpu allocator itself and in per_cpu_ptr_to_phys()
996 : : * callers. So we keep the current code.
997 : : *
998 : : * RETURNS:
999 : : * The physical address for @addr.
1000 : : */
1001 : 0 : phys_addr_t per_cpu_ptr_to_phys(void *addr)
1002 : : {
1003 : : void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1004 : : bool in_first_chunk = false;
1005 : : unsigned long first_low, first_high;
1006 : : unsigned int cpu;
1007 : :
1008 : : /*
1009 : : * The following test on unit_low/high isn't strictly
1010 : : * necessary but will speed up lookups of addresses which
1011 : : * aren't in the first chunk.
1012 : : */
1013 : 0 : first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
1014 : 0 : first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
1015 : : pcpu_unit_pages);
1016 [ # # ]: 0 : if ((unsigned long)addr >= first_low &&
1017 : 0 : (unsigned long)addr < first_high) {
1018 [ # # ]: 0 : for_each_possible_cpu(cpu) {
1019 : 0 : void *start = per_cpu_ptr(base, cpu);
1020 : :
1021 [ # # ][ # # ]: 0 : if (addr >= start && addr < start + pcpu_unit_size) {
1022 : : in_first_chunk = true;
1023 : : break;
1024 : : }
1025 : : }
1026 : : }
1027 : :
1028 [ # # ]: 0 : if (in_first_chunk) {
1029 [ # # ]: 0 : if (!is_vmalloc_addr(addr))
1030 : 0 : return __pa(addr);
1031 : : else
1032 : 0 : return page_to_phys(vmalloc_to_page(addr)) +
1033 : 0 : offset_in_page(addr);
1034 : : } else
1035 : 0 : return page_to_phys(pcpu_addr_to_page(addr)) +
1036 : 0 : offset_in_page(addr);
1037 : : }
1038 : :
1039 : : /**
1040 : : * pcpu_alloc_alloc_info - allocate percpu allocation info
1041 : : * @nr_groups: the number of groups
1042 : : * @nr_units: the number of units
1043 : : *
1044 : : * Allocate ai which is large enough for @nr_groups groups containing
1045 : : * @nr_units units. The returned ai's groups[0].cpu_map points to the
1046 : : * cpu_map array which is long enough for @nr_units and filled with
1047 : : * NR_CPUS. It's the caller's responsibility to initialize cpu_map
1048 : : * pointers of the other groups.
1049 : : *
1050 : : * RETURNS:
1051 : : * Pointer to the allocated pcpu_alloc_info on success, NULL on
1052 : : * failure.
1053 : : */
1054 : 0 : struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1055 : : int nr_units)
1056 : : {
1057 : : struct pcpu_alloc_info *ai;
1058 : : size_t base_size, ai_size;
1059 : : void *ptr;
1060 : : int unit;
1061 : :
1062 : 0 : base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1063 : : __alignof__(ai->groups[0].cpu_map[0]));
1064 : 0 : ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1065 : :
1066 : 0 : ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
1067 [ # # ]: 0 : if (!ptr)
1068 : : return NULL;
1069 : : ai = ptr;
1070 : 0 : ptr += base_size;
1071 : :
1072 : 0 : ai->groups[0].cpu_map = ptr;
1073 : :
1074 [ # # ]: 0 : for (unit = 0; unit < nr_units; unit++)
1075 : 0 : ai->groups[0].cpu_map[unit] = NR_CPUS;
1076 : :
1077 : 0 : ai->nr_groups = nr_groups;
1078 : 0 : ai->__ai_size = PFN_ALIGN(ai_size);
1079 : :
1080 : 0 : return ai;
1081 : : }
1082 : :
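/*
 * Layout of the returned block, all in one page-aligned allocation:
 *
 *	| struct pcpu_alloc_info | groups[0..nr_groups-1] | cpu_map[nr_units] |
 *
 * groups[0].cpu_map points at the trailing cpu_map array; the cpu_map
 * pointers of the remaining groups are left for the caller to set.
 */
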
1083 : : /**
1084 : : * pcpu_free_alloc_info - free percpu allocation info
1085 : : * @ai: pcpu_alloc_info to free
1086 : : *
1087 : : * Free @ai which was allocated by pcpu_alloc_alloc_info().
1088 : : */
1089 : 0 : void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1090 : : {
1091 : 0 : memblock_free_early(__pa(ai), ai->__ai_size);
1092 : 0 : }
1093 : :
1094 : : /**
1095 : : * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1096 : : * @lvl: loglevel
1097 : : * @ai: allocation info to dump
1098 : : *
1099 : : * Print out information about @ai using loglevel @lvl.
1100 : : */
1101 : 0 : static void pcpu_dump_alloc_info(const char *lvl,
1102 : : const struct pcpu_alloc_info *ai)
1103 : : {
1104 : : int group_width = 1, cpu_width = 1, width;
1105 : 0 : char empty_str[] = "--------";
1106 : : int alloc = 0, alloc_end = 0;
1107 : : int group, v;
1108 : : int upa, apl; /* units per alloc, allocs per line */
1109 : :
1110 : 0 : v = ai->nr_groups;
1111 [ # # ]: 0 : while (v /= 10)
1112 : 0 : group_width++;
1113 : :
1114 : 0 : v = num_possible_cpus();
1115 [ # # ]: 0 : while (v /= 10)
1116 : 0 : cpu_width++;
1117 : 0 : empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1118 : :
1119 : 0 : upa = ai->alloc_size / ai->unit_size;
1120 : 0 : width = upa * (cpu_width + 1) + group_width + 3;
1121 : 0 : apl = rounddown_pow_of_two(max(60 / width, 1));
1122 : :
1123 : 0 : printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1124 : : lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1125 : 0 : ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1126 : :
1127 [ # # ]: 0 : for (group = 0; group < ai->nr_groups; group++) {
1128 : 0 : const struct pcpu_group_info *gi = &ai->groups[group];
1129 : : int unit = 0, unit_end = 0;
1130 : :
1131 [ # # ]: 0 : BUG_ON(gi->nr_units % upa);
1132 [ # # ]: 0 : for (alloc_end += gi->nr_units / upa;
1133 : 0 : alloc < alloc_end; alloc++) {
1134 [ # # ]: 0 : if (!(alloc % apl)) {
1135 : 0 : printk(KERN_CONT "\n");
1136 : 0 : printk("%spcpu-alloc: ", lvl);
1137 : : }
1138 : 0 : printk(KERN_CONT "[%0*d] ", group_width, group);
1139 : :
1140 [ # # ]: 0 : for (unit_end += upa; unit < unit_end; unit++)
1141 [ # # ]: 0 : if (gi->cpu_map[unit] != NR_CPUS)
1142 : 0 : printk(KERN_CONT "%0*d ", cpu_width,
1143 : : gi->cpu_map[unit]);
1144 : : else
1145 : 0 : printk(KERN_CONT "%s ", empty_str);
1146 : : }
1147 : : }
1148 : 0 : printk(KERN_CONT "\n");
1149 : 0 : }
1150 : :
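/*
 * The dump looks something like this (values purely illustrative,
 * four cpus in one group, one unit per allocation):
 *
 *	pcpu-alloc: s45056 r8192 d20480 u73728 alloc=18*4096
 *	pcpu-alloc: [0] 0 [0] 1 [0] 2 [0] 3
 *
 * i.e. the static/reserved/dynamic/unit sizes followed by a [group]
 * cpu entry per unit.
 */
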
1151 : : /**
1152 : : * pcpu_setup_first_chunk - initialize the first percpu chunk
1153 : : * @ai: pcpu_alloc_info describing how the percpu area is shaped
1154 : : * @base_addr: mapped address
1155 : : *
1156 : : * Initialize the first percpu chunk which contains the kernel static
1157 : : * percpu area. This function is to be called from the arch percpu area
1158 : : * setup path.
1159 : : *
1160 : : * @ai contains all information necessary to initialize the first
1161 : : * chunk and prime the dynamic percpu allocator.
1162 : : *
1163 : : * @ai->static_size is the size of static percpu area.
1164 : : *
1165 : : * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1166 : : * reserve after the static area in the first chunk. This reserves
1167 : : * the first chunk such that it's available only through reserved
1168 : : * percpu allocation. This is primarily used to serve module percpu
1169 : : * static areas on architectures where the addressing model has
1170 : : * limited offset range for symbol relocations to guarantee module
1171 : : * percpu symbols fall inside the relocatable range.
1172 : : *
1173 : : * @ai->dyn_size determines the number of bytes available for dynamic
1174 : : * allocation in the first chunk. The area between @ai->static_size +
1175 : : * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1176 : : *
1177 : : * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1178 : : * and equal to or larger than @ai->static_size + @ai->reserved_size +
1179 : : * @ai->dyn_size.
1180 : : *
1181 : : * @ai->atom_size is the allocation atom size and used as alignment
1182 : : * for vm areas.
1183 : : *
1184 : : * @ai->alloc_size is the allocation size and always multiple of
1185 : : * @ai->atom_size. This is larger than @ai->atom_size if
1186 : : * @ai->unit_size is larger than @ai->atom_size.
1187 : : *
1188 : : * @ai->nr_groups and @ai->groups describe virtual memory layout of
1189 : : * percpu areas. Units which should be colocated are put into the
1190 : : * same group. Dynamic VM areas will be allocated according to these
1191 : : * groupings. If @ai->nr_groups is zero, a single group containing
1192 : : * all units is assumed.
1193 : : *
1194 : : * The caller should have mapped the first chunk at @base_addr and
1195 : : * copied static data to each unit.
1196 : : *
1197 : : * If the first chunk ends up with both reserved and dynamic areas, it
1198 : : * is served by two chunks - one to serve the core static and reserved
1199 : : * areas and the other for the dynamic area. They share the same vm
1200 : : * and page map but use different area allocation maps to stay away
1201 : : * from each other. The latter chunk is circulated in the chunk slots
1202 : : * and available for dynamic allocation like any other chunks.
1203 : : *
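 * For example (sizes purely illustrative), with a reserved area each
 * unit of the first chunk is laid out as
 *
 *	| static | reserved | dynamic |    unused    |
 *	0        s          s+r       s+r+d          unit_size
 *
 * with the static + reserved part served by the reserved chunk and
 * the dynamic part by pcpu_first_chunk.
 *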
1204 : : * RETURNS:
1205 : : * 0 on success, -errno on failure.
1206 : : */
1207 : 0 : int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1208 : : void *base_addr)
1209 : : {
1210 : : static char cpus_buf[4096] __initdata;
1211 : : static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1212 : : static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1213 : 0 : size_t dyn_size = ai->dyn_size;
1214 : 0 : size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1215 : : struct pcpu_chunk *schunk, *dchunk = NULL;
1216 : : unsigned long *group_offsets;
1217 : : size_t *group_sizes;
1218 : : unsigned long *unit_off;
1219 : : unsigned int cpu;
1220 : : int *unit_map;
1221 : : int group, unit, i;
1222 : :
1223 : 0 : cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1224 : :
1225 : : #define PCPU_SETUP_BUG_ON(cond) do { \
1226 : : if (unlikely(cond)) { \
1227 : : pr_emerg("PERCPU: failed to initialize, %s", #cond); \
1228 : : pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
1229 : : pcpu_dump_alloc_info(KERN_EMERG, ai); \
1230 : : BUG(); \
1231 : : } \
1232 : : } while (0)
1233 : :
1234 : : /* sanity checks */
1235 [ # # ]: 0 : PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1236 : : #ifdef CONFIG_SMP
1237 [ # # ]: 0 : PCPU_SETUP_BUG_ON(!ai->static_size);
1238 [ # # ]: 0 : PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
1239 : : #endif
1240 [ # # ]: 0 : PCPU_SETUP_BUG_ON(!base_addr);
1241 [ # # ]: 0 : PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
1242 [ # # ]: 0 : PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1243 [ # # ]: 0 : PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1244 [ # # ]: 0 : PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1245 [ # # ]: 0 : PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1246 : : PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1247 : :
1248 : : /* process group information and build config tables accordingly */
1249 : 0 : group_offsets = memblock_virt_alloc(ai->nr_groups *
1250 : : sizeof(group_offsets[0]), 0);
1251 : 0 : group_sizes = memblock_virt_alloc(ai->nr_groups *
1252 : : sizeof(group_sizes[0]), 0);
1253 : 0 : unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
1254 : 0 : unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
1255 : :
1256 [ # # ]: 0 : for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1257 : 0 : unit_map[cpu] = UINT_MAX;
1258 : :
1259 : 0 : pcpu_low_unit_cpu = NR_CPUS;
1260 : 0 : pcpu_high_unit_cpu = NR_CPUS;
1261 : :
1262 [ # # ]: 0 : for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1263 : 0 : const struct pcpu_group_info *gi = &ai->groups[group];
1264 : :
1265 : 0 : group_offsets[group] = gi->base_offset;
1266 : 0 : group_sizes[group] = gi->nr_units * ai->unit_size;
1267 : :
1268 [ # # ]: 0 : for (i = 0; i < gi->nr_units; i++) {
1269 : 0 : cpu = gi->cpu_map[i];
1270 [ # # ]: 0 : if (cpu == NR_CPUS)
1271 : 0 : continue;
1272 : :
1273 [ # # ]: 0 : PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1274 [ # # ]: 0 : PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1275 [ # # ]: 0 : PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1276 : :
1277 : 0 : unit_map[cpu] = unit + i;
1278 : 0 : unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1279 : :
1280 : : /* determine low/high unit_cpu */
1281 [ # # ][ # # ]: 0 : if (pcpu_low_unit_cpu == NR_CPUS ||
1282 : 0 : unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1283 : 0 : pcpu_low_unit_cpu = cpu;
1284 [ # # ][ # # ]: 0 : if (pcpu_high_unit_cpu == NR_CPUS ||
1285 : 0 : unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1286 : 0 : pcpu_high_unit_cpu = cpu;
1287 : : }
1288 : : }
1289 : 0 : pcpu_nr_units = unit;
1290 : :
1291 [ # # ]: 0 : for_each_possible_cpu(cpu)
1292 [ # # ]: 0 : PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1293 : :
1294 : : /* we're done parsing the input, undefine BUG macro and dump config */
1295 : : #undef PCPU_SETUP_BUG_ON
1296 : 0 : pcpu_dump_alloc_info(KERN_DEBUG, ai);
1297 : :
1298 : 0 : pcpu_nr_groups = ai->nr_groups;
1299 : 0 : pcpu_group_offsets = group_offsets;
1300 : 0 : pcpu_group_sizes = group_sizes;
1301 : 0 : pcpu_unit_map = unit_map;
1302 : 0 : pcpu_unit_offsets = unit_off;
1303 : :
1304 : : /* determine basic parameters */
1305 : 0 : pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1306 : 0 : pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1307 : 0 : pcpu_atom_size = ai->atom_size;
1308 : 0 : pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1309 : 0 : BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1310 : :
1311 : : /*
1312 : : * Allocate chunk slots. The additional last slot is for
1313 : : * empty chunks.
1314 : : */
1315 : 0 : pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1316 : 0 : pcpu_slot = memblock_virt_alloc(
1317 : : pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
1318 [ # # ]: 0 : for (i = 0; i < pcpu_nr_slots; i++)
1319 : 0 : INIT_LIST_HEAD(&pcpu_slot[i]);
1320 : :
1321 : : /*
1322 : : * Initialize static chunk. If reserved_size is zero, the
1323 : : * static chunk covers static area + dynamic allocation area
1324 : : * in the first chunk. If reserved_size is not zero, it
1325 : : * covers static area + reserved area (mostly used for module
1326 : : * static percpu allocation).
1327 : : */
1328 : 0 : schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1329 : 0 : INIT_LIST_HEAD(&schunk->list);
1330 : 0 : schunk->base_addr = base_addr;
1331 : 0 : schunk->map = smap;
1332 : 0 : schunk->map_alloc = ARRAY_SIZE(smap);
1333 : 0 : schunk->immutable = true;
1334 : 0 : bitmap_fill(schunk->populated, pcpu_unit_pages);
1335 : :
1336 [ # # ]: 0 : if (ai->reserved_size) {
1337 : 0 : schunk->free_size = ai->reserved_size;
1338 : 0 : pcpu_reserved_chunk = schunk;
1339 : 0 : pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1340 : : } else {
1341 : 0 : schunk->free_size = dyn_size;
1342 : : dyn_size = 0; /* dynamic area covered */
1343 : : }
1344 : 0 : schunk->contig_hint = schunk->free_size;
1345 : :
1346 : 0 : schunk->map[schunk->map_used++] = -ai->static_size;
1347 [ # # ]: 0 : if (schunk->free_size)
1348 : 0 : schunk->map[schunk->map_used++] = schunk->free_size;
1349 : :
1350 : : /* init dynamic chunk if necessary */
1351 [ # # ]: 0 : if (dyn_size) {
1352 : 0 : dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1353 : 0 : INIT_LIST_HEAD(&dchunk->list);
1354 : 0 : dchunk->base_addr = base_addr;
1355 : 0 : dchunk->map = dmap;
1356 : 0 : dchunk->map_alloc = ARRAY_SIZE(dmap);
1357 : 0 : dchunk->immutable = true;
1358 : 0 : bitmap_fill(dchunk->populated, pcpu_unit_pages);
1359 : :
1360 : 0 : dchunk->contig_hint = dchunk->free_size = dyn_size;
1361 : 0 : dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1362 : 0 : dchunk->map[dchunk->map_used++] = dchunk->free_size;
1363 : : }
1364 : :
1365 : : /* link the first chunk in */
1366 [ # # ]: 0 : pcpu_first_chunk = dchunk ?: schunk;
1367 : 0 : pcpu_chunk_relocate(pcpu_first_chunk, -1);
1368 : :
1369 : : /* we're done */
1370 : 0 : pcpu_base_addr = base_addr;
1371 : 0 : return 0;
1372 : : }
1373 : :
1374 : : #ifdef CONFIG_SMP
1375 : :
1376 : : const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
1377 : : [PCPU_FC_AUTO] = "auto",
1378 : : [PCPU_FC_EMBED] = "embed",
1379 : : [PCPU_FC_PAGE] = "page",
1380 : : };
1381 : :
1382 : : enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1383 : :
1384 : 0 : static int __init percpu_alloc_setup(char *str)
1385 : : {
1386 [ # # ]: 0 : if (!str)
1387 : : return -EINVAL;
1388 : :
1389 : : if (0)
1390 : : /* nada */;
1391 : : #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1392 : : else if (!strcmp(str, "embed"))
1393 : : pcpu_chosen_fc = PCPU_FC_EMBED;
1394 : : #endif
1395 : : #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1396 : : else if (!strcmp(str, "page"))
1397 : : pcpu_chosen_fc = PCPU_FC_PAGE;
1398 : : #endif
1399 : : else
1400 : 0 : pr_warning("PERCPU: unknown allocator %s specified\n", str);
1401 : :
1402 : 0 : return 0;
1403 : : }
1404 : : early_param("percpu_alloc", percpu_alloc_setup);
1405 : :
1406 : : /*
1407 : : * pcpu_embed_first_chunk() is used by the generic percpu setup.
1408 : : * Build it if needed by the arch config or if the generic setup is going
1409 : : * to be used.
1410 : : */
1411 : : #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1412 : : !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1413 : : #define BUILD_EMBED_FIRST_CHUNK
1414 : : #endif
1415 : :
1416 : : /* build pcpu_page_first_chunk() iff needed by the arch config */
1417 : : #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1418 : : #define BUILD_PAGE_FIRST_CHUNK
1419 : : #endif
1420 : :
1421 : : /* pcpu_build_alloc_info() is used by both embed and page first chunk */
1422 : : #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1423 : : /**
1424 : : * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1425 : : * @reserved_size: the size of reserved percpu area in bytes
1426 : : * @dyn_size: minimum free size for dynamic allocation in bytes
1427 : : * @atom_size: allocation atom size
1428 : : * @cpu_distance_fn: callback to determine distance between cpus, optional
1429 : : *
1430 : : * This function determines grouping of units, their mappings to cpus
1431 : : * and other parameters considering needed percpu size, allocation
1432 : : * atom size and distances between CPUs.
1433 : : *
1434 : : * Groups are always multiples of atom size and CPUs which are within
1435 : : * LOCAL_DISTANCE of each other both ways are grouped together and share
1436 : : * space for units in the same group. The returned configuration is
1437 : : * guaranteed to have CPUs on different nodes in different groups and
1438 : : * of allocated virtual address space.
1439 : : *
1440 : : * RETURNS:
1441 : : * On success, pointer to the new allocation_info is returned. On
1442 : : * failure, ERR_PTR value is returned.
1443 : : */
1444 : 0 : static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1445 : : size_t reserved_size, size_t dyn_size,
1446 : : size_t atom_size,
1447 : : pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1448 : : {
1449 : : static int group_map[NR_CPUS] __initdata;
1450 : : static int group_cnt[NR_CPUS] __initdata;
1451 : 0 : const size_t static_size = __per_cpu_end - __per_cpu_start;
1452 : : int nr_groups = 1, nr_units = 0;
1453 : : size_t size_sum, min_unit_size, alloc_size;
1454 : : int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
1455 : : int last_allocs, group, unit;
1456 : : unsigned int cpu, tcpu;
1457 : : struct pcpu_alloc_info *ai;
1458 : : unsigned int *cpu_map;
1459 : :
1460 : : /* this function may be called multiple times */
1461 : 0 : memset(group_map, 0, sizeof(group_map));
1462 : 0 : memset(group_cnt, 0, sizeof(group_cnt));
1463 : :
1464 : : /* calculate size_sum and ensure dyn_size is enough for early alloc */
1465 : 0 : size_sum = PFN_ALIGN(static_size + reserved_size +
1466 : : max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1467 : 0 : dyn_size = size_sum - static_size - reserved_size;
1468 : :
1469 : : /*
1470 : : * Determine min_unit_size, alloc_size and max_upa such that
1471 : : * alloc_size is multiple of atom_size and is the smallest
1472 : : * which can accommodate 4k aligned segments which are equal to
1473 : : * or larger than min_unit_size.
1474 : : */
1475 : 0 : min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1476 : :
1477 : 0 : alloc_size = roundup(min_unit_size, atom_size);
1478 : 0 : upa = alloc_size / min_unit_size;
1479 [ # # ][ # # ]: 0 : while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1480 : 0 : upa--;
1481 : : max_upa = upa;
1482 : :
1483 : : /* group cpus according to their proximity */
1484 [ # # ]: 0 : for_each_possible_cpu(cpu) {
1485 : : group = 0;
1486 : : next_group:
1487 [ # # ]: 0 : for_each_possible_cpu(tcpu) {
1488 [ # # ]: 0 : if (cpu == tcpu)
1489 : : break;
1490 [ # # ]: 0 : if (group_map[tcpu] == group && cpu_distance_fn &&
[ # # # # ]
1491 [ # # ]: 0 : (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1492 : 0 : cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1493 : 0 : group++;
1494 : 0 : nr_groups = max(nr_groups, group + 1);
1495 : 0 : goto next_group;
1496 : : }
1497 : : }
1498 : 0 : group_map[cpu] = group;
1499 : 0 : group_cnt[group]++;
1500 : : }
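 : : /*
 : : * Example grouping on a hypothetical two-node machine with cpus
 : : * 0-3 on node 0 and cpus 4-7 on node 1, where cpu_distance_fn()
 : : * returns LOCAL_DISTANCE within a node and a larger value across
 : : * nodes: the loop above yields group_map = { 0, 0, 0, 0, 1, 1,
 : : * 1, 1 }, group_cnt = { 4, 4 } and nr_groups = 2.
 : : */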
1501 : :
1502 : : /*
1503 : : * Expand unit size until address space usage goes over 75%
1504 : : * and then as much as possible without using more address
1505 : : * space.
1506 : : */
1507 : : last_allocs = INT_MAX;
1508 [ # # ]: 0 : for (upa = max_upa; upa; upa--) {
1509 : : int allocs = 0, wasted = 0;
1510 : :
1511 [ # # ][ # # ]: 0 : if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1512 : 0 : continue;
1513 : :
1514 [ # # ]: 0 : for (group = 0; group < nr_groups; group++) {
1515 : 0 : int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1516 : 0 : allocs += this_allocs;
1517 : 0 : wasted += this_allocs * upa - group_cnt[group];
1518 : : }
1519 : :
1520 : : /*
1521 : : * Don't accept if wastage is over 1/3. The
1522 : : * greater-than comparison ensures upa==1 always
1523 : : * passes the following check.
1524 : : */
1525 [ # # ]: 0 : if (wasted > num_possible_cpus() / 3)
1526 : 0 : continue;
1527 : :
1528 : : /* and then don't consume more memory */
1529 [ # # ]: 0 : if (allocs > last_allocs)
1530 : : break;
1531 : : last_allocs = allocs;
1532 : : best_upa = upa;
1533 : : }
1534 : : upa = best_upa;
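 : : /*
 : : * Continuing the hypothetical example (8 cpus in two groups of
 : : * 4, alloc_size = 2M, max_upa = 32): the wastage limit is 8 / 3
 : : * = 2 units. upa = 32 wastes 28 units per group, upa = 16
 : : * wastes 12 and upa = 8 wastes 4, all rejected; upa = 4 fits
 : : * each group exactly (2 allocations, 0 wasted) and becomes
 : : * best_upa. upa = 2 would need 4 allocations, more than the
 : : * recorded 2, so the loop stops. The unit size ends up being
 : : * 2M / 4 = 512k.
 : : */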
1535 : :
1536 : : /* allocate and fill alloc_info */
1537 [ # # ]: 0 : for (group = 0; group < nr_groups; group++)
1538 : 0 : nr_units += roundup(group_cnt[group], upa);
1539 : :
1540 : 0 : ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1541 [ # # ]: 0 : if (!ai)
1542 : : return ERR_PTR(-ENOMEM);
1543 : 0 : cpu_map = ai->groups[0].cpu_map;
1544 : :
1545 [ # # ]: 0 : for (group = 0; group < nr_groups; group++) {
1546 : 0 : ai->groups[group].cpu_map = cpu_map;
1547 : 0 : cpu_map += roundup(group_cnt[group], upa);
1548 : : }
1549 : :
1550 : 0 : ai->static_size = static_size;
1551 : 0 : ai->reserved_size = reserved_size;
1552 : 0 : ai->dyn_size = dyn_size;
1553 : 0 : ai->unit_size = alloc_size / upa;
1554 : 0 : ai->atom_size = atom_size;
1555 : 0 : ai->alloc_size = alloc_size;
1556 : :
1557 [ # # ]: 0 : for (group = 0, unit = 0; group_cnt[group]; group++) {
1558 : 0 : struct pcpu_group_info *gi = &ai->groups[group];
1559 : :
1560 : : /*
1561 : : * Initialize base_offset as if all groups are located
1562 : : * back-to-back. The caller should update this to
1563 : : * reflect actual allocation.
1564 : : */
1565 : 0 : gi->base_offset = unit * ai->unit_size;
1566 : :
1567 [ # # ]: 0 : for_each_possible_cpu(cpu)
1568 [ # # ]: 0 : if (group_map[cpu] == group)
1569 : 0 : gi->cpu_map[gi->nr_units++] = cpu;
1570 : 0 : gi->nr_units = roundup(gi->nr_units, upa);
1571 : 0 : unit += gi->nr_units;
1572 : : }
1573 [ # # ]: 0 : BUG_ON(unit != nr_units);
1574 : :
1575 : : return ai;
1576 : : }
1577 : : #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1578 : :
1579 : : #if defined(BUILD_EMBED_FIRST_CHUNK)
1580 : : /**
1581 : : * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1582 : : * @reserved_size: the size of reserved percpu area in bytes
1583 : : * @dyn_size: minimum free size for dynamic allocation in bytes
1584 : : * @atom_size: allocation atom size
1585 : : * @cpu_distance_fn: callback to determine distance between cpus, optional
1586 : : * @alloc_fn: function to allocate percpu page
1587 : : * @free_fn: function to free percpu page
1588 : : *
1589 : : * This is a helper to ease setting up the embedded first percpu
1590 : : * chunk and can be called where pcpu_setup_first_chunk() is expected.
1591 : : *
1592 : : * If this function is used to set up the first chunk, it is allocated
1593 : : * by calling @alloc_fn and used as-is without being mapped into
1594 : : * vmalloc area. Allocations are always whole multiples of @atom_size
1595 : : * aligned to @atom_size.
1596 : : *
1597 : : * This enables the first chunk to piggy back on the linear physical
1598 : : * mapping which often uses larger page size. Please note that this
1599 : : * can result in very sparse cpu->unit mapping on NUMA machines thus
1600 : : * requiring large vmalloc address space. Don't use this allocator if
1601 : : * vmalloc space is not orders of magnitude larger than distances
1602 : : * between node memory addresses (i.e. 32-bit NUMA machines).
1603 : : *
1604 : : * @dyn_size specifies the minimum dynamic area size.
1605 : : *
1606 : : * If the needed size is smaller than the minimum or specified unit
1607 : : * size, the leftover is returned using @free_fn.
1608 : : *
1609 : : * RETURNS:
1610 : : * 0 on success, -errno on failure.
1611 : : */
1612 : 0 : int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1613 : : size_t atom_size,
1614 : : pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1615 : : pcpu_fc_alloc_fn_t alloc_fn,
1616 : : pcpu_fc_free_fn_t free_fn)
1617 : : {
1618 : : void *base = (void *)ULONG_MAX;
1619 : : void **areas = NULL;
1620 : : struct pcpu_alloc_info *ai;
1621 : : size_t size_sum, areas_size, max_distance;
1622 : : int group, i, rc;
1623 : :
1624 : 0 : ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1625 : : cpu_distance_fn);
1626 [ # # ]: 0 : if (IS_ERR(ai))
1627 : 0 : return PTR_ERR(ai);
1628 : :
1629 : 0 : size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1630 : 0 : areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1631 : :
1632 : : areas = memblock_virt_alloc_nopanic(areas_size, 0);
1633 [ # # ]: 0 : if (!areas) {
1634 : : rc = -ENOMEM;
1635 : : goto out_free;
1636 : : }
1637 : :
1638 : : /* allocate, copy and determine base address */
1639 [ # # ]: 0 : for (group = 0; group < ai->nr_groups; group++) {
1640 : 0 : struct pcpu_group_info *gi = &ai->groups[group];
1641 : : unsigned int cpu = NR_CPUS;
1642 : : void *ptr;
1643 : :
1644 [ # # ][ # # ]: 0 : for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1645 : 0 : cpu = gi->cpu_map[i];
1646 [ # # ]: 0 : BUG_ON(cpu == NR_CPUS);
1647 : :
1648 : : /* allocate space for the whole group */
1649 : 0 : ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1650 [ # # ]: 0 : if (!ptr) {
1651 : : rc = -ENOMEM;
1652 : : goto out_free_areas;
1653 : : }
1654 : : /* kmemleak tracks the percpu allocations separately */
1655 : : kmemleak_free(ptr);
1656 : 0 : areas[group] = ptr;
1657 : :
1658 : 0 : base = min(ptr, base);
1659 : : }
1660 : :
1661 : : /*
1662 : : * Copy data and free unused parts. This should happen after all
1663 : : * allocations are complete; otherwise, we may end up with
1664 : : * overlapping groups.
1665 : : */
1666 [ # # ]: 0 : for (group = 0; group < ai->nr_groups; group++) {
1667 : 0 : struct pcpu_group_info *gi = &ai->groups[group];
1668 : 0 : void *ptr = areas[group];
1669 : :
1670 [ # # ]: 0 : for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1671 [ # # ]: 0 : if (gi->cpu_map[i] == NR_CPUS) {
1672 : : /* unused unit, free whole */
1673 : 0 : free_fn(ptr, ai->unit_size);
1674 : 0 : continue;
1675 : : }
1676 : : /* copy and return the unused part */
1677 : 0 : memcpy(ptr, __per_cpu_load, ai->static_size);
1678 : 0 : free_fn(ptr + size_sum, ai->unit_size - size_sum);
1679 : : }
1680 : : }
1681 : :
1682 : : /* base address is now known, determine group base offsets */
1683 : : max_distance = 0;
1684 [ # # ]: 0 : for (group = 0; group < ai->nr_groups; group++) {
1685 : 0 : ai->groups[group].base_offset = areas[group] - base;
1686 : 0 : max_distance = max_t(size_t, max_distance,
1687 : : ai->groups[group].base_offset);
1688 : : }
1689 : 0 : max_distance += ai->unit_size;
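 : : /*
 : : * For instance, if the example's two groups were allocated 1G
 : : * apart in the linear mapping, the base_offsets would be 0 and
 : : * 1G and max_distance would be 1G + 512k. The check below warns
 : : * (and fails when the page-remapped fallback is built) if this
 : : * span exceeds 3/4 of the vmalloc space: trivial on 64bit,
 : : * easily exceeded on 32bit NUMA machines.
 : : */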
1690 : :
1691 : : /* warn if maximum distance is further than 75% of vmalloc space */
1692 [ # # ]: 0 : if (max_distance > VMALLOC_TOTAL * 3 / 4) {
1693 : 0 : pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1694 : : "space 0x%lx\n", max_distance,
1695 : : VMALLOC_TOTAL);
1696 : : #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1697 : : /* and fail if we have fallback */
1698 : : rc = -EINVAL;
1699 : : goto out_free;
1700 : : #endif
1701 : : }
1702 : :
1703 : 0 : pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1704 : : PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1705 : : ai->dyn_size, ai->unit_size);
1706 : :
1707 : 0 : rc = pcpu_setup_first_chunk(ai, base);
1708 : 0 : goto out_free;
1709 : :
1710 : : out_free_areas:
1711 [ # # ]: 0 : for (group = 0; group < ai->nr_groups; group++)
1712 [ # # ]: 0 : if (areas[group])
1713 : 0 : free_fn(areas[group],
1714 : 0 : ai->groups[group].nr_units * ai->unit_size);
1715 : : out_free:
1716 : 0 : pcpu_free_alloc_info(ai);
1717 [ # # ]: 0 : if (areas)
1718 : 0 : memblock_free_early(__pa(areas), areas_size);
1719 : 0 : return rc;
1720 : : }
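 : : /*
 : : * A minimal usage sketch (hypothetical arch code, not part of
 : : * this file); a NUMA-aware arch would pass node-local alloc and
 : : * distance callbacks, roughly as follows (arches typically use
 : : * an early cpu-to-node lookup here):
 : : *
 : : *	static void * __init my_fc_alloc(unsigned int cpu,
 : : *					 size_t size, size_t align)
 : : *	{
 : : *		return memblock_virt_alloc_try_nid_nopanic(size,
 : : *				align, __pa(MAX_DMA_ADDRESS),
 : : *				BOOTMEM_ALLOC_ACCESSIBLE,
 : : *				cpu_to_node(cpu));
 : : *	}
 : : *
 : : *	static void __init my_fc_free(void *ptr, size_t size)
 : : *	{
 : : *		memblock_free_early(__pa(ptr), size);
 : : *	}
 : : *
 : : *	static int __init my_cpu_distance(unsigned int from,
 : : *					  unsigned int to)
 : : *	{
 : : *		return cpu_to_node(from) == cpu_to_node(to) ?
 : : *			LOCAL_DISTANCE : REMOTE_DISTANCE;
 : : *	}
 : : *
 : : *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 : : *			PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 : : *			my_cpu_distance, my_fc_alloc, my_fc_free);
 : : */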
1721 : : #endif /* BUILD_EMBED_FIRST_CHUNK */
1722 : :
1723 : : #ifdef BUILD_PAGE_FIRST_CHUNK
1724 : : /**
1725 : : * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1726 : : * @reserved_size: the size of reserved percpu area in bytes
1727 : : * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1728 : : * @free_fn: function to free percpu page, always called with PAGE_SIZE
1729 : : * @populate_pte_fn: function to populate pte
1730 : : *
1731 : : * This is a helper to ease setting up the page-remapped first percpu
1732 : : * chunk and can be called where pcpu_setup_first_chunk() is expected.
1733 : : *
1734 : : * This is the basic allocator. The static percpu area is allocated
1735 : : * page-by-page and remapped into the vmalloc area.
1736 : : *
1737 : : * RETURNS:
1738 : : * 0 on success, -errno on failure.
1739 : : */
1740 : : int __init pcpu_page_first_chunk(size_t reserved_size,
1741 : : pcpu_fc_alloc_fn_t alloc_fn,
1742 : : pcpu_fc_free_fn_t free_fn,
1743 : : pcpu_fc_populate_pte_fn_t populate_pte_fn)
1744 : : {
1745 : : static struct vm_struct vm;
1746 : : struct pcpu_alloc_info *ai;
1747 : : char psize_str[16];
1748 : : int unit_pages;
1749 : : size_t pages_size;
1750 : : struct page **pages;
1751 : : int unit, i, j, rc;
1752 : :
1753 : : snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1754 : :
1755 : : ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1756 : : if (IS_ERR(ai))
1757 : : return PTR_ERR(ai);
1758 : : BUG_ON(ai->nr_groups != 1);
1759 : : BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1760 : :
1761 : : unit_pages = ai->unit_size >> PAGE_SHIFT;
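 : : /*
 : : * E.g. (hypothetical sizes) a 64k unit on a 4k-page machine
 : : * gives unit_pages = 16; with 8 possible cpus the pages[] array
 : : * below holds 16 * 8 = 128 page pointers.
 : : */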
1762 : :
1763 : : /* unaligned allocations can't be freed, round up to page size */
1764 : : pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1765 : : sizeof(pages[0]));
1766 : : pages = memblock_virt_alloc(pages_size, 0);
1767 : :
1768 : : /* allocate pages */
1769 : : j = 0;
1770 : : for (unit = 0; unit < num_possible_cpus(); unit++)
1771 : : for (i = 0; i < unit_pages; i++) {
1772 : : unsigned int cpu = ai->groups[0].cpu_map[unit];
1773 : : void *ptr;
1774 : :
1775 : : ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1776 : : if (!ptr) {
1777 : : pr_warning("PERCPU: failed to allocate %s page "
1778 : : "for cpu%u\n", psize_str, cpu);
1779 : : goto enomem;
1780 : : }
1781 : : /* kmemleak tracks the percpu allocations separately */
1782 : : kmemleak_free(ptr);
1783 : : pages[j++] = virt_to_page(ptr);
1784 : : }
1785 : :
1786 : : /* allocate vm area, map the pages and copy static data */
1787 : : vm.flags = VM_ALLOC;
1788 : : vm.size = num_possible_cpus() * ai->unit_size;
1789 : : vm_area_register_early(&vm, PAGE_SIZE);
1790 : :
1791 : : for (unit = 0; unit < num_possible_cpus(); unit++) {
1792 : : unsigned long unit_addr =
1793 : : (unsigned long)vm.addr + unit * ai->unit_size;
1794 : :
1795 : : for (i = 0; i < unit_pages; i++)
1796 : : populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1797 : :
1798 : : /* pte already populated, the following shouldn't fail */
1799 : : rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1800 : : unit_pages);
1801 : : if (rc < 0)
1802 : : panic("failed to map percpu area, err=%d\n", rc);
1803 : :
1804 : : /*
1805 : : * FIXME: Archs with virtual cache should flush local
1806 : : * cache for the linear mapping here - something
1807 : : * equivalent to flush_cache_vmap() on the local cpu.
1808 : : * flush_cache_vmap() can't be used as most supporting
1809 : : * data structures are not set up yet.
1810 : : */
1811 : :
1812 : : /* copy static data */
1813 : : memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1814 : : }
1815 : :
1816 : : /* we're ready, commit */
1817 : : pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1818 : : unit_pages, psize_str, vm.addr, ai->static_size,
1819 : : ai->reserved_size, ai->dyn_size);
1820 : :
1821 : : rc = pcpu_setup_first_chunk(ai, vm.addr);
1822 : : goto out_free_ar;
1823 : :
1824 : : enomem:
1825 : : while (--j >= 0)
1826 : : free_fn(page_address(pages[j]), PAGE_SIZE);
1827 : : rc = -ENOMEM;
1828 : : out_free_ar:
1829 : : memblock_free_early(__pa(pages), pages_size);
1830 : : pcpu_free_alloc_info(ai);
1831 : : return rc;
1832 : : }
1833 : : #endif /* BUILD_PAGE_FIRST_CHUNK */
1834 : :
1835 : : #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1836 : : /*
1837 : : * Generic SMP percpu area setup.
1838 : : *
1839 : : * The embedding helper is used because its behavior closely resembles
1840 : : * the original non-dynamic generic percpu area setup. This is
1841 : : * important because many archs have addressing restrictions and might
1842 : : * fail if the percpu area is located far away from the previous
1843 : : * location. As an added bonus, in non-NUMA cases, embedding is
1844 : : * generally a good idea TLB-wise because the percpu area can piggy
1845 : : * back on the physical linear memory mapping which uses large page
1846 : : * mappings on applicable archs.
1847 : : */
1848 : : unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1849 : : EXPORT_SYMBOL(__per_cpu_offset);
1850 : :
1851 : 0 : static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1852 : : size_t align)
1853 : : {
1854 : 0 : return memblock_virt_alloc_from_nopanic(
1855 : : size, align, __pa(MAX_DMA_ADDRESS));
1856 : : }
1857 : :
1858 : 0 : static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1859 : : {
1860 : 0 : memblock_free_early(__pa(ptr), size);
1861 : 0 : }
1862 : :
1863 : 0 : void __init setup_per_cpu_areas(void)
1864 : : {
1865 : : unsigned long delta;
1866 : : unsigned int cpu;
1867 : : int rc;
1868 : :
1869 : : /*
1870 : : * Always reserve area for module percpu variables. That's
1871 : : * what the legacy allocator did.
1872 : : */
1873 : 0 : rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1874 : : PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1875 : : pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1876 [ # # ]: 0 : if (rc < 0)
1877 : 0 : panic("Failed to initialize percpu areas.");
1878 : :
1879 : 0 : delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1880 [ # # ]: 0 : for_each_possible_cpu(cpu)
1881 : 0 : __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
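 : : /*
 : : * From here on, cpu's instance of a static percpu variable "var"
 : : * lives at &var + __per_cpu_offset[cpu]: delta rebases the
 : : * link-time address from __per_cpu_start to pcpu_base_addr, and
 : : * pcpu_unit_offsets[cpu] shifts it to the cpu's unit.
 : : */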
1882 : 0 : }
1883 : : #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1884 : :
1885 : : #else /* CONFIG_SMP */
1886 : :
1887 : : /*
1888 : : * UP percpu area setup.
1889 : : *
1890 : : * UP always uses km-based percpu allocator with identity mapping.
1891 : : * Static percpu variables are indistinguishable from the usual static
1892 : : * variables and don't require any special preparation.
1893 : : */
1894 : : void __init setup_per_cpu_areas(void)
1895 : : {
1896 : : const size_t unit_size =
1897 : : roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
1898 : : PERCPU_DYNAMIC_RESERVE));
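 : : /*
 : : * Illustration: with 4k pages PCPU_MIN_UNIT_SIZE is 32k, and on
 : : * typical configs PERCPU_DYNAMIC_RESERVE is no larger than that,
 : : * so unit_size = roundup_pow_of_two(32k) = 32k.
 : : */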
1899 : : struct pcpu_alloc_info *ai;
1900 : : void *fc;
1901 : :
1902 : : ai = pcpu_alloc_alloc_info(1, 1);
1903 : : fc = memblock_virt_alloc_from_nopanic(unit_size,
1904 : : PAGE_SIZE,
1905 : : __pa(MAX_DMA_ADDRESS));
1906 : : if (!ai || !fc)
1907 : : panic("Failed to allocate memory for percpu areas.");
1908 : : /* kmemleak tracks the percpu allocations separately */
1909 : : kmemleak_free(fc);
1910 : :
1911 : : ai->dyn_size = unit_size;
1912 : : ai->unit_size = unit_size;
1913 : : ai->atom_size = unit_size;
1914 : : ai->alloc_size = unit_size;
1915 : : ai->groups[0].nr_units = 1;
1916 : : ai->groups[0].cpu_map[0] = 0;
1917 : :
1918 : : if (pcpu_setup_first_chunk(ai, fc) < 0)
1919 : : panic("Failed to initialize percpu areas.");
1920 : : }
1921 : :
1922 : : #endif /* CONFIG_SMP */
1923 : :
1924 : : /*
1925 : : * First and reserved chunks are initialized with temporary allocation
1926 : : * map in initdata so that they can be used before slab is online.
1927 : : * This function is called after slab is brought up and replaces those
1928 : : * with properly allocated maps.
1929 : : */
1930 : 0 : void __init percpu_init_late(void)
1931 : : {
1932 : 0 : struct pcpu_chunk *target_chunks[] =
1933 : : { pcpu_first_chunk, pcpu_reserved_chunk, NULL };
1934 : : struct pcpu_chunk *chunk;
1935 : : unsigned long flags;
1936 : : int i;
1937 : :
1938 [ # # ]: 0 : for (i = 0; (chunk = target_chunks[i]); i++) {
1939 : : int *map;
1940 : : const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
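 : : /*
 : : * E.g. with 128 map slots and 4-byte ints this is 512 bytes,
 : : * comfortably within the page-size limit asserted below.
 : : */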
1941 : :
1942 : : BUILD_BUG_ON(size > PAGE_SIZE);
1943 : :
1944 : 0 : map = pcpu_mem_zalloc(size);
1945 [ # # ]: 0 : BUG_ON(!map);
1946 : :
1947 : 0 : spin_lock_irqsave(&pcpu_lock, flags);
1948 : 0 : memcpy(map, chunk->map, size);
1949 : 0 : chunk->map = map;
1950 : : spin_unlock_irqrestore(&pcpu_lock, flags);
1951 : : }
1952 : 0 : }