Branch data Line data Source code
1 : : /*
2 : : * Copyright (C) 2007 Oracle. All rights reserved.
3 : : *
4 : : * This program is free software; you can redistribute it and/or
5 : : * modify it under the terms of the GNU General Public
6 : : * License v2 as published by the Free Software Foundation.
7 : : *
8 : : * This program is distributed in the hope that it will be useful,
9 : : * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 : : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 : : * General Public License for more details.
12 : : *
13 : : * You should have received a copy of the GNU General Public
14 : : * License along with this program; if not, write to the
15 : : * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 : : * Boston, MA 02111-1307, USA.
17 : : */
18 : :
19 : : #include <linux/kthread.h>
20 : : #include <linux/slab.h>
21 : : #include <linux/list.h>
22 : : #include <linux/spinlock.h>
23 : : #include <linux/freezer.h>
24 : : #include "async-thread.h"
25 : :
26 : : #define WORK_QUEUED_BIT 0
27 : : #define WORK_DONE_BIT 1
28 : : #define WORK_ORDER_DONE_BIT 2
29 : : #define WORK_HIGH_PRIO_BIT 3
30 : :
31 : : /*
32 : : * container for the kthread task pointer and the list of pending work.
33 : : * One of these is allocated per thread.
34 : : */
35 : : struct btrfs_worker_thread {
36 : : /* pool we belong to */
37 : : struct btrfs_workers *workers;
38 : :
39 : : /* list of struct btrfs_work that are waiting for service */
40 : : struct list_head pending;
41 : : struct list_head prio_pending;
42 : :
43 : : /* list of worker threads from struct btrfs_workers */
44 : : struct list_head worker_list;
45 : :
46 : : /* kthread */
47 : : struct task_struct *task;
48 : :
49 : : /* number of things on the pending list */
50 : : atomic_t num_pending;
51 : :
52 : : /* reference counter for this struct */
53 : : atomic_t refs;
54 : :
55 : : unsigned long sequence;
56 : :
57 : : /* protects the pending list. */
58 : : spinlock_t lock;
59 : :
60 : : /* set to non-zero when this thread is already awake and kicking */
61 : : int working;
62 : :
63 : : /* are we currently idle */
64 : : int idle;
65 : : };
66 : :
67 : : static int __btrfs_start_workers(struct btrfs_workers *workers);
68 : :
69 : : /*
70 : : * btrfs_start_workers uses kthread_run, which can block waiting for memory
71 : : * for a very long time. It will actually throttle on page writeback,
72 : : * and so it may not make progress until after our btrfs worker threads
73 : : * process all of the pending work structs in their queue.
74 : : *
75 : : * This means we can't use btrfs_start_workers from inside a btrfs worker
76 : : * thread that is used as part of cleaning dirty memory, which pretty much
77 : : * involves all of the worker threads.
78 : : *
79 : : * Instead we have a helper queue that never has more than one thread,
80 : : * where we schedule thread start operations. This worker_start struct
81 : : * is used to contain the work and hold a pointer to the queue that needs
82 : : * another worker.
83 : : */
84 : : struct worker_start {
85 : : struct btrfs_work work;
86 : : struct btrfs_workers *queue;
87 : : };
88 : :
89 : 0 : static void start_new_worker_func(struct btrfs_work *work)
90 : : {
91 : : struct worker_start *start;
92 : : start = container_of(work, struct worker_start, work);
93 : 0 : __btrfs_start_workers(start->queue);
94 : 0 : kfree(start);
95 : 0 : }
96 : :
97 : : /*
98 : : * helper function to move a thread onto the idle list after it
99 : : * has finished some requests.
100 : : */
101 : 0 : static void check_idle_worker(struct btrfs_worker_thread *worker)
102 : : {
103 [ # # ][ # # ]: 0 : if (!worker->idle && atomic_read(&worker->num_pending) <
104 : 0 : worker->workers->idle_thresh / 2) {
105 : : unsigned long flags;
106 : 0 : spin_lock_irqsave(&worker->workers->lock, flags);
107 : 0 : worker->idle = 1;
108 : :
109 : : /* the list may be empty if the worker is just starting */
110 [ # # ][ # # ]: 0 : if (!list_empty(&worker->worker_list) &&
111 : 0 : !worker->workers->stopping) {
112 : 0 : list_move(&worker->worker_list,
113 : : &worker->workers->idle_list);
114 : : }
115 : 0 : spin_unlock_irqrestore(&worker->workers->lock, flags);
116 : : }
117 : 0 : }
118 : :
119 : : /*
120 : : * helper function to move a thread off the idle list after new
121 : : * pending work is added.
122 : : */
123 : 0 : static void check_busy_worker(struct btrfs_worker_thread *worker)
124 : : {
125 [ # # ][ # # ]: 0 : if (worker->idle && atomic_read(&worker->num_pending) >=
126 : 0 : worker->workers->idle_thresh) {
127 : : unsigned long flags;
128 : 0 : spin_lock_irqsave(&worker->workers->lock, flags);
129 : 0 : worker->idle = 0;
130 : :
131 [ # # ][ # # ]: 0 : if (!list_empty(&worker->worker_list) &&
132 : 0 : !worker->workers->stopping) {
133 : 0 : list_move_tail(&worker->worker_list,
134 : : &worker->workers->worker_list);
135 : : }
136 : 0 : spin_unlock_irqrestore(&worker->workers->lock, flags);
137 : : }
138 : 0 : }
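             : : /*
             : :  * Together, check_idle_worker() and check_busy_worker() above form a
             : :  * hysteresis around workers->idle_thresh (32 by default, set in
             : :  * btrfs_init_workers): a worker is parked on the idle list once its
             : :  * pending count drops below idle_thresh / 2, and is only moved back
             : :  * to the busy list once the count reaches idle_thresh again, so
             : :  * threads don't bounce between the two lists on every queued item.
             : :  */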
139 : :
140 : 0 : static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
141 : : {
142 : 0 : struct btrfs_workers *workers = worker->workers;
143 : : struct worker_start *start;
144 : : unsigned long flags;
145 : :
146 : 0 : rmb();
147 [ # # ]: 0 : if (!workers->atomic_start_pending)
148 : : return;
149 : :
150 : : start = kzalloc(sizeof(*start), GFP_NOFS);
151 [ # # ]: 0 : if (!start)
152 : : return;
153 : :
154 : 0 : start->work.func = start_new_worker_func;
155 : 0 : start->queue = workers;
156 : :
157 : 0 : spin_lock_irqsave(&workers->lock, flags);
158 [ # # ]: 0 : if (!workers->atomic_start_pending)
159 : : goto out;
160 : :
161 : 0 : workers->atomic_start_pending = 0;
162 [ # # ]: 0 : if (workers->num_workers + workers->num_workers_starting >=
163 : 0 : workers->max_workers)
164 : : goto out;
165 : :
166 : 0 : workers->num_workers_starting += 1;
167 : : spin_unlock_irqrestore(&workers->lock, flags);
168 : 0 : btrfs_queue_worker(workers->atomic_worker_start, &start->work);
169 : : return;
170 : :
171 : : out:
172 : 0 : kfree(start);
173 : : spin_unlock_irqrestore(&workers->lock, flags);
174 : : }
175 : :
176 : 0 : static noinline void run_ordered_completions(struct btrfs_workers *workers,
177 : : struct btrfs_work *work)
178 : : {
179 [ # # ]: 0 : if (!workers->ordered)
180 : 0 : return;
181 : :
182 : 0 : set_bit(WORK_DONE_BIT, &work->flags);
183 : :
184 : : spin_lock(&workers->order_lock);
185 : :
186 : : while (1) {
187 [ # # ]: 0 : if (!list_empty(&workers->prio_order_list)) {
188 : 0 : work = list_entry(workers->prio_order_list.next,
189 : : struct btrfs_work, order_list);
190 [ # # ]: 0 : } else if (!list_empty(&workers->order_list)) {
191 : 0 : work = list_entry(workers->order_list.next,
192 : : struct btrfs_work, order_list);
193 : : } else {
194 : : break;
195 : : }
196 [ # # ]: 0 : if (!test_bit(WORK_DONE_BIT, &work->flags))
197 : : break;
198 : :
199 : : /* we are going to call the ordered done function, but
200 : : * we leave the work item on the list as a barrier so
201 : : * that later work items that are done don't have their
202 : : * functions called before this one returns
203 : : */
204 [ # # ]: 0 : if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
205 : : break;
206 : :
207 : : spin_unlock(&workers->order_lock);
208 : :
209 : 0 : work->ordered_func(work);
210 : :
211 : : /* now take the lock again and drop our item from the list */
212 : : spin_lock(&workers->order_lock);
213 : : list_del(&work->order_list);
214 : : spin_unlock(&workers->order_lock);
215 : :
216 : : /*
217 : : * we don't want to call the ordered free functions
218 : : * with the lock held though
219 : : */
220 : 0 : work->ordered_free(work);
221 : : spin_lock(&workers->order_lock);
222 : : }
223 : :
224 : : spin_unlock(&workers->order_lock);
225 : : }
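             : : /*
             : :  * Note that in an ordered queue work->func still runs on whatever
             : :  * worker picked the item up; only the ordered_func calls are
             : :  * serialized by run_ordered_completions above, which always drains
             : :  * the high priority order list first and stops at the first item
             : :  * that has not yet set WORK_DONE_BIT.  ordered_free runs after the
             : :  * item is unlinked and with order_lock dropped.
             : :  */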
226 : :
227 : 0 : static void put_worker(struct btrfs_worker_thread *worker)
228 : : {
229 [ # # ]: 0 : if (atomic_dec_and_test(&worker->refs))
230 : 0 : kfree(worker);
231 : 0 : }
232 : :
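             : : /*
             : :  * reap this worker if the pool still has another thread and this one
             : :  * has gone completely quiet: idle, not marked working, still on a
             : :  * pool list, and with nothing pending.  Returns 1 when the worker
             : :  * was unlinked from the pool (and its reference dropped), in which
             : :  * case the calling thread must exit without touching it again.
             : :  */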
233 : 0 : static int try_worker_shutdown(struct btrfs_worker_thread *worker)
234 : : {
235 : : int freeit = 0;
236 : :
237 : : spin_lock_irq(&worker->lock);
238 : 0 : spin_lock(&worker->workers->lock);
239 [ # # ][ # # ]: 0 : if (worker->workers->num_workers > 1 &&
240 [ # # ]: 0 : worker->idle &&
241 [ # # ]: 0 : !worker->working &&
242 [ # # ]: 0 : !list_empty(&worker->worker_list) &&
243 [ # # ]: 0 : list_empty(&worker->prio_pending) &&
244 [ # # ]: 0 : list_empty(&worker->pending) &&
245 : 0 : atomic_read(&worker->num_pending) == 0) {
246 : : freeit = 1;
247 : : list_del_init(&worker->worker_list);
248 : 0 : worker->workers->num_workers--;
249 : : }
250 : 0 : spin_unlock(&worker->workers->lock);
251 : : spin_unlock_irq(&worker->lock);
252 : :
253 [ # # ]: 0 : if (freeit)
254 : 0 : put_worker(worker);
255 : 0 : return freeit;
256 : : }
257 : :
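             : : /*
             : :  * hand worker_loop its next work item.  Items are spliced from the
             : :  * spinlock-protected per-worker lists onto the caller's local
             : :  * prio_head/head lists in batches, so the lock is taken once per
             : :  * refill rather than once per item, and high-priority work is
             : :  * always served before regular work.
             : :  */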
258 : 0 : static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
259 : : struct list_head *prio_head,
260 : : struct list_head *head)
261 : : {
262 : : struct btrfs_work *work = NULL;
263 : : struct list_head *cur = NULL;
264 : :
265 [ # # ]: 0 : if (!list_empty(prio_head))
266 : : cur = prio_head->next;
267 : :
268 : 0 : smp_mb();
269 [ # # ]: 0 : if (!list_empty(&worker->prio_pending))
270 : : goto refill;
271 : :
272 [ # # ]: 0 : if (!list_empty(head))
273 : : cur = head->next;
274 : :
275 [ # # ]: 0 : if (cur)
276 : : goto out;
277 : :
278 : : refill:
279 : : spin_lock_irq(&worker->lock);
280 : : list_splice_tail_init(&worker->prio_pending, prio_head);
281 : 0 : list_splice_tail_init(&worker->pending, head);
282 : :
283 [ # # ]: 0 : if (!list_empty(prio_head))
284 : : cur = prio_head->next;
285 [ # # ]: 0 : else if (!list_empty(head))
286 : : cur = head->next;
287 : : spin_unlock_irq(&worker->lock);
288 : :
289 [ # # ]: 0 : if (!cur)
290 : : goto out_fail;
291 : :
292 : : out:
293 : 0 : work = list_entry(cur, struct btrfs_work, list);
294 : :
295 : : out_fail:
296 : 0 : return work;
297 : : }
298 : :
299 : : /*
300 : : * main loop for servicing work items
301 : : */
302 : 0 : static int worker_loop(void *arg)
303 : : {
304 : 0 : struct btrfs_worker_thread *worker = arg;
305 : : struct list_head head;
306 : : struct list_head prio_head;
307 : : struct btrfs_work *work;
308 : :
309 : : INIT_LIST_HEAD(&head);
310 : : INIT_LIST_HEAD(&prio_head);
311 : :
312 : : do {
313 : : again:
314 : : while (1) {
315 : :
316 : :
317 : 0 : work = get_next_work(worker, &prio_head, &head);
318 [ # # ]: 0 : if (!work)
319 : : break;
320 : :
321 : : list_del(&work->list);
322 : 0 : clear_bit(WORK_QUEUED_BIT, &work->flags);
323 : :
324 : 0 : work->worker = worker;
325 : :
326 : 0 : work->func(work);
327 : :
328 : 0 : atomic_dec(&worker->num_pending);
329 : : /*
330 : : * unless this is an ordered work queue,
331 : : * 'work' was probably freed by func above.
332 : : */
333 : 0 : run_ordered_completions(worker->workers, work);
334 : :
335 : 0 : check_pending_worker_creates(worker);
336 : 0 : cond_resched();
337 : 0 : }
338 : :
339 : : spin_lock_irq(&worker->lock);
340 : 0 : check_idle_worker(worker);
341 : :
342 [ # # ]: 0 : if (freezing(current)) {
343 : 0 : worker->working = 0;
344 : : spin_unlock_irq(&worker->lock);
345 : : try_to_freeze();
346 : : } else {
347 : : spin_unlock_irq(&worker->lock);
348 [ # # ]: 0 : if (!kthread_should_stop()) {
349 : 0 : cpu_relax();
350 : : /*
351 : : * we've dropped the lock, did someone else
352 : : * jump in?
353 : : */
354 : 0 : smp_mb();
355 [ # # ][ # # ]: 0 : if (!list_empty(&worker->pending) ||
356 : 0 : !list_empty(&worker->prio_pending))
357 : 0 : continue;
358 : :
359 : : /*
360 : : * this short schedule allows more work to
361 : : * come in without the queue functions
362 : : * needing to go through wake_up_process()
363 : : *
364 : : * worker->working is still 1, so nobody
365 : : * is going to try and wake us up
366 : : */
367 : 0 : schedule_timeout(1);
368 : 0 : smp_mb();
369 [ # # ][ # # ]: 0 : if (!list_empty(&worker->pending) ||
370 : : !list_empty(&worker->prio_pending))
371 : 0 : continue;
372 : :
373 [ # # ]: 0 : if (kthread_should_stop())
374 : : break;
375 : :
376 : : /* still no more work? sleep for real */
377 : : spin_lock_irq(&worker->lock);
378 : 0 : set_current_state(TASK_INTERRUPTIBLE);
379 [ # # ][ # # ]: 0 : if (!list_empty(&worker->pending) ||
380 : : !list_empty(&worker->prio_pending)) {
381 : : spin_unlock_irq(&worker->lock);
382 : 0 : set_current_state(TASK_RUNNING);
383 : 0 : goto again;
384 : : }
385 : :
386 : : /*
387 : : * this makes sure we get a wakeup when someone
388 : : * adds something new to the queue
389 : : */
390 : 0 : worker->working = 0;
391 : : spin_unlock_irq(&worker->lock);
392 : :
393 [ # # ]: 0 : if (!kthread_should_stop()) {
394 : 0 : schedule_timeout(HZ * 120);
395 [ # # # # ]: 0 : if (!worker->working &&
396 : 0 : try_worker_shutdown(worker)) {
397 : : return 0;
398 : : }
399 : : }
400 : : }
401 : 0 : __set_current_state(TASK_RUNNING);
402 : : }
403 [ # # ]: 0 : } while (!kthread_should_stop());
404 : : return 0;
405 : : }
406 : :
407 : : /*
408 : : * this will wait for all the worker threads to shut down
409 : : */
410 : 0 : void btrfs_stop_workers(struct btrfs_workers *workers)
411 : : {
412 : : struct list_head *cur;
413 : : struct btrfs_worker_thread *worker;
414 : : int can_stop;
415 : :
416 : : spin_lock_irq(&workers->lock);
417 : 0 : workers->stopping = 1;
418 : 0 : list_splice_init(&workers->idle_list, &workers->worker_list);
419 [ # # ]: 0 : while (!list_empty(&workers->worker_list)) {
420 : : cur = workers->worker_list.next;
421 : 0 : worker = list_entry(cur, struct btrfs_worker_thread,
422 : : worker_list);
423 : :
424 : 0 : atomic_inc(&worker->refs);
425 : 0 : workers->num_workers -= 1;
426 [ # # ]: 0 : if (!list_empty(&worker->worker_list)) {
427 : : list_del_init(&worker->worker_list);
428 : 0 : put_worker(worker);
429 : : can_stop = 1;
430 : : } else
431 : : can_stop = 0;
432 : : spin_unlock_irq(&workers->lock);
433 [ # # ]: 0 : if (can_stop)
434 : 0 : kthread_stop(worker->task);
435 : : spin_lock_irq(&workers->lock);
436 : 0 : put_worker(worker);
437 : : }
438 : : spin_unlock_irq(&workers->lock);
439 : 0 : }
440 : :
441 : : /*
442 : : * simple init on struct btrfs_workers
443 : : */
444 : 0 : void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
445 : : struct btrfs_workers *async_helper)
446 : : {
447 : 0 : workers->num_workers = 0;
448 : 0 : workers->num_workers_starting = 0;
449 : 0 : INIT_LIST_HEAD(&workers->worker_list);
450 : 0 : INIT_LIST_HEAD(&workers->idle_list);
451 : 0 : INIT_LIST_HEAD(&workers->order_list);
452 : 0 : INIT_LIST_HEAD(&workers->prio_order_list);
453 : 0 : spin_lock_init(&workers->lock);
454 : 0 : spin_lock_init(&workers->order_lock);
455 : 0 : workers->max_workers = max;
456 : 0 : workers->idle_thresh = 32;
457 : 0 : workers->name = name;
458 : 0 : workers->ordered = 0;
459 : 0 : workers->atomic_start_pending = 0;
460 : 0 : workers->atomic_worker_start = async_helper;
461 : 0 : workers->stopping = 0;
462 : 0 : }
463 : :
464 : : /*
465 : : * starts a new worker thread. This does not enforce the max worker
466 : : * count in case you need to temporarily go past it.
467 : : */
468 : 0 : static int __btrfs_start_workers(struct btrfs_workers *workers)
469 : : {
470 : : struct btrfs_worker_thread *worker;
471 : : int ret = 0;
472 : :
473 : : worker = kzalloc(sizeof(*worker), GFP_NOFS);
474 [ # # ]: 0 : if (!worker) {
475 : : ret = -ENOMEM;
476 : : goto fail;
477 : : }
478 : :
479 : 0 : INIT_LIST_HEAD(&worker->pending);
480 : 0 : INIT_LIST_HEAD(&worker->prio_pending);
481 : 0 : INIT_LIST_HEAD(&worker->worker_list);
482 : 0 : spin_lock_init(&worker->lock);
483 : :
484 : 0 : atomic_set(&worker->num_pending, 0);
485 : 0 : atomic_set(&worker->refs, 1);
486 : 0 : worker->workers = workers;
487 : 0 : worker->task = kthread_create(worker_loop, worker,
488 : : "btrfs-%s-%d", workers->name,
489 : : workers->num_workers + 1);
490 [ # # ]: 0 : if (IS_ERR(worker->task)) {
491 : : ret = PTR_ERR(worker->task);
492 : 0 : goto fail;
493 : : }
494 : :
495 : : spin_lock_irq(&workers->lock);
496 [ # # ]: 0 : if (workers->stopping) {
497 : : spin_unlock_irq(&workers->lock);
498 : : ret = -EINVAL;
499 : : goto fail_kthread;
500 : : }
501 : 0 : list_add_tail(&worker->worker_list, &workers->idle_list);
502 : 0 : worker->idle = 1;
503 : 0 : workers->num_workers++;
504 : 0 : workers->num_workers_starting--;
505 [ # # ]: 0 : WARN_ON(workers->num_workers_starting < 0);
506 : : spin_unlock_irq(&workers->lock);
507 : :
508 : 0 : wake_up_process(worker->task);
509 : 0 : return 0;
510 : :
511 : : fail_kthread:
512 : 0 : kthread_stop(worker->task);
513 : : fail:
514 : 0 : kfree(worker);
515 : : spin_lock_irq(&workers->lock);
516 : 0 : workers->num_workers_starting--;
517 : : spin_unlock_irq(&workers->lock);
518 : 0 : return ret;
519 : : }
520 : :
521 : 0 : int btrfs_start_workers(struct btrfs_workers *workers)
522 : : {
523 : : spin_lock_irq(&workers->lock);
524 : 0 : workers->num_workers_starting++;
525 : : spin_unlock_irq(&workers->lock);
526 : 0 : return __btrfs_start_workers(workers);
527 : : }
528 : :
529 : : /*
530 : : * run through the list and find a worker thread that doesn't have a lot
531 : : * to do right now. This can return NULL if we aren't yet at the thread
532 : : * count limit and all of the threads are busy.
533 : : */
534 : 0 : static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
535 : : {
536 : : struct btrfs_worker_thread *worker;
537 : : struct list_head *next;
538 : : int enforce_min;
539 : :
540 : 0 : enforce_min = (workers->num_workers + workers->num_workers_starting) <
541 : 0 : workers->max_workers;
542 : :
543 : : /*
544 : : * if we find an idle thread, don't move it to the end of the
545 : : * idle list. This improves the chance that the next submission
546 : : * will reuse the same thread, and maybe catch it while it is still
547 : : * working.
548 : : */
549 [ # # ]: 0 : if (!list_empty(&workers->idle_list)) {
550 : : next = workers->idle_list.next;
551 : : worker = list_entry(next, struct btrfs_worker_thread,
552 : : worker_list);
553 : 0 : return worker;
554 : : }
555 [ # # ][ # # ]: 0 : if (enforce_min || list_empty(&workers->worker_list))
556 : : return NULL;
557 : :
558 : : /*
559 : : * if we pick a busy task, move the task to the end of the list.
560 : : * hopefully this will keep things somewhat evenly balanced.
561 : : * Do the move in batches based on the sequence number. This groups
562 : : * requests submitted at roughly the same time onto the same worker.
563 : : */
564 : : next = workers->worker_list.next;
565 : 0 : worker = list_entry(next, struct btrfs_worker_thread, worker_list);
566 : 0 : worker->sequence++;
567 : :
568 [ # # ]: 0 : if (worker->sequence % workers->idle_thresh == 0)
569 : : list_move_tail(next, &workers->worker_list);
570 : 0 : return worker;
571 : : }
572 : :
573 : : /*
574 : : * selects a worker thread to take the next job. This will either find
575 : : * an idle worker, start a new worker up to the max count, or just return
576 : : * one of the existing busy workers.
577 : : */
578 : 0 : static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
579 : : {
580 : : struct btrfs_worker_thread *worker;
581 : : unsigned long flags;
582 : : struct list_head *fallback;
583 : : int ret;
584 : :
585 : 0 : spin_lock_irqsave(&workers->lock, flags);
586 : : again:
587 : 0 : worker = next_worker(workers);
588 : :
589 [ # # ]: 0 : if (!worker) {
590 [ # # ]: 0 : if (workers->num_workers + workers->num_workers_starting >=
591 : 0 : workers->max_workers) {
592 : : goto fallback;
593 [ # # ]: 0 : } else if (workers->atomic_worker_start) {
594 : 0 : workers->atomic_start_pending = 1;
595 : 0 : goto fallback;
596 : : } else {
597 : 0 : workers->num_workers_starting++;
598 : : spin_unlock_irqrestore(&workers->lock, flags);
599 : : /* we're below the limit, start another worker */
600 : 0 : ret = __btrfs_start_workers(workers);
601 : 0 : spin_lock_irqsave(&workers->lock, flags);
602 [ # # ]: 0 : if (ret)
603 : : goto fallback;
604 : : goto again;
605 : : }
606 : : }
607 : : goto found;
608 : :
609 : : fallback:
610 : : fallback = NULL;
611 : : /*
612 : : * we have failed to find an available worker; just
613 : : * return the first one we can find.
614 : : */
615 [ # # ]: 0 : if (!list_empty(&workers->worker_list))
616 : : fallback = workers->worker_list.next;
617 [ # # ]: 0 : if (!list_empty(&workers->idle_list))
618 : : fallback = workers->idle_list.next;
619 [ # # ]: 0 : BUG_ON(!fallback);
620 : 0 : worker = list_entry(fallback,
621 : : struct btrfs_worker_thread, worker_list);
622 : : found:
623 : : /*
624 : : * this makes sure the worker doesn't exit before it is placed
625 : : * onto a busy/idle list
626 : : */
627 : 0 : atomic_inc(&worker->num_pending);
628 : : spin_unlock_irqrestore(&workers->lock, flags);
629 : 0 : return worker;
630 : : }
631 : :
632 : : /*
633 : : * btrfs_requeue_work just puts the work item back on the tail of the list
634 : : * it was taken from. It is intended for use with long-running work functions
635 : : * that make some progress and want to give the cpu up for others.
636 : : */
637 : 0 : void btrfs_requeue_work(struct btrfs_work *work)
638 : : {
639 : 0 : struct btrfs_worker_thread *worker = work->worker;
640 : : unsigned long flags;
641 : : int wake = 0;
642 : :
643 [ # # ]: 0 : if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
644 : 0 : return;
645 : :
646 : 0 : spin_lock_irqsave(&worker->lock, flags);
647 [ # # ]: 0 : if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
648 : 0 : list_add_tail(&work->list, &worker->prio_pending);
649 : : else
650 : 0 : list_add_tail(&work->list, &worker->pending);
651 : 0 : atomic_inc(&worker->num_pending);
652 : :
653 : : /* by definition we're busy, take ourselves off the idle
654 : : * list
655 : : */
656 [ # # ]: 0 : if (worker->idle) {
657 : 0 : spin_lock(&worker->workers->lock);
658 : 0 : worker->idle = 0;
659 : 0 : list_move_tail(&worker->worker_list,
660 : 0 : &worker->workers->worker_list);
661 : 0 : spin_unlock(&worker->workers->lock);
662 : : }
663 [ # # ]: 0 : if (!worker->working) {
664 : : wake = 1;
665 : 0 : worker->working = 1;
666 : : }
667 : :
668 [ # # ]: 0 : if (wake)
669 : 0 : wake_up_process(worker->task);
670 : : spin_unlock_irqrestore(&worker->lock, flags);
671 : : }
672 : :
673 : 0 : void btrfs_set_work_high_prio(struct btrfs_work *work)
674 : : {
675 : 0 : set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
676 : 0 : }
677 : :
678 : : /*
679 : : * places a struct btrfs_work into the pending queue of one of the kthreads
680 : : */
681 : 0 : void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
682 : : {
683 : : struct btrfs_worker_thread *worker;
684 : : unsigned long flags;
685 : : int wake = 0;
686 : :
687 : : /* don't requeue something already on a list */
688 [ # # ]: 0 : if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
689 : 0 : return;
690 : :
691 : 0 : worker = find_worker(workers);
692 [ # # ]: 0 : if (workers->ordered) {
693 : : /*
694 : : * you're not allowed to do ordered queues from an
695 : : * interrupt handler
696 : : */
697 : : spin_lock(&workers->order_lock);
698 [ # # ]: 0 : if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
699 : 0 : list_add_tail(&work->order_list,
700 : : &workers->prio_order_list);
701 : : } else {
702 : 0 : list_add_tail(&work->order_list, &workers->order_list);
703 : : }
704 : : spin_unlock(&workers->order_lock);
705 : : } else {
706 : 0 : INIT_LIST_HEAD(&work->order_list);
707 : : }
708 : :
709 : 0 : spin_lock_irqsave(&worker->lock, flags);
710 : :
711 [ # # ]: 0 : if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
712 : 0 : list_add_tail(&work->list, &worker->prio_pending);
713 : : else
714 : 0 : list_add_tail(&work->list, &worker->pending);
715 : 0 : check_busy_worker(worker);
716 : :
717 : : /*
718 : : * avoid calling into wake_up_process if this thread has already
719 : : * been kicked
720 : : */
721 [ # # ]: 0 : if (!worker->working)
722 : : wake = 1;
723 : 0 : worker->working = 1;
724 : :
725 [ # # ]: 0 : if (wake)
726 : 0 : wake_up_process(worker->task);
727 : : spin_unlock_irqrestore(&worker->lock, flags);
728 : : }
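
For reference, the pool API in this file is used roughly as in the sketch below. This is illustrative only: struct btrfs_work and struct btrfs_workers are declared in async-thread.h, which is not part of this listing, error handling is abbreviated, and example_func, example_pool, start_helper, example_item and the thread counts are made-up names and values, not identifiers from the btrfs code.

#include "async-thread.h"

/* hypothetical callback; in btrfs the btrfs_work is normally embedded in a
 * larger per-request structure and recovered with container_of() */
static void example_func(struct btrfs_work *work)
{
	/* deferred processing runs here, on one of the pool threads */
}

static struct btrfs_workers start_helper;	/* single-thread start helper */
static struct btrfs_workers example_pool;
static struct btrfs_work example_item;

static int example_setup_and_queue(void)
{
	int ret;

	/* the helper queue never has more than one thread and no helper of its own */
	btrfs_init_workers(&start_helper, "start-helper", 1, NULL);
	/* extra threads for this pool are started through the helper, as
	 * described in the comment above struct worker_start */
	btrfs_init_workers(&example_pool, "example", 8, &start_helper);

	ret = btrfs_start_workers(&start_helper);
	if (ret)
		return ret;
	ret = btrfs_start_workers(&example_pool);
	if (ret) {
		btrfs_stop_workers(&start_helper);
		return ret;
	}

	example_item.func = example_func;
	btrfs_queue_worker(&example_pool, &example_item);
	return 0;
}

static void example_teardown(void)
{
	btrfs_stop_workers(&example_pool);
	btrfs_stop_workers(&start_helper);
}

In btrfs itself the btrfs_work is never a long-lived static like this; it is embedded in the structure describing the request and freed by the work function itself, or by ordered_free for ordered queues, which is why worker_loop above treats 'work' as gone once func has returned.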