Branch data Line data Source code
1 : :
2 : : #ifdef CONFIG_SCHEDSTATS
3 : :
4 : : /*
5 : : * Expects runqueue lock to be held for atomicity of update
6 : : */
7 : : static inline void
8 : : rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
9 : : {
10 [ + ]: 14810894 : if (rq) {
11 : 14813900 : rq->rq_sched_info.run_delay += delta;
12 : 14813900 : rq->rq_sched_info.pcount++;
13 : : }
14 : : }
15 : :
16 : : /*
17 : : * Expects runqueue lock to be held for atomicity of update
18 : : */
19 : : static inline void
20 : : rq_sched_info_depart(struct rq *rq, unsigned long long delta)
21 : : {
22 [ + + ]: 14813036 : if (rq)
23 : 14812888 : rq->rq_cpu_time += delta;
24 : : }
25 : :
26 : : static inline void
27 : : rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
28 : : {
29 [ + ]: 10909777 : if (rq)
30 : 10909815 : rq->rq_sched_info.run_delay += delta;
31 : : }
32 : : # define schedstat_inc(rq, field) do { (rq)->field++; } while (0)
33 : : # define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
34 : : # define schedstat_set(var, val) do { var = (val); } while (0)
35 : : #else /* !CONFIG_SCHEDSTATS */
36 : : static inline void
37 : : rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
38 : : {}
39 : : static inline void
40 : : rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
41 : : {}
42 : : static inline void
43 : : rq_sched_info_depart(struct rq *rq, unsigned long long delta)
44 : : {}
45 : : # define schedstat_inc(rq, field) do { } while (0)
46 : : # define schedstat_add(rq, field, amt) do { } while (0)
47 : : # define schedstat_set(var, val) do { } while (0)
48 : : #endif
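
With CONFIG_SCHEDSTATS disabled, the schedstat_*() macros above expand to empty statements, so instrumented call sites cost nothing and need no #ifdef guards of their own. A minimal userspace sketch of the same compile-time switch; STATS, stat_inc() and stat_add() are hypothetical names, not kernel API:

    #include <stdio.h>

    #define STATS 1    /* stand-in for CONFIG_SCHEDSTATS */

    struct counters { unsigned long yld_count; };

    #if STATS
    # define stat_inc(p, field)      do { (p)->field++; } while (0)
    # define stat_add(p, field, amt) do { (p)->field += (amt); } while (0)
    #else
    # define stat_inc(p, field)      do { } while (0)
    # define stat_add(p, field, amt) do { } while (0)
    #endif

    int main(void)
    {
            struct counters c = { 0 };

            stat_inc(&c, yld_count);          /* compiles away if !STATS */
            stat_add(&c, yld_count, 2);
            printf("yld_count = %lu\n", c.yld_count);   /* 3 when STATS=1 */
            return 0;
    }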
49 : :
50 : : #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
51 : : static inline void sched_info_reset_dequeued(struct task_struct *t)
52 : : {
53 : 25720671 : t->sched_info.last_queued = 0;
54 : : }
55 : :
56 : : /*
57 : : * We are interested in knowing how long it was from the *first* time a
58 : : * task was queued to the time that it finally hit a cpu; we call this routine
59 : : * from dequeue_task() to account for possible rq->clock skew across cpus. The
60 : : * delta taken on each cpu would annul the skew.
61 : : */
62 : 10909777 : static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
63 : : {
64 : : unsigned long long now = rq_clock(rq), delta = 0;
65 : :
66 : : if (unlikely(sched_info_on()))
67 [ + + ]: 10909777 : if (t->sched_info.last_queued)
68 : 35696 : delta = now - t->sched_info.last_queued;
69 : : sched_info_reset_dequeued(t);
70 : 10909777 : t->sched_info.run_delay += delta;
71 : :
72 : : rq_sched_info_dequeued(rq, delta);
73 : : }
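
The skew cancellation described in the comment can be made concrete: each delta is the difference of two reads of the same CPU's runqueue clock, so a constant per-cpu offset drops out of the sum. The same holds for the arrival delta taken by sched_info_arrive() below. A self-contained model, assuming fixed hypothetical per-cpu offsets (none of these names are kernel code):

    #include <stdio.h>

    static unsigned long long skew[2] = { 0, 5000 };   /* per-cpu clock offsets */

    static unsigned long long rq_clock_model(int cpu, unsigned long long real)
    {
            return real + skew[cpu];        /* each cpu's clock is skewed */
    }

    int main(void)
    {
            unsigned long long run_delay = 0, q, d;

            /* queued on cpu 0 at real time 100, dequeued (migrated) at 160 */
            q = rq_clock_model(0, 100);
            d = rq_clock_model(0, 160);
            run_delay += d - q;             /* 60: cpu 0's offset cancels */

            /* re-queued on cpu 1 at 160, finally hits the cpu at 200 */
            q = rq_clock_model(1, 160);
            d = rq_clock_model(1, 200);
            run_delay += d - q;             /* 40: cpu 1's offset cancels */

            printf("run_delay = %llu\n", run_delay);    /* 100, the true wait */
            return 0;
    }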
74 : :
75 : : /*
76 : : * Called when a task finally hits the cpu. We can now calculate how
77 : : * long it was waiting to run. We also note when it began so that we
78 : : * can keep stats on how long its timeslice is.
79 : : */
80 : 0 : static void sched_info_arrive(struct rq *rq, struct task_struct *t)
81 : : {
82 : : unsigned long long now = rq_clock(rq), delta = 0;
83 : :
84 [ + ]: 14810894 : if (t->sched_info.last_queued)
85 : 14811707 : delta = now - t->sched_info.last_queued;
86 : : sched_info_reset_dequeued(t);
87 : 14810894 : t->sched_info.run_delay += delta;
88 : 14810894 : t->sched_info.last_arrival = now;
89 : 14810894 : t->sched_info.pcount++;
90 : :
91 : : rq_sched_info_arrive(rq, delta);
92 : 0 : }
93 : :
94 : : /*
95 : : * This function is only called from enqueue_task(), and it only updates
96 : : * the timestamp if it is not already set. It's assumed that
97 : : * sched_info_dequeued() will clear that stamp when appropriate.
98 : : */
99 : 10909827 : static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
100 : : {
101 : : if (unlikely(sched_info_on()))
102 [ + + + ]: 14850771 : if (!t->sched_info.last_queued)
103 : 14850671 : t->sched_info.last_queued = rq_clock(rq);
104 : : }
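
The guard above means last_queued keeps the *first* queue time across repeated enqueues, until sched_info_dequeued() or sched_info_arrive() resets it. A tiny model of this first-stamp-wins behaviour (illustrative names only):

    #include <stdio.h>

    struct si_model { unsigned long long last_queued; };

    /* Mirror of the guard: only stamp when no stamp is pending. */
    static void queued_model(struct si_model *si, unsigned long long now)
    {
            if (!si->last_queued)
                    si->last_queued = now;
    }

    int main(void)
    {
            struct si_model si = { 0 };

            queued_model(&si, 100);   /* first enqueue: stamp taken  */
            queued_model(&si, 150);   /* re-enqueue: stamp preserved */
            printf("last_queued = %llu\n", si.last_queued);   /* 100 */
            return 0;
    }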
105 : :
106 : : /*
107 : : * Called when a process involuntarily stops being the running process,
108 : : * typically because its time slice expired (this may also be called when
109 : : * switching to the idle task). Now we can calculate how long we ran.
110 : : * Also, if the process is still in the TASK_RUNNING state, call
111 : : * sched_info_queued() to mark that it has now again started waiting on
112 : : * the runqueue.
113 : : */
114 : 14813036 : static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
115 : : {
116 : 14813036 : unsigned long long delta = rq_clock(rq) -
117 : 14813036 : t->sched_info.last_arrival;
118 : :
119 : : rq_sched_info_depart(rq, delta);
120 : :
121 [ + + ]: 14813036 : if (t->state == TASK_RUNNING)
122 : : sched_info_queued(rq, t);
123 : : }
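
Taken together, depart and queued make a preempted but still TASK_RUNNING task begin a new wait interval at the instant it leaves the cpu, so run time and wait time tile the timeline with no gap. A sketch of that hand-off; this is model code under those assumptions, not the kernel functions:

    #include <stdbool.h>
    #include <stdio.h>

    struct task_model {
            unsigned long long last_queued, last_arrival;
            unsigned long long cpu_time;
            bool runnable;
    };

    static void depart_model(struct task_model *t, unsigned long long now)
    {
            t->cpu_time += now - t->last_arrival;   /* time spent on cpu */
            if (t->runnable && !t->last_queued)
                    t->last_queued = now;           /* starts waiting again */
    }

    int main(void)
    {
            struct task_model t = { .last_arrival = 100, .runnable = true };

            depart_model(&t, 160);    /* preempted after running 100..160 */
            printf("cpu_time = %llu, last_queued = %llu\n",
                   t.cpu_time, t.last_queued);      /* 60 and 160 */
            return 0;
    }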
124 : :
125 : : /*
126 : : * Called when tasks are switched involuntarily, typically due to expiring
127 : : * their time slice. (This may also be called when switching to or from
128 : : * the idle task.) We are only called when prev != next.
129 : : */
130 : : static inline void
131 : : __sched_info_switch(struct rq *rq,
132 : : struct task_struct *prev, struct task_struct *next)
133 : : {
134 : : /*
135 : : * prev now departs the cpu. It's not interesting to record
136 : : * stats about how efficient we were at scheduling the idle
137 : : * process, however.
138 : : */
139 [ + + ]: 138397305 : if (prev != rq->idle)
140 : : sched_info_depart(rq, prev);
141 : :
142 [ + + ]: 138397305 : if (next != rq->idle)
143 : 14812106 : sched_info_arrive(rq, next);
144 : : }
145 : : static inline void
146 : : sched_info_switch(struct rq *rq,
147 : : struct task_struct *prev, struct task_struct *next)
148 : : {
149 : : if (unlikely(sched_info_on()))
150 : : __sched_info_switch(rq, prev, next);
151 : : }
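
Because idle is filtered on both sides, a switch between two ordinary tasks records one depart plus one arrive, while a switch to or from idle records only one of the two. A toy driver showing just that filtering (names are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Stats are recorded only for non-idle tasks on either side. */
    static void switch_model(const char *prev, const char *next)
    {
            if (strcmp(prev, "idle") != 0)
                    printf("depart(%s)\n", prev);
            if (strcmp(next, "idle") != 0)
                    printf("arrive(%s)\n", next);
    }

    int main(void)
    {
            switch_model("taskA", "taskB");   /* depart(taskA), arrive(taskB) */
            switch_model("taskB", "idle");    /* depart(taskB) only           */
            return 0;
    }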
152 : : #else
153 : : #define sched_info_queued(rq, t) do { } while (0)
154 : : #define sched_info_reset_dequeued(t) do { } while (0)
155 : : #define sched_info_dequeued(rq, t) do { } while (0)
156 : : #define sched_info_depart(rq, t) do { } while (0)
157 : : #define sched_info_arrive(rq, next) do { } while (0)
158 : : #define sched_info_switch(rq, t, next) do { } while (0)
159 : : #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
160 : :
161 : : /*
162 : : * The following are functions that support scheduler-internal time accounting.
163 : : * These functions are generally called at the timer tick. None of this depends
164 : : * on CONFIG_SCHEDSTATS.
165 : : */
166 : :
167 : : /**
168 : : * cputimer_running - return true if cputimer is running
169 : : *
170 : : * @tsk: Pointer to target task.
171 : : */
172 : : static inline bool cputimer_running(struct task_struct *tsk)
173 : :
174 : : {
175 : : struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
176 : :
177 [ + + ][ # # ][ + + ]: 265808431 : if (!cputimer->running)
178 : : return false;
179 : :
180 : : /*
181 : : * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
182 : : * in __exit_signal(), we no longer account further cputime consumed
183 : : * by that task to the signal struct, even though the task can still
184 : : * be ticking after __exit_signal().
185 : : *
186 : : * In order to keep a consistent behaviour between thread group cputime
187 : : * and thread group cputimer accounting, let's also ignore the cputime
188 : : * elapsing after __exit_signal() for any running thread group timer.
189 : : *
190 : : * This makes sure that POSIX CPU clocks and timers are synchronized, so
191 : : * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
192 : : * clock delta is behind the expiring timer value.
193 : : */
194 [ + - ][ # # ][ + - ]: 4062 : if (unlikely(!tsk->sighand))
195 : : return false;
196 : :
197 : : return true;
198 : : }
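
The two early returns encode a single predicate: group cputime is accounted only while a group timer is armed, and only until the task's times have been flushed at exit (tsk->sighand is cleared in __exit_signal()). A compact model of that predicate, with hypothetical field names:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct task_model {
            bool timer_running;   /* a group-wide posix cpu timer is armed */
            void *sighand;        /* NULL once __exit_signal() has run     */
    };

    static bool cputimer_running_model(const struct task_model *t)
    {
            if (!t->timer_running)
                    return false;         /* common fast path */
            if (t->sighand == NULL)
                    return false;         /* post-exit ticks are ignored */
            return true;
    }

    int main(void)
    {
            struct task_model alive = { true, (void *)1 };
            struct task_model exited = { true, NULL };

            printf("%d %d\n", cputimer_running_model(&alive),
                   cputimer_running_model(&exited));   /* prints: 1 0 */
            return 0;
    }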
199 : :
200 : : /**
201 : : * account_group_user_time - Maintain utime for a thread group.
202 : : *
203 : : * @tsk: Pointer to task structure.
204 : : * @cputime: Time value by which to increment the utime field of the
205 : : * thread_group_cputime structure.
206 : : *
207 : : * If thread group time is being maintained, get the structure for the
208 : : * running CPU and update the utime field there.
209 : : */
210 : : static inline void account_group_user_time(struct task_struct *tsk,
211 : : cputime_t cputime)
212 : : {
213 : 6644533 : struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
214 : :
215 [ # # ][ + + ]: 6644533 : if (!cputimer_running(tsk))
216 : : return;
217 : :
218 : 1160 : raw_spin_lock(&cputimer->lock);
219 : 1160 : cputimer->cputime.utime += cputime;
220 : : raw_spin_unlock(&cputimer->lock);
221 : : }
222 : :
223 : : /**
224 : : * account_group_system_time - Maintain stime for a thread group.
225 : : *
226 : : * @tsk: Pointer to task structure.
227 : : * @cputime: Time value by which to increment the stime field of the
228 : : * thread_group_cputime structure.
229 : : *
230 : : * If thread group time is being maintained, get the structure for the
231 : : * running CPU and update the stime field there.
232 : : */
233 : : static inline void account_group_system_time(struct task_struct *tsk,
234 : : cputime_t cputime)
235 : : {
236 : 0 : struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
237 : :
238 [ # # ]: 0 : if (!cputimer_running(tsk))
239 : : return;
240 : :
241 : 840 : raw_spin_lock(&cputimer->lock);
242 : 840 : cputimer->cputime.stime += cputime;
243 : : raw_spin_unlock(&cputimer->lock);
244 : : }
245 : :
246 : : /**
247 : : * account_group_exec_runtime - Maintain exec runtime for a thread group.
248 : : *
249 : : * @tsk: Pointer to task structure.
250 : : * @ns: Time value by which to increment the sum_exec_runtime field
251 : : * of the thread_group_cputime structure.
252 : : *
253 : : * If thread group time is being maintained, get the structure for the
254 : : * running CPU and update the sum_exec_runtime field there.
255 : : */
256 : : static inline void account_group_exec_runtime(struct task_struct *tsk,
257 : : unsigned long long ns)
258 : : {
259 : 121799284 : struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
260 : :
261 [ + + ]: 121799284 : if (!cputimer_running(tsk))
262 : : return;
263 : :
264 : 2062 : raw_spin_lock(&cputimer->lock);
265 : 2062 : cputimer->cputime.sum_exec_runtime += ns;
266 : : raw_spin_unlock(&cputimer->lock);
267 : : }