From 0cd1ef8b0877fce91ebe138e895bf228c618beef Mon Sep 17 00:00:00 2001
From: Anders Roxell <anders.roxell@linaro.org>
Date: Mon, 27 May 2019 09:51:54 +0200
Subject: [PATCH] seqlock: mark functions as __always_inline to not be
 traceable

With the function graph tracer, each traced function takes its
timestamp by calling sched_clock() through trace_clock_local(), or
__ktime_get_fast_ns() through ktime_get_mono_fast_ns(). We must ensure
that the seqcount helpers used on those paths,
raw_read_seqcount()/read_seqcount_retry() and
raw_read_seqcount_latch()/read_seqcount_retry(), do not in turn trigger
the graph tracer. All four functions are marked inline; however, if
CONFIG_OPTIMIZE_INLINING is set, the compiler may leave them out of
line, making them traceable when they must not be.

Rework so that the helpers used by sched_clock() (reached through
trace_clock_local()) and by __ktime_get_fast_ns() (reached through
ktime_get_mono_fast_ns()) are annotated with __always_inline; this
covers raw_read_seqcount()/read_seqcount_retry() as well as
raw_read_seqcount_latch()/read_seqcount_retry(). They will now be
inlined even if CONFIG_OPTIMIZE_INLINING is turned on.

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
---
 include/linux/seqlock.h | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
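
Note, not part of the patch: below is a minimal, purely illustrative
sketch of the kind of seqcount-protected clock read that sits behind
the call paths named above. example_clock_read() and its cycles
argument are made up for illustration; only raw_read_seqcount(),
read_seqcount_retry() and READ_ONCE() are real kernel interfaces. The
point is that if the compiler emits those helpers out of line, each
loop iteration contains traceable calls, and the graph tracer would
recurse into them while taking its own timestamp.

#include <linux/compiler.h>
#include <linux/seqlock.h>
#include <linux/types.h>

/* Illustrative only: a lockless, seqcount-protected clock read. */
static __always_inline u64 example_clock_read(const seqcount_t *seq,
					      const u64 *cycles)
{
	unsigned int start;
	u64 now;

	do {
		/* These helpers must not become ftrace call sites. */
		start = raw_read_seqcount(seq);
		now = READ_ONCE(*cycles);
	} while (read_seqcount_retry(seq, start));

	return now;
}

With the hunks below applied, the helpers are folded into their caller
even when CONFIG_OPTIMIZE_INLINING is enabled, so no such call sites
are generated.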

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 0491d963d47e..06cc474fb79e 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -127,7 +127,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
  * seqcount without any lockdep checking and without checking or
  * masking the LSB. Calling code is responsible for handling that.
  */
-static inline unsigned raw_read_seqcount(const seqcount_t *s)
+static __always_inline unsigned raw_read_seqcount(const seqcount_t *s)
 {
 	unsigned ret = READ_ONCE(s->sequence);
 	smp_rmb();
@@ -200,7 +200,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
  * Use carefully, only in critical code, and comment how the barrier is
  * provided.
  */
-static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+static
+__always_inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	return unlikely(s->sequence != start);
 }
@@ -215,7 +216,8 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
  * If the critical section was invalid, it must be ignored (and typically
  * retried).
  */
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+static
+__always_inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
 	return __read_seqcount_retry(s, start);
@@ -276,7 +278,7 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
 	s->sequence++;
 }
 
-static inline int raw_read_seqcount_latch(seqcount_t *s)
+static __always_inline int raw_read_seqcount_latch(seqcount_t *s)
 {
 	/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
 	int seq = READ_ONCE(s->sequence); /* ^^^ */
-- 
2.20.1

