diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index aaf6793ededa..340538df10b0 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -95,12 +95,20 @@ u64 notrace trace_clock_global(void)
 {
 	unsigned long flags;
 	int this_cpu;
-	u64 now;
+	u64 now, prev_time;
 
 	raw_local_irq_save(flags);
 
 	this_cpu = raw_smp_processor_id();
+
+	prev_time = READ_ONCE(trace_clock_struct.prev_time);
+	/* order the updates between prev_time and now */
+	smp_rmb();
 	now = sched_clock_cpu(this_cpu);
+
+	if ((s64)(now - prev_time) < 0)
+		now = prev_time + 1;
+
 	/*
 	 * If in an NMI context then dont risk lockups and return the
 	 * cpu_clock() time:
@@ -108,20 +116,21 @@ u64 notrace trace_clock_global(void)
 	if (unlikely(in_nmi()))
 		goto out;
 
-	arch_spin_lock(&trace_clock_struct.lock);
-
-	/*
-	 * TODO: if this happens often then maybe we should reset
-	 * my_scd->clock to prev_time+1, to make sure
-	 * we start ticking with the local clock from now on?
-	 */
-	if ((s64)(now - trace_clock_struct.prev_time) < 0)
-		now = trace_clock_struct.prev_time + 1;
-
-	trace_clock_struct.prev_time = now;
-
-	arch_spin_unlock(&trace_clock_struct.lock);
-
+	/* Tracing can cause strange recursion, always use a try lock */
+	if (arch_spin_trylock(&trace_clock_struct.lock)) {
+		/*
+		 * TODO: if this happens often then maybe we should reset
+		 * my_scd->clock to prev_time+1, to make sure
+		 * we start ticking with the local clock from now on?
+		 */
+		prev_time = READ_ONCE(trace_clock_struct.prev_time);
+		if ((s64)(now - prev_time) < 0)
+			now = prev_time + 1;
+
+		trace_clock_struct.prev_time = now;
+		/* The unlock should be the wmb for the above rmb */
+		arch_spin_unlock(&trace_clock_struct.lock);
+	}
  out:
 	raw_local_irq_restore(flags);
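
For reference, this is roughly how trace_clock_global() reads once both hunks are applied. It is a sketch rather than a substitute for the patch: the trailing return of now falls outside the hunks quoted above and is assumed from the surrounding file, and some in-function comments are abbreviated.

/*
 * Sketch only: trace_clock_global() with both hunks applied. The final
 * "return now;" is not part of the quoted hunks and is assumed here.
 */
u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now, prev_time;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();

	/* Lock-free path: snapshot prev_time before reading the local clock */
	prev_time = READ_ONCE(trace_clock_struct.prev_time);
	/* order the updates between prev_time and now */
	smp_rmb();
	now = sched_clock_cpu(this_cpu);

	/* Keep the returned value from going backwards vs. prev_time */
	if ((s64)(now - prev_time) < 0)
		now = prev_time + 1;

	/* In NMI context, don't risk lockups: skip the lock entirely */
	if (unlikely(in_nmi()))
		goto out;

	/* Tracing can cause strange recursion, always use a try lock */
	if (arch_spin_trylock(&trace_clock_struct.lock)) {
		/* Recheck against the latest prev_time now that we own the lock */
		prev_time = READ_ONCE(trace_clock_struct.prev_time);
		if ((s64)(now - prev_time) < 0)
			now = prev_time + 1;

		trace_clock_struct.prev_time = now;
		/* The unlock should be the wmb for the above rmb */
		arch_spin_unlock(&trace_clock_struct.lock);
	}
 out:
	raw_local_irq_restore(flags);

	return now;
}

The net effect: every caller gets a lock-free monotonicity check against the last published prev_time (the READ_ONCE()/smp_rmb() pair on the read side pairing with the arch_spin_unlock() on the write side), while only a caller that wins the trylock republishes prev_time. Callers that lose the lock, or that run in NMI context, simply skip the update instead of spinning, which is what the "strange recursion" comment is guarding against.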