From 71fe9db332172db3aad49a7a18c848f9ac8e14fb Mon Sep 17 00:00:00 2001
From: Serhei Makarov
Date: Thu, 3 Nov 2022 12:56:11 -0400
Subject: [PATCH] Revert "runtime: stat: avoid allocating stat_data memory on
 offline CPUs"

This reverts commit ba42203ae957bb62805e18eac30459eb74cde3d2.

There are indications that on some non-x86 platforms (ppc64le) this
patch may be causing problems, i.e. 'sleeping function called in
invalid context' warnings. Reverting for the release; the patch may
return once the cause of the problem is clearer.
---
 runtime/linux/stat_runtime.h | 30 +++++-------------------------
 runtime/stat.c               |  8 --------
 2 files changed, 5 insertions(+), 33 deletions(-)

diff --git a/runtime/linux/stat_runtime.h b/runtime/linux/stat_runtime.h
index bded327ed..edb239ae5 100644
--- a/runtime/linux/stat_runtime.h
+++ b/runtime/linux/stat_runtime.h
@@ -21,9 +21,6 @@
 #endif
 #define STAT_PUT_CPU() do {} while (0)
 
-#define _stp_stat_get_agg(stat) ((stat)->agg)
-#define _stp_stat_per_cpu_ptr(stat, cpu) *per_cpu_ptr((stat)->sd, (cpu))
-
 /** Stat struct. Maps do not need this */
 typedef struct _Stat {
         struct _Hist hist;
@@ -32,15 +29,12 @@ typedef struct _Stat {
         stat_data *agg;
 
         /* The stat data is per-cpu data. */
-        stat_data **sd;
+        stat_data *sd;
 } *Stat;
 
-static void _stp_stat_free(Stat st);
-
 static Stat _stp_stat_alloc(size_t stat_data_size)
 {
         Stat st;
-        unsigned int cpu;
 
         if (stat_data_size < sizeof(stat_data))
                 return NULL;
@@ -56,40 +50,26 @@ static Stat _stp_stat_alloc(size_t stat_data_size)
                 return NULL;
         }
 
-        st->sd = _stp_alloc_percpu (sizeof (stat_data *));
+        st->sd = _stp_alloc_percpu (stat_data_size);
         if (st->sd == NULL) {
                 _stp_kfree (st->agg);
                 _stp_kfree (st);
                 return NULL;
         }
 
-        for_each_online_cpu(cpu) {
-                stat_data *sd = _stp_vzalloc_node(stat_data_size,
-                                                  cpu_to_node(cpu));
-                if (unlikely(sd == NULL)) {
-                        _stp_stat_free (st);
-                        return NULL;
-                }
-                *per_cpu_ptr(st->sd, cpu) = sd;
-        }
-
         return st;
 }
 
 static void _stp_stat_free(Stat st)
 {
         if (st) {
-                unsigned int cpu;
-
-                for_each_possible_cpu(cpu) {
-                        stat_data *sd = _stp_stat_per_cpu_ptr(st, cpu);
-                        if (likely(sd))
-                                _stp_vfree (sd);
-                }
                 _stp_free_percpu (st->sd);
                 _stp_kfree (st->agg);
                 _stp_kfree (st);
         }
 }
 
+#define _stp_stat_get_agg(stat) ((stat)->agg)
+#define _stp_stat_per_cpu_ptr(stat, cpu) per_cpu_ptr((stat)->sd, (cpu))
+
 #endif /* _LINUX_STAT_RUNTIME_H_ */
diff --git a/runtime/stat.c b/runtime/stat.c
index 8a91b0203..7fc8ac525 100644
--- a/runtime/stat.c
+++ b/runtime/stat.c
@@ -149,8 +149,6 @@ static inline void _stp_stat_add (Stat st, int64_t val, int stat_op_count,
                                   int stat_op_max, int stat_op_variance)
 {
         stat_data *sd = _stp_stat_per_cpu_ptr (st, STAT_GET_CPU());
-        if (unlikely(sd == NULL))
-                return;
         STAT_LOCK(sd);
         __stp_stat_add (&st->hist, sd, val, stat_op_count, stat_op_sum,
                         stat_op_min, stat_op_max, stat_op_variance);
@@ -190,8 +188,6 @@ static stat_data *_stp_stat_get (Stat st, int clear)
 
         for_each_possible_cpu(i) {
                 stat_data *sd = _stp_stat_per_cpu_ptr (st, i);
-                if (unlikely(sd == NULL))
-                        continue;
                 STAT_LOCK(sd);
                 if (sd->count) {
                         agg->shift = sd->shift;
@@ -224,8 +220,6 @@ static stat_data *_stp_stat_get (Stat st, int clear)
                  */
                 for_each_possible_cpu(i) {
                         sd = _stp_stat_per_cpu_ptr (st, i);
-                        if (unlikely(sd == NULL))
-                                continue;
                         STAT_LOCK(sd);
                         if (sd->count) {
                                 S1 += sd->count * (sd->avg_s - agg->avg_s) * (sd->avg_s - agg->avg_s);
@@ -269,8 +263,6 @@ static void _stp_stat_clear (Stat st)
 
         for_each_possible_cpu(i) {
                 stat_data *sd = _stp_stat_per_cpu_ptr (st, i);
-                if (unlikely(sd == NULL))
-                        continue;
                 STAT_LOCK(sd);
                 _stp_stat_clear_data (st, sd);
                 STAT_UNLOCK(sd);
-- 
2.43.5
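
A note on the warning cited in the commit message: vmalloc-family allocators
(such as the _stp_vzalloc_node() call removed above) allocate with GFP_KERNEL
and may sleep, so calling them from an atomic context (preemption disabled,
spinlock held, and so on) trips the kernel's "sleeping function called from
invalid context" check when CONFIG_DEBUG_ATOMIC_SLEEP is enabled. The sketch
below is a hypothetical, standalone demo module, not SystemTap code and not
part of this patch, that reproduces the same class of warning under that
assumption; the module and function names are invented for illustration.

/* Hypothetical demo module (not part of this patch): with
 * CONFIG_DEBUG_ATOMIC_SLEEP enabled, a might-sleep allocation made
 * while preemption is disabled prints
 * "BUG: sleeping function called from invalid context".
 */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/preempt.h>

static int __init sleep_ctx_demo_init(void)
{
        void *p;

        preempt_disable();      /* enter an atomic section */
        p = vzalloc(4096);      /* GFP_KERNEL allocation; may sleep, so the
                                 * debug check fires here */
        preempt_enable();

        vfree(p);               /* vfree(NULL) is a safe no-op on failure */
        return 0;
}

static void __exit sleep_ctx_demo_exit(void)
{
}

module_init(sleep_ctx_demo_init);
module_exit(sleep_ctx_demo_exit);
MODULE_LICENSE("GPL");

If the reverted per-CPU allocation in _stp_stat_alloc() ever ran on a path
where sleeping is not allowed on the affected platforms, it could produce the
same diagnostic, which would be consistent with (though not proven by) the
reports mentioned above.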