diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 7f34d683f4075689039c9cc21094f947e5cf3f6b..9ce798816e84f9d10a9a2a10d01163e99b376258 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -449,14 +449,24 @@ static int cpufreq_stats_update(struct cpufreq_stats *stats)
 
 void cpufreq_task_stats_init(struct task_struct *p)
 {
-	size_t alloc_size;
-	void *temp;
 	unsigned long flags;
-
 	spin_lock_irqsave(&task_time_in_state_lock, flags);
 	p->time_in_state = NULL;
 	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 	WRITE_ONCE(p->max_state, 0);
+	spin_lock_irqsave(&task_concurrent_active_time_lock, flags);
+	p->concurrent_active_time = NULL;
+	spin_unlock_irqrestore(&task_concurrent_active_time_lock, flags);
+	spin_lock_irqsave(&task_concurrent_policy_time_lock, flags);
+	p->concurrent_policy_time = NULL;
+	spin_unlock_irqrestore(&task_concurrent_policy_time_lock, flags);
+}
+
+void cpufreq_task_stats_alloc(struct task_struct *p)
+{
+	size_t alloc_size;
+	void *temp;
+	unsigned long flags;
 
 	if (!cpufreq_stats_initialized)
 		return;
@@ -1008,6 +1018,13 @@ static int process_notifier(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
+void cpufreq_task_stats_free(struct task_struct *p)
+{
+	kfree(p->time_in_state);
+	kfree(p->concurrent_active_time);
+	kfree(p->concurrent_policy_time);
+}
+
 static const struct seq_operations uid_time_in_state_seq_ops = {
 	.start = uid_seq_start,
 	.next = uid_seq_next,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index d89bbfdb70d4a67e14c93eaa46e5fb5d4b2c6717..0a035ccb7585c77af9b399b8c95671d123c48f89 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -717,6 +717,8 @@ unsigned long cpufreq_scale_max_freq_capacity(int cpu);
 
 void acct_update_power(struct task_struct *p, cputime_t cputime);
 void cpufreq_task_stats_init(struct task_struct *p);
+void cpufreq_task_stats_alloc(struct task_struct *p);
+void cpufreq_task_stats_free(struct task_struct *p);
 void cpufreq_task_stats_remove_uids(uid_t uid_start, uid_t uid_end);
 int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
 	struct pid *pid, struct task_struct *p);
@@ -729,6 +731,8 @@ int single_uid_time_in_state_open(struct inode *inode, struct file *file);
 
 static inline void acct_update_power(struct task_struct *p, cputime_t cputime) {}
 static inline void cpufreq_task_stats_init(struct task_struct *p) {}
+static inline void cpufreq_task_stats_alloc(struct task_struct *p) {}
+static inline void cpufreq_task_stats_free(struct task_struct *p) {}
 static inline void cpufreq_task_stats_exit(struct task_struct *p) {}
 static inline void cpufreq_task_stats_remove_uids(uid_t uid_start,
 	uid_t uid_end) {}
diff --git a/kernel/fork.c b/kernel/fork.c
index 2c3077d1890bc24a4fe8955cb9f013ac99d8cfed..eeeb410abe3882ae044ba192e7753df93f1a49b8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -78,6 +78,7 @@
 #include <linux/compiler.h>
 #include <linux/sysctl.h>
 #include <linux/kcov.h>
+#include <linux/cpufreq.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -227,6 +228,9 @@ static void account_kernel_stack(unsigned long *stack, int account)
 
 void free_task(struct task_struct *tsk)
 {
+#ifdef CONFIG_CPU_FREQ_STAT
+	cpufreq_task_stats_free(tsk);
+#endif
 	account_kernel_stack(tsk->stack, -1);
 	arch_release_thread_stack(tsk->stack);
 	free_thread_stack(tsk->stack);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 414a36a7bcb774d8e5aececdd7a2ed7c27729736..7004fb5e92968ebbd5d1806840228cc4f086a37f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2179,6 +2179,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
+#ifdef CONFIG_CPU_FREQ_STAT
+	cpufreq_task_stats_init(p);
+#endif
+
 	RB_CLEAR_NODE(&p->dl.rb_node);
 	init_dl_task_timer(&p->dl);
 	__dl_clear_params(p);
@@ -2256,11 +2260,12 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	unsigned long flags;
 	int cpu = get_cpu();
 
+	__sched_fork(clone_flags, p);
+
 #ifdef CONFIG_CPU_FREQ_STAT
-	cpufreq_task_stats_init(p);
+	cpufreq_task_stats_alloc(p);
 #endif
 
-	__sched_fork(clone_flags, p);
 	/*
	 * We mark the process as running here. This guarantees that
	 * nobody will actually run it, and a signal or other external