From dae4b9c16d218581bb759d4d8d1cbedae122b4c0 Mon Sep 17 00:00:00 2001
From: Praveen Chidambaram <pchidamb@codeaurora.org>
Date: Mon, 11 Mar 2013 14:50:06 -0600
Subject: [PATCH] Revert "msm: cpufreq: Remove cross-calling limitation"

This reverts commit be01a17afb6b6e85456217caa1ac11eb0b6674e1.

Re-introduce cross-calling in cpufreq to prevent race conditions in the
hardware when changing PLLs. A core could be in retention while another
core tries to set the frequency of the core in retention, causing a
change in PLL; when the core exits retention, the PLL mismatch could
hang the SPM state machine.

Change-Id: I99b454a0f146652f2d8a369f79f6d35748160dcb
Signed-off-by: Praveen Chidambaram <pchidamb@codeaurora.org>
---
 arch/arm/mach-msm/cpufreq.c | 54 +++++++++++++++++++++++++++++++++++--
 1 file changed, 52 insertions(+), 2 deletions(-)

diff --git a/arch/arm/mach-msm/cpufreq.c b/arch/arm/mach-msm/cpufreq.c
index 9d9ea5cb3c9b..06627263e071 100644
--- a/arch/arm/mach-msm/cpufreq.c
+++ b/arch/arm/mach-msm/cpufreq.c
@@ -32,6 +32,17 @@
 
 #include "acpuclock.h"
 
+struct cpufreq_work_struct {
+        struct work_struct work;
+        struct cpufreq_policy *policy;
+        struct completion complete;
+        int frequency;
+        int status;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_work_struct, cpufreq_work);
+static struct workqueue_struct *msm_cpufreq_wq;
+
 struct cpufreq_suspend_t {
         struct mutex suspend_mutex;
         int device_suspended;
@@ -78,6 +89,15 @@ static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq)
         return ret;
 }
 
+static void set_cpu_work(struct work_struct *work)
+{
+        struct cpufreq_work_struct *cpu_work =
+                container_of(work, struct cpufreq_work_struct, work);
+
+        cpu_work->status = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
+        complete(&cpu_work->complete);
+}
+
 static int msm_cpufreq_target(struct cpufreq_policy *policy,
                                 unsigned int target_freq,
                                 unsigned int relation)
@@ -86,11 +106,17 @@ static int msm_cpufreq_target(struct cpufreq_policy *policy,
         int index;
         struct cpufreq_frequency_table *table;
 
+        struct cpufreq_work_struct *cpu_work = NULL;
+        cpumask_var_t mask;
+
         if (!cpu_active(policy->cpu)) {
                 pr_info("cpufreq: cpu %d is not active.\n", policy->cpu);
                 return -ENODEV;
         }
 
+        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+                return -ENOMEM;
+
         mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
 
         if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) {
@@ -112,9 +138,27 @@ static int msm_cpufreq_target(struct cpufreq_policy *policy,
                 policy->cpu, target_freq, relation,
                 policy->min, policy->max, table[index].frequency);
 
-        ret = set_cpu_freq(policy, table[index].frequency);
+        cpu_work = &per_cpu(cpufreq_work, policy->cpu);
+        cpu_work->policy = policy;
+        cpu_work->frequency = table[index].frequency;
+        cpu_work->status = -ENODEV;
+
+        cpumask_clear(mask);
+        cpumask_set_cpu(policy->cpu, mask);
+        if (cpumask_equal(mask, &current->cpus_allowed)) {
+                ret = set_cpu_freq(cpu_work->policy, cpu_work->frequency);
+                goto done;
+        } else {
+                cancel_work_sync(&cpu_work->work);
+                INIT_COMPLETION(cpu_work->complete);
+                queue_work_on(policy->cpu, msm_cpufreq_wq, &cpu_work->work);
+                wait_for_completion(&cpu_work->complete);
+        }
+
+        ret = cpu_work->status;
 
 done:
+        free_cpumask_var(mask);
         mutex_unlock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
         return ret;
 }
@@ -197,6 +241,7 @@ static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
         int cur_freq;
         int index;
         struct cpufreq_frequency_table *table;
+        struct cpufreq_work_struct *cpu_work = NULL;
 
         table = cpufreq_frequency_get_table(policy->cpu);
         if (table == NULL)
@@ -225,7 +270,7 @@ static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
                         CPUFREQ_RELATION_H, &index) &&
                     cpufreq_frequency_table_target(policy, table, cur_freq,
                         CPUFREQ_RELATION_L, &index)) {
-                        pr_info("%s: cpu%d at invalid freq: %d\n", __func__,
+                        pr_info("cpufreq: cpu%d at invalid freq: %d\n",
                                 policy->cpu, cur_freq);
                         return -EINVAL;
                 }
@@ -246,6 +291,10 @@ static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
         policy->cpuinfo.transition_latency =
                 acpuclk_get_switch_time() * NSEC_PER_USEC;
 
+        cpu_work = &per_cpu(cpufreq_work, policy->cpu);
+        INIT_WORK(&cpu_work->work, set_cpu_work);
+        init_completion(&cpu_work->complete);
+
         return 0;
 }
 
@@ -333,6 +382,7 @@ static int __init msm_cpufreq_register(void)
                 per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
         }
 
+        msm_cpufreq_wq = create_workqueue("msm-cpufreq");
         register_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
         return cpufreq_register_driver(&msm_cpufreq_driver);
 
-- 
GitLab