Skip to content
Snippets Groups Projects
Commit f8da52fb authored by Yabin Cui's avatar Yabin Cui
Browse files

Perf: Fix the condition to toggle PMU IRQ when CPUs are hotplugged.

Bug: 19863147

Refactor the interrupt disabling so that a CPU's PMU interrupt is
disabled when that CPU is hotplugged out — even if no perf events are
active on that CPU — as long as it still holds the PMU IRQ.

(partially cherry-picked from "Perf: interrupt disable without bringing cpus up")

Change-Id: I9253d6a3bfa51b4b71d3ca51d4c306dd49ca5ef7
parent fe2cff85
Branches
Tags
No related merge requests found
...@@ -25,6 +25,12 @@ enum arm_pmu_type { ...@@ -25,6 +25,12 @@ enum arm_pmu_type {
ARM_NUM_PMU_DEVICES, ARM_NUM_PMU_DEVICES,
}; };
/*
 * PMU lifecycle states, consulted by the CPU hotplug notifier to decide
 * whether the PMU IRQ should be toggled on a CPU going down or coming up.
 */
enum arm_pmu_state {
ARM_PMU_STATE_OFF = 0,		/* hardware released; no IRQ to arm on incoming CPUs */
ARM_PMU_STATE_GOING_DOWN,	/* release in progress; do not enable IRQs on CPUs coming online */
ARM_PMU_STATE_RUNNING,		/* events active; arm the PMU IRQ on CPUs coming online */
};
/* /*
* struct arm_pmu_platdata - ARM PMU platform data * struct arm_pmu_platdata - ARM PMU platform data
* *
...@@ -115,6 +121,7 @@ struct arm_pmu { ...@@ -115,6 +121,7 @@ struct arm_pmu {
cpumask_t active_irqs; cpumask_t active_irqs;
const char *name; const char *name;
int num_events; int num_events;
int pmu_state;
atomic_t active_events; atomic_t active_events;
struct mutex reserve_mutex; struct mutex reserve_mutex;
u64 max_period; u64 max_period;
......
...@@ -405,6 +405,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu) ...@@ -405,6 +405,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
int i, irq, irqs; int i, irq, irqs;
struct platform_device *pmu_device = armpmu->plat_device; struct platform_device *pmu_device = armpmu->plat_device;
/*
* If a cpu comes online during this function, do not enable its irq.
* If a cpu goes offline, it should disable its irq.
*/
armpmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
irqs = min(pmu_device->num_resources, num_possible_cpus()); irqs = min(pmu_device->num_resources, num_possible_cpus());
for (i = 0; i < irqs; ++i) { for (i = 0; i < irqs; ++i) {
...@@ -414,6 +420,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu) ...@@ -414,6 +420,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
armpmu->free_pmu_irq(irq); armpmu->free_pmu_irq(irq);
} }
armpmu->pmu_state = ARM_PMU_STATE_OFF;
release_pmu(armpmu->type); release_pmu(armpmu->type);
} }
...@@ -485,6 +493,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) ...@@ -485,6 +493,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
cpumask_set_cpu(i, &armpmu->active_irqs); cpumask_set_cpu(i, &armpmu->active_irqs);
} }
armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
return 0; return 0;
} }
...@@ -782,50 +791,41 @@ static int __cpuinit pmu_cpu_notify(struct notifier_block *b, ...@@ -782,50 +791,41 @@ static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
unsigned long action, void *hcpu) unsigned long action, void *hcpu)
{ {
int irq; int irq;
unsigned long masked_action = action & ~CPU_TASKS_FROZEN;
int ret = NOTIFY_DONE;
if (cpu_has_active_perf()) { if ((masked_action != CPU_DOWN_PREPARE) && (masked_action != CPU_STARTING)) {
switch ((action & ~CPU_TASKS_FROZEN)) { return NOTIFY_DONE;
}
if (masked_action == CPU_STARTING) {
ret = NOTIFY_OK;
}
switch (masked_action) {
case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE:
/* if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
* If this is on a multicore CPU, we need /* Disarm the PMU IRQ before disappearing. */
* to disarm the PMU IRQ before disappearing. if (cpu_pmu->plat_device) {
*/
if (cpu_pmu &&
cpu_pmu->plat_device->dev.platform_data) {
irq = platform_get_irq(cpu_pmu->plat_device, 0); irq = platform_get_irq(cpu_pmu->plat_device, 0);
smp_call_function_single((int)hcpu, smp_call_function_single((int)hcpu, disable_irq_callback, &irq, 1);
disable_irq_callback, &irq, 1);
} }
return NOTIFY_DONE;
case CPU_UP_PREPARE:
/*
* If this is on a multicore CPU, we need
* to arm the PMU IRQ before appearing.
*/
if (cpu_pmu &&
cpu_pmu->plat_device->dev.platform_data) {
irq = platform_get_irq(cpu_pmu->plat_device, 0);
smp_call_function_single((int)hcpu,
enable_irq_callback, &irq, 1);
} }
return NOTIFY_DONE; break;
case CPU_STARTING: case CPU_STARTING:
if (cpu_pmu && cpu_pmu->reset) { /* Reset PMU to clear counters for ftrace buffer. */
if (cpu_pmu->reset) {
cpu_pmu->reset(NULL); cpu_pmu->reset(NULL);
return NOTIFY_OK;
} }
default: if (cpu_pmu->pmu_state == ARM_PMU_STATE_RUNNING) {
return NOTIFY_DONE; /* Arm the PMU IRQ before appearing. */
if (cpu_pmu->plat_device) {
irq = platform_get_irq(cpu_pmu->plat_device, 0);
enable_irq_callback(&irq);
} }
} }
break;
if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) }
return NOTIFY_DONE; return ret;
return NOTIFY_OK;
} }
static void armpmu_update_counters(void) static void armpmu_update_counters(void)
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
* GNU General Public License for more details. * GNU General Public License for more details.
*/ */
#include <linux/irq.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <asm/pmu.h> #include <asm/pmu.h>
#include <mach/irqs.h> #include <mach/irqs.h>
...@@ -41,12 +42,12 @@ static void ...@@ -41,12 +42,12 @@ static void
multicore_free_irq(int irq) multicore_free_irq(int irq)
{ {
int cpu; int cpu;
struct irq_desc *desc = irq_to_desc(irq);
if (irq >= 0) { if ((irq >= 0) && desc) {
for_each_cpu(cpu, cpu_online_mask) { for_each_cpu(cpu, desc->percpu_enabled)
smp_call_function_single(cpu, smp_call_function_single(cpu,
disable_irq_callback, &irq, 1); disable_irq_callback, &irq, 1);
}
free_percpu_irq(irq, &pmu_irq_cookie); free_percpu_irq(irq, &pmu_irq_cookie);
} }
} }
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment