diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index d1a3e6194817d07fdfd7d1e715294476ece9ff45..e6ae480b2bb9a5d97e88408a7d485e74c678eaea 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -25,6 +25,12 @@ enum arm_pmu_type {
 	ARM_NUM_PMU_DEVICES,
 };
 
+enum arm_pmu_state {
+	ARM_PMU_STATE_OFF = 0,
+	ARM_PMU_STATE_GOING_DOWN,
+	ARM_PMU_STATE_RUNNING,
+};
+
 /*
  * struct arm_pmu_platdata - ARM PMU platform data
  *
@@ -115,6 +121,7 @@ struct arm_pmu {
 	cpumask_t	active_irqs;
 	const char	*name;
 	int		num_events;
+	int		pmu_state;
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;
 	u64		max_period;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index aed4443011a5eb2074edcb604b0e0735146289b4..2f04348d62f07143ec836287883323387f3a3843 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -405,6 +405,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
 	int i, irq, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;
 
+	/*
+	 * If a cpu comes online during this function, do not enable its irq.
+	 * If a cpu goes offline, it should disable its irq.
+	 */
+	armpmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
+
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
 	for (i = 0; i < irqs; ++i) {
@@ -414,6 +420,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
 			armpmu->free_pmu_irq(irq);
 	}
 
+	armpmu->pmu_state = ARM_PMU_STATE_OFF;
+
 	release_pmu(armpmu->type);
 }
 
@@ -485,6 +493,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 		cpumask_set_cpu(i, &armpmu->active_irqs);
 	}
 
+	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
 	return 0;
 }
 
@@ -782,50 +791,41 @@ static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
 				    unsigned long action, void *hcpu)
 {
 	int irq;
+	unsigned long masked_action = action & ~CPU_TASKS_FROZEN;
+	int ret = NOTIFY_DONE;
 
-	if (cpu_has_active_perf()) {
-		switch ((action & ~CPU_TASKS_FROZEN)) {
-
-		case CPU_DOWN_PREPARE:
-			/*
-			 * If this is on a multicore CPU, we need
-			 * to disarm the PMU IRQ before disappearing.
-			 */
-			if (cpu_pmu &&
-			    cpu_pmu->plat_device->dev.platform_data) {
+	if ((masked_action != CPU_DOWN_PREPARE) &&
+	    (masked_action != CPU_STARTING))
+		return NOTIFY_DONE;
+
+	if (masked_action == CPU_STARTING)
+		ret = NOTIFY_OK;
+
+	switch (masked_action) {
+	case CPU_DOWN_PREPARE:
+		if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
+			/* Disarm the PMU IRQ before disappearing. */
+			if (cpu_pmu->plat_device) {
 				irq = platform_get_irq(cpu_pmu->plat_device, 0);
-				smp_call_function_single((int)hcpu,
-						disable_irq_callback, &irq, 1);
+				smp_call_function_single((int)hcpu,
+						disable_irq_callback, &irq, 1);
 			}
-			return NOTIFY_DONE;
+		}
+		break;
 
-		case CPU_UP_PREPARE:
-			/*
-			 * If this is on a multicore CPU, we need
-			 * to arm the PMU IRQ before appearing.
-			 */
-			if (cpu_pmu &&
-			    cpu_pmu->plat_device->dev.platform_data) {
+	case CPU_STARTING:
+		/* Reset PMU to clear counters for ftrace buffer. */
+		if (cpu_pmu->reset)
+			cpu_pmu->reset(NULL);
+		if (cpu_pmu->pmu_state == ARM_PMU_STATE_RUNNING) {
+			/* Arm the PMU IRQ before appearing. */
+			if (cpu_pmu->plat_device) {
 				irq = platform_get_irq(cpu_pmu->plat_device, 0);
-				smp_call_function_single((int)hcpu,
-						enable_irq_callback, &irq, 1);
+				enable_irq_callback(&irq);
 			}
-			return NOTIFY_DONE;
-
-		case CPU_STARTING:
-			if (cpu_pmu && cpu_pmu->reset) {
-				cpu_pmu->reset(NULL);
-				return NOTIFY_OK;
-			}
-		default:
-			return NOTIFY_DONE;
 		}
+		break;
 	}
-
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
-	return NOTIFY_OK;
+	return ret;
 }
 
 static void armpmu_update_counters(void)
diff --git a/arch/arm/mach-msm/pmu.c b/arch/arm/mach-msm/pmu.c
index 8d65eea345e5bd45e9c35fabb37d6d1f77682cef..2b2de6b38eceb60be019d04c0edd802f89f40e14 100644
--- a/arch/arm/mach-msm/pmu.c
+++ b/arch/arm/mach-msm/pmu.c
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/irq.h>
 #include <linux/platform_device.h>
 #include <asm/pmu.h>
 #include <mach/irqs.h>
@@ -41,12 +42,12 @@ static void multicore_free_irq(int irq)
 {
 	int cpu;
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (irq >= 0) {
-		for_each_cpu(cpu, cpu_online_mask) {
+	if ((irq >= 0) && desc) {
+		for_each_cpu(cpu, desc->percpu_enabled)
 			smp_call_function_single(cpu, disable_irq_callback,
 						 &irq, 1);
-		}
 		free_percpu_irq(irq, &pmu_irq_cookie);
 	}
 }
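
To make the lifecycle introduced by this patch easier to follow, here is a minimal standalone C sketch of the notifier decision logic against the three pmu_state values. The state names and notifier actions mirror the patch, but the stub functions and the main() harness are hypothetical, added purely for illustration; this is not kernel code.

/*
 * Standalone sketch of the PMU hotplug state machine added by this
 * patch. The state values and action names mirror the kernel code;
 * the stubs and main() are a hypothetical test harness.
 */
#include <stdio.h>

enum arm_pmu_state {
	ARM_PMU_STATE_OFF = 0,
	ARM_PMU_STATE_GOING_DOWN,
	ARM_PMU_STATE_RUNNING,
};

/* Simplified stand-ins for the hotplug notifier constants. */
enum { CPU_DOWN_PREPARE, CPU_STARTING };
enum { NOTIFY_DONE, NOTIFY_OK };

static int pmu_state = ARM_PMU_STATE_OFF;

static void disable_pmu_irq(int cpu) { printf("cpu%d: irq disarmed\n", cpu); }
static void enable_pmu_irq(int cpu)  { printf("cpu%d: irq armed\n", cpu); }
static void reset_pmu(int cpu)       { printf("cpu%d: counters reset\n", cpu); }

/* Mirrors the decision logic of the reworked pmu_cpu_notify(). */
static int pmu_cpu_notify(int action, int cpu)
{
	switch (action) {
	case CPU_DOWN_PREPARE:
		/* Disarm the IRQ unless the PMU is already fully off. */
		if (pmu_state != ARM_PMU_STATE_OFF)
			disable_pmu_irq(cpu);
		return NOTIFY_DONE;
	case CPU_STARTING:
		/*
		 * Always clear the counters; arm the IRQ only while
		 * RUNNING, so a CPU arriving during teardown
		 * (GOING_DOWN) stays quiet.
		 */
		reset_pmu(cpu);
		if (pmu_state == ARM_PMU_STATE_RUNNING)
			enable_pmu_irq(cpu);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

int main(void)
{
	pmu_state = ARM_PMU_STATE_RUNNING;
	pmu_cpu_notify(CPU_STARTING, 1);      /* irq armed: PMU is running */

	pmu_state = ARM_PMU_STATE_GOING_DOWN; /* release_hardware() has begun */
	pmu_cpu_notify(CPU_STARTING, 2);      /* counters reset, irq NOT armed */

	pmu_state = ARM_PMU_STATE_OFF;
	pmu_cpu_notify(CPU_DOWN_PREPARE, 1);  /* nothing to disarm */
	return 0;
}

The key design point is the GOING_DOWN window: armpmu_release_hardware() enters it before freeing any IRQs, so a CPU that hotplugs in mid-teardown resets its counters but never arms an IRQ that is about to be freed.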
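The mach-msm hunk is the other half of the same fix: multicore_free_irq() now walks desc->percpu_enabled instead of cpu_online_mask, so only CPUs whose per-CPU IRQ was actually armed get a disable call. Below is a hedged sketch of that bookkeeping, with a plain per-CPU flag array standing in for the kernel's percpu_enabled cpumask; all names and the harness are illustrative, not kernel API.

/*
 * Illustrative sketch (not kernel API): an "enabled" flag per CPU
 * plays the role of desc->percpu_enabled, so teardown only touches
 * CPUs where the per-CPU IRQ was actually armed.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool irq_enabled[NR_CPUS]; /* stand-in for desc->percpu_enabled */

static void arm_pmu_irq(int cpu)
{
	irq_enabled[cpu] = true;
	printf("cpu%d: PMU irq armed\n", cpu);
}

/*
 * Mirrors the reworked multicore_free_irq(): walk the enabled mask,
 * not the online mask, so a CPU whose irq was never armed (e.g. one
 * that came online while the PMU was GOING_DOWN) is skipped instead
 * of receiving an unbalanced disable.
 */
static void free_pmu_irqs(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!irq_enabled[cpu])
			continue; /* the old online-mask walk would disable here */
		irq_enabled[cpu] = false;
		printf("cpu%d: PMU irq disarmed\n", cpu);
	}
}

int main(void)
{
	arm_pmu_irq(0);
	arm_pmu_irq(1); /* cpu2 and cpu3 are online but never armed */
	free_pmu_irqs();
	return 0;
}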