From f8da52fb32981cddfe7b0d66180ea7fbda933284 Mon Sep 17 00:00:00 2001
From: Yabin Cui <yabinc@google.com>
Date: Thu, 28 May 2015 20:16:08 -0700
Subject: [PATCH] Perf: Fix the condition to toggle PMU IRQ when CPUs are
 hotplugged.

Bug: 19863147

Refactor the interrupt disabling so that the PMU interrupt is disabled
when a cpu is hotplugged out, even if there are no perf events on that
cpu, as long as the cpu still holds the PMU irq.

(partially cherry-picked from "Perf: interrupt disable without bringing cpus up")

Change-Id: I9253d6a3bfa51b4b71d3ca51d4c306dd49ca5ef7
---
 arch/arm/include/asm/pmu.h   |  7 ++++
 arch/arm/kernel/perf_event.c | 72 ++++++++++++++++++------------------
 arch/arm/mach-msm/pmu.c      |  7 ++--
 3 files changed, 47 insertions(+), 39 deletions(-)

diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index d1a3e6194817..e6ae480b2bb9 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -25,6 +25,12 @@ enum arm_pmu_type {
 	ARM_NUM_PMU_DEVICES,
 };
 
+enum arm_pmu_state {
+	ARM_PMU_STATE_OFF	= 0,
+	ARM_PMU_STATE_GOING_DOWN,
+	ARM_PMU_STATE_RUNNING,
+};
+
 /*
  * struct arm_pmu_platdata - ARM PMU platform data
  *
@@ -115,6 +121,7 @@ struct arm_pmu {
 	cpumask_t	active_irqs;
 	const char	*name;
 	int		num_events;
+	int		pmu_state;
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;
 	u64		max_period;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index aed4443011a5..2f04348d62f0 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -405,6 +405,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
 	int i, irq, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;
 
+	/*
+	 * If a cpu comes online during this function, do not enable its irq.
+	 * If a cpu goes offline, it should disable its irq.
+	 */
+	armpmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
+
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
 	for (i = 0; i < irqs; ++i) {
@@ -414,6 +420,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
 		armpmu->free_pmu_irq(irq);
 	}
 
+	armpmu->pmu_state = ARM_PMU_STATE_OFF;
+
 	release_pmu(armpmu->type);
 }
 
@@ -485,6 +493,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 
 		cpumask_set_cpu(i, &armpmu->active_irqs);
 	}
+	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
 
 	return 0;
 }
@@ -782,50 +791,41 @@ static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
 					unsigned long action, void *hcpu)
 {
 	int irq;
+	unsigned long masked_action = action & ~CPU_TASKS_FROZEN;
+	int ret = NOTIFY_DONE;
 
-	if (cpu_has_active_perf()) {
-		switch ((action & ~CPU_TASKS_FROZEN)) {
-
-		case CPU_DOWN_PREPARE:
-			/*
-			 * If this is on a multicore CPU, we need
-			 * to disarm the PMU IRQ before disappearing.
-			 */
-			if (cpu_pmu &&
-				cpu_pmu->plat_device->dev.platform_data) {
+	if ((masked_action != CPU_DOWN_PREPARE) && (masked_action != CPU_STARTING)) {
+		return NOTIFY_DONE;
+	}
+	if (masked_action == CPU_STARTING) {
+		ret = NOTIFY_OK;
+	}
+	switch (masked_action) {
+	case CPU_DOWN_PREPARE:
+		if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
+			/* Disarm the PMU IRQ before disappearing. */
+			if (cpu_pmu->plat_device) {
 				irq = platform_get_irq(cpu_pmu->plat_device, 0);
-				smp_call_function_single((int)hcpu,
-						disable_irq_callback, &irq, 1);
+				smp_call_function_single((int)hcpu, disable_irq_callback, &irq, 1);
 			}
-			return NOTIFY_DONE;
+		}
+		break;
 
-		case CPU_UP_PREPARE:
-			/*
-			 * If this is on a multicore CPU, we need
-			 * to arm the PMU IRQ before appearing.
-			 */
-			if (cpu_pmu &&
-				cpu_pmu->plat_device->dev.platform_data) {
+	case CPU_STARTING:
+		/* Reset PMU to clear counters for ftrace buffer. */
+		if (cpu_pmu->reset) {
+			cpu_pmu->reset(NULL);
+		}
+		if (cpu_pmu->pmu_state == ARM_PMU_STATE_RUNNING) {
+			/* Arm the PMU IRQ before appearing. */
+			if (cpu_pmu->plat_device) {
 				irq = platform_get_irq(cpu_pmu->plat_device, 0);
-				smp_call_function_single((int)hcpu,
-						enable_irq_callback, &irq, 1);
-			}
-			return NOTIFY_DONE;
-
-		case CPU_STARTING:
-			if (cpu_pmu && cpu_pmu->reset) {
-				cpu_pmu->reset(NULL);
-				return NOTIFY_OK;
+				enable_irq_callback(&irq);
 			}
-		default:
-			return NOTIFY_DONE;
 		}
+		break;
 	}
-
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
-	return NOTIFY_OK;
+	return ret;
 }
 
 static void armpmu_update_counters(void)
diff --git a/arch/arm/mach-msm/pmu.c b/arch/arm/mach-msm/pmu.c
index 8d65eea345e5..2b2de6b38ece 100644
--- a/arch/arm/mach-msm/pmu.c
+++ b/arch/arm/mach-msm/pmu.c
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/irq.h>
 #include <linux/platform_device.h>
 #include <asm/pmu.h>
 #include <mach/irqs.h>
@@ -41,12 +42,12 @@ static void
 multicore_free_irq(int irq)
 {
 	int cpu;
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (irq >= 0) {
-		for_each_cpu(cpu, cpu_online_mask) {
+	if ((irq >= 0) && desc) {
+		for_each_cpu(cpu, desc->percpu_enabled)
 			smp_call_function_single(cpu,
 					disable_irq_callback, &irq, 1);
-		}
 		free_percpu_irq(irq, &pmu_irq_cookie);
 	}
 }
-- 
GitLab