diff --git a/arch/arm/boot/dts/qcom/msm8226-720p-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8226-720p-mtp.dtsi
index 425a210c00a8b8c28e98164ec423707193e949b8..cc9b88f6d307937a442c0945017120e0bd01fbf3 100644
--- a/arch/arm/boot/dts/qcom/msm8226-720p-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8226-720p-mtp.dtsi
@@ -175,6 +175,10 @@
 
 	status = "ok";
 };
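+
+/* SDC3 hosts the SDIO WiFi module; its pads run at a fixed 1.8 V supply. */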
+&sdhc_3 {
+	qcom,sup-voltages = <1800 1800>;
+	status = "ok";
+};
 
 &spmi_bus {
 	qcom,pm8226@0 {
diff --git a/arch/arm/boot/dts/qcom/msm8226-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8226-pinctrl.dtsi
index b7c74f686b51e8639c9ac2d924943177634a1ac8..ffac3f9b121a305d5607d17c6c20d82bc4471c13 100644
--- a/arch/arm/boot/dts/qcom/msm8226-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8226-pinctrl.dtsi
@@ -136,5 +136,57 @@
 				drive-strength = <2>; /* 2 MA */
 			};
 		};
+		/* WiFi SDIO (SDC3) pad configuration */
+		pmx_sdc3_clk {
+			qcom,pins = <&gp 44>; /* TODO: confirm GPIO number */
+			qcom,num-grp-pins = <1>;
+			qcom,pin-func = <2>; /* SDC function for this pad */
+			label = "sdc3_clk";
+
+			sdc3_clk_on: clk_on {
+				bias-disable; /* no pull on CLK */
+				drive-strength = <16>; /* active: 2 mA to 16 mA, in 2 mA steps */
+			};
+
+			sdc3_clk_off: clk_off {
+				bias-disable;
+				drive-strength = <2>; /* idle: no card detected, suspend, runtime suspend */
+			};
+		};
+
+		pmx_sdc3_cmd {
+			qcom,pins = <&gp 43>;
+			qcom,num-grp-pins = <1>;
+			qcom,pin-func = <2>;
+			label = "sdc3_cmd";
+
+			sdc3_cmd_on: cmd_on {
+				bias-pull-up; /* pull-up on CMD */
+				drive-strength = <10>;
+			};
+
+			sdc3_cmd_off: cmd_off {
+				bias-pull-up;
+				drive-strength = <2>;
+			};
+		};
+
+		pmx_sdc3_dat {
+			qcom,pins = <&gp 39>, <&gp 40>, <&gp 41>, <&gp 42>; /* DAT3 DAT2 DAT1 DAT0 */
+			qcom,num-grp-pins = <4>;
+			qcom,pin-func = <2>;
+			label = "sdc3_dat";
+
+			sdc3_dat_on: dat_on {
+				bias-pull-up; /* pull-up on DAT */
+				drive-strength = <10>;
+			};
+
+			sdc3_dat_off: dat_off {
+				bias-pull-up;
+				drive-strength = <2>;
+			};
+		};
+
 	};
 };
diff --git a/arch/arm/boot/dts/qcom/msm8226.dtsi b/arch/arm/boot/dts/qcom/msm8226.dtsi
index 9491d0122ecf0636676895b7d0b030f13621d38f..c9a1e9d97dbbef69369ba0c68699faab475bebc0 100755
--- a/arch/arm/boot/dts/qcom/msm8226.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8226.dtsi
@@ -532,30 +532,6 @@
 		compatible = "qcom,adsp-loader";
 		qcom,adsp-state = <0>;
 	};
-
-	sound {
-		compatible = "qcom,msm8226-audio-tapan";
-		qcom,model = "msm8226-tapan-snd-card";
-		qcom,tapan-mclk-clk-freq = <9600000>;
-		qcom,prim-auxpcm-gpio-clk  = <&msmgpio 63 0>;
-		qcom,prim-auxpcm-gpio-sync = <&msmgpio 64 0>;
-		qcom,prim-auxpcm-gpio-din  = <&msmgpio 65 0>;
-		qcom,prim-auxpcm-gpio-dout = <&msmgpio 66 0>;
-		qcom,prim-auxpcm-gpio-set = "prim-gpio-prim";
-	};
-
-	sound-9302 {
-		compatible = "qcom,msm8226-audio-tapan";
-		qcom,model = "msm8226-tapan9302-snd-card";
-		qcom,tapan-mclk-clk-freq = <9600000>;
-		qcom,prim-auxpcm-gpio-clk  = <&msmgpio 63 0>;
-		qcom,prim-auxpcm-gpio-sync = <&msmgpio 64 0>;
-		qcom,prim-auxpcm-gpio-din  = <&msmgpio 65 0>;
-		qcom,prim-auxpcm-gpio-dout = <&msmgpio 66 0>;
-		qcom,prim-auxpcm-gpio-set = "prim-gpio-prim";
-		qcom,tapan-codec-9302;
-	};
-
 	qcom,msm-pcm {
 		compatible = "qcom,msm-pcm-dsp";
 		qcom,msm-pcm-dsp-id = <0>;
@@ -985,7 +961,11 @@
 			      2 &msmgpio 41 0x8>;
 		interrupt-names = "hc_irq", "pwr_irq", "sdiowakeup_irq";
 
-		status = "disabled";
+		pinctrl-names = "active", "sleep";
+		pinctrl-0 = <&sdc3_clk_on &sdc3_cmd_on &sdc3_dat_on>;
+		pinctrl-1 = <&sdc3_clk_off &sdc3_cmd_off &sdc3_dat_off>;
+
+		status = "ok";
 	};
 
 	spmi_bus: qcom,spmi@fc4c0000 {
diff --git a/arch/arm/configs/apq8026-lw_defconfig b/arch/arm/configs/apq8026-lw_defconfig
index 991021c2ef4f9ab8f7497c6ace7807db113df033..e0944653ed8fda6cbec46d62d37433c41159d0ef 100755
--- a/arch/arm/configs/apq8026-lw_defconfig
+++ b/arch/arm/configs/apq8026-lw_defconfig
@@ -174,9 +174,8 @@ CONFIG_NET_CLS_U32=y
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=y
 CONFIG_NET_CLS_ACT=y
-CONFIG_CFG80211=y
 CONFIG_NL80211_TESTMODE=y
-CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
 CONFIG_NFC_QNCI=y
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
@@ -212,10 +211,12 @@ CONFIG_PPPOPNS=y
 CONFIG_PPP_ASYNC=y
 CONFIG_PPP_SYNC_TTY=y
 CONFIG_USB_USBNET=y
-CONFIG_WCNSS_CORE=y
-CONFIG_WCNSS_CORE_PRONTO=y
-CONFIG_WCNSS_MEM_PRE_ALLOC=y
-CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+# CONFIG_WCNSS_CORE is not set
+# CONFIG_WCNSS_CORE_PRONTO is not set
+# CONFIG_WCNSS_MEM_PRE_ALLOC is not set
+# CONFIG_WCNSS_REGISTER_DUMP_ON_BITE is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
 CONFIG_INPUT_EVDEV=y
 CONFIG_INPUT_EVBUG=m
 CONFIG_KEYBOARD_GPIO=y
@@ -366,6 +367,15 @@ CONFIG_MMC_BLOCK_TEST=m
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
+CONFIG_WLAN=y
+CONFIG_BCMDHD=y
+# CONFIG_WIRELESS_EXT is not set
+CONFIG_CFG80211=y
+# CONFIG_DHD_USE_SCHED_SCAN is not set
+CONFIG_BROADCOM_WIFI_RESERVED_MEM=y
+CONFIG_BCM4343=y
+CONFIG_BCMDHD_SDIO=y
+# CONFIG_BCMDHD_PCIE is not set
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_TRIGGERS=y
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 7d14b80c3bcd5b107078b5e5cdf6696a947daffe..6661b17fee126e7b274528be6ec7a5fe21551b5e 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -71,3 +71,4 @@ obj-$(CONFIG_ARCH_MSM8974) += msm_mpmctr.o
 
 obj-$(CONFIG_ARCH_RANDOM) += early_random.o
 obj-$(CONFIG_PERFMAP) += perfmap.o
+obj-$(CONFIG_BCMDHD) += board-8026-wifi.o
diff --git a/arch/arm/mach-msm/board-8026-wifi.c b/arch/arm/mach-msm/board-8026-wifi.c
new file mode 100644
index 0000000000000000000000000000000000000000..8ee30680e86c35ae3be4af8e1859faa7cf325217
--- /dev/null
+++ b/arch/arm/mach-msm/board-8026-wifi.c
@@ -0,0 +1,399 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/skbuff.h>
+#include <linux/wlan_plat.h>
+#include <mach/gpio.h>
+#include <linux/gpio.h>
+
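+/*
+ * WL_REG_ON gates power to the WLAN core; WL_HOST_WAKE is the out-of-band
+ * wake interrupt from the chip to the host.
+ */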
+#define GPIO_WL_HOST_WAKE 66
+#define GPIO_WL_REG_ON 110
+
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+
+#define WLAN_STATIC_SCAN_BUF0		5
+#define WLAN_STATIC_SCAN_BUF1		6
+#define WLAN_STATIC_DHD_INFO_BUF	7
+#define WLAN_SCAN_BUF_SIZE		(64 * 1024)
+#define WLAN_DHD_INFO_BUF_SIZE	(16 * 1024)
+#define PREALLOC_WLAN_SEC_NUM		4
+#define PREALLOC_WLAN_BUF_NUM		160
+#define PREALLOC_WLAN_SECTION_HEADER	24
+
+#define WLAN_SECTION_SIZE_0	(PREALLOC_WLAN_BUF_NUM * 128)
+#define WLAN_SECTION_SIZE_1	(PREALLOC_WLAN_BUF_NUM * 128)
+#define WLAN_SECTION_SIZE_2	(PREALLOC_WLAN_BUF_NUM * 512)
+#define WLAN_SECTION_SIZE_3	(PREALLOC_WLAN_BUF_NUM * 1024)
+
+#define DHD_SKB_HDRSIZE			336
+#define DHD_SKB_1PAGE_BUFSIZE	((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_2PAGE_BUFSIZE	((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_4PAGE_BUFSIZE	((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
+
+#define WLAN_SKB_BUF_NUM	17
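+/* 17 preallocated skbs: 8 one-page, 8 two-page and 1 four-page buffer. */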
+
+static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM];
+
+struct wlan_mem_prealloc {
+	void *mem_ptr;
+	unsigned long size;
+};
+
+static struct wlan_mem_prealloc wlan_mem_array[PREALLOC_WLAN_SEC_NUM] = {
+	{NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER)},
+	{NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER)},
+	{NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER)},
+	{NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER)}
+};
+
+void *wlan_static_scan_buf0;
+void *wlan_static_scan_buf1;
+void *wlan_static_dhd_info_buf;
+
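+/*
+ * DHD calls this to obtain preallocated buffers: sections 0-3 map to
+ * wlan_mem_array, PREALLOC_WLAN_SEC_NUM returns the static skb pool, and
+ * sections 5-7 return the scan and DHD-info buffers.
+ */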
+static void *brcm_wlan_mem_prealloc(int section, unsigned long size)
+{
+	if (section == PREALLOC_WLAN_SEC_NUM)
+		return wlan_static_skb;
+
+	if (section == WLAN_STATIC_SCAN_BUF0)
+		return wlan_static_scan_buf0;
+
+	if (section == WLAN_STATIC_SCAN_BUF1)
+		return wlan_static_scan_buf1;
+
+	if (section == WLAN_STATIC_DHD_INFO_BUF) {
+		if (size > WLAN_DHD_INFO_BUF_SIZE) {
+			pr_err("request DHD_INFO size(%lu) is bigger than static size(%d).\n", size, WLAN_DHD_INFO_BUF_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_info_buf;
+	}
+
+	if ((section < 0) || (section > PREALLOC_WLAN_SEC_NUM))
+		return NULL;
+
+	if (wlan_mem_array[section].size < size)
+		return NULL;
+
+	return wlan_mem_array[section].mem_ptr;
+}
+
+static int brcm_init_wlan_mem(void)
+{
+	int i;
+	int j;
+
+	for (i = 0; i < 8; i++) {
+		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
+		if (!wlan_static_skb[i])
+			goto err_skb_alloc;
+	}
+
+	for (; i < 16; i++) {
+		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);
+		if (!wlan_static_skb[i])
+			goto err_skb_alloc;
+	}
+
+	wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);
+	if (!wlan_static_skb[i])
+		goto err_skb_alloc;
+
+	for (i = 0 ; i < PREALLOC_WLAN_SEC_NUM ; i++) {
+		wlan_mem_array[i].mem_ptr =
+				kmalloc(wlan_mem_array[i].size, GFP_KERNEL);
+
+		if (!wlan_mem_array[i].mem_ptr)
+			goto err_mem_alloc;
+	}
+
+	wlan_static_scan_buf0 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_scan_buf0)
+		goto err_mem_alloc;
+
+	wlan_static_scan_buf1 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_scan_buf1)
+		goto err_mem_alloc;
+
+	wlan_static_dhd_info_buf = kmalloc(WLAN_DHD_INFO_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_info_buf)
+		goto err_mem_alloc;
+
+	printk(KERN_INFO"%s: WIFI MEM Allocated\n", __func__);
+	return 0;
+
+ err_mem_alloc:
+	pr_err("Failed to mem_alloc for WLAN\n");
+	for (j = 0 ; j < i ; j++)
+		kfree(wlan_mem_array[j].mem_ptr);
+
+	i = WLAN_SKB_BUF_NUM;
+
+ err_skb_alloc:
+	pr_err("Failed to skb_alloc for WLAN\n");
+	for (j = 0 ; j < i ; j++)
+		dev_kfree_skb(wlan_static_skb[j]);
+
+	return -ENOMEM;
+}
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+
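+/* TLMM pad setup for the WLAN control GPIOs: pull-down, 2 mA drive. */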
+static unsigned config_gpio_wl_reg_on[] = {
+	GPIO_CFG(GPIO_WL_REG_ON, 0, GPIO_CFG_OUTPUT,
+		GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) };
+
+static unsigned config_gpio_wl_host_wake[] = {
+	GPIO_CFG(GPIO_WL_HOST_WAKE, 0, GPIO_CFG_INPUT,
+		GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) };
+
+
+int __init brcm_wifi_init_gpio(void)
+{
+	if (gpio_tlmm_config(config_gpio_wl_reg_on[0], GPIO_CFG_ENABLE))
+		printk(KERN_ERR "%s: Failed to configure GPIO"
+			" - WL_REG_ON\n", __func__);
+
+	if (gpio_request(GPIO_WL_REG_ON, "WL_REG_ON"))
+		printk(KERN_ERR "Failed to request gpio %d for WL_REG_ON\n",
+			GPIO_WL_REG_ON);
+
+	if (gpio_direction_output(GPIO_WL_REG_ON, 0))
+		printk(KERN_ERR "%s: WL_REG_ON  "
+			"failed to pull down\n", __func__);
+
+
+	if (gpio_tlmm_config(config_gpio_wl_host_wake[0], GPIO_CFG_ENABLE))
+		printk(KERN_ERR "%s: Failed to configure GPIO"
+			" - WL_HOST_WAKE\n", __func__);
+
+	if (gpio_request(GPIO_WL_HOST_WAKE, "WL_HOST_WAKE"))
+		printk(KERN_ERR "Failed to request gpio for WL_HOST_WAKE\n");
+
+	if (gpio_direction_input(GPIO_WL_HOST_WAKE))
+		printk(KERN_ERR "%s: failed to configure WL_HOST_WAKE as input\n",
+			__func__);
+
+
+	return 0;
+}
+
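+/* wifi_platform_data.set_power hook: drive WL_REG_ON to power the WLAN core. */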
+static int brcm_wlan_power(int onoff)
+{
+	pr_info("%s: power %s\n", __func__, onoff ? "on" : "off");
+	if (onoff) {
+		/*
+		if (gpio_request(GPIO_WL_REG_ON, "WL_REG_ON"))
+		{
+			printk("Failed to request for WL_REG_ON\n");
+		}*/
+		if (gpio_direction_output(GPIO_WL_REG_ON, 1)) {
+			printk(KERN_ERR "%s: WL_REG_ON  failed to pull up\n",
+				__func__);
+			return -EIO;
+		}
+	} else {
+		/*
+		if (gpio_request(GPIO_WL_REG_ON, "WL_REG_ON"))
+		{
+			printk("Failed to request for WL_REG_ON\n");
+		}
+		*/
+		if (gpio_direction_output(GPIO_WL_REG_ON, 0)) {
+			printk(KERN_ERR "%s: WL_REG_ON  failed to pull down\n",
+				__func__);
+			return -EIO;
+		}
+	}
+	return 0;
+}
+
+static int brcm_wlan_reset(int onoff)
+{
+  /*
+	gpio_set_value(GPIO_WLAN_ENABLE,
+			onoff ? GPIO_LEVEL_HIGH : GPIO_LEVEL_LOW);
+  */
+	return 0;
+}
+
+
+static int brcm_wifi_cd; /* WIFI virtual 'card detect' status */
+static void (*wifi_status_cb)(int card_present, void *dev_id);
+static void *wifi_status_cb_devid;
+
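+/*
+ * The SDHCI host driver registers its notifier here;
+ * brcm_wlan_set_carddetect() later invokes it to force a rescan of the slot.
+ */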
+int brcm_wifi_status_register(
+		void (*callback)(int card_present, void *dev_id),
+		void *dev_id)
+{
+	if (wifi_status_cb)
+		return -EAGAIN;
+	wifi_status_cb = callback;
+	wifi_status_cb_devid = dev_id;
+	pr_info("%s: callback %p, devid %p\n",
+		__func__, wifi_status_cb, wifi_status_cb_devid);
+	return 0;
+}
+
+/* Report the virtual card-detect state of the WiFi slot. */
+unsigned int brcm_wifi_status(struct device *dev)
+{
+	pr_debug("%s: status %d\n", __func__, brcm_wifi_cd);
+	return brcm_wifi_cd;
+}
+
+
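+/*
+ * wifi_platform_data.set_carddetect hook: record the virtual card-detect
+ * state and notify the SDHCI host so the MMC core rescans the bus.
+ */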
+static int brcm_wlan_set_carddetect(int val)
+{
+	pr_debug("%s: wifi_status_cb : %p, devid : %p, val : %d\n",
+		__func__, wifi_status_cb, wifi_status_cb_devid, val);
+	brcm_wifi_cd = val;
+	if (wifi_status_cb)
+		wifi_status_cb(val, wifi_status_cb_devid);
+	else
+		pr_warn("%s: nobody to notify\n", __func__);
+
+	/* msleep(200); wait for carddetect */
+	return 0;
+}
+
+/* Customized Locale table : OPTIONAL feature */
+#define WLC_CNTRY_BUF_SZ        4
+
+struct cntry_locales_custom {
+	char iso_abbrev[WLC_CNTRY_BUF_SZ];
+	char custom_locale[WLC_CNTRY_BUF_SZ];
+	int  custom_locale_rev;
+};
+
+static struct cntry_locales_custom brcm_wlan_translate_custom_table[] = {
+	/* Table should be filled out based
+	on custom platform regulatory requirement */
+	{"",   "XZ", 11},  /* Universal if Country code is unknown or empty */
+	{"AE", "AE", 1},
+	{"AR", "AR", 1},
+	{"AT", "AT", 1},
+	{"AU", "AU", 2},
+	{"BE", "BE", 1},
+	{"BG", "BG", 1},
+	{"BN", "BN", 1},
+	{"CA", "CA", 2},
+	{"CH", "CH", 1},
+	{"CY", "CY", 1},
+	{"CZ", "CZ", 1},
+	{"DE", "DE", 3},
+	{"DK", "DK", 1},
+	{"EE", "EE", 1},
+	{"ES", "ES", 1},
+	{"FI", "FI", 1},
+	{"FR", "FR", 1},
+	{"GB", "GB", 1},
+	{"GR", "GR", 1},
+	{"HR", "HR", 1},
+	{"HU", "HU", 1},
+	{"IE", "IE", 1},
+	{"IS", "IS", 1},
+	{"IT", "IT", 1},
+	{"JP", "JP", 5},
+	{"KR", "KR", 24},
+	{"KW", "KW", 1},
+	{"LI", "LI", 1},
+	{"LT", "LT", 1},
+	{"LU", "LU", 1},
+	{"LV", "LV", 1},
+	{"MA", "MA", 1},
+	{"MT", "MT", 1},
+	{"MX", "MX", 1},
+	{"NL", "NL", 1},
+	{"NO", "NO", 1},
+	{"PL", "PL", 1},
+	{"PT", "PT", 1},
+	{"PY", "PY", 1},
+	{"RO", "RO", 1},
+	{"RU", "RU", 5},
+	{"SE", "SE", 1},
+	{"SG", "SG", 4},
+	{"SI", "SI", 1},
+	{"SK", "SK", 1},
+	{"TR", "TR", 7},
+	{"TW", "TW", 2},
+	{"US", "US", 46}
+};
+
+
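+/*
+ * Map an ISO country code onto the Broadcom locale used by the firmware;
+ * unknown or empty codes fall back to the universal "XZ" entry.
+ */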
+static void *brcm_wlan_get_country_code(char *ccode)
+{
+	int size = ARRAY_SIZE(brcm_wlan_translate_custom_table);
+	int i;
+
+	if (!ccode)
+		return NULL;
+
+	for (i = 0; i < size; i++)
+		if (strcmp(ccode,
+		brcm_wlan_translate_custom_table[i].iso_abbrev) == 0)
+			return &brcm_wlan_translate_custom_table[i];
+	return &brcm_wlan_translate_custom_table[0];
+}
+
+
+static struct resource brcm_wlan_resources[] = {
+	[0] = {
+		.name	= "bcmdhd_wlan_irq",
+		//.start	= MSM_GPIO_TO_INT(GPIO_WL_HOST_WAKE),
+		//.end	= MSM_GPIO_TO_INT(GPIO_WL_HOST_WAKE),
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE
+			| IORESOURCE_IRQ_HIGHLEVEL,
+	},
+};
+
+static struct wifi_platform_data brcm_wlan_control = {
+	.set_power	= brcm_wlan_power,
+	.set_reset	= brcm_wlan_reset,
+	.set_carddetect	= brcm_wlan_set_carddetect,
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+	.mem_prealloc	= brcm_wlan_mem_prealloc,
+#endif
+	.get_country_code = brcm_wlan_get_country_code,
+};
+
+static struct platform_device brcm_device_wlan = {
+	.name		= "bcmdhd_wlan",
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(brcm_wlan_resources),
+	.resource	= brcm_wlan_resources,
+	.dev		= {
+		.platform_data = &brcm_wlan_control,
+	},
+};
+
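+/*
+ * Configure the WLAN GPIOs, preallocate DHD memory, wire up the OOB wake
+ * IRQ resource and register the bcmdhd_wlan platform device.
+ */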
+int __init brcm_wlan_init(void)
+{
+	printk(KERN_INFO"%s: start\n", __func__);
+
+	brcm_wifi_init_gpio();
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+	brcm_init_wlan_mem();
+#endif
+
+	/* Route the out-of-band host-wake GPIO to the IRQ resource. */
+	brcm_wlan_resources[0].start = gpio_to_irq(GPIO_WL_HOST_WAKE);
+	brcm_wlan_resources[0].end = gpio_to_irq(GPIO_WL_HOST_WAKE);
+
+	/* Keep the WLAN core powered off until bcmdhd brings it up. */
+	brcm_wlan_power(0);
+
+	return platform_device_register(&brcm_device_wlan);
+}
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 762ae850fd7e5ab998824205f8129bfbdb5b9f82..0de3ed5e7ea785b786206180614367c1e4f6beeb 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -52,6 +52,9 @@
 #include "clock.h"
 #include "platsmp.h"
 
+#ifdef CONFIG_BCMDHD
+extern int brcm_wlan_init(void);
+#endif
 static struct of_dev_auxdata msm_hsic_host_adata[] = {
 	OF_DEV_AUXDATA("qcom,hsic-host", 0xF9A00000, "msm_hsic_host", NULL),
 	{}
@@ -118,6 +121,9 @@ void __init msm8226_init(void)
 
 	msm8226_init_gpiomux();
 	msm8226_add_drivers();
+#ifdef CONFIG_BCMDHD
+	(void)brcm_wlan_init();
+#endif
 }
 
 static const char *msm8226_dt_match[] __initconst = {
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0245faf0052abb9460587d1856c61e113fa0939e..ce4cb0553a6ad57ff60d8d3161e0ae28df614239 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2096,7 +2096,16 @@ void mmc_power_up(struct mmc_host *host)
 	 * This delay must be at least 74 clock sizes, or 1 ms, or the
 	 * time required to reach a stable voltage.
 	 */
-	mmc_delay(10);
+	/* Host index 2 is the SDIO (WiFi) slot; give it a longer power-up delay. */
+	if (host->index == 2)
+		mmc_delay(200);
+	else
+		mmc_delay(10);
 
 	/* Set signal voltage to 3.3V */
 	__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index ef4f9b7dfaad8182232781963ec153bd94a90af2..462c07db9d1b40aacd24d20c138cf9c5fc89d464 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2937,6 +2937,15 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
 		msm_host->use_cdclp533 = true;
 }
 
+#ifdef CONFIG_BCMDHD
+extern int brcm_wifi_status_register(
+		void (*callback)(int card_present, void *dev_id), void *dev_id);
+
+/* Invoked by the BCMDHD platform code when it toggles its virtual card detect. */
+static void sdcc_status_notify_cb(int card_present, void *dev_id)
+{
+	struct sdhci_host *host = (struct sdhci_host *)dev_id;
+
+	mmc_detect_change(host->mmc, 0);
+}
+#endif
+
 static int sdhci_msm_probe(struct platform_device *pdev)
 {
 	struct sdhci_host *host;
@@ -3261,6 +3270,12 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 			spin_unlock_irqrestore(&host->lock, flags);
 		}
 	}
+#ifdef CONFIG_BCMDHD
+	/* Hook the WiFi card-detect notifier onto the SDC3 (WLAN) slot. */
+	if (!strcmp("msm_sdcc.3", dev_name(&pdev->dev)))
+		brcm_wifi_status_register(sdcc_status_notify_cb, host);
+#endif
 
 	ret = sdhci_add_host(host);
 	if (ret) {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 629eed873273d3f29f9f59fce29bb53fa1416774..870679dada3d68f75f14cb404ecc21d6d74c60f6 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3654,10 +3654,17 @@ int sdhci_add_host(struct sdhci_host *host)
 	 * Enable polling on when card detection is broken and no card detect
 	 * gpio is present.
 	 */
+
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
 	    !(host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
 	    (mmc_gpio_get_cd(host->mmc) < 0))
-		mmc->caps |= MMC_CAP_NEEDS_POLL;
+	{
+		/* Do not poll the WiFi slot; BCMDHD signals card detect itself. */
+		if (strcmp("msm_sdcc.3", host->hw_name))
+			mmc->caps |= MMC_CAP_NEEDS_POLL;
+	}
 
 	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
 	host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc");
@@ -3787,9 +3794,19 @@ int sdhci_add_host(struct sdhci_host *host)
 				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
 		}
 	}
+
+	/* Ported from QCT: advertise all voltage ranges on the WiFi slot. */
+	if (!strcmp(host->hw_name, "msm_sdcc.3"))
+		caps[0] |= SDHCI_CAN_VDD_330 | SDHCI_CAN_VDD_300 |
+			   SDHCI_CAN_VDD_180;
 
 	if (caps[0] & SDHCI_CAN_VDD_330) {
 		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
+		/* The WiFi slot does not support UHS modes. */
+		if (!strcmp(host->hw_name, "msm_sdcc.3"))
+			mmc->caps &= ~(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+				       MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+				       MMC_CAP_UHS_DDR50);
 
 		mmc->max_current_330 = ((max_current_caps &
 				   SDHCI_MAX_CURRENT_330_MASK) >>
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 2526160ecc70bfb1a5fced9275864bc9924a71ff..e656d33dd7b9d9dee2a53016ef6f2a3af14f7017 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -355,6 +355,7 @@ config CLD_LL_CORE
 
 source "drivers/net/wireless/ath/Kconfig"
 source "drivers/net/wireless/b43/Kconfig"
+source "drivers/net/wireless/bcmdhd/Kconfig"
 source "drivers/net/wireless/b43legacy/Kconfig"
 source "drivers/net/wireless/brcm80211/Kconfig"
 source "drivers/net/wireless/hostap/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 41bfa9120eff8d81c9a58e8cdb6d3559133efb36..798e1bf1332c987fca5bec911c813a3926dba20f 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -61,3 +61,4 @@ obj-$(CONFIG_BRCMSMAC)	+= brcm80211/
 obj-$(CONFIG_LIBRA_SDIOIF)	+= libra/
 obj-$(CONFIG_WCNSS_CORE)	+= wcnss/
 obj-$(CONFIG_CNSS)		+= cnss/
+obj-$(CONFIG_BCMDHD)	+= bcmdhd/
diff --git a/drivers/net/wireless/bcmdhd/Kconfig b/drivers/net/wireless/bcmdhd/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..b5e991d1d06524580c2f26d62bfd1cfcb92b2daf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Kconfig
@@ -0,0 +1,104 @@
+config BCMDHD
+	tristate "Broadcom FullMAC wireless cards support"
+	---help---
+	  This module adds support for wireless adapters based on
+	  Broadcom FullMAC chipset.
+
+	  If you choose to build a module, it will be called bcmdhd. Say M
+	  if unsure.
+
+config BCM4330
+	tristate "Broadcom 4330 wireless cards support"
+	depends on WLAN
+	---help---
+	  This module adds support for wireless adapters based on
+	  Broadcom 4330 chipset.
+
+config BCM4334
+	tristate "Broadcom 4334 wireless cards support"
+	depends on WLAN
+	---help---
+	  This module adds support for wireless adapters based on
+	  Broadcom 4334 chipset.
+
+config BCM4335
+	tristate "Broadcom 4335 wireless cards support"
+	depends on WLAN
+	---help---
+	  This module adds support for wireless adapters based on
+	  Broadcom 4335 chipset.
+
+config BCM4339
+	tristate "Broadcom 4339 wireless cards support"
+	depends on WLAN
+	---help---
+	  This module adds support for wireless adapters based on
+	  Broadcom 4339 chipset.
+
+config BCM4354
+	tristate "Broadcom 4354 wireless cards support"
+	depends on WLAN
+	---help---
+	  This module adds support for wireless adapters based on
+	  Broadcom 4354 chipset.
+
+config BCM4343
+	tristate "Broadcom 4343 wireless cards support"
+	depends on WLAN
+	---help---
+	  This module adds support for wireless adapters based on
+	  Broadcom 4343 chipset.
+	  
+config BCMDHD_SDIO
+	bool "SDIO bus interface support"
+	depends on BCMDHD && MMC
+	default y
+
+config BCMDHD_PCIE
+	bool "PCIe bus interface support"
+	depends on BCMDHD && PCI && !BCMDHD_SDIO
+
+
+config BCMDHD_FW_PATH
+	depends on BCMDHD
+	string "Firmware path"
+	default "/system/vendor/bcm/fw/bcm43430_mfg.bin"
+	---help---
+	  Path to the firmware file.
+
+config BCMDHD_NVRAM_PATH
+	depends on BCMDHD
+	string "NVRAM path"
+	default "/system/vendor/bcm/nvram/bcm4343s.txt"
+	---help---
+	  Path to the calibration file.
+
+config BCMDHD_WEXT
+	bool "Enable WEXT support"
+	depends on BCMDHD && CFG80211 = n
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	---help---
+	  Enables WEXT support
+
+config DHD_USE_STATIC_BUF
+	bool "Enable memory preallocation"
+	depends on BCMDHD
+	default n
+	---help---
+	  Use memory preallocated in platform
+
+config DHD_USE_SCHED_SCAN
+	bool "Use CFG80211 sched scan"
+	depends on BCMDHD && CFG80211
+	default n
+	---help---
+	  Use CFG80211 sched scan
+	  
+config BROADCOM_WIFI_RESERVED_MEM
+	bool "Use static memory"
+	depends on BCMDHD 
+	default y
+	---help---
+	  Use static memory for bcmdhd
+
diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/net/wireless/bcmdhd/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..2294315711d0840f39dd41e9447f675ef09b3e13
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Makefile
@@ -0,0 +1,136 @@
+# bcmdhd
+#
+#
+#
+#
+#
+
+DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER               \
+	-DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE            \
+	-DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DBCMDBG -DCUSTOMER_HW2 -DWLP2P \
+	-DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT                                \
+	-DKEEP_ALIVE -DGET_CUSTOM_MAC_ENABLE -DPKT_FILTER_SUPPORT             \
+	-DEMBEDDED_PLATFORM -DPNO_SUPPORT          \
+	-DDHD_USE_IDLECOUNT -DSET_RANDOM_MAC_SOFTAP -DROAM_ENABLE -DVSDB      \
+	-DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST                            \
+	-DESCAN_RESULT_PATCH -DSUPPORT_PM2_ONLY -DWLTDLS                      \
+	-DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DRXFRAME_THREAD       \
+	-DMIRACAST_AMPDU_SIZE=8	-DROAM_ENABLE	-DWL_IFACE_COMB_NUM_CHANNELS	\
+	-Idrivers/net/wireless/bcmdhd -Idrivers/net/wireless/bcmdhd/include   \
+	-Idrivers/net/wireless/bcmdhd/common/include
+
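+# Objects common to all bus back-ends; SDIO/PCIe-specific objects are appended below.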
+DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \
+	dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o \
+	dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \
+	bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \
+	hnd_pktq.o hnd_pktpool.o
+
+obj-$(CONFIG_BCMDHD) += bcmdhd.o
+bcmdhd-objs += $(DHDOFILES)
+ifneq ($(CONFIG_WIRELESS_EXT),)
+bcmdhd-objs += wl_iw.o
+DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW
+endif
+ifneq ($(CONFIG_CFG80211),)
+bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o dhd_cfg80211.o wl_cfg_btcoex.o
+DHDCFLAGS += -DWL_CFG80211 -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF
+DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65
+DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15
+DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000
+DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=7
+endif
+ifneq ($(CONFIG_DHD_USE_SCHED_SCAN),)
+DHDCFLAGS += -DWL_SCHED_SCAN
+endif
+# For Static Buffer
+ifeq ($(CONFIG_BROADCOM_WIFI_RESERVED_MEM),y)
+  DHDCFLAGS += -DCONFIG_DHD_USE_STATIC_BUF
+endif
+
+
+# Prevent rx thread monopolize
+DHDCFLAGS += -DWAIT_DEQUEUE
+
+# Use Android wake lock mechanism
+DHDCFLAGS += -DCONFIG_HAS_WAKELOCK
+
+# SKB TAILPAD to avoid out of boundary memory access
+DHDCFLAGS += -DDHDENABLE_TAILPAD
+
+DHDCFLAGS += -DCUSTOM_DPC_CPUCORE=0
+DHDCFLAGS += -DCUSTOM_PSPRETEND_THR=30
+
+EXTRA_CFLAGS = $(DHDCFLAGS)
+ifeq ($(CONFIG_BCMDHD),m)
+EXTRA_LDFLAGS += --strip-debug
+else
+DHDCFLAGS += -DENABLE_INSMOD_NO_FW_LOAD
+endif
+
+#########################
+# Chip dependent feature
+#########################
+ifneq ($(CONFIG_BCM4339),)
+DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8 -DCUSTOM_RXCHAIN=1
+DHDCFLAGS += -DBCMSDIOH_TXGLOM -DCUSTOM_TXGLOM=1 
+DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=512
+DHDCFLAGS += -DDHDTCPACK_SUPPRESS
+DHDCFLAGS += -DUSE_WL_TXBF
+DHDCFLAGS += -DUSE_WL_FRAMEBURST
+DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=64
+DHDCFLAGS += -DPROP_TXSTATUS_VSDB
+DHDCFLAGS += -DREPEAT_READFRAME
+DHDCFLAGS += -DROAM_AP_ENV_DETECTION
+endif
+
+ifneq ($(CONFIG_BCM4343),)
+DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8 -DCUSTOM_RXCHAIN=1
+
+DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=512
+DHDCFLAGS += -DDISABLE_FLOW_CONTROL
+
+DHDCFLAGS += -DUSE_WL_FRAMEBURST
+DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=32
+DHDCFLAGS += -DPROP_TXSTATUS_VSDB
+
+DHDCFLAGS += -DREPEAT_READFRAME
+DHDCFLAGS += -DROAM_AP_ENV_DETECTION
+endif
+
+
+ifneq ($(CONFIG_BCM4334),)
+DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8 -DCUSTOM_RXCHAIN=1
+
+DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=512
+DHDCFLAGS += -DDISABLE_FLOW_CONTROL
+
+DHDCFLAGS += -DUSE_WL_FRAMEBURST
+DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=32
+DHDCFLAGS += -DPROP_TXSTATUS_VSDB
+
+DHDCFLAGS += -DREPEAT_READFRAME
+DHDCFLAGS += -DROAM_AP_ENV_DETECTION
+endif
+
+
+bcmdhd-$(CONFIG_BCMDHD_SDIO) += \
+		bcmsdh.o \
+		bcmsdh_linux.o \
+		bcmsdh_sdmmc.o \
+		bcmsdh_sdmmc_linux.o \
+		dhd_sdio.o \
+		dhd_cdc.o \
+		dhd_wlfc.o
+bcmdhd-$(CONFIG_BCMDHD_PCIE) += \
+		dhd_pcie.o \
+		dhd_pcie_linux.o \
+		dhd_msgbuf.o \
+		dhd_log.o \
+		circularbuf.o \
+		pcie_core.o
+ccflags-$(CONFIG_BCMDHD_SDIO) += \
+	-DSDTEST -DBDC -DDHD_BCMEVENTS -DPROP_TXSTATUS -DOOB_INTR_ONLY \
+	-DHW_OOB -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DSDIO_CRC_ERROR_FIX \
+	-DUSE_SDIOFIFO_IOVAR
+ccflags-$(CONFIG_BCMDHD_PCIE) += \
+	-DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1
diff --git a/drivers/net/wireless/bcmdhd/aiutils.c b/drivers/net/wireless/bcmdhd/aiutils.c
new file mode 100644
index 0000000000000000000000000000000000000000..2551911fbf2400f2795341a0fc3ae4326cc5b9d5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/aiutils.c
@@ -0,0 +1,1097 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: aiutils.c 467150 2014-04-02 17:30:43Z $
+ */
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+
+#include "siutils_priv.h"
+
+#define BCM47162_DMP() (0)
+#define BCM5357_DMP() (0)
+#define BCM4707_DMP() (0)
+#define PMU_DMP() (0)
+#define remap_coreid(sih, coreid)	(coreid)
+#define remap_corerev(sih, corerev)	(corerev)
+
+/* EROM parsing */
+
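+/*
+ * Fetch EROM entries, skipping invalid and non-matching ones, until an entry
+ * matching (mask, match) or the END marker is found.
+ */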
+static uint32
+get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
+{
+	uint32 ent;
+	uint inv = 0, nom = 0;
+
+	while (TRUE) {
+		ent = R_REG(si_osh(sih), *eromptr);
+		(*eromptr)++;
+
+		if (mask == 0)
+			break;
+
+		if ((ent & ER_VALID) == 0) {
+			inv++;
+			continue;
+		}
+
+		if (ent == (ER_END | ER_VALID))
+			break;
+
+		if ((ent & mask) == match)
+			break;
+
+		nom++;
+	}
+
+	SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
+	if (inv + nom) {
+		SI_VMSG(("  after %d invalid and %d non-matching entries\n", inv, nom));
+	}
+	return ent;
+}
+
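+/*
+ * Parse one address-space descriptor for slave port 'sp': fill in the 64-bit
+ * address and size and return the raw descriptor, or push the entry back and
+ * return 0 if it does not match.
+ */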
+static uint32
+get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
+        uint32 *sizel, uint32 *sizeh)
+{
+	uint32 asd, sz, szd;
+
+	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+	if (((asd & ER_TAG1) != ER_ADD) ||
+	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+	    ((asd & AD_ST_MASK) != st)) {
+		/* This is not what we want, "push" it back */
+		(*eromptr)--;
+		return 0;
+	}
+	*addrl = asd & AD_ADDR_MASK;
+	if (asd & AD_AG32)
+		*addrh = get_erom_ent(sih, eromptr, 0, 0);
+	else
+		*addrh = 0;
+	*sizeh = 0;
+	sz = asd & AD_SZ_MASK;
+	if (sz == AD_SZ_SZD) {
+		szd = get_erom_ent(sih, eromptr, 0, 0);
+		*sizel = szd & SD_SZ_MASK;
+		if (szd & SD_SG32)
+			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
+	} else
+		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+	SI_VMSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+	        sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+	return asd;
+}
+
+static void
+ai_hwfixup(si_info_t *sii)
+{
+}
+
+
+/* parse the enumeration rom to identify all cores */
+void
+ai_scan(si_t *sih, void *regs, uint devid)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	chipcregs_t *cc = (chipcregs_t *)regs;
+	uint32 erombase, *eromptr, *eromlim;
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+		break;
+
+	case PCI_BUS:
+		/* Set wrappers address */
+		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+		/* Now point the window at the erom */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+		eromptr = regs;
+		break;
+
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		eromptr = (uint32 *)(uintptr)erombase;
+		break;
+#endif	/* BCMSDIO */
+
+	case PCMCIA_BUS:
+	default:
+		SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
+		ASSERT(0);
+		return;
+	}
+	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
+	         regs, erombase, eromptr, eromlim));
+	while (eromptr < eromlim) {
+		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+		uint i, j, idx;
+		bool br;
+
+		br = FALSE;
+
+		/* Grok a component */
+		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+		if (cia == (ER_END | ER_VALID)) {
+			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
+			ai_hwfixup(sii);
+			return;
+		}
+
+		cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+		if ((cib & ER_TAG) != ER_CI) {
+			SI_ERROR(("CIA not followed by CIB\n"));
+			goto error;
+		}
+
+		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+#ifdef BCMDBG_SI
+		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
+		         "nsw = %d, nmp = %d & nsp = %d\n",
+		         mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp));
+#else
+		BCM_REFERENCE(crev);
+#endif
+
+		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
+			continue;
+		if ((nmw + nsw == 0)) {
+			/* A component which is not a core */
+			if (cid == OOB_ROUTER_CORE_ID) {
+				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+					&addrl, &addrh, &sizel, &sizeh);
+				if (asd != 0) {
+					sii->oob_router = addrl;
+				}
+			}
+			if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID &&
+				cid != PMU_CORE_ID && cid != GCI_CORE_ID)
+				continue;
+		}
+
+		idx = sii->numcores;
+
+		cores_info->cia[idx] = cia;
+		cores_info->cib[idx] = cib;
+		cores_info->coreid[idx] = remap_coreid(sih, cid);
+
+		for (i = 0; i < nmp; i++) {
+			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+			if ((mpd & ER_TAG) != ER_MP) {
+				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+				goto error;
+			}
+			SI_VMSG(("  Master port %d, mp: %d id: %d\n", i,
+			         (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+			         (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+		}
+
+		/* First Slave Address Descriptor should be port 0:
+		 * the main register space for the core
+		 */
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+		if (asd == 0) {
+			do {
+			/* Try again to see if it is a bridge */
+			asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd != 0)
+				br = TRUE;
+			else {
+					if (br == TRUE) {
+						break;
+					}
+					else if ((addrh != 0) || (sizeh != 0) ||
+						(sizel != SI_CORE_SIZE)) {
+						SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t sizel = "
+							"0x%x\n", addrh, sizeh, sizel));
+						SI_ERROR(("First Slave ASD for "
+							"core 0x%04x malformed "
+							"(0x%08x)\n", cid, asd));
+						goto error;
+					}
+				}
+			} while (1);
+		}
+		cores_info->coresba[idx] = addrl;
+		cores_info->coresba_size[idx] = sizel;
+		/* Get any more ASDs in port 0 */
+		j = 1;
+		do {
+			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
+				cores_info->coresba2[idx] = addrl;
+				cores_info->coresba2_size[idx] = sizel;
+			}
+			j++;
+		} while (asd != 0);
+
+		/* Go through the ASDs for other slave ports */
+		for (i = 1; i < nsp; i++) {
+			j = 0;
+			do {
+				asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+				              &sizel, &sizeh);
+
+				if (asd == 0)
+					break;
+				j++;
+			} while (1);
+			if (j == 0) {
+				SI_ERROR((" SP %d has no address descriptors\n", i));
+				goto error;
+			}
+		}
+
+		/* Now get master wrappers */
+		for (i = 0; i < nmw; i++) {
+			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for MW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if (i == 0)
+				cores_info->wrapba[idx] = addrl;
+		}
+
+		/* And finally slave wrappers */
+		for (i = 0; i < nsw; i++) {
+			uint fwp = (nsp == 1) ? 0 : 1;
+			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for SW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if ((nmw == 0) && (i == 0))
+				cores_info->wrapba[idx] = addrl;
+		}
+
+
+		/* Don't record bridges */
+		if (br)
+			continue;
+
+		/* Done with core */
+		sii->numcores++;
+	}
+
+	SI_ERROR(("Reached end of erom without finding END\n"));
+
+error:
+	sii->numcores = 0;
+	return;
+}
+
+#define AI_SETCOREIDX_MAPSIZE(coreid) \
+	(((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+void *
+ai_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 addr, wrap;
+	void *regs;
+
+	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
+		return (NULL);
+
+	addr = cores_info->coresba[coreidx];
+	wrap = cores_info->wrapba[coreidx];
+
+	/*
+	 * If the user has provided an interrupt mask enabled function,
+	 * then assert interrupts are disabled before switching the core.
+	 */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(addr,
+				AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		sii->curmap = regs = cores_info->regs[coreidx];
+		if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
+			cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
+		}
+		sii->curwrap = cores_info->wrappers[coreidx];
+		break;
+
+	case PCI_BUS:
+		/* point bar0 window */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
+		regs = sii->curmap;
+		/* point bar0 2nd 4KB window to the primary wrapper */
+		if (PCIE_GEN2(sii))
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
+		else
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
+		break;
+
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		sii->curmap = regs = (void *)((uintptr)addr);
+		sii->curwrap = (void *)((uintptr)wrap);
+		break;
+#endif	/* BCMSDIO */
+
+	case PCMCIA_BUS:
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	sii->curmap = regs;
+	sii->curidx = coreidx;
+
+	return regs;
+}
+
+
+void
+ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	chipcregs_t *cc = NULL;
+	uint32 erombase, *eromptr, *eromlim;
+	uint i, j, cidx;
+	uint32 cia, cib, nmp, nsp;
+	uint32 asd, addrl, addrh, sizel, sizeh;
+
+	for (i = 0; i < sii->numcores; i++) {
+		if (cores_info->coreid[i] == CC_CORE_ID) {
+			cc = (chipcregs_t *)cores_info->regs[i];
+			break;
+		}
+	}
+	if (cc == NULL)
+		goto error;
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+	cidx = sii->curidx;
+	cia = cores_info->cia[cidx];
+	cib = cores_info->cib[cidx];
+
+	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+	/* scan for cores */
+	while (eromptr < eromlim) {
+		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
+			(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
+			break;
+		}
+	}
+
+	/* skip master ports */
+	for (i = 0; i < nmp; i++)
+		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+
+	/* Skip ASDs in port 0 */
+	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+	if (asd == 0) {
+		/* Try again to see if it is a bridge */
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+		              &sizel, &sizeh);
+	}
+
+	j = 1;
+	do {
+		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+		              &sizel, &sizeh);
+		j++;
+	} while (asd != 0);
+
+	/* Go through the ASDs for other slave ports */
+	for (i = 1; i < nsp; i++) {
+		j = 0;
+		do {
+			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+				&sizel, &sizeh);
+			if (asd == 0)
+				break;
+
+			if (!asidx--) {
+				*addr = addrl;
+				*size = sizel;
+				return;
+			}
+			j++;
+		} while (1);
+
+		if (j == 0) {
+			SI_ERROR((" SP %d has no address descriptors\n", i));
+			break;
+		}
+	}
+
+error:
+	*size = 0;
+	return;
+}
+
+/* Return the number of address spaces in current core */
+int
+ai_numaddrspaces(si_t *sih)
+{
+	return 2;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+ai_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint cidx;
+
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return cores_info->coresba[cidx];
+	else if (asidx == 1)
+		return cores_info->coresba2[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+ai_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint cidx;
+
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return cores_info->coresba_size[cidx];
+	else if (asidx == 1)
+		return cores_info->coresba2_size[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+uint
+ai_flag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
+			__FUNCTION__));
+		return sii->curidx;
+	}
+
+#ifdef REROUTE_OOBINT
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
+			__FUNCTION__));
+		return PMU_OOB_BIT;
+	}
+#endif /* REROUTE_OOBINT */
+
+	ai = sii->curwrap;
+	ASSERT(ai != NULL);
+
+	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+uint
+ai_flag_alt(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
+			__FUNCTION__));
+		return sii->curidx;
+	}
+#ifdef REROUTE_OOBINT
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
+			__FUNCTION__));
+		return PMU_OOB_BIT;
+	}
+#endif /* REROUTE_OOBINT */
+
+	ai = sii->curwrap;
+
+	return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
+}
+
+void
+ai_setint(si_t *sih, int siflag)
+{
+}
+
+uint
+ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 *map = (uint32 *) sii->curwrap;
+
+	if (mask || val) {
+		uint32 w = R_REG(sii->osh, map+(offset/4));
+		w &= ~mask;
+		w |= val;
+		W_REG(sii->osh, map+(offset/4), w);
+	}
+
+	return (R_REG(sii->osh, map+(offset/4)));
+}
+
+uint
+ai_corevendor(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 cia;
+
+	cia = cores_info->cia[sii->curidx];
+	return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+uint
+ai_corerev(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 cib;
+
+
+	cib = cores_info->cib[sii->curidx];
+	return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+bool
+ai_iscoreup(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	ai = sii->curwrap;
+
+	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
+	        ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are at either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13 at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_REG(sii->osh, r) & ~mask) | val;
+		W_REG(sii->osh, r, w);
+	}
+
+	/* readback */
+	w = R_REG(sii->osh, r);
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			ai_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * returns the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+uint32 *
+ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	uint32 *r = NULL;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are at either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13 at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast)
+		return 0;
+
+	return (r);
+}
+
+void
+ai_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii = SI_INFO(sih);
+	volatile uint32 dummy;
+	uint32 status;
+	aidmp_t *ai;
+
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/* if core is already in reset, just return */
+	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
+		return;
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+	/* if pending backplane ops still, try waiting longer */
+	if (status != 0) {
+		/* 300usecs was sufficient to allow backplane ops to clear for big hammer */
+		/* during driver load we may need more time */
+		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
+		/* if still pending ops, continue on and try disable anyway */
+		/* this is in big hammer path, so don't call wl_reinit in this case... */
+	}
+
+	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	dummy = R_REG(sii->osh, &ai->resetctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	W_REG(sii->osh, &ai->ioctrl, bits);
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(10);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	volatile uint32 dummy;
+	uint loop_counter = 10;
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+	/* put core into reset state */
+	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	OSL_DELAY(10);
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+
+	W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+	while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
+		/* ensure there are no pending backplane operations */
+		SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+		/* take core out of reset */
+		W_REG(sii->osh, &ai->resetctrl, 0);
+
+		/* ensure there are no pending backplane operations */
+		SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+	}
+
+
+	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+}
+
+void
+ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+		          __FUNCTION__));
+		return;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+		          __FUNCTION__));
+		return;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return;
+	}
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+			__FUNCTION__));
+		return;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+}
+
+uint32
+ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return 0;
+	}
+
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+			__FUNCTION__));
+		return 0;
+	}
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+
+	return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32
+ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return 0;
+	}
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+			__FUNCTION__));
+		return 0;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+		W_REG(sii->osh, &ai->iostatus, w);
+	}
+
+	return R_REG(sii->osh, &ai->iostatus);
+}
+
+#if defined(BCMDBG_PHYDUMP)
+/* print interesting aidmp registers */
+void
+ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	osl_t *osh;
+	aidmp_t *ai;
+	uint i;
+
+	osh = sii->osh;
+
+	for (i = 0; i < sii->numcores; i++) {
+		si_setcoreidx(&sii->pub, i);
+		ai = sii->curwrap;
+
+		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);
+		if (BCM47162_DMP()) {
+			bcm_bprintf(b, "Skipping mips74k in 47162a0\n");
+			continue;
+		}
+		if (BCM5357_DMP()) {
+			bcm_bprintf(b, "Skipping usb20h in 5357\n");
+			continue;
+		}
+		if (BCM4707_DMP()) {
+			bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
+			continue;
+		}
+
+		if (PMU_DMP()) {
+			bcm_bprintf(b, "Skipping pmu core\n");
+			continue;
+		}
+
+		bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x"
+			    "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
+			    "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
+			    "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x"
+			    "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
+			    "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
+			    "intstatus 0x%x config 0x%x itcr 0x%x\n",
+			    R_REG(osh, &ai->ioctrlset),
+			    R_REG(osh, &ai->ioctrlclear),
+			    R_REG(osh, &ai->ioctrl),
+			    R_REG(osh, &ai->iostatus),
+			    R_REG(osh, &ai->ioctrlwidth),
+			    R_REG(osh, &ai->iostatuswidth),
+			    R_REG(osh, &ai->resetctrl),
+			    R_REG(osh, &ai->resetstatus),
+			    R_REG(osh, &ai->resetreadid),
+			    R_REG(osh, &ai->resetwriteid),
+			    R_REG(osh, &ai->errlogctrl),
+			    R_REG(osh, &ai->errlogdone),
+			    R_REG(osh, &ai->errlogstatus),
+			    R_REG(osh, &ai->errlogaddrlo),
+			    R_REG(osh, &ai->errlogaddrhi),
+			    R_REG(osh, &ai->errlogid),
+			    R_REG(osh, &ai->errloguser),
+			    R_REG(osh, &ai->errlogflags),
+			    R_REG(osh, &ai->intstatus),
+			    R_REG(osh, &ai->config),
+			    R_REG(osh, &ai->itcr));
+	}
+}
+#endif /* BCMDBG_PHYDUMP */
diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/net/wireless/bcmdhd/bcmevent.c
new file mode 100644
index 0000000000000000000000000000000000000000..6e219b88f3bfe5654ddb442b14c18a6dfc1f9a71
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmevent.c
@@ -0,0 +1,198 @@
+/*
+ * bcmevent read-only data shared by kernel or app layers
+ *
+ * $Copyright Open Broadcom Corporation$
+ * $Id: bcmevent.c 487838 2014-06-27 05:51:44Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+
+
+/* Table of event name strings for UIs and debugging dumps */
+typedef struct {
+	uint event;
+	const char *name;
+} bcmevent_name_str_t;
+
+/* Use the actual name for event tracing */
+#define BCMEVENT_NAME(_event) {(_event), #_event}
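+/* The '#' operator stringizes the macro argument, so for example
+ * BCMEVENT_NAME(WLC_E_LINK) expands to {(WLC_E_LINK), "WLC_E_LINK"}.
+ */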
+
+static const bcmevent_name_str_t bcmevent_names[] = {
+	BCMEVENT_NAME(WLC_E_SET_SSID),
+	BCMEVENT_NAME(WLC_E_JOIN),
+	BCMEVENT_NAME(WLC_E_START),
+	BCMEVENT_NAME(WLC_E_AUTH),
+	BCMEVENT_NAME(WLC_E_AUTH_IND),
+	BCMEVENT_NAME(WLC_E_DEAUTH),
+	BCMEVENT_NAME(WLC_E_DEAUTH_IND),
+	BCMEVENT_NAME(WLC_E_ASSOC),
+	BCMEVENT_NAME(WLC_E_ASSOC_IND),
+	BCMEVENT_NAME(WLC_E_REASSOC),
+	BCMEVENT_NAME(WLC_E_REASSOC_IND),
+	BCMEVENT_NAME(WLC_E_DISASSOC),
+	BCMEVENT_NAME(WLC_E_DISASSOC_IND),
+	BCMEVENT_NAME(WLC_E_QUIET_START),
+	BCMEVENT_NAME(WLC_E_QUIET_END),
+	BCMEVENT_NAME(WLC_E_BEACON_RX),
+	BCMEVENT_NAME(WLC_E_LINK),
+	BCMEVENT_NAME(WLC_E_MIC_ERROR),
+	BCMEVENT_NAME(WLC_E_NDIS_LINK),
+	BCMEVENT_NAME(WLC_E_ROAM),
+	BCMEVENT_NAME(WLC_E_TXFAIL),
+	BCMEVENT_NAME(WLC_E_PMKID_CACHE),
+	BCMEVENT_NAME(WLC_E_RETROGRADE_TSF),
+	BCMEVENT_NAME(WLC_E_PRUNE),
+	BCMEVENT_NAME(WLC_E_AUTOAUTH),
+	BCMEVENT_NAME(WLC_E_EAPOL_MSG),
+	BCMEVENT_NAME(WLC_E_SCAN_COMPLETE),
+	BCMEVENT_NAME(WLC_E_ADDTS_IND),
+	BCMEVENT_NAME(WLC_E_DELTS_IND),
+	BCMEVENT_NAME(WLC_E_BCNSENT_IND),
+	BCMEVENT_NAME(WLC_E_BCNRX_MSG),
+	BCMEVENT_NAME(WLC_E_BCNLOST_MSG),
+	BCMEVENT_NAME(WLC_E_ROAM_PREP),
+	BCMEVENT_NAME(WLC_E_PFN_NET_FOUND),
+	BCMEVENT_NAME(WLC_E_PFN_NET_LOST),
+#if defined(IBSS_PEER_DISCOVERY_EVENT)
+	BCMEVENT_NAME(WLC_E_IBSS_ASSOC),
+#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
+	BCMEVENT_NAME(WLC_E_RADIO),
+	BCMEVENT_NAME(WLC_E_PSM_WATCHDOG),
+#if defined(BCMCCX) && defined(CCX_SDK)
+	BCMEVENT_NAME(WLC_E_CCX_ASSOC_START),
+	BCMEVENT_NAME(WLC_E_CCX_ASSOC_ABORT),
+#endif /* BCMCCX && CCX_SDK */
+	BCMEVENT_NAME(WLC_E_PROBREQ_MSG),
+	BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND),
+	BCMEVENT_NAME(WLC_E_PSK_SUP),
+	BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED),
+	BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME),
+	BCMEVENT_NAME(WLC_E_ICV_ERROR),
+	BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR),
+	BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR),
+	BCMEVENT_NAME(WLC_E_TRACE),
+#ifdef WLBTAMP
+	BCMEVENT_NAME(WLC_E_BTA_HCI_EVENT),
+#endif
+	BCMEVENT_NAME(WLC_E_IF),
+#ifdef WLP2P
+	BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE),
+#endif
+	BCMEVENT_NAME(WLC_E_RSSI),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE),
+	BCMEVENT_NAME(WLC_E_EXTLOG_MSG),
+#ifdef WIFI_ACT_FRAME
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE),
+#endif
+#if 0 && (NDISVER >= 0x0620)
+	BCMEVENT_NAME(WLC_E_PRE_ASSOC_IND),
+	BCMEVENT_NAME(WLC_E_PRE_REASSOC_IND),
+	BCMEVENT_NAME(WLC_E_CHANNEL_ADOPTED),
+	BCMEVENT_NAME(WLC_E_AP_STARTED),
+	BCMEVENT_NAME(WLC_E_DFS_AP_STOP),
+	BCMEVENT_NAME(WLC_E_DFS_AP_RESUME),
+	BCMEVENT_NAME(WLC_E_ASSOC_IND_NDIS),
+	BCMEVENT_NAME(WLC_E_REASSOC_IND_NDIS),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX_NDIS),
+	BCMEVENT_NAME(WLC_E_AUTH_REQ),
+	BCMEVENT_NAME(WLC_E_IBSS_COALESCE),
+#endif /* 0 && (NDISVER >= 0x0620) */
+#ifdef BCMWAPI_WAI
+	BCMEVENT_NAME(WLC_E_WAI_STA_EVENT),
+	BCMEVENT_NAME(WLC_E_WAI_MSG),
+#endif /* BCMWAPI_WAI */
+	BCMEVENT_NAME(WLC_E_ESCAN_RESULT),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE),
+#ifdef WLP2P
+	BCMEVENT_NAME(WLC_E_PROBRESP_MSG),
+	BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG),
+#endif
+#ifdef PROP_TXSTATUS
+	BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP),
+#endif
+	BCMEVENT_NAME(WLC_E_WAKE_EVENT),
+	BCMEVENT_NAME(WLC_E_DCS_REQUEST),
+	BCMEVENT_NAME(WLC_E_RM_COMPLETE),
+#ifdef WLMEDIA_HTSF
+	BCMEVENT_NAME(WLC_E_HTSFSYNC),
+#endif
+	BCMEVENT_NAME(WLC_E_OVERLAY_REQ),
+	BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND),
+	BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
+#ifdef SOFTAP
+	BCMEVENT_NAME(WLC_E_GTK_PLUMBED),
+#endif
+	BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE),
+	BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE),
+	BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX),
+#ifdef WLTDLS
+	BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT),
+#endif /* WLTDLS */
+	BCMEVENT_NAME(WLC_E_NATIVE),
+#ifdef WLPKTDLYSTAT
+	BCMEVENT_NAME(WLC_E_PKTDELAY_IND),
+#endif /* WLPKTDLYSTAT */
+	BCMEVENT_NAME(WLC_E_SERVICE_FOUND),
+	BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX),
+	BCMEVENT_NAME(WLC_E_GAS_COMPLETE),
+	BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE),
+	BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE),
+#ifdef WLWNM
+	BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP),
+#endif /* WLWNM */
+#if defined(WL_PROXDETECT)
+	BCMEVENT_NAME(WLC_E_PROXD),
+#endif
+	BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL),
+	BCMEVENT_NAME(WLC_E_BSSID),
+#ifdef PROP_TXSTATUS
+	BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT),
+#endif
+	BCMEVENT_NAME(WLC_E_TXFAIL_THRESH),
+#ifdef WLAIBSS
+	BCMEVENT_NAME(WLC_E_AIBSS_TXFAIL),
+#endif /* WLAIBSS */
+#ifdef WLBSSLOAD_REPORT
+	BCMEVENT_NAME(WLC_E_BSS_LOAD),
+#endif
+#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW)
+	BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ),
+#endif
+#ifdef WLFBT
+	BCMEVENT_NAME(WLC_E_FBT_AUTH_REQ_IND),
+#endif /* WLFBT */
+	BCMEVENT_NAME(WLC_E_RMC_EVENT),
+};
+
+
+const char *bcmevent_get_name(uint event_type)
+{
+	/* Note: this was first coded as a static const, but some
+	 * ROMs already have a symbol called event_name, so it was
+	 * changed so we don't keep a variable for the
+	 * 'unknown' string.
+	 */
+	const char *event_name = NULL;
+
+	uint idx;
+	for (idx = 0; idx < (uint)ARRAYSIZE(bcmevent_names); idx++) {
+
+		if (bcmevent_names[idx].event == event_type) {
+			event_name = bcmevent_names[idx].name;
+			break;
+		}
+	}
+
+	/* if we find an event name in the array, return it.
+	 * otherwise return unknown string.
+	 */
+	return ((event_name) ? event_name : "Unknown Event");
+}
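+/* Illustrative (hypothetical) use from an event dispatch path:
+ *
+ *	printf("wl event %u (%s)\n", event_type, bcmevent_get_name(event_type));
+ *
+ * The lookup is a simple linear scan of bcmevent_names[], which is adequate for
+ * this debug/naming table.
+ */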
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/net/wireless/bcmdhd/bcmsdh.c
new file mode 100644
index 0000000000000000000000000000000000000000..f621572b7a797c4425e3238348ab5a264bbb675e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh.c
@@ -0,0 +1,694 @@
+/*
+ *  BCMSDH interface glue
+ *  implement bcmsdh API for SDIOH driver
+ *
+ * $ Copyright Open Broadcom Corporation $
+ *
+ * $Id: bcmsdh.c 450676 2014-01-22 22:45:13Z $
+ */
+
+/**
+ * @file bcmsdh.c
+ */
+
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <osl.h>
+
+#include <bcmsdh.h>	/* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h>	/* common SDIO/controller interface */
+#include <sbsdio.h>	/* SDIO device core hardware definitions. */
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT	2
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+/* local copy of bcm sd handler */
+bcmsdh_info_t * l_bcmsdh = NULL;
+
+#if 0 && (NDISVER < 0x0630)
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+	sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+/* Attach BCMSDH layer to SDIO Host Controller Driver
+ *
+ * @param osh OSL Handle.
+ * @param sdioh Handle to the lower-layer SDIO host controller (sdioh) context.
+ * @param regsva Returns the virtual address of the controller registers.
+ *
+ * @return bcmsdh_info_t Handle to BCMSDH context.
+ */
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva)
+{
+	bcmsdh_info_t *bcmsdh;
+
+	if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+		BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+	bcmsdh->sdioh = sdioh;
+	bcmsdh->osh = osh;
+	bcmsdh->init_success = TRUE;
+	*regsva = SI_ENUM_BASE;
+
+	/* Report the BAR, to fix if needed */
+	bcmsdh->sbwad = SI_ENUM_BASE;
+
+	/* save the handler locally */
+	l_bcmsdh = bcmsdh;
+
+	return bcmsdh;
+}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bcmsdh != NULL) {
+#if 0 && (NDISVER < 0x0630)
+		if (bcmsdh->sdioh)
+			sdioh_detach(osh, bcmsdh->sdioh);
+#endif
+		MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+	}
+
+	l_bcmsdh = NULL;
+
+	return 0;
+}
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+                void *params, int plen, void *arg, int len, bool set)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	bool on;
+
+	ASSERT(bcmsdh);
+	status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+	if (SDIOH_API_SUCCESS(status))
+		return FALSE;
+	else
+		return on;
+}
+
+int
+bcmsdh_intr_enable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_disable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG)
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	ASSERT(sdh);
+	return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	ASSERT(sdh);
+
+	/* don't support yet */
+	return BCME_UNSUPPORTED;
+}
+
+/**
+ * Read from SDIO Configuration Space
+ * @param sdh SDIO Host context.
+ * @param fnc_num Function number to read from.
+ * @param addr Address to read from.
+ * @param err Error return.
+ * @return value read from SDIO configuration space.
+ */
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+	uint8 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms until the bus settles down */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms until the bus settles down */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+}
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+	             addr, data));
+}
+
+
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	uint8 *tmp_buf, *tmp_ptr;
+	uint8 *ptr;
+	bool ascii = func & ~0xf;
+	func &= 0x7;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+	ASSERT(cis);
+	ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+	status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+	if (ascii) {
+		/* Move binary bits to tmp and format them into the provided buffer. */
+		if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
+			BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
+			return BCME_NOMEM;
+		}
+		bcopy(cis, tmp_buf, length);
+		for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
+			ptr += snprintf((char*)ptr, (cis + length - ptr - 4),
+				"%.2x ", *tmp_ptr & 0xff);
+			if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
+				ptr += snprintf((char *)ptr, (cis + length - ptr -4), "\n");
+		}
+		MFREE(bcmsdh->osh, tmp_buf, length);
+	}
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
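+/* Note on the 'ascii' path above: a caller may set bits above the low function-number
+ * nibble in 'func' to request that the CIS bytes just read be hex-formatted back into
+ * the caller's buffer (16 bytes per output line); the real function number is then
+ * recovered by masking with 0x7.
+ */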
+
+
+int
+bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set)
+{
+	int err = 0;
+	uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK;
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bar0 != bcmsdh->sbwad || force_set) {
+		bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+			(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+		if (!err)
+			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+				(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+		if (!err)
+			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+				(address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+
+		if (!err)
+			bcmsdh->sbwad = bar0;
+		else
+			/* invalidate cached window var */
+			bcmsdh->sbwad = 0;
+
+	}
+
+	return err;
+}
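+/* The function-1 register space is a sliding window into the chip backplane: the
+ * SBADDR{LOW,MID,HIGH} registers select the window base (the target address with the
+ * low SBSDIO_SB_OFT_ADDR_MASK bits cleared) and each access then supplies only the
+ * offset within that window.  bcmsdh_reg_read()/bcmsdh_reg_write() below cache the
+ * current base in 'sbwad' and reprogram the window only when it changes.
+ */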
+
+uint32
+bcmsdh_reg_read(void *sdh, uint32 addr, uint size)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 word = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))
+		return 0xFFFFFFFF;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+		SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+	/* if ok, return appropriately masked word */
+	if (SDIOH_API_SUCCESS(status)) {
+		switch (size) {
+			case sizeof(uint8):
+				return (word & 0xff);
+			case sizeof(uint16):
+				return (word & 0xffff);
+			case sizeof(uint32):
+				return word;
+			default:
+				bcmsdh->regfail = TRUE;
+
+		}
+	}
+
+	/* otherwise, bad sdio access or invalid size */
+	BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size));
+	return 0xFFFFFFFF;
+}
+
+uint32
+bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	int err = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+	             __FUNCTION__, addr, size*8, data));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+	                            addr, &data, size);
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	if (SDIOH_API_SUCCESS(status))
+		return 0;
+
+	BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+	              __FUNCTION__, data, addr, size));
+	return 0xFFFFFFFF;
+}
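+/* Note: as with bcmsdh_reg_read(), a failed word access is reported both through the
+ * 0xFFFFFFFF return value and through the 'regfail' flag queried via bcmsdh_regfail().
+ */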
+
+bool
+bcmsdh_regfail(void *sdh)
+{
+	return ((bcmsdh_info_t *)sdh)->regfail;
+}
+
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	             __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	            __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+	ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+	                              (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+	                              addr, 4, nbytes, buf, NULL);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_abort(bcmsdh->sdioh, fn);
+}
+
+int
+bcmsdh_start(void *sdh, int stage)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_start(bcmsdh->sdioh, stage);
+}
+
+int
+bcmsdh_stop(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_stop(bcmsdh->sdioh);
+}
+
+int
+bcmsdh_waitlockfree(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_waitlockfree(bcmsdh->sdioh);
+}
+
+
+int
+bcmsdh_query_device(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+	return (bcmsdh->vendevid);
+}
+
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (sdioh_query_iofnum(bcmsdh->sdioh));
+}
+
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_sdio_reset(bcmsdh->sdioh);
+}
+
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+	ASSERT(sdh);
+	return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD. */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+	return 0;
+}
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (bcmsdh->sbwad);
+}
+
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+	return;
+}
+
+
+int
+bcmsdh_sleep(void *sdh, bool enab)
+{
+#ifdef SDIOH_SLEEP_ENABLED
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_sleep(sd, enab);
+#else
+	return BCME_UNSUPPORTED;
+#endif
+}
+
+int
+bcmsdh_gpio_init(void *sdh)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpio_init(sd);
+}
+
+bool
+bcmsdh_gpioin(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioin(sd, gpio);
+}
+
+int
+bcmsdh_gpioouten(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioouten(sd, gpio);
+}
+
+int
+bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioout(sd, gpio, enab);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
new file mode 100644
index 0000000000000000000000000000000000000000..ad136bfc2f4690c0eecc1fa0c43569850e0c1bd7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
@@ -0,0 +1,447 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmsdh_linux.c 461444 2014-03-12 02:55:28Z $
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#if defined(CONFIG_ARCH_ODIN)
+#include <linux/platform_data/gpio-odin.h>
+#endif /* defined(CONFIG_ARCH_ODIN) */
+#include <dhd_linux.h>
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL};
+
+typedef enum {
+	DHD_INTR_INVALID = 0,
+	DHD_INTR_INBAND,
+	DHD_INTR_HWOOB,
+	DHD_INTR_SWOOB
+} DHD_HOST_INTR_TYPE;
+
+/* The BCMSDH module comprises the generic part (bcmsdh.c) and an OS-specific layer (e.g.
+ * bcmsdh_linux.c). Put all OS-specific variables (e.g. IRQ number and flags) here rather
+ * than in the common structure bcmsdh_info; bcmsdh_info only keeps a handle (os_cxt) to this
+ * structure.
+ */
+typedef struct bcmsdh_os_info {
+	DHD_HOST_INTR_TYPE	intr_type;
+	int			oob_irq_num;	/* valid when hardware or software oob in use */
+	unsigned long		oob_irq_flags;	/* valid when hardware or software oob in use */
+	bool			oob_irq_registered;
+	bool			oob_irq_enabled;
+	bool			oob_irq_wake_enabled;
+	spinlock_t		oob_irq_spinlock;
+	bcmsdh_cb_fn_t		oob_irq_handler;
+	void			*oob_irq_handler_context;
+	void			*context;	/* context returned from upper layer */
+	void			*sdioh;		/* handle to lower layer (sdioh) */
+	void			*dev;		/* handle to the underlying device */
+	bool			dev_wake_enabled;
+} bcmsdh_os_info_t;
+
+/* debugging macros */
+#define SDLX_MSG(x)
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+	/* Add other vendors and devices as required */
+
+#ifdef BCMSDIOH_STD
+	/* Check for Arasan host controller */
+	if (vendor == VENDOR_SI_IMAGE) {
+		return (TRUE);
+	}
+	/* Check for BRCM 27XX Standard host controller */
+	if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for BRCM Standard host controller */
+	if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for TI PCIxx21 Standard host controller */
+	if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	/* Ricoh R5C822 Standard SDIO Host */
+	if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+		return (TRUE);
+	}
+	/* JMicron Standard SDIO Host */
+	if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+	/* This is the PciSpiHost. */
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found PCI SPI Host Controller\n");
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_SPI */
+
+	return (FALSE);
+}
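+/* bcmsdh_chipmatch() is exported for host-controller glue code (e.g. a PCI probe
+ * callback) that needs to filter out controllers this driver does not handle; the
+ * exact call site depends on which BCMSDIOH_* backend is selected at build time.
+ */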
+
+void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+	uint bus_num, uint slot_num)
+{
+	ulong regs;
+	bcmsdh_info_t *bcmsdh;
+	uint32 vendevid;
+	bcmsdh_os_info_t *bcmsdh_osinfo = NULL;
+
+	bcmsdh = bcmsdh_attach(osh, sdioh, &regs);
+	if (bcmsdh == NULL) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t));
+	if (bcmsdh_osinfo == NULL) {
+		SDLX_MSG(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__));
+		goto err;
+	}
+	bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	bcmsdh->os_cxt = bcmsdh_osinfo;
+	bcmsdh_osinfo->sdioh = sdioh;
+	bcmsdh_osinfo->dev = dev;
+	osl_set_bus_handle(osh, bcmsdh);
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dev && device_init_wakeup(dev, true) == 0)
+		bcmsdh_osinfo->dev_wake_enabled = TRUE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+#if defined(OOB_INTR_ONLY)
+	spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock);
+	/* Get customer-specific OOB IRQ parameters: IRQ number and IRQ flags */
+	bcmsdh_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter_info,
+		&bcmsdh_osinfo->oob_irq_flags);
+	if (bcmsdh_osinfo->oob_irq_num < 0) {
+		SDLX_MSG(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+		goto err;
+	}
+#endif /* defined(OOB_INTR_ONLY) */
+
+	/* Read the vendor/device ID from the CIS */
+	vendevid = bcmsdh_query_device(bcmsdh);
+	/* try to attach to the target device */
+	bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
+		slot_num, 0, bus_type, (void *)regs, osh, bcmsdh);
+	if (bcmsdh_osinfo->context == NULL) {
+		SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	return bcmsdh;
+
+	/* error handling */
+err:
+	if (bcmsdh != NULL)
+		bcmsdh_detach(osh, bcmsdh);
+	if (bcmsdh_osinfo != NULL)
+		MFREE(osh, bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	return NULL;
+}
+
+int bcmsdh_remove(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (bcmsdh_osinfo->dev)
+		device_init_wakeup(bcmsdh_osinfo->dev, false);
+	bcmsdh_osinfo->dev_wake_enabled = FALSE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+	drvinfo.remove(bcmsdh_osinfo->context);
+	MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t));
+	bcmsdh_detach(bcmsdh->osh, bcmsdh);
+
+	return 0;
+}
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (drvinfo.suspend && drvinfo.suspend(bcmsdh_osinfo->context))
+		return -EBUSY;
+	return 0;
+}
+
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (drvinfo.resume)
+		return drvinfo.resume(bcmsdh_osinfo->context);
+	return 0;
+}
+
+extern int bcmsdh_register_client_driver(void);
+extern void bcmsdh_unregister_client_driver(void);
+extern int sdio_func_reg_notify(void* semaphore);
+extern void sdio_func_unreg_notify(void);
+
+#if defined(BCMLXSDMMC)
+int bcmsdh_reg_sdio_notify(void* semaphore)
+{
+	return sdio_func_reg_notify(semaphore);
+}
+
+void bcmsdh_unreg_sdio_notify(void)
+{
+	sdio_func_unreg_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+	int error = 0;
+
+	drvinfo = *driver;
+	SDLX_MSG(("%s: register client driver\n", __FUNCTION__));
+	error = bcmsdh_register_client_driver();
+	if (error)
+		SDLX_MSG(("%s: failed %d\n", __FUNCTION__, error));
+
+	return error;
+}
+
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+		if (bcmsdh_pci_driver.node.next == NULL)
+			return;
+#endif
+
+	bcmsdh_unregister_client_driver();
+}
+
+void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	pm_stay_awake(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	pm_relax(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	return bcmsdh_osinfo->dev_wake_enabled;
+}
+
+#if defined(OOB_INTR_ONLY)
+void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable)
+{
+	unsigned long flags;
+	bcmsdh_os_info_t *bcmsdh_osinfo;
+
+	if (!bcmsdh)
+		return;
+
+	bcmsdh_osinfo = bcmsdh->os_cxt;
+	spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+	if (bcmsdh_osinfo->oob_irq_enabled != enable) {
+		if (enable)
+			enable_irq(bcmsdh_osinfo->oob_irq_num);
+		else
+			disable_irq_nosync(bcmsdh_osinfo->oob_irq_num);
+		bcmsdh_osinfo->oob_irq_enabled = enable;
+	}
+	spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+}
+
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)dev_id;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	bcmsdh_oob_intr_set(bcmsdh, FALSE);
+	bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context);
+
+	return IRQ_HANDLED;
+}
+
+int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+	void* oob_irq_handler_context)
+{
+	int err = 0;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+	if (bcmsdh_osinfo->oob_irq_registered) {
+		SDLX_MSG(("%s: irq is already registered\n", __FUNCTION__));
+		return -EBUSY;
+	}
+	SDLX_MSG(("%s OOB irq=%d flags=%X \n", __FUNCTION__,
+		(int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags));
+	bcmsdh_osinfo->oob_irq_handler = oob_irq_handler;
+	bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context;
+#if defined(CONFIG_ARCH_ODIN)
+	err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+		bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#else
+	err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+		bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#endif /* defined(CONFIG_ARCH_ODIN) */
+	if (err) {
+		SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err));
+		return err;
+	}
+
+	err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+	if (!err)
+		bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
+	bcmsdh_osinfo->oob_irq_enabled = TRUE;
+	bcmsdh_osinfo->oob_irq_registered = TRUE;
+	return err;
+}
+
+void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh)
+{
+	int err = 0;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+	if (!bcmsdh_osinfo->oob_irq_registered) {
+		SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__));
+		return;
+	}
+	if (bcmsdh_osinfo->oob_irq_wake_enabled) {
+		err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+		if (!err)
+			bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+	}
+	if (bcmsdh_osinfo->oob_irq_enabled) {
+		disable_irq(bcmsdh_osinfo->oob_irq_num);
+		bcmsdh_osinfo->oob_irq_enabled = FALSE;
+	}
+	free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh);
+	bcmsdh_osinfo->oob_irq_registered = FALSE;
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Module parameters specific to each host-controller driver */
+
+extern uint sd_msglevel;	/* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power;	/* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock;	/* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor;	/* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode;	/* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok;	/* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, int, 0);
+
+#ifdef BCMSDIOH_STD
+extern int sd_uhsimode;
+module_param(sd_uhsimode, int, 0);
+extern uint sd_tuning_period;
+module_param(sd_tuning_period, uint, 0);
+extern int sd_delay_value;
+module_param(sd_delay_value, uint, 0);
+
+/* SDIO Drive Strength for UHSI mode specific to SDIO3.0 */
+extern char dhd_sdiod_uhsi_ds_override[2];
+module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0);
+
+#endif
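+/* When the driver is built as a module these parameters can be overridden at load
+ * time, e.g. (illustrative only; the module name and the set of available parameters
+ * depend on the build configuration):
+ *
+ *	insmod bcmdhd.ko sd_msglevel=0x2 sd_f2_blocksize=256
+ */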
+
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+EXPORT_SYMBOL(bcmsdh_waitlockfree);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
new file mode 100644
index 0000000000000000000000000000000000000000..d668d887d62fd3cecdc14943baecc867d2f8017f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
@@ -0,0 +1,1456 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.c 459285 2014-03-03 02:54:39Z $
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+#include <sdioh.h>	/* Standard SDIO Host Controller Specification */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+extern int sdio_reset_comm(struct mmc_card *card);
+
+#define DEFAULT_SDIO_F2_BLKSIZE		512
+#ifndef CUSTOM_SDIO_F2_BLKSIZE
+#define CUSTOM_SDIO_F2_BLKSIZE		DEFAULT_SDIO_F2_BLKSIZE
+#endif
+
+#define MAX_IO_RW_EXTENDED_BLK		511
+
+uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
+uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
+uint sd_divisor = 2;			/* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_hiok = FALSE;	/* Don't use hi-speed mode by default */
+uint sd_msglevel = 0x01;
+uint sd_use_dma = TRUE;
+
+#ifndef CUSTOM_RXCHAIN
+#define CUSTOM_RXCHAIN 0
+#endif
+
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
+#define DMA_ALIGN_MASK	0x03
+#define MMC_SDIO_ABORT_RETRY_LIMIT 5
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+	int err_ret;
+	uint32 fbraddr;
+	uint8 func;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	/* Get the Card's common CIS address */
+	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Get the Card's function CIS (for each function) */
+	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+		         __FUNCTION__, func, sd->func_cis_ptr[func]));
+	}
+
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Enable Function 1 */
+	sdio_claim_host(sd->func[1]);
+	err_ret = sdio_enable_func(sd->func[1]);
+	sdio_release_host(sd->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to enable F1, Err: 0x%08x\n", err_ret));
+	}
+
+	return FALSE;
+}
+
+/*
+ *	Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, struct sdio_func *func)
+{
+	sdioh_info_t *sd = NULL;
+	int err_ret;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (func == NULL) {
+		sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
+		return NULL;
+	}
+
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	sd->fake_func0.num = 0;
+	sd->fake_func0.card = func->card;
+	sd->func[0] = &sd->fake_func0;
+	sd->func[1] = func->card->sdio_func[0];
+	sd->func[2] = func->card->sdio_func[1];
+	sd->num_funcs = 2;
+	sd->sd_blockmode = TRUE;
+	sd->use_client_ints = TRUE;
+	sd->client_block_size[0] = 64;
+	sd->use_rxchain = CUSTOM_RXCHAIN;
+	if (sd->func[1] == NULL || sd->func[2] == NULL) {
+		sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
+		goto fail;
+	}
+	sdio_set_drvdata(sd->func[1], sd);
+
+	sdio_claim_host(sd->func[1]);
+	sd->client_block_size[1] = 64;
+	err_ret = sdio_set_block_size(sd->func[1], 64);
+	sdio_release_host(sd->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
+		goto fail;
+	}
+
+	sdio_claim_host(sd->func[2]);
+	sd->client_block_size[2] = sd_f2_blocksize;
+	err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+	sdio_release_host(sd->func[2]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
+			sd_f2_blocksize, err_ret));
+		goto fail;
+	}
+
+	sdioh_sdmmc_card_enablefuncs(sd);
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+
+fail:
+	MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	return NULL;
+}
+
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (sd) {
+
+		/* Disable Function 2 */
+		if (sd->func[2]) {
+			sdio_claim_host(sd->func[2]);
+			sdio_disable_func(sd->func[2]);
+			sdio_release_host(sd->func[2]);
+		}
+
+		/* Disable Function 1 */
+		if (sd->func[1]) {
+			sdio_claim_host(sd->func[1]);
+			sdio_disable_func(sd->func[1]);
+			sdio_release_host(sd->func[1]);
+		}
+
+		sd->func[1] = NULL;
+		sd->func[2] = NULL;
+
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+extern SDIOH_API_RC
+sdioh_enable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	/* Enable F1 and F2 interrupts, clear master enable */
+	reg &= ~INTR_CTL_MASTER_EN;
+	reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+	/* Disable master interrupt with the last function interrupt */
+	if (!(reg & 0xFE))
+		reg = 0;
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	if (fn == NULL) {
+		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+
+	/* register and unmask irq */
+	if (sd->func[2]) {
+		sdio_claim_host(sd->func[2]);
+		sdio_claim_irq(sd->func[2], IRQHandlerF2);
+		sdio_release_host(sd->func[2]);
+	}
+
+	if (sd->func[1]) {
+		sdio_claim_host(sd->func[1]);
+		sdio_claim_irq(sd->func[1], IRQHandler);
+		sdio_release_host(sd->func[1]);
+	}
+#elif defined(HW_OOB)
+	sdioh_enable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+	if (sd->func[1]) {
+		/* register and unmask irq */
+		sdio_claim_host(sd->func[1]);
+		sdio_release_irq(sd->func[1]);
+		sdio_release_host(sd->func[1]);
+	}
+
+	if (sd->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(sd->func[2]);
+		sdio_release_irq(sd->func[2]);
+		/* Release host controller F2 */
+		sdio_release_host(sd->func[2]);
+	}
+
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+	sdioh_disable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return (0);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_RXCHAIN
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel", IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+	{"sd_blockmode", IOV_BLOCKMODE, 0,	IOVT_BOOL,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints", 	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG, 	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode", 	IOV_SDMODE, 	0,	IOVT_UINT32,	100},
+	{"sd_highspeed", IOV_HISPEED,	0,	IOVT_UINT32,	0 },
+	{"sd_rxchain",  IOV_RXCHAIN,    0, 	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                           void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+	BCM_REFERENCE(bool_val);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKMODE):
+		int_val = (int32)si->sd_blockmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKMODE):
+		si->sd_blockmode = (bool)int_val;
+		/* Haven't figured out how to make non-block mode with DMA */
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKSIZE):
+	{
+		uint func = ((uint32)int_val >> 16);
+		uint blksize = (uint16)int_val;
+		uint maxsize;
+
+		if (func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		switch (func) {
+		case 0: maxsize = 32; break;
+		case 1: maxsize = BLOCK_SIZE_4318; break;
+		case 2: maxsize = BLOCK_SIZE_4328; break;
+		default: maxsize = 0;
+		}
+		if (blksize > maxsize) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		if (!blksize) {
+			blksize = maxsize;
+		}
+
+		/* Now set it */
+		si->client_block_size[func] = blksize;
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_RXCHAIN):
+		int_val = (int32)si->use_rxchain;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		si->use_client_ints = (bool)int_val;
+		if (si->use_client_ints)
+			si->intmask |= CLIENT_INTR;
+		else
+			si->intmask &= ~CLIENT_INTR;
+
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+		                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		                  sd_ptr->offset));
+		if (sd_ptr->offset & 1)
+			int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
+		else if (sd_ptr->offset & 2)
+			int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
+		else
+			int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
+
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+		                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		                  sd_ptr->offset));
+		break;
+	}
+
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = 0;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+	SDIOH_API_RC status;
+	uint8 data;
+
+	if (enable)
+		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
+	else
+		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+	return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+	/* read 24 bits and return valid 17 bit addr */
+	int i;
+	uint32 scratch, regdata;
+	uint8 *ptr = (uint8 *)&scratch;
+	for (i = 0; i < 3; i++) {
+		if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+		*ptr++ = (uint8) regdata;
+		regaddr++;
+	}
+
+	/* Only the lower 17-bits are valid */
+	scratch = ltoh32(scratch);
+	scratch &= 0x0001FFFF;
+	return (scratch);
+}
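+/* The loop above assembles the little-endian 24-bit CIS pointer one CMD52 read at a
+ * time; ltoh32() normalizes the byte order for big-endian hosts and the final mask
+ * keeps only the 17 address bits that are valid for CIS pointers.
+ */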
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+	for (count = 0; count < length; count++) {
+		offset =  sd->func_cis_ptr[func] + count;
+		if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int err_ret = 0;
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+
+	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	if(rw) { /* CMD52 Write */
+		if (func == 0) {
+			/* Can only directly write to some F0 registers.  Handle F2 enable
+			 * as a special case.
+			 */
+			if (regaddr == SDIOD_CCCR_IOEN) {
+				if (sd->func[2]) {
+					sdio_claim_host(sd->func[2]);
+					if (*byte & SDIO_FUNC_ENABLE_2) {
+						/* Enable Function 2 */
+						err_ret = sdio_enable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+								err_ret));
+						}
+					} else {
+						/* Disable Function 2 */
+						err_ret = sdio_disable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+								err_ret));
+						}
+					}
+					sdio_release_host(sd->func[2]);
+				}
+			}
+#if defined(MMC_SDIO_ABORT)
+			/* to allow abort command through F1 */
+			else if (regaddr == SDIOD_CCCR_IOABORT) {
+				while (sdio_abort_retry--) {
+					if (sd->func[func]) {
+						sdio_claim_host(sd->func[func]);
+						/*
+						 * This register write may be replaced with
+						 * another API as the MMC driver evolves;
+						 * for now it is a temporary approach.
+						 */
+						sdio_writeb(sd->func[func],
+							*byte, regaddr, &err_ret);
+						sdio_release_host(sd->func[func]);
+					}
+					if (!err_ret)
+						break;
+				}
+			}
+#endif /* MMC_SDIO_ABORT */
+			else if (regaddr < 0xF0) {
+				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+			} else {
+				/* Claim host controller, perform F0 write, and release */
+				if (sd->func[func]) {
+					sdio_claim_host(sd->func[func]);
+					sdio_f0_writeb(sd->func[func],
+						*byte, regaddr, &err_ret);
+					sdio_release_host(sd->func[func]);
+				}
+			}
+		} else {
+			/* Claim host controller, perform Fn write, and release */
+			if (sd->func[func]) {
+				sdio_claim_host(sd->func[func]);
+				sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(sd->func[func]);
+			}
+		}
+	} else { /* CMD52 Read */
+		/* Claim host controller, perform Fn read, and release */
+		if (sd->func[func]) {
+			sdio_claim_host(sd->func[func]);
+			if (func == 0) {
+				*byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
+			} else {
+				*byte = sdio_readb(sd->func[func], regaddr, &err_ret);
+			}
+			sdio_release_host(sd->func[func]);
+		}
+	}
+
+	if (err_ret) {
+		if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) {
+			/* Intentionally silent: this register/error combination is not logged */
+		} else {
+			sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+				rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+		}
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                                   uint32 *word, uint nbytes)
+{
+	int err_ret = SDIOH_API_RC_FAIL;
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+
+	if (func == 0) {
+		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+	         __FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Claim host controller */
+	sdio_claim_host(sd->func[func]);
+
+	if (rw) { /* CMD53 Write */
+		if (nbytes == 4) {
+			sdio_writel(sd->func[func], *word, addr, &err_ret);
+		} else if (nbytes == 2) {
+			sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	} else { /* CMD53 Read */
+		if (nbytes == 4) {
+			*word = sdio_readl(sd->func[func], addr, &err_ret);
+		} else if (nbytes == 2) {
+			*word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	}
+
+	/* Release host controller */
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret) {
+#if defined(MMC_SDIO_ABORT)
+		/* Any error on CMD53 transaction should abort that function using function 0. */
+		while (sdio_abort_retry--) {
+			if (sd->func[0]) {
+				sdio_claim_host(sd->func[0]);
+				/*
+				 * This register write may be replaced with another API
+				 * as the MMC driver evolves; for now it is a temporary
+				 * approach.
+				 */
+				sdio_writeb(sd->func[0],
+					func, SDIOD_CCCR_IOABORT, &err_ret);
+				sdio_release_host(sd->func[0]);
+			}
+			if (!err_ret)
+				break;
+		}
+		if (err_ret)
+#endif /* MMC_SDIO_ABORT */
+		{
+			sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
+				rw ? "Write" : "Read", err_ret));
+		}
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+static SDIOH_API_RC
+sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+                     uint addr, void *pkt)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+	void *pnext;
+	uint ttl_len, pkt_offset;
+	uint blk_num;
+	uint blk_size;
+	uint max_blk_count;
+	uint max_req_size;
+	struct mmc_request mmc_req;
+	struct mmc_command mmc_cmd;
+	struct mmc_data mmc_dat;
+	uint32 sg_count;
+	struct sdio_func *sdio_func = sd->func[func];
+	struct mmc_host *host = sdio_func->card->host;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(pkt);
+	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	blk_size = sd->client_block_size[func];
+	max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
+	max_req_size = min(max_blk_count * blk_size, host->max_req_size);
+
+	pkt_offset = 0;
+	pnext = pkt;
+
+	while (pnext != NULL) {
+		ttl_len = 0;
+		sg_count = 0;
+		memset(&mmc_req, 0, sizeof(struct mmc_request));
+		memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+		memset(&mmc_dat, 0, sizeof(struct mmc_data));
+		sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
+
+		/* Set up scatter-gather DMA descriptors. this loop is to find out the max
+		 * data we can transfer with one command 53. blocks per command is limited by
+		 * host max_req_size and 9-bit max block number. when the total length of this
+		 * packet chain is bigger than max_req_size, use multiple SD_IO_RW_EXTENDED
+		 * commands (each transfer is still block aligned)
+		 */
+		while (pnext != NULL && ttl_len < max_req_size) {
+			int pkt_len;
+			int sg_data_size;
+			uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
+
+			ASSERT(pdata != NULL);
+			pkt_len = PKTLEN(sd->osh, pnext);
+			sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
+			/* sg_count should never exceed the array size; that case cannot be
+			 * handled here, so if it ever happens, restrict the maximum tx/glom
+			 * count instead (based on host->max_segs).
+			 */
+			if (sg_count >= ARRAYSIZE(sd->sg_list)) {
+				sd_err(("%s: sg list entries exceed limit\n", __FUNCTION__));
+				return (SDIOH_API_RC_FAIL);
+			}
+			pdata += pkt_offset;
+
+			sg_data_size = pkt_len - pkt_offset;
+			if (sg_data_size > max_req_size - ttl_len)
+				sg_data_size = max_req_size - ttl_len;
+			/* some platforms put a restriction on the data size of each scatter-gather
+			 * DMA descriptor, use multiple sg buffers when xfer_size is bigger than
+			 * max_seg_size
+			 */
+			if (sg_data_size > host->max_seg_size)
+				sg_data_size = host->max_seg_size;
+			sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
+
+			ttl_len += sg_data_size;
+			pkt_offset += sg_data_size;
+			if (pkt_offset == pkt_len) {
+				pnext = PKTNEXT(sd->osh, pnext);
+				pkt_offset = 0;
+			}
+		}
+
+		if (ttl_len % blk_size != 0) {
+			sd_err(("%s, data length %d not aligned to block size %d\n",
+				__FUNCTION__,  ttl_len, blk_size));
+			return SDIOH_API_RC_FAIL;
+		}
+		blk_num = ttl_len / blk_size;
+		mmc_dat.sg = sd->sg_list;
+		mmc_dat.sg_len = sg_count;
+		mmc_dat.blksz = blk_size;
+		mmc_dat.blocks = blk_num;
+		mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
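+		/* Build the CMD53 (SD_IO_RW_EXTENDED) argument:
+		 *   bit  31     R/W flag (1 = write)
+		 *   bits 30:28  function number
+		 *   bit  27     block mode (always set here)
+		 *   bit  26     OP code (1 = incrementing address, 0 = fixed/FIFO)
+		 *   bits 25:9   register address
+		 *   bits 8:0    block count
+		 */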
+		mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
+		mmc_cmd.arg = write ? 1<<31 : 0;
+		mmc_cmd.arg |= (func & 0x7) << 28;
+		mmc_cmd.arg |= 1<<27;
+		mmc_cmd.arg |= fifo ? 0 : 1<<26;
+		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
+		mmc_cmd.arg |= blk_num & 0x1FF;
+		mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+		mmc_req.cmd = &mmc_cmd;
+		mmc_req.data = &mmc_dat;
+		if (!fifo)
+			addr += ttl_len;
+
+		sdio_claim_host(sdio_func);
+		mmc_set_data_timeout(&mmc_dat, sdio_func->card);
+		mmc_wait_for_req(host, &mmc_req);
+		sdio_release_host(sdio_func);
+
+		err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
+		if (0 != err_ret) {
+			sd_err(("%s:CMD53 %s failed with code %d\n",
+				__FUNCTION__, write ? "write" : "read", err_ret));
+			return SDIOH_API_RC_FAIL;
+		}
+	}
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+static SDIOH_API_RC
+sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+                     uint addr, uint8 *buf, uint len)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(buf);
+
+	/* NOTE:
+	 * For all writes, each packet length is aligned to 32 (or 4)
+	 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
+	 * is aligned to block boundary. If you want to align each packet to
+	 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here
+	 *
+	 * For reads, the alignment is done in sdioh_request_buffer.
+	 *
+	 */
+	sdio_claim_host(sd->func[func]);
+
+	if (write)		/* incrementing and fixed-address writes both use memcpy_toio */
+		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+	else if (fifo)		/* fixed-address (FIFO) read */
+		err_ret = sdio_readsb(sd->func[func], buf, addr, len);
+	else			/* incrementing-address read */
+		err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
+
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret)
+		sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
+		       (write) ? "TX" : "RX", buf, addr, len, err_ret));
+	else
+		sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
+			(write) ? "TX" : "RX", buf, addr, len));
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain.  If it is a packet chain,
+ * then all the packets in the chain must be properly aligned.  If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+	uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
+{
+	SDIOH_API_RC status;
+	void *tmppkt;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	if (pkt) {
+		/* packet chain, used only for tx/rx glom; all packet lengths are
+		 * aligned and the total length is a multiple of the block size
+		 */
+		if (PKTNEXT(sd->osh, pkt))
+			return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
+
+		/* non-glom mode, ignore the buffer parameter and use the packet pointer
+		 * (this shouldn't happen)
+		 */
+		buffer = PKTDATA(sd->osh, pkt);
+		buf_len = PKTLEN(sd->osh, pkt);
+	}
+
+	ASSERT(buffer);
+
+	/* buffer and length are aligned, use it directly so we can avoid memory copy */
+	if (((ulong)buffer & DMA_ALIGN_MASK) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
+		return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
+
+	sd_err(("%s: [%d] doing memory copy buf=%p, len=%d\n",
+		__FUNCTION__, write, buffer, buf_len));
+
+	/* otherwise, a memory copy is needed as the input buffer is not aligned */
+	tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
+	if (tmppkt == NULL) {
+		sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (write)
+		bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
+
+	status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
+		PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
+
+	if (!write)
+		bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
+
+	PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
+
+	return status;
+}
+
+/* this function performs "abort" for both the host and the device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+	char t_func = (char) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MMC_SDIO_ABORT)
+	/* issue abort cmd52 command through F1 */
+	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
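+	/* Nothing to do here for the Linux MMC port; card reset and
+	 * re-enumeration are driven from sdioh_start() via sdio_reset_comm().
+	 */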
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp = 0;
+
+		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+		*data = temp;
+		*data &= 0xff;
+		sd_data(("%s: byte read data=0x%02x\n",
+		         __FUNCTION__, *data));
+	} else {
+		sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
+		if (regsize == 2)
+			*data &= 0xffff;
+
+		sd_data(("%s: word read data=0x%08x\n",
+		         __FUNCTION__, *data));
+	}
+
+	return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd = sdio_get_drvdata(func);
+
+	ASSERT(sd != NULL);
+	sdio_release_host(sd->func[0]);
+
+	if (sd->use_client_ints) {
+		sd->intrcount++;
+		ASSERT(sd->intr_handler);
+		ASSERT(sd->intr_handler_arg);
+		(sd->intr_handler)(sd->intr_handler_arg);
+	} else {
+		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+		        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+	}
+
+	sdio_claim_host(sd->func[0]);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp;
+
+		temp = data & 0xff;
+		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
+		sd_data(("%s: byte write data=0x%02x\n",
+		         __FUNCTION__, data));
+	} else {
+		if (regsize == 2)
+			data &= 0xffff;
+
+		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
+
+		sd_data(("%s: word write data=0x%08x\n",
+		         __FUNCTION__, data));
+	}
+
+	return SUCCESS;
+}
+#endif /* NOTUSED */
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	int ret;
+
+	if (!sd) {
+		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
+		return (0);
+	}
+
+	/* This is done in stages because the interrupt cannot be enabled until
+	 * firmware download is complete; otherwise it would interfere with the
+	 * polled SDIO accesses used during download.
+	 */
+	if (sd->func[0]) {
+			if (stage == 0) {
+		/* Since power to the chip was cut, the device must be re-enumerated.
+		 * Set the block sizes and enable function 1 in preparation for
+		 * downloading the firmware.
+		 */
+		/* sdio_reset_comm() has been fixed in the latest kernel/msm.git for
+		 * Linux 2.6.27; earlier implementations are buggy and require
+		 * Broadcom's patch.
+		 */
+		if ((ret = sdio_reset_comm(sd->func[0]->card))) {
+			sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		else {
+			sd->num_funcs = 2;
+			sd->sd_blockmode = TRUE;
+			sd->use_client_ints = TRUE;
+			sd->client_block_size[0] = 64;
+
+			if (sd->func[1]) {
+				/* Claim host controller */
+				sdio_claim_host(sd->func[1]);
+
+				sd->client_block_size[1] = 64;
+				ret = sdio_set_block_size(sd->func[1], 64);
+				if (ret) {
+					sd_err(("bcmsdh_sdmmc: Failed to set F1 "
+						"blocksize(%d)\n", ret));
+				}
+
+				/* Release host controller F1 */
+				sdio_release_host(sd->func[1]);
+			}
+
+			if (sd->func[2]) {
+				/* Claim host controller F2 */
+				sdio_claim_host(sd->func[2]);
+
+				sd->client_block_size[2] = sd_f2_blocksize;
+				ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+				if (ret) {
+					sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+						"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
+				}
+
+				/* Release host controller F2 */
+				sdio_release_host(sd->func[2]);
+			}
+
+			sdioh_sdmmc_card_enablefuncs(sd);
+			}
+		} else {
+#if !defined(OOB_INTR_ONLY)
+			sdio_claim_host(sd->func[0]);
+			if (sd->func[2])
+				sdio_claim_irq(sd->func[2], IRQHandlerF2);
+			if (sd->func[1])
+				sdio_claim_irq(sd->func[1], IRQHandler);
+			sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+			sdioh_enable_func_intr(sd);
+#endif
+			bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+		}
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+
+	return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	/* The MSM7201A Android SDIO stack has an interrupt bug: it polls
+	 * internally, which causes problems once the device is powered off.
+	 * Unregister the interrupt handlers with the SDIO stack to stop the
+	 * polling.
+	 */
+	if (sd->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+		sdio_claim_host(sd->func[0]);
+		if (sd->func[1])
+			sdio_release_irq(sd->func[1]);
+		if (sd->func[2])
+			sdio_release_irq(sd->func[2]);
+		sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+		sdioh_disable_func_intr(sd);
+#endif
+		bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+	return (0);
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+	return (1);
+}
+
+
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+	return FALSE;
+}
+
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+	return SDIOH_API_RC_FAIL;
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
new file mode 100644
index 0000000000000000000000000000000000000000..a93f983f10247cc255c2bc0e8f69aee63503142a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,387 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc_linux.c 434777 2013-11-07 09:30:27Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* to get msglevel bit values */
+
+#include <linux/sched.h>	/* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <dhd_linux.h>
+#include <bcmsdh_sdmmc.h>
+#include <dhd_dbg.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM		0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT	0x0000
+
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB	0x0492	/* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325	0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329	0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319	0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4330)
+#define SDIO_DEVICE_ID_BROADCOM_4330	0x4330
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4330) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4334)
+#define SDIO_DEVICE_ID_BROADCOM_4334    0x4334
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4334) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4324)
+#define SDIO_DEVICE_ID_BROADCOM_4324    0x4324
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4324) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_43239)
+#define SDIO_DEVICE_ID_BROADCOM_43239    43239
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43239) */
+
+extern void wl_cfg80211_set_parent_dev(void *dev);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+	uint bus_num, uint slot_num);
+extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern volatile bool dhd_mmc_suspend;
+
+static int sdioh_probe(struct sdio_func *func)
+{
+	int host_idx = func->card->host->index;
+	uint32 rca = func->card->rca;
+	wifi_adapter_info_t *adapter;
+	osl_t *osh = NULL;
+	sdioh_info_t *sdioh = NULL;
+
+	sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca));
+	adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca);
+	if (adapter  != NULL)
+		sd_err(("found adapter info '%s'\n", adapter->name));
+	else
+		sd_err(("can't find adapter info for this chip\n"));
+
+#ifdef WL_CFG80211
+	wl_cfg80211_set_parent_dev(&func->dev);
+#endif
+
+	 /* allocate SDIO Host Controller state info */
+	 osh = osl_attach(&func->dev, SDIO_BUS, TRUE);
+	 if (osh == NULL) {
+		 sd_err(("%s: osl_attach failed\n", __FUNCTION__));
+		 goto fail;
+	 }
+	 osl_static_mem_init(osh, adapter);
+	 sdioh = sdioh_attach(osh, func);
+	 if (sdioh == NULL) {
+		 sd_err(("%s: sdioh_attach failed\n", __FUNCTION__));
+		 goto fail;
+	 }
+	 sdioh->bcmsdh = bcmsdh_probe(osh, &func->dev, sdioh, adapter, SDIO_BUS, host_idx, rca);
+	 if (sdioh->bcmsdh == NULL) {
+		 sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__));
+		 goto fail;
+	 }
+
+	sdio_set_drvdata(func, sdioh);
+	return 0;
+
+fail:
+	if (sdioh != NULL)
+		sdioh_detach(osh, sdioh);
+	if (osh != NULL)
+		osl_detach(osh);
+	return -ENOMEM;
+}
+
+static void sdioh_remove(struct sdio_func *func)
+{
+	sdioh_info_t *sdioh;
+	osl_t *osh;
+
+	sdioh = sdio_get_drvdata(func);
+	if (sdioh == NULL) {
+		sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__));
+		return;
+	}
+
+	osh = sdioh->osh;
+	bcmsdh_remove(sdioh->bcmsdh);
+	sdioh_detach(osh, sdioh);
+	osl_detach(osh);
+}
+
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	int ret = 0;
+
+	if (func == NULL)
+		return -EINVAL;
+
+	sd_err(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+	/* 4318 doesn't have function 2 */
+	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+		ret = sdioh_probe(func);
+
+	return ret;
+}
+
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+	if (func == NULL) {
+		sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__));
+		return;
+	}
+
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+		sdioh_remove(func);
+}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4324) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43239) },
+	{ SDIO_DEVICE_CLASS(SDIO_CLASS_NONE)		},
+	{ /* end: all zeroes */				},
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+static int bcmsdh_sdmmc_suspend(struct device *pdev)
+{
+	int err;
+	sdioh_info_t *sdioh;
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+	mmc_pm_flag_t sdio_flags;
+
+	sd_err(("%s Enter\n", __FUNCTION__));
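+	/* Suspend is invoked once per SDIO function device; act only on F2 so
+	 * the sequence below runs a single time per card.
+	 */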
+	if (func->num != 2)
+		return 0;
+
+	sdioh = sdio_get_drvdata(func);
+	err = bcmsdh_suspend(sdioh->bcmsdh);
+	if (err)
+		return err;
+
+	sdio_flags = sdio_get_host_pm_caps(func);
+	if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+		sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
+		return  -EINVAL;
+	}
+
+	/* keep power while host suspended */
+	err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+	if (err) {
+		sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
+		return err;
+	}
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(sdioh->bcmsdh, FALSE);
+#endif /* OOB_INTR_ONLY */
+	dhd_mmc_suspend = TRUE;
+	smp_mb();
+
+	return 0;
+}
+
+static int bcmsdh_sdmmc_resume(struct device *pdev)
+{
+	sdioh_info_t *sdioh;
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+
+	sd_err(("%s Enter\n", __FUNCTION__));
+	if (func->num != 2)
+		return 0;
+
+	sdioh = sdio_get_drvdata(func);
+	dhd_mmc_suspend = FALSE;
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_resume(sdioh->bcmsdh);
+#endif /* OOB_INTR_ONLY */
+
+	smp_mb();
+	return 0;
+}
+
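+/* These callbacks are wired into bcmsdh_sdmmc_driver.drv.pm below, so the MMC
+ * core invokes them around system suspend/resume.
+ */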
+static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = {
+	.suspend	= bcmsdh_sdmmc_suspend,
+	.resume		= bcmsdh_sdmmc_resume,
+};
+#endif  /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+
+#if defined(BCMLXSDMMC)
+static struct semaphore *notify_semaphore = NULL;
+
+static int dummy_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	if (func && (func->num != 2)) {
+		return 0;
+	}
+
+	if (notify_semaphore)
+		up(notify_semaphore);
+	return 0;
+}
+
+static void dummy_remove(struct sdio_func *func)
+{
+}
+
+static struct sdio_driver dummy_sdmmc_driver = {
+	.probe		= dummy_probe,
+	.remove		= dummy_remove,
+	.name		= "dummy_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+	};
+
+int sdio_func_reg_notify(void* semaphore)
+{
+	notify_semaphore = semaphore;
+	return sdio_register_driver(&dummy_sdmmc_driver);
+}
+
+void sdio_func_unreg_notify(void)
+{
+	OSL_SLEEP(15);
+	sdio_unregister_driver(&dummy_sdmmc_driver);
+}
+
+#endif /* defined(BCMLXSDMMC) */
+
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+	.probe		= bcmsdh_sdmmc_probe,
+	.remove		= bcmsdh_sdmmc_remove,
+	.name		= "bcmsdh_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+	.drv = {
+	.pm	= &bcmsdh_sdmmc_pm_ops,
+	},
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+	};
+
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+};
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	if (!sd)
+		return SDIOH_API_RC_FAIL;	/* the function returns SDIOH_API_RC, not a BCME_ code */
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#ifdef BCMSDH_MODULE
+static int __init
+bcmsdh_module_init(void)
+{
+	int error = 0;
+	error = sdio_function_init();
+	return error;
+}
+
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+	sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init
+*/
+int bcmsdh_register_client_driver(void)
+{
+	return sdio_register_driver(&bcmsdh_sdmmc_driver);
+}
+
+/*
+ * module cleanup
+*/
+void bcmsdh_unregister_client_driver(void)
+{
+	sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmutils.c b/drivers/net/wireless/bcmdhd/bcmutils.c
new file mode 100644
index 0000000000000000000000000000000000000000..1cd880d7866226e5e6f18ea1accd895138ffd178
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmutils.c
@@ -0,0 +1,3019 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * $Copyright Open Broadcom Corporation$
+ * $Id: bcmutils.c 488316 2014-06-30 15:22:21Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+#ifdef BCMDRIVER
+
+#include <osl.h>
+#include <bcmutils.h>
+
+#else /* !BCMDRIVER */
+
+#include <stdio.h>
+#include <string.h>
+#include <bcmutils.h>
+
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#endif
+
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+
+#endif /* !BCMDRIVER */
+
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/bcmip.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+
+void *_bcmutils_dummy_fn = NULL;
+
+
+#ifdef CUSTOM_DSCP_TO_PRIO_MAPPING
+#define CUST_IPV4_TOS_PREC_MASK 0x3F
+#define DCSP_MAX_VALUE 64
+/* 0:BE, 1:BK, 2:RESV(BK), 3:EE, 4:CL, 5:VI, 6:VO, 7:NC */
+int dscp2priomap[DCSP_MAX_VALUE]=
+{
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, /* BK->BE */
+	2, 0, 0, 0, 0, 0, 0, 0,
+	3, 0, 0, 0, 0, 0, 0, 0,
+	4, 0, 0, 0, 0, 0, 0, 0,
+	5, 0, 0, 0, 0, 0, 0, 0,
+	6, 0, 0, 0, 0, 0, 0, 0,
+	7, 0, 0, 0, 0, 0, 0, 0
+};
+#endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */
+
+
+#ifdef BCMDRIVER
+
+
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+	if (len < 0)
+		len = 4096;	/* "infinite" */
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(PKTDATA(osh, p) + offset, buf, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(buf, PKTDATA(osh, p) + offset, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+
+
+/* return total length of buffer chain */
+uint BCMFASTPATH
+pkttotlen(osl_t *osh, void *p)
+{
+	uint total;
+	int len;
+
+	total = 0;
+	for (; p; p = PKTNEXT(osh, p)) {
+		len = PKTLEN(osh, p);
+		total += len;
+#ifdef BCMLFRAG
+		if (BCMLFRAG_ENAB()) {
+			if (PKTISFRAG(osh, p)) {
+				total += PKTFRAGTOTLEN(osh, p);
+			}
+		}
+#endif
+	}
+
+	return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+	for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+		;
+
+	return (p);
+}
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt(osl_t *osh, void *p)
+{
+	uint cnt;
+
+	for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+		cnt++;
+#ifdef BCMLFRAG
+		if (BCMLFRAG_ENAB()) {
+			if (PKTISFRAG(osh, p)) {
+				cnt += PKTFRAGTOTNUM(osh, p);
+			}
+		}
+#endif
+	}
+
+	return cnt;
+}
+
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt_war(osl_t *osh, void *p)
+{
+	uint cnt;
+	uint8 *pktdata;
+	uint len, remain, align64;
+
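+	/* Workaround variant of pktsegcnt (hence the _war suffix): count extra
+	 * segments for buffers that straddle a 2KB boundary, or whose length
+	 * modulo 128 (after 64-byte alignment) falls in the 1-4 byte range.
+	 */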
+	for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+		cnt++;
+		len = PKTLEN(osh, p);
+		if (len > 128) {
+			pktdata = (uint8 *)PKTDATA(osh, p);	/* starting address of data */
+			/* Check for page boundary straddle (2048B) */
+			if (((uintptr)pktdata & ~0x7ff) != ((uintptr)(pktdata+len) & ~0x7ff))
+				cnt++;
+
+			align64 = (uint)((uintptr)pktdata & 0x3f);	/* aligned to 64B */
+			align64 = (64 - align64) & 0x3f;
+			len -= align64;		/* bytes from aligned 64B to end */
+			/* if aligned to 128B, check for MOD 128 between 1 to 4B */
+			remain = len % 128;
+			if (remain > 0 && remain <= 4)
+				cnt++;		/* add extra seg */
+		}
+	}
+
+	return cnt;
+}
+
+uint8 * BCMFASTPATH
+pktdataoffset(osl_t *osh, void *p,  uint offset)
+{
+	uint total = pkttotlen(osh, p);
+	uint pkt_off = 0, len = 0;
+	uint8 *pdata = (uint8 *) PKTDATA(osh, p);
+
+	if (offset > total)
+		return NULL;
+
+	for (; p; p = PKTNEXT(osh, p)) {
+		pdata = (uint8 *) PKTDATA(osh, p);
+		pkt_off = offset - len;
+		len += PKTLEN(osh, p);
+		if (len > offset)
+			break;
+	}
+	return (uint8*) (pdata+pkt_off);
+}
+
+
+/* given a offset in pdata, find the pkt seg hdr */
+void *
+pktoffset(osl_t *osh, void *p,  uint offset)
+{
+	uint total = pkttotlen(osh, p);
+	uint len = 0;
+
+	if (offset > total)
+		return NULL;
+
+	for (; p; p = PKTNEXT(osh, p)) {
+		len += PKTLEN(osh, p);
+		if (len > offset)
+			break;
+	}
+	return p;
+}
+
+#endif /* BCMDRIVER */
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+const unsigned char bcm_ctype[] = {
+
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 0-7 */
+	_BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+	_BCM_C,	/* 8-15 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 16-23 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 24-31 */
+	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,		/* 32-39 */
+	_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 40-47 */
+	_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,			/* 48-55 */
+	_BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 56-63 */
+	_BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
+	_BCM_U|_BCM_X, _BCM_U, /* 64-71 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 72-79 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 80-87 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 88-95 */
+	_BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
+	_BCM_L|_BCM_X, _BCM_L, /* 96-103 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,		/* 128-143 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,		/* 144-159 */
+	_BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,	/* 160-175 */
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,	/* 176-191 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,	/* 192-207 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L,	/* 208-223 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,	/* 224-239 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+ulong
+bcm_strtoul(const char *cp, char **endp, uint base)
+{
+	ulong result, last_result = 0, value;
+	bool minus;
+
+	minus = FALSE;
+
+	while (bcm_isspace(*cp))
+		cp++;
+
+	if (cp[0] == '+')
+		cp++;
+	else if (cp[0] == '-') {
+		minus = TRUE;
+		cp++;
+	}
+
+	if (base == 0) {
+		if (cp[0] == '0') {
+			if ((cp[1] == 'x') || (cp[1] == 'X')) {
+				base = 16;
+				cp = &cp[2];
+			} else {
+				base = 8;
+				cp = &cp[1];
+			}
+		} else
+			base = 10;
+	} else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+		cp = &cp[2];
+	}
+
+	result = 0;
+
+	while (bcm_isxdigit(*cp) &&
+	       (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+		result = result*base + value;
+		/* Detected overflow */
+		if (result < last_result && !minus)
+			return (ulong)-1;
+		last_result = result;
+		cp++;
+	}
+
+	if (minus)
+		result = (ulong)(-(long)result);
+
+	if (endp)
+		*endp = DISCARD_QUAL(cp, char);
+
+	return (result);
+}
+
+int
+bcm_atoi(const char *s)
+{
+	return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char *
+bcmstrstr(const char *haystack, const char *needle)
+{
+	int len, nlen;
+	int i;
+
+	if ((haystack == NULL) || (needle == NULL))
+		return DISCARD_QUAL(haystack, char);
+
+	nlen = (int)strlen(needle);
+	len = (int)strlen(haystack) - nlen + 1;
+
+	for (i = 0; i < len; i++)
+		if (memcmp(needle, &haystack[i], nlen) == 0)
+			return DISCARD_QUAL(&haystack[i], char);
+	return (NULL);
+}
+
+char *
+bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len)
+{
+	for (; s_len >= substr_len; s++, s_len--)
+		if (strncmp(s, substr, substr_len) == 0)
+			return DISCARD_QUAL(s, char);
+
+	return NULL;
+}
+
+char *
+bcmstrcat(char *dest, const char *src)
+{
+	char *p;
+
+	p = dest + strlen(dest);
+
+	while ((*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+char *
+bcmstrncat(char *dest, const char *src, uint size)
+{
+	char *endp;
+	char *p;
+
+	p = dest + strlen(dest);
+	endp = p + size;
+
+	while (p != endp && (*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+
+/****************************************************************************
+* Function:   bcmstrtok
+*
+* Purpose:
+*  Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+*  but can be used on different strings or by different callers at the same
+*  time. Each call modifies '*string' by substituting a null character for the
+*  first delimiter that is encountered, and updates 'string' to point to the char
+*  after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+*  string      (mod) Ptr to string ptr, updated by token.
+*  delimiters  (in)  Set of delimiter characters.
+*  tokdelim    (out) Character that delimits the returned token. (May
+*                    be set to NULL if token delimiter is not required).
+*
+* Returns:  Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+	unsigned char *str;
+	unsigned long map[8];
+	int count;
+	char *nextoken;
+
+	if (tokdelim != NULL) {
+		/* Prime the token delimiter */
+		*tokdelim = '\0';
+	}
+
+	/* Clear control map */
+	for (count = 0; count < 8; count++) {
+		map[count] = 0;
+	}
+
+	/* Set bits in delimiter table */
+	do {
+		map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+	}
+	while (*delimiters++);
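+	/* 'map' is a 256-bit bitmap (8 x 32-bit words) with one bit set per
+	 * delimiter character; membership is tested with
+	 * map[c >> 5] & (1 << (c & 31)).
+	 */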
+
+	str = (unsigned char*)*string;
+
+	/* Find beginning of token (skip over leading delimiters). Note that
+	 * there is no token iff this loop sets str to point to the terminal
+	 * null (*str == '\0')
+	 */
+	while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+		str++;
+	}
+
+	nextoken = (char*)str;
+
+	/* Find the end of the token. If it is not the end of the string,
+	 * put a null there.
+	 */
+	for (; *str; str++) {
+		if (map[*str >> 5] & (1 << (*str & 31))) {
+			if (tokdelim != NULL) {
+				*tokdelim = *str;
+			}
+
+			*str++ = '\0';
+			break;
+		}
+	}
+
+	*string = (char*)str;
+
+	/* Determine if a token has been found. */
+	if (nextoken == (char *) str) {
+		return NULL;
+	}
+	else {
+		return nextoken;
+	}
+}
+
+
+#define xToLower(C) \
+	((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+
+/****************************************************************************
+* Function:   bcmstricmp
+*
+* Purpose:    Compare two strings, ignoring case.
+*
+* Parameters: s1 (in) First string to compare.
+*             s2 (in) Second string to compare.
+*
+* Returns:    0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+	char dc, sc;
+
+	while (*s2 && *s1) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+	}
+
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+
+/****************************************************************************
+* Function:   bcmstrnicmp
+*
+* Purpose:    Compare two strings, ignoring case, up to a maximum of 'cnt'
+*             characters.
+*
+* Parameters: s1  (in) First string to compare.
+*             s2  (in) Second string to compare.
+*             cnt (in) Max characters to compare.
+*
+* Returns:    0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+	char dc, sc;
+
+	while (*s2 && *s1 && cnt) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+		cnt--;
+	}
+
+	if (!cnt) return 0;
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(const char *p, struct ether_addr *ea)
+{
+	int i = 0;
+	char *ep;
+
+	for (;;) {
+		ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+		p = ep;
+		if (!*p++ || i == 6)
+			break;
+	}
+
+	return (i == 6);
+}
+
+int
+bcm_atoipv4(const char *p, struct ipv4_addr *ip)
+{
+
+	int i = 0;
+	char *c;
+	for (;;) {
+		ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0);
+		if (*c++ != '.' || i == IPV4_ADDR_LEN)
+			break;
+		p = c;
+	}
+	return (i == IPV4_ADDR_LEN);
+}
+#endif	/* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but returns count
+ * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
+ */
+ulong
+wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+{
+	ulong copyct = 1;
+	ushort i;
+
+	if (abuflen == 0)
+		return 0;
+
+	/* wbuflen is in bytes */
+	wbuflen /= sizeof(ushort);
+
+	for (i = 0; i < wbuflen; ++i) {
+		if (--abuflen == 0)
+			break;
+		*abuf++ = (char) *wbuf++;
+		++copyct;
+	}
+	*abuf = '\0';
+
+	return copyct;
+}
+#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+	static const char hex[] =
+	  {
+		  '0', '1', '2', '3', '4', '5', '6', '7',
+		  '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+	  };
+	const uint8 *octet = ea->octet;
+	char *p = buf;
+	int i;
+
+	for (i = 0; i < 6; i++, octet++) {
+		*p++ = hex[(*octet >> 4) & 0xf];
+		*p++ = hex[*octet & 0xf];
+		*p++ = ':';
+	}
+
+	*(p-1) = '\0';
+
+	return (buf);
+}
+
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+	snprintf(buf, 16, "%d.%d.%d.%d",
+	         ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+	return (buf);
+}
+
+char *
+bcm_ipv6_ntoa(void *ipv6, char *buf)
+{
+	/* Implementing RFC 5952 Sections 4 + 5 */
+	/* Not thoroughly tested */
+	uint16 tmp[8];
+	uint16 *a = &tmp[0];
+	char *p = buf;
+	int i, i_max = -1, cnt = 0, cnt_max = 1;
+	uint8 *a4 = NULL;
+	memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN);
+
+	for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+		if (a[i]) {
+			if (cnt > cnt_max) {
+				cnt_max = cnt;
+				i_max = i - cnt;
+			}
+			cnt = 0;
+		} else
+			cnt++;
+	}
+	if (cnt > cnt_max) {
+		cnt_max = cnt;
+		i_max = i - cnt;
+	}
+	if (i_max == 0 &&
+		/* IPv4-translated: ::ffff:0:a.b.c.d */
+		((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) ||
+		/* IPv4-mapped: ::ffff:a.b.c.d */
+		(cnt_max == 5 && a[5] == 0xffff)))
+		a4 = (uint8*) (a + 6);
+
+	for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+		if ((uint8*) (a + i) == a4) {
+			snprintf(p, 16, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]);
+			break;
+		} else if (i == i_max) {
+			*p++ = ':';
+			i += cnt_max - 1;
+			p[0] = ':';
+			p[1] = '\0';
+		} else {
+			if (i)
+				*p++ = ':';
+			p += snprintf(p, 8, "%x", ntoh16(a[i]));
+		}
+	}
+
+	return buf;
+}
+#ifdef BCMDRIVER
+
+void
+bcm_mdelay(uint ms)
+{
+	uint i;
+
+	for (i = 0; i < ms; i++) {
+		OSL_DELAY(1000);
+	}
+}
+
+
+
+
+
+#if defined(DHD_DEBUG)
+/* pretty hex print a pkt buffer chain */
+void
+prpkt(const char *msg, osl_t *osh, void *p0)
+{
+	void *p;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	for (p = p0; p; p = PKTNEXT(osh, p))
+		prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+}
+#endif	
+
+/* Takes an Ethernet frame and sets the out-of-band PKTPRIO.
+ * Also updates the inplace vlan tag if requested.
+ * For debugging, it returns an indication of what it did.
+ */
+uint BCMFASTPATH
+pktsetprio(void *pkt, bool update_vtag)
+{
+	struct ether_header *eh;
+	struct ethervlan_header *evh;
+	uint8 *pktdata;
+	int priority = 0;
+	int rc = 0;
+
+	pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+	ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+
+	eh = (struct ether_header *) pktdata;
+
+	if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
+		uint16 vlan_tag;
+		int vlan_prio, dscp_prio = 0;
+
+		evh = (struct ethervlan_header *)eh;
+
+		vlan_tag = ntoh16(evh->vlan_tag);
+		vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+
+		if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
+			(evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+			uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+			uint8 tos_tc = IP_TOS46(ip_body);
+			dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+		}
+
+		/* DSCP priority gets precedence over 802.1P (vlan tag) */
+		if (dscp_prio != 0) {
+			priority = dscp_prio;
+			rc |= PKTPRIO_VDSCP;
+		} else {
+			priority = vlan_prio;
+			rc |= PKTPRIO_VLAN;
+		}
+		/*
+		 * If the DSCP priority is not the same as the VLAN priority,
+		 * then overwrite the priority field in the vlan tag, with the
+		 * DSCP priority value. This is required for Linux APs because
+		 * the VLAN driver on Linux overwrites the skb->priority field
+		 * with the priority value in the vlan tag
+		 */
+		if (update_vtag && (priority != vlan_prio)) {
+			vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+			vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+			evh->vlan_tag = hton16(vlan_tag);
+			rc |= PKTPRIO_UPD;
+		}
+	} else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
+		(eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+		uint8 *ip_body = pktdata + sizeof(struct ether_header);
+		uint8 tos_tc = IP_TOS46(ip_body);
+		uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
+		switch (dscp) {
+		case DSCP_EF:
+			priority = PRIO_8021D_VO;
+			break;
+		case DSCP_AF31:
+		case DSCP_AF32:
+		case DSCP_AF33:
+			priority = PRIO_8021D_CL;
+			break;
+		case DSCP_AF21:
+		case DSCP_AF22:
+		case DSCP_AF23:
+		case DSCP_AF11:
+		case DSCP_AF12:
+		case DSCP_AF13:
+			priority = PRIO_8021D_EE;
+			break;
+		default:
+#ifndef CUSTOM_DSCP_TO_PRIO_MAPPING
+			priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+#else
+			priority = (int)dscp2priomap[((tos_tc >> IPV4_TOS_DSCP_SHIFT)
+				& CUST_IPV4_TOS_PREC_MASK)];
+#endif
+			break;
+		}
+
+		rc |= PKTPRIO_DSCP;
+	}
+
+	ASSERT(priority >= 0 && priority <= MAXPRIO);
+	PKTSETPRIO(pkt, priority);
+	return (rc | priority);
+}
+
+/* Returns TRUE and DSCP if IP header found, FALSE otherwise.
+ */
+bool BCMFASTPATH
+pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp)
+{
+	struct ether_header *eh;
+	struct ethervlan_header *evh;
+	uint8 *ip_body;
+	bool rc = FALSE;
+
+	/* minimum length is ether header and IP header */
+	if (pktlen < sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN)
+		return FALSE;
+
+	eh = (struct ether_header *) pktdata;
+
+	if (eh->ether_type == HTON16(ETHER_TYPE_IP)) {
+		ip_body = pktdata + sizeof(struct ether_header);
+		*dscp = IP_DSCP46(ip_body);
+		rc = TRUE;
+	}
+	else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) {
+		evh = (struct ethervlan_header *)eh;
+
+		/* minimum length is ethervlan header and IP header */
+		if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN &&
+			evh->ether_type == HTON16(ETHER_TYPE_IP)) {
+			ip_body = pktdata + sizeof(struct ethervlan_header);
+			*dscp = IP_DSCP46(ip_body);
+			rc = TRUE;
+		}
+	}
+
+	return rc;
+}
+
+/* The 0.5KB string table is not removed by the compiler even though it's unused */
+
+static char bcm_undeferrstr[32];
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert the error codes into related error strings  */
+const char *
+bcmerrorstr(int bcmerror)
+{
+	/* check if someone added a bcmerror code but forgot to add errorstring */
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+	if (bcmerror > 0 || bcmerror < BCME_LAST) {
+		snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
+		return bcm_undeferrstr;
+	}
+
+	ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+	return bcmerrorstrtable[-bcmerror];
+}
+
+
+
+/* iovar table lookup */
+/* could mandate sorted tables and do a binary search */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+	const bcm_iovar_t *vi;
+	const char *lookup_name;
+
+	/* skip any ':' delimited option prefixes */
+	lookup_name = strrchr(name, ':');
+	if (lookup_name != NULL)
+		lookup_name++;
+	else
+		lookup_name = name;
+
+	ASSERT(table != NULL);
+
+	for (vi = table; vi->name; vi++) {
+		if (!strcmp(vi->name, lookup_name))
+			return vi;
+	}
+	/* ran to end of table */
+
+	return NULL; /* var name not found */
+}
+
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+
+	/* length check on io buf */
+	switch (vi->type) {
+	case IOVT_BOOL:
+	case IOVT_INT8:
+	case IOVT_INT16:
+	case IOVT_INT32:
+	case IOVT_UINT8:
+	case IOVT_UINT16:
+	case IOVT_UINT32:
+		/* all integers are int32 sized args at the ioctl interface */
+		if (len < (int)sizeof(int)) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_BUFFER:
+		/* buffer must meet minimum length requirement */
+		if (len < vi->minlen) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_VOID:
+		if (!set) {
+			/* Cannot return nil... */
+			bcmerror = BCME_UNSUPPORTED;
+		} else if (len) {
+			/* Set is an action w/o parameters */
+			bcmerror = BCME_BUFTOOLONG;
+		}
+		break;
+
+	default:
+		/* unknown type for length check in iovar info */
+		ASSERT(0);
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
+
+#endif	/* BCMDRIVER */
+
+
+uint8 *
+bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst)
+{
+	uint8 *new_dst = dst;
+	bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst;
+
+	/* dst buffer should always be valid */
+	ASSERT(dst);
+
+	/* data len must be within valid range */
+	ASSERT((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE));
+
+	/* source data buffer pointer should be valid, unless datalen is 0
+	 * meaning no data with this TLV
+	 */
+	ASSERT((data != NULL) || (datalen == 0));
+
+	/* only do work if the inputs are valid
+	 * - must have a dst to write to AND
+	 * - datalen must be within range AND
+	 * - the source data pointer must be non-NULL if datalen is non-zero
+	 * (this last condition detects datalen > 0 with a NULL data pointer)
+	 */
+	if ((dst != NULL) &&
+	    ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) &&
+	    ((data != NULL) || (datalen == 0))) {
+
+	        /* write type, len fields */
+		dst_tlv->id = (uint8)type;
+	        dst_tlv->len = (uint8)datalen;
+
+		/* if data is present, copy to the output buffer and update
+		 * pointer to output buffer
+		 */
+		if (datalen > 0) {
+
+			memcpy(dst_tlv->data, data, datalen);
+		}
+
+		/* update the output destination pointer to point past
+		 * the TLV written
+		 */
+		new_dst = dst + BCM_TLV_HDR_SIZE + datalen;
+	}
+
+	return (new_dst);
+}
+
+uint8 *
+bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_maxlen)
+{
+	uint8 *new_dst = dst;
+
+	if ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) {
+
+		/* if len + tlv hdr len is more than destlen, don't do anything
+		 * just return the buffer untouched
+		 */
+		if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) {
+
+			new_dst = bcm_write_tlv(type, data, datalen, dst);
+		}
+	}
+
+	return (new_dst);
+}
+
+uint8 *
+bcm_copy_tlv(const void *src, uint8 *dst)
+{
+	uint8 *new_dst = dst;
+	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+	uint totlen;
+
+	ASSERT(dst && src);
+	if (dst && src) {
+
+		totlen = BCM_TLV_HDR_SIZE + src_tlv->len;
+		memcpy(dst, src_tlv, totlen);
+		new_dst = dst + totlen;
+	}
+
+	return (new_dst);
+}
+
+
+uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen)
+{
+	uint8 *new_dst = dst;
+	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+
+	ASSERT(src);
+	if (src) {
+		if (bcm_valid_tlv(src_tlv, dst_maxlen)) {
+			new_dst = bcm_copy_tlv(src, dst);
+		}
+	}
+
+	return (new_dst);
+}
+
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ *       x^8 + x^7 +x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint8 crc8_table[256] = {
+    0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+    0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+    0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+    0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+    0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+    0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+    0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+    0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+    0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+    0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+    0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+    0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+    0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+    0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+    0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+    0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+    0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+    0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+    0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+    0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+    0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+    0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+    0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+    0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+    0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+    0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+    0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+    0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+    0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+    0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+    0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+    0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
+
+#define CRC_INNER_LOOP(n, c, x) \
+	(c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8
+hndcrc8(
+	uint8 *pdata,	/* pointer to array of data to process */
+	uint  nbytes,	/* number of input data bytes to process */
+	uint8 crc	/* either CRC8_INIT_VALUE or previous return value */
+)
+{
+	/* hard code the crc loop instead of using CRC_INNER_LOOP macro
+	 * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+	 */
+	while (nbytes-- > 0)
+		crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+	return crc;
+}
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ *       x^16 + x^12 +x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint16 crc16_table[256] = {
+    0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+    0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+    0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+    0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+    0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+    0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+    0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+    0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+    0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+    0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+    0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+    0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+    0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+    0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+    0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+    0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+    0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+    0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+    0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+    0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+    0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+    0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+    0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+    0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+    0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+    0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+    0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+    0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+    0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+    0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+    0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+    0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
+
+uint16
+hndcrc16(
+    uint8 *pdata,  /* pointer to array of data to process */
+    uint nbytes, /* number of input data bytes to process */
+    uint16 crc     /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+	while (nbytes-- > 0)
+		CRC_INNER_LOOP(16, crc, *pdata++);
+	return crc;
+}
+
+static const uint32 crc32_table[256] = {
+    0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+    0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+    0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+    0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+    0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+    0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+    0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+    0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+    0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+    0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+    0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+    0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+    0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+    0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+    0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+    0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+    0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+    0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+    0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+    0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+    0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+    0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+    0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+    0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+    0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+    0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+    0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+    0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+    0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+    0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+    0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+    0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+    0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+    0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+    0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+    0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+    0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+    0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+    0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+    0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+    0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+    0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+    0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+    0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+    0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+    0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+    0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+    0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+    0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+    0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+    0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+    0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+    0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+    0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+    0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+    0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+    0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+    0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+    0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+    0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+    0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+    0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+    0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+    0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+/*
+ * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if
+ * accumulating over multiple pieces.
+ */
+uint32
+hndcrc32(uint8 *pdata, uint nbytes, uint32 crc)
+{
+	uint8 *pend;
+	pend = pdata + nbytes;
+	while (pdata < pend)
+		CRC_INNER_LOOP(32, crc, *pdata++);
+
+	return crc;
+}
+
+#ifdef notdef
+#define CLEN 	1499 	/*  CRC Length */
+#define CBUFSIZ 	(CLEN+4)
+#define CNBUFS		5 /* # of bufs */
+
+void
+testcrc32(void)
+{
+	uint j, k, l;
+	uint8 *buf;
+	uint len[CNBUFS];
+	uint32 crcr;
+	uint32 crc32tv[CNBUFS] =
+		{0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+	ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+	/* step through all possible alignments */
+	for (l = 0; l <= 4; l++) {
+		for (j = 0; j < CNBUFS; j++) {
+			len[j] = CLEN;
+			for (k = 0; k < len[j]; k++)
+				*(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+		}
+
+		for (j = 0; j < CNBUFS; j++) {
+			crcr = hndcrc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+			ASSERT(crcr == crc32tv[j]);
+		}
+	}
+
+	MFREE(buf, CBUFSIZ*CNBUFS);
+	return;
+}
+#endif /* notdef */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+	int len;
+
+	/* validate current elt */
+	if (!bcm_valid_tlv(elt, *buflen)) {
+		return NULL;
+	}
+
+	/* advance to next elt */
+	len = elt->len;
+	elt = (bcm_tlv_t*)(elt->data + len);
+	*buflen -= (TLV_HDR_LEN + len);
+
+	/* validate next elt */
+	if (!bcm_valid_tlv(elt, *buflen)) {
+		return NULL;
+	}
+
+	return elt;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		int len = elt->len;
+
+		/* validate remaining totlen */
+		if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
+
+			return (elt);
+		}
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+
+	return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ * return NULL if not found or length field < min_varlen
+ */
+bcm_tlv_t *
+bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen)
+{
+	bcm_tlv_t * ret = bcm_parse_tlvs(buf, buflen, key);
+	if (ret == NULL || ret->len < min_bodylen) {
+		return NULL;
+	}
+	return ret;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag.  Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		uint id = elt->id;
+		int len = elt->len;
+
+		/* Punt if we start seeing IDs > than target key */
+		if (id > key) {
+			return (NULL);
+		}
+
+		/* validate remaining totlen */
+		if ((id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
+			return (elt);
+		}
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+	return NULL;
+}
+#endif	/* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+	defined(DHD_DEBUG)
+int
+bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, int len)
+{
+	int i, slen = 0;
+	uint32 bit, mask;
+	const char *name;
+	mask = bd->mask;
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0;  (name = bd->bitfield[i].name) != NULL; i++) {
+		bit = bd->bitfield[i].bit;
+		if ((flags & mask) == bit) {
+			if (len > (int)strlen(name)) {
+				slen = strlen(name);
+				strncpy(buf, name, slen+1);
+			}
+			break;
+		}
+	}
+	return slen;
+}
+
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
+{
+	int i;
+	char* p = buf;
+	char hexstr[16];
+	int slen = 0, nlen = 0;
+	uint32 bit;
+	const char* name;
+
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0; flags != 0; i++) {
+		bit = bd[i].bit;
+		name = bd[i].name;
+		if (bit == 0 && flags != 0) {
+			/* print any unnamed bits */
+			snprintf(hexstr, 16, "0x%X", flags);
+			name = hexstr;
+			flags = 0;	/* exit loop */
+		} else if ((flags & bit) == 0)
+			continue;
+		flags &= ~bit;
+		nlen = strlen(name);
+		slen += nlen;
+		/* count btwn flag space */
+		if (flags != 0)
+			slen += 1;
+		/* need NULL char as well */
+		if (len <= slen)
+			break;
+		/* copy NULL char but don't count it */
+		strncpy(p, name, nlen + 1);
+		p += nlen;
+		/* copy btwn flag space and NULL char */
+		if (flags != 0)
+			p += snprintf(p, 2, " ");
+	}
+
+	/* indicate the str was too short */
+	if (flags != 0) {
+		if (len < 2)
+			p -= 2 - len;	/* overwrite last char */
+		p += snprintf(p, 2, ">");
+	}
+
+	return (int)(p - buf);
+}
+#endif /* WLMSG_PRHDRS || WLMSG_PRPKT || WLMSG_ASSOC || DHD_DEBUG */
+
+/* print bytes formatted as hex to a string. return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+	int i;
+	char *p = str;
+	const uint8 *src = (const uint8*)bytes;
+
+	for (i = 0; i < len; i++) {
+		p += snprintf(p, 3, "%02X", *src);
+		src++;
+	}
+	return (int)(p - str);
+}
+
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, uchar *buf, uint nbytes)
+{
+	char line[128], *p;
+	int len = sizeof(line);
+	int nchar;
+	uint i;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	p = line;
+	for (i = 0; i < nbytes; i++) {
+		if (i % 16 == 0) {
+			nchar = snprintf(p, len, "  %04d: ", i);	/* line prefix */
+			p += nchar;
+			len -= nchar;
+		}
+		if (len > 0) {
+			nchar = snprintf(p, len, "%02x ", buf[i]);
+			p += nchar;
+			len -= nchar;
+		}
+
+		if (i % 16 == 15) {
+			printf("%s\n", line);		/* flush line */
+			p = line;
+			len = sizeof(line);
+		}
+	}
+
+	/* flush last partial line */
+	if (p != line)
+		printf("%s\n", line);
+}
+
+static const char *crypto_algo_names[] = {
+	"NONE",
+	"WEP1",
+	"TKIP",
+	"WEP128",
+	"AES_CCM",
+	"AES_OCB_MSDU",
+	"AES_OCB_MPDU",
+#ifdef BCMCCX
+	"CKIP",
+	"CKIP_MMH",
+	"WEP_MMH",
+	"NALG",
+#else
+	"NALG",
+	"UNDEF",
+	"UNDEF",
+	"UNDEF",
+#endif /* BCMCCX */
+	"WAPI",
+	"PMK",
+	"BIP",
+	"AES_GCM",
+	"AES_CCM256",
+	"AES_GCM256",
+	"BIP_CMAC256",
+	"BIP_GMAC",
+	"BIP_GMAC256",
+	"UNDEF"
+};
+
+const char *
+bcm_crypto_algo_name(uint algo)
+{
+	return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
+
+
+char *
+bcm_chipname(uint chipid, char *buf, uint len)
+{
+	const char *fmt;
+
+	fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+	snprintf(buf, len, fmt, chipid);
+	return buf;
+}
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+	if (brev < 0x100)
+		snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+	else
+		snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+	return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
+{
+	uint len, max_len;
+	char c;
+
+	len = (uint)strlen(buf);
+
+	max_len = BUFSIZE_TODUMP_ATONCE;
+
+	while (len > max_len) {
+		c = buf[max_len];
+		buf[max_len] = '\0';
+		printf("%s", buf);
+		buf[max_len] = c;
+
+		buf += max_len;
+		len -= max_len;
+	}
+	/* print the remaining string */
+	printf("%s\n", buf);
+	return;
+}
+
+/* routine to dump fields in a fielddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+	char *buf, uint32 bufsize)
+{
+	uint  filled_len;
+	int len;
+	struct fielddesc *cur_ptr;
+
+	filled_len = 0;
+	cur_ptr = fielddesc_array;
+
+	while (bufsize > 1) {
+		if (cur_ptr->nameandfmt == NULL)
+			break;
+		len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+		               read_rtn(arg0, arg1, cur_ptr->offset));
+		/* check for snprintf overflow or error */
+		if (len < 0 || (uint32)len >= bufsize)
+			len = bufsize - 1;
+		buf += len;
+		bufsize -= len;
+		filled_len += len;
+		cur_ptr++;
+	}
+	return filled_len;
+}
+
+uint
+bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+	uint len;
+
+	len = (uint)strlen(name) + 1;
+
+	if ((len + datalen) > buflen)
+		return 0;
+
+	strncpy(buf, name, buflen);
+
+	/* append data onto the end of the name string */
+	memcpy(&buf[len], data, datalen);
+	len += datalen;
+
+	return len;
+}
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
+
+#define QDBM_OFFSET 153		/* Offset for first entry */
+#define QDBM_TABLE_LEN 40	/* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: 	+0 	+1 	+2 	+3 	+4 	+5 	+6 	+7 */
+/* 153: */      6683,	7079,	7499,	7943,	8414,	8913,	9441,	10000,
+/* 161: */      10593,	11220,	11885,	12589,	13335,	14125,	14962,	15849,
+/* 169: */      16788,	17783,	18836,	19953,	21135,	22387,	23714,	25119,
+/* 177: */      26607,	28184,	29854,	31623,	33497,	35481,	37584,	39811,
+/* 185: */      42170,	44668,	47315,	50119,	53088,	56234,	59566,	63096
+};
+
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+	uint factor = 1;
+	int idx = qdbm - QDBM_OFFSET;
+
+	if (idx >= QDBM_TABLE_LEN) {
+		/* clamp to max uint16 mW value */
+		return 0xFFFF;
+	}
+
+	/* scale the qdBm index up into the range of the table, 0-39,
+	 * where an offset of 40 qdBm corresponds to a factor of 10 in mW.
+	 */
+	while (idx < 0) {
+		idx += 40;
+		factor *= 10;
+	}
+
+	/* return the mW value scaled down to the correct factor of 10,
+	 * adding in factor/2 to get proper rounding.
+	 */
+	return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
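+
+/* Worked example of the table scaling above (values taken from the table):
+ * qdbm = 60 (i.e. 15 dBm) gives idx = 60 - 153 = -93; adding 40 three times
+ * yields idx = 27 with factor = 1000, so the result is
+ * (nqdBm_to_mW_map[27] + 500) / 1000 = (31623 + 500) / 1000 = 32 mW,
+ * which matches 10^(15/10) ~= 31.6 mW rounded to a uint16.
+ */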
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+	uint8 qdbm;
+	int offset;
+	uint mw_uint = mw;
+	uint boundary;
+
+	/* handle boundary case */
+	if (mw_uint <= 1)
+		return 0;
+
+	offset = QDBM_OFFSET;
+
+	/* move mw into the range of the table */
+	while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+		mw_uint *= 10;
+		offset -= 40;
+	}
+
+	for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+		boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+		                                    nqdBm_to_mW_map[qdbm])/2;
+		if (mw_uint < boundary) break;
+	}
+
+	qdbm += (uint8)offset;
+
+	return (qdbm);
+}
+
+
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+	uint bitcount = 0, i;
+	uint8 tmp;
+	for (i = 0; i < length; i++) {
+		tmp = bitmap[i];
+		while (tmp) {
+			bitcount++;
+			tmp &= (tmp - 1);
+		}
+	}
+	return bitcount;
+}
+
+#ifdef BCMDRIVER
+
+/* Initialization of bcmstrbuf structure */
+void
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+	b->origsize = b->size = size;
+	b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+	va_list ap;
+	int r;
+
+	va_start(ap, fmt);
+
+	r = vsnprintf(b->buf, b->size, fmt, ap);
+
+	/* Non-ANSI C99 compliant implementations return -1,
+	 * ANSI compliant ones return r >= b->size,
+	 * and bcmstdlib returns 0; handle all cases.
+	 */
+	/* r == 0 is also the case when strlen(fmt) is zero,
+	 * typically when "" is passed as the argument.
+	 */
+	if ((r == -1) || (r >= (int)b->size)) {
+		b->size = 0;
+	} else {
+		b->size -= r;
+		b->buf += r;
+	}
+
+	va_end(ap);
+
+	return r;
+}
+
+void
+bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len)
+{
+	int i;
+
+	if (msg != NULL && msg[0] != '\0')
+		bcm_bprintf(b, "%s", msg);
+	for (i = 0; i < len; i ++)
+		bcm_bprintf(b, "%02X", buf[i]);
+	if (newline)
+		bcm_bprintf(b, "\n");
+}
+
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+	int i;
+
+	for (i = 0; i < num_bytes; i++) {
+		num[i] += amount;
+		if (num[i] >= amount)
+			break;
+		amount = 1;
+	}
+}
+
+int
+bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes)
+{
+	int i;
+
+	for (i = nbytes - 1; i >= 0; i--) {
+		if (arg1[i] != arg2[i])
+			return (arg1[i] - arg2[i]);
+	}
+	return 0;
+}
+
+void
+bcm_print_bytes(const char *name, const uchar *data, int len)
+{
+	int i;
+	int per_line = 0;
+
+	printf("%s: %d \n", name ? name : "", len);
+	for (i = 0; i < len; i++) {
+		printf("%02x ", *data++);
+		per_line++;
+		if (per_line == 16) {
+			per_line = 0;
+			printf("\n");
+		}
+	}
+	printf("\n");
+}
+
+/* Look for vendor-specific IE with specified OUI and optional type */
+bcm_tlv_t *
+bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, int type_len)
+{
+	bcm_tlv_t *ie;
+	uint8 ie_len;
+
+	ie = (bcm_tlv_t*)tlvs;
+
+	/* make sure we are looking at a valid IE */
+	if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) {
+		return NULL;
+	}
+
+	/* Walk through the IEs looking for an OUI match */
+	do {
+		ie_len = ie->len;
+		if ((ie->id == DOT11_MNG_PROPR_ID) &&
+		    (ie_len >= (DOT11_OUI_LEN + type_len)) &&
+		    !bcmp(ie->data, voui, DOT11_OUI_LEN))
+		{
+			/* compare optional type */
+			if (type_len == 0 ||
+			    !bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) {
+				return (ie);		/* a match */
+			}
+		}
+	} while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL);
+
+	return NULL;
+}
+
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN	((4 * DOT11_MAX_SSID_LEN) + 1)
+
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+	uint i, c;
+	char *p = buf;
+	char *endp = buf + SSID_FMT_BUF_LEN;
+
+	if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+	for (i = 0; i < ssid_len; i++) {
+		c = (uint)ssid[i];
+		if (c == '\\') {
+			*p++ = '\\';
+			*p++ = '\\';
+		} else if (bcm_isprint((uchar)c)) {
+			*p++ = (char)c;
+		} else {
+			p += snprintf(p, (endp - p), "\\x%02X", c);
+		}
+	}
+	*p = '\0';
+	ASSERT(p < endp);
+
+	return (int)(p - buf);
+}
+#endif /* WLTINYDUMP || WLMSG_INFORM || WLMSG_ASSOC || WLMSG_PRPKT || WLMSG_WSEC */
+
+#endif /* BCMDRIVER */
+
+/*
+ * process_nvram_vars: takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
+ * It also accepts nvram files which are already in the format of <var1>=<value1>\0<var2>=<value2>\0.
+ * Removes carriage returns, empty lines, and comment lines, and converts newlines to NULs.
+ * Shortens the buffer as needed and pads with NULs.  End of buffer is marked by two NULs.
+*/
+
+unsigned int
+process_nvram_vars(char *varbuf, unsigned int len)
+{
+	char *dp;
+	bool findNewline;
+	int column;
+	unsigned int buf_len, n;
+	unsigned int pad = 0;
+
+	dp = varbuf;
+
+	findNewline = FALSE;
+	column = 0;
+
+	for (n = 0; n < len; n++) {
+		if (varbuf[n] == '\r')
+			continue;
+		if (findNewline && varbuf[n] != '\n')
+			continue;
+		findNewline = FALSE;
+		if (varbuf[n] == '#') {
+			findNewline = TRUE;
+			continue;
+		}
+		if (varbuf[n] == '\n') {
+			if (column == 0)
+				continue;
+			*dp++ = 0;
+			column = 0;
+			continue;
+		}
+		*dp++ = varbuf[n];
+		column++;
+	}
+	buf_len = (unsigned int)(dp - varbuf);
+	if (buf_len % 4) {
+		pad = 4 - buf_len % 4;
+		if (pad && (buf_len + pad <= len)) {
+			buf_len += pad;
+		}
+	}
+
+	while (dp < varbuf + n)
+		*dp++ = 0;
+
+	return buf_len;
+}
+
+/* calculate a * b + c */
+void
+bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c)
+{
+#define FORMALIZE(var) {cc += (var & 0x80000000) ? 1 : 0; var &= 0x7fffffff;}
+	uint32 r1, r0;
+	uint32 a1, a0, b1, b0, t, cc = 0;
+
+	a1 = a >> 16;
+	a0 = a & 0xffff;
+	b1 = b >> 16;
+	b0 = b & 0xffff;
+
+	r0 = a0 * b0;
+	FORMALIZE(r0);
+
+	t = (a1 * b0) << 16;
+	FORMALIZE(t);
+
+	r0 += t;
+	FORMALIZE(r0);
+
+	t = (a0 * b1) << 16;
+	FORMALIZE(t);
+
+	r0 += t;
+	FORMALIZE(r0);
+
+	FORMALIZE(c);
+
+	r0 += c;
+	FORMALIZE(r0);
+
+	r0 |= (cc % 2) ? 0x80000000 : 0;
+	r1 = a1 * b1 + ((a1 * b0) >> 16) + ((b1 * a0) >> 16) + (cc / 2);
+
+	*r_high = r1;
+	*r_low = r0;
+}
+
+/* calculate a / b */
+void
+bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+	uint32 a1 = a_high, a0 = a_low, r0 = 0;
+
+	if (b < 2)
+		return;
+
+	while (a1 != 0) {
+		r0 += (0xffffffff / b) * a1;
+		bcm_uint64_multiple_add(&a1, &a0, ((0xffffffff % b) + 1) % b, a1, a0);
+	}
+
+	r0 += a0 / b;
+	*r = r0;
+}
+
+#ifndef setbit /* As in the header file */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+/* Set bit in byte array. */
+void
+setbit(void *array, uint bit)
+{
+	((uint8 *)array)[bit / NBBY] |= 1 << (bit % NBBY);
+}
+
+/* Clear bit in byte array. */
+void
+clrbit(void *array, uint bit)
+{
+	((uint8 *)array)[bit / NBBY] &= ~(1 << (bit % NBBY));
+}
+
+/* Test if bit is set in byte array. */
+bool
+isset(const void *array, uint bit)
+{
+	return (((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY)));
+}
+
+/* Test if bit is clear in byte array. */
+bool
+isclr(const void *array, uint bit)
+{
+	return ((((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))) == 0);
+}
+#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */
+#endif /* setbit */
+
+void
+set_bitrange(void *array, uint start, uint end, uint maxbit)
+{
+	uint startbyte = start/NBBY;
+	uint endbyte = end/NBBY;
+	uint i, startbytelastbit, endbytestartbit;
+
+	if (end >= start) {
+		if (endbyte - startbyte > 1)
+		{
+			startbytelastbit = (startbyte+1)*NBBY - 1;
+			endbytestartbit = endbyte*NBBY;
+			for (i = startbyte+1; i < endbyte; i++)
+				((uint8 *)array)[i] = 0xFF;
+			for (i = start; i <= startbytelastbit; i++)
+				setbit(array, i);
+			for (i = endbytestartbit; i <= end; i++)
+				setbit(array, i);
+		} else {
+			for (i = start; i <= end; i++)
+				setbit(array, i);
+		}
+	}
+	else {
+		set_bitrange(array, start, maxbit, maxbit);
+		set_bitrange(array, 0, end, maxbit);
+	}
+}
+
+void
+bcm_bitprint32(const uint32 u32)
+{
+	int i;
+	for (i = NBITS(uint32) - 1; i >= 0; i--) {
+		isbitset(u32, i) ? printf("1") : printf("0");
+		if ((i % NBBY) == 0) printf(" ");
+	}
+	printf("\n");
+}
+
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16
+bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum)
+{
+	while (len > 1) {
+		sum += (buf[0] << 8) | buf[1];
+		buf += 2;
+		len -= 2;
+	}
+
+	if (len > 0) {
+		sum += (*buf) << 8;
+	}
+
+	while (sum >> 16) {
+		sum = (sum & 0xffff) + (sum >> 16);
+	}
+
+	return ((uint16)~sum);
+}
+
+#ifdef BCMDRIVER
+/*
+ * Hierarchical Multiword bitmap based small id allocator.
+ *
+ * Multilevel hierarchy bitmap. (maximum 2 levels)
+ * First hierarchy uses a multiword bitmap to identify 32bit words in the
+ * second hierarchy that have at least a single bit set. Each bit in a word of
+ * the second hierarchy represents a unique ID that may be allocated.
+ *
+ * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed.
+ * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word
+ * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs.
+ * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap words identifying the first
+ *                       non-zero bitmap word carrying at least one free ID.
+ * BCM_MWBMAP_SHIFT_OP:  Used in MOD, DIV and MUL operations.
+ * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID
+ *
+ * Design Notes:
+ * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory. When defined, the number of
+ * set bits is computed on each allocation and deallocation, requiring 4 array
+ * indexed accesses and 3 arithmetic operations. When not defined, a running
+ * count of set bits is maintained instead, needing up to 32 bytes per 1024 IDs.
+ * In a 4K max ID allocator, up to 128 bytes are hence used per instantiation.
+ * In a memory limited system, e.g. dongle builds, CPU may be traded for memory
+ * by defining BCM_MWBMAP_USE_CNTSETBITS.
+ *
+ * Note: wd_bitmap[] is statically declared and is not ROM friendly; the array
+ * size is fixed. There is no intention to support allocation of more than 4K
+ * indices. ID allocators for ranges smaller than 4K waste only 12 bytes, with
+ * the saving of not having to use an indirect access, had the array been
+ * dynamically allocated.
+ */
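+/* As a concrete illustration of the two-level layout: managing the maximum of
+ * 4096 IDs takes 4096/32 = 128 second-level id_bitmap words, which in turn are
+ * tracked by 128/32 = 4 first-level wd_bitmap words; a set bit in the first
+ * level means the corresponding second-level word still carries at least one
+ * free ID.
+ */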
+#define BCM_MWBMAP_ITEMS_MAX    (4 * 1024)  /* May increase to 16K */
+
+#define BCM_MWBMAP_BITS_WORD    (NBITS(uint32))
+#define BCM_MWBMAP_WORDS_MAX    (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_WDMAP_MAX    (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_SHIFT_OP     (5)
+#define BCM_MWBMAP_MODOP(ix)    ((ix) & (BCM_MWBMAP_BITS_WORD - 1))
+#define BCM_MWBMAP_DIVOP(ix)    ((ix) >> BCM_MWBMAP_SHIFT_OP)
+#define BCM_MWBMAP_MULOP(ix)    ((ix) << BCM_MWBMAP_SHIFT_OP)
+
+/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */
+#define BCM_MWBMAP_PTR(hdl)		((struct bcm_mwbmap *)(hdl))
+#define BCM_MWBMAP_HDL(ptr)		((void *)(ptr))
+
+#if defined(BCM_MWBMAP_DEBUG)
+#define BCM_MWBMAP_AUDIT(mwb) \
+	do { \
+		ASSERT((mwb != NULL) && \
+		       (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \
+		bcm_mwbmap_audit(mwb); \
+	} while (0)
+#define MWBMAP_ASSERT(exp)		ASSERT(exp)
+#define MWBMAP_DBG(x)           printf x
+#else   /* !BCM_MWBMAP_DEBUG */
+#define BCM_MWBMAP_AUDIT(mwb)   do {} while (0)
+#define MWBMAP_ASSERT(exp)		do {} while (0)
+#define MWBMAP_DBG(x)
+#endif  /* !BCM_MWBMAP_DEBUG */
+
+
+typedef struct bcm_mwbmap {     /* Hierarchical multiword bitmap allocator    */
+	uint16 wmaps;               /* Total number of words in free wd bitmap    */
+	uint16 imaps;               /* Total number of words in free id bitmap    */
+	int16  ifree;               /* Count of free indices. Used only in audits */
+	uint16 total;               /* Total indices managed by multiword bitmap  */
+
+	void * magic;               /* Audit handle parameter from user           */
+
+	uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level: non-empty word bitmap */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+	int8   wd_count[BCM_MWBMAP_WORDS_MAX];  /* free id running count, 1st lvl */
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+
+	uint32 id_bitmap[0];        /* Second level bitmap                        */
+} bcm_mwbmap_t;
+
+/* Incarnate a hierarchical multiword bitmap based small index allocator. */
+struct bcm_mwbmap *
+bcm_mwbmap_init(osl_t *osh, uint32 items_max)
+{
+	struct bcm_mwbmap * mwbmap_p;
+	uint32 wordix, size, words, extra;
+
+	/* Implementation Constraint: Uses 32bit word bitmap */
+	MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U);
+	MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U);
+	MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX));
+	MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U);
+
+	ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX);
+
+	/* Determine the number of words needed in the multiword bitmap */
+	extra = BCM_MWBMAP_MODOP(items_max);
+	words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U);
+
+	/* Allocate runtime state of multiword bitmap */
+	/* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */
+	size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words);
+	mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size);
+	if (mwbmap_p == (bcm_mwbmap_t *)NULL) {
+		ASSERT(0);
+		goto error1;
+	}
+	memset(mwbmap_p, 0, size);
+
+	/* Initialize runtime multiword bitmap state */
+	mwbmap_p->imaps = (uint16)words;
+	mwbmap_p->ifree = (int16)items_max;
+	mwbmap_p->total = (uint16)items_max;
+
+	/* Setup magic, for use in audit of handle */
+	mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p);
+
+	/* Setup the second level bitmap of free indices */
+	/* Mark all indices as available */
+	for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) {
+		mwbmap_p->id_bitmap[wordix] = (uint32)(~0U);
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+		mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD;
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+	}
+
+	/* Ensure that extra indices are tagged as un-available */
+	if (extra) { /* fixup the free ids in last bitmap and wd_count */
+		uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1];
+		*bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+		mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+	}
+
+	/* Setup the first level bitmap hierarchy */
+	extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps);
+	words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U);
+
+	mwbmap_p->wmaps = (uint16)words;
+
+	for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++)
+		mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U);
+	if (extra) {
+		uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1];
+		*bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+	}
+
+	return mwbmap_p;
+
+error1:
+	return BCM_MWBMAP_INVALID_HDL;
+}
+
+/* Release resources used by multiword bitmap based small index allocator. */
+void
+bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap)
+	                     + (sizeof(uint32) * mwbmap_p->imaps));
+	return;
+}
+
+/* Allocate a unique small index using a multiword bitmap index allocator.    */
+uint32 BCMFASTPATH
+bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	/* Start with the first hierarchy */
+	for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) {
+
+		bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */
+
+		if (bitmap != 0U) {
+
+			uint32 count, bitix, *bitmap_p;
+
+			bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+			/* clear all except trailing 1 */
+			bitmap   = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+			MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+			              bcm_count_leading_zeros(bitmap));
+			bitix    = (BCM_MWBMAP_BITS_WORD - 1)
+			         - bcm_count_leading_zeros(bitmap); /* use asm clz */
+			wordix   = BCM_MWBMAP_MULOP(wordix) + bitix;
+
+			/* Clear bit if wd count is 0, without conditional branch */
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+			count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1;
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+			mwbmap_p->wd_count[wordix]--;
+			count = mwbmap_p->wd_count[wordix];
+			MWBMAP_ASSERT(count ==
+			              (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+			MWBMAP_ASSERT(count >= 0);
+
+			/* clear wd_bitmap bit if id_map count is 0 */
+			bitmap = (count == 0) << bitix;
+
+			MWBMAP_DBG((
+			    "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+			    bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count));
+
+			*bitmap_p ^= bitmap;
+
+			/* Use bitix in the second hierarchy */
+			bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+			bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */
+			MWBMAP_ASSERT(bitmap != 0U);
+
+			/* clear all except trailing 1 */
+			bitmap   = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+			MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+			              bcm_count_leading_zeros(bitmap));
+			bitix    = BCM_MWBMAP_MULOP(wordix)
+			         + (BCM_MWBMAP_BITS_WORD - 1)
+			         - bcm_count_leading_zeros(bitmap); /* use asm clz */
+
+			mwbmap_p->ifree--; /* decrement system wide free count */
+			MWBMAP_ASSERT(mwbmap_p->ifree >= 0);
+
+			MWBMAP_DBG((
+			    "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d",
+			    bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+			    mwbmap_p->ifree));
+
+			*bitmap_p ^= bitmap; /* mark as allocated = 1b0 */
+
+			return bitix;
+		}
+	}
+
+	ASSERT(mwbmap_p->ifree == 0);
+
+	return BCM_MWBMAP_INVALID_IDX;
+}
+
+/* Force an index at a specified position to be in use */
+void
+bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 count, wordix, bitmap, *bitmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	/* Start with second hierarchy */
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (uint32)(1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+	ASSERT((*bitmap_p & bitmap) == bitmap);
+
+	mwbmap_p->ifree--; /* update free count */
+	ASSERT(mwbmap_p->ifree >= 0);
+
+	MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d",
+	           bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+	           mwbmap_p->ifree));
+
+	*bitmap_p ^= bitmap; /* mark as in use */
+
+	/* Update first hierarchy */
+	bitix    = wordix;
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+	count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+	mwbmap_p->wd_count[bitix]--;
+	count = mwbmap_p->wd_count[bitix];
+	MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+	MWBMAP_ASSERT(count >= 0);
+
+	bitmap   = (count == 0) << BCM_MWBMAP_MODOP(bitix);
+
+	MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+	           BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
+	           (*bitmap_p) ^ bitmap, count));
+
+	*bitmap_p ^= bitmap; /* mark as in use */
+
+	return;
+}
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+void BCMFASTPATH
+bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap, *bitmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	/* Start with second level hierarchy */
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+	ASSERT((*bitmap_p & bitmap) == 0U);	/* ASSERT not a double free */
+
+	mwbmap_p->ifree++; /* update free count */
+	ASSERT(mwbmap_p->ifree <= mwbmap_p->total);
+
+	MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
+	           bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
+	           mwbmap_p->ifree));
+
+	*bitmap_p |= bitmap; /* mark as available */
+
+	/* Now update first level hierarchy */
+
+	bitix    = wordix;
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+	mwbmap_p->wd_count[bitix]++;
+#endif
+
+#if defined(BCM_MWBMAP_DEBUG)
+	{
+		uint32 count;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else  /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[bitix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+
+		MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);
+
+		MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
+		            bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count));
+	}
+#endif /* BCM_MWBMAP_DEBUG */
+
+	*bitmap_p |= bitmap;
+
+	return;
+}
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+uint32
+bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(mwbmap_p->ifree >= 0);
+
+	return mwbmap_p->ifree;
+}
+
+/* Determine whether an index is in use or free */
+bool
+bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+
+	return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
+}
+
+/* Debug dump a multiword bitmap allocator */
+void
+bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
+{
+	uint32 ix, count;
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n", mwbmap_p,
+	       mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);
+	for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
+		printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);
+		bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
+		printf("\n");
+	}
+	for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[ix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
+		bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
+		printf("\n");
+	}
+
+	return;
+}
+
+/* Audit a hierarchical multiword bitmap */
+void
+bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;
+
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {
+
+		bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+		for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
+			if ((*bitmap_p) & (1 << bitix)) {
+				idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+				count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				count = mwbmap_p->wd_count[idmap_ix];
+				ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				ASSERT(count != 0U);
+				free_cnt += count;
+			}
+		}
+	}
+
+	ASSERT((int)free_cnt == mwbmap_p->ifree);
+}
+/* END : Multiword bitmap based small id allocator. */
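+
+#ifdef notdef
+/* Illustrative sketch of the multiword bitmap allocator API above: create an
+ * allocator for a hypothetical number of ids, allocate one, and release it.
+ */
+static void
+examplemwbmap(osl_t *osh)
+{
+	struct bcm_mwbmap *hdl;
+	uint32 id;
+
+	hdl = bcm_mwbmap_init(osh, 256);
+	if (hdl == BCM_MWBMAP_INVALID_HDL)
+		return;
+
+	id = bcm_mwbmap_alloc(hdl);
+	if (id != BCM_MWBMAP_INVALID_IDX)
+		bcm_mwbmap_free(hdl, id);
+
+	bcm_mwbmap_fini(osh, hdl);
+}
+#endif /* notdef */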
+
+/* Simple 16bit Id allocator using a stack implementation. */
+typedef struct id16_map {
+	uint16  total;     /* total number of ids managed by allocator */
+	uint16  start;     /* start value of 16bit ids to be managed */
+	uint32  failures;  /* count of failures */
+	void    *dbg;      /* debug placeholder */
+	int     stack_idx; /* index into stack of available ids */
+	uint16  stack[0];  /* stack of 16 bit ids */
+} id16_map_t;
+
+#define ID16_MAP_SZ(items)      (sizeof(id16_map_t) + \
+	                             (sizeof(uint16) * (items)))
+
+#if defined(BCM_DBG)
+
+/* Uncomment BCM_DBG_ID16 to debug double free */
+/* #define BCM_DBG_ID16 */
+
+typedef struct id16_map_dbg {
+	uint16  total;
+	bool    avail[0];
+} id16_map_dbg_t;
+#define ID16_MAP_DBG_SZ(items)  (sizeof(id16_map_dbg_t) + \
+	                             (sizeof(bool) * (items)))
+#define ID16_MAP_MSG(x)         printf x
+#else
+#define ID16_MAP_MSG(x)
+#endif /* BCM_DBG */
+
+void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */
+id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16)
+{
+	uint16 idx, val16;
+	id16_map_t * id16_map;
+
+	ASSERT(total_ids > 0);
+	ASSERT((start_val16 + total_ids) < ID16_INVALID);
+
+	id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids));
+	if (id16_map == NULL) {
+		return NULL;
+	}
+
+	id16_map->total = total_ids;
+	id16_map->start = start_val16;
+	id16_map->failures = 0;
+	id16_map->dbg = NULL;
+
+	/* Populate stack with 16bit id values, commencing with start_val16 */
+	id16_map->stack_idx = 0;
+	val16 = start_val16;
+
+	for (idx = 0; idx < total_ids; idx++, val16++) {
+		id16_map->stack_idx = idx;
+		id16_map->stack[id16_map->stack_idx] = val16;
+	}
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids));
+
+	if (id16_map->dbg) {
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+		id16_map_dbg->total = total_ids;
+		for (idx = 0; idx < total_ids; idx++) {
+			id16_map_dbg->avail[idx] = TRUE;
+		}
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	return (void *)id16_map;
+}
+
+void * /* Destruct an id16 allocator instance */
+id16_map_fini(osl_t *osh, void * id16_map_hndl)
+{
+	uint16 total_ids;
+	id16_map_t * id16_map;
+
+	if (id16_map_hndl == NULL)
+		return NULL;
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+	total_ids = id16_map->total;
+	ASSERT(total_ids > 0);
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->dbg) {
+		MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids));
+		id16_map->dbg = NULL;
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	id16_map->total = 0;
+	MFREE(osh, id16_map, ID16_MAP_SZ(total_ids));
+
+	return NULL;
+}
+
+uint16 BCMFASTPATH /* Allocate a unique 16bit id */
+id16_map_alloc(void * id16_map_hndl)
+{
+	uint16 val16;
+	id16_map_t * id16_map;
+
+	ASSERT(id16_map_hndl != NULL);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+	ASSERT(id16_map->total > 0);
+
+	if (id16_map->stack_idx < 0) {
+		id16_map->failures++;
+		return ID16_INVALID;
+	}
+
+	val16 = id16_map->stack[id16_map->stack_idx];
+	id16_map->stack_idx--;
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+
+	ASSERT(val16 < (id16_map->start + id16_map->total));
+
+	if (id16_map->dbg) { /* Validate val16 */
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+		ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == TRUE);
+		id16_map_dbg->avail[val16 - id16_map->start] = FALSE;
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	return val16;
+}
+
+
+void BCMFASTPATH /* Free a 16bit id value into the id16 allocator */
+id16_map_free(void * id16_map_hndl, uint16 val16)
+{
+	id16_map_t * id16_map;
+
+	ASSERT(id16_map_hndl != NULL);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+
+	ASSERT(val16 < (id16_map->start + id16_map->total));
+
+	if (id16_map->dbg) { /* Validate val16 */
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+		ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == FALSE);
+		id16_map_dbg->avail[val16 - id16_map->start] = TRUE;
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	id16_map->stack_idx++;
+	id16_map->stack[id16_map->stack_idx] = val16;
+}
+
+uint32 /* Returns number of failures to allocate an unique id16 */
+id16_map_failures(void * id16_map_hndl)
+{
+	ASSERT(id16_map_hndl != NULL);
+	return ((id16_map_t *)id16_map_hndl)->failures;
+}
+
+bool
+id16_map_audit(void * id16_map_hndl)
+{
+	int idx;
+	int insane = 0;
+	id16_map_t * id16_map;
+
+	ASSERT(id16_map_hndl != NULL);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+	ASSERT((id16_map->stack_idx > 0) && (id16_map->stack_idx < id16_map->total));
+	for (idx = 0; idx <= id16_map->stack_idx; idx++) {
+		ASSERT(id16_map->stack[idx] >= id16_map->start);
+		ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total));
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+		if (id16_map->dbg) {
+			uint16 val16 = id16_map->stack[idx];
+			if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16 - id16_map->start] != TRUE) {
+				insane |= 1;
+				ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n",
+				              id16_map_hndl, idx, val16));
+			}
+		}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+	}
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->dbg) {
+		uint16 avail = 0; /* Audit available ids counts */
+		for (idx = 0; idx < id16_map->total; idx++) {
+			if (((id16_map_dbg_t *)(id16_map->dbg))->avail[idx] == TRUE)
+				avail++;
+		}
+		if (avail && (avail != (id16_map->stack_idx + 1))) {
+			insane |= 1;
+			ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n",
+			              id16_map_hndl, avail, id16_map->stack_idx));
+		}
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	return (!!insane);
+}
+/* END: Simple id16 allocator */
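+
+#ifdef notdef
+/* Illustrative sketch of the id16 allocator above: carve out a range of
+ * 16bit ids starting at a hypothetical base value, allocate one, and return
+ * it to the pool. ID16_INVALID signals pool exhaustion.
+ */
+static void
+exampleid16(osl_t *osh)
+{
+	void *hndl;
+	uint16 id;
+
+	hndl = id16_map_init(osh, 32, 0x100); /* ids 0x100 .. 0x11F */
+	if (hndl == NULL)
+		return;
+
+	id = id16_map_alloc(hndl);
+	if (id != ID16_INVALID)
+		id16_map_free(hndl, id);
+
+	id16_map_fini(osh, hndl);
+}
+#endif /* notdef */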
+
+
+#endif /* BCMDRIVER */
+
+/* calculate a >> b; and returns only lower 32 bits */
+void
+bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+	uint32 a1 = a_high, a0 = a_low, r0 = 0;
+
+	if (b == 0) {
+		r0 = a_low;
+		*r = r0;
+		return;
+	}
+
+	if (b < 32) {
+		a0 = a0 >> b;
+		a1 = a1 & ((1 << b) - 1);
+		a1 = a1 << (32 - b);
+		r0 = a0 | a1;
+		*r = r0;
+		return;
+	} else {
+		r0 = a1 >> (b - 32);
+		*r = r0;
+		return;
+	}
+
+}
+
+/* calculate a + b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+	uint32 r1_lo = *r_lo;
+	(*r_lo) += offset;
+	if (*r_lo < r1_lo)
+		(*r_hi) ++;
+}
+
+/* calculate a - b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+	uint32 r1_lo = *r_lo;
+	(*r_lo) -= offset;
+	if (*r_lo > r1_lo)
+		(*r_hi) --;
+}
+
+#ifdef DEBUG_COUNTER
+#if (OSL_SYSUPTIME_SUPPORT == TRUE)
+void counter_printlog(counter_tbl_t *ctr_tbl)
+{
+	uint32 now;
+
+	if (!ctr_tbl->enabled)
+		return;
+
+	now = OSL_SYSUPTIME();
+
+	if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) {
+		uint8 i = 0;
+		printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print);
+
+		for (i = 0; i < ctr_tbl->needed_cnt; i++) {
+			printf(" %u", ctr_tbl->cnt[i]);
+		}
+		printf("\n");
+
+		ctr_tbl->prev_log_print = now;
+		bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint));
+	}
+}
+#else
+/* OSL_SYSUPTIME is not supported so no way to get time */
+#define counter_printlog(a) do {} while (0)
+#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */
+#endif /* DEBUG_COUNTER */
+
+#ifdef BCMDRIVER
+void
+dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
+{
+	uint32 mem_size;
+	mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+	if (pool)
+		MFREE(osh, pool, mem_size);
+}
+dll_pool_t *
+dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size)
+{
+	uint32 mem_size, i;
+	dll_pool_t * dll_pool_p;
+	dll_t * elem_p;
+
+	ASSERT(elem_size > sizeof(dll_t));
+
+	mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+
+	if ((dll_pool_p = (dll_pool_t *)MALLOC(osh, mem_size)) == NULL) {
+		printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n",
+			elems_max, elem_size);
+		ASSERT(0);
+		return dll_pool_p;
+	}
+
+	bzero(dll_pool_p, mem_size);
+
+	dll_init(&dll_pool_p->free_list);
+	dll_pool_p->elems_max = elems_max;
+	dll_pool_p->elem_size = elem_size;
+
+	elem_p = dll_pool_p->elements;
+	for (i = 0; i < elems_max; i++) {
+		dll_append(&dll_pool_p->free_list, elem_p);
+		elem_p = (dll_t *)((uintptr)elem_p + elem_size);
+	}
+
+	dll_pool_p->free_count = elems_max;
+
+	return dll_pool_p;
+}
+
+
+void *
+dll_pool_alloc(dll_pool_t * dll_pool_p)
+{
+	dll_t * elem_p;
+
+	if (dll_pool_p->free_count == 0) {
+		ASSERT(dll_empty(&dll_pool_p->free_list));
+		return NULL;
+	}
+
+	elem_p = dll_head_p(&dll_pool_p->free_list);
+	dll_delete(elem_p);
+	dll_pool_p->free_count -= 1;
+
+	return (void *)elem_p;
+}
+
+void
+dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p)
+{
+	dll_t * node_p = (dll_t *)elem_p;
+	dll_prepend(&dll_pool_p->free_list, node_p);
+	dll_pool_p->free_count += 1;
+}
+
+
+void
+dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p)
+{
+	dll_t * node_p = (dll_t *)elem_p;
+	dll_append(&dll_pool_p->free_list, node_p);
+	dll_pool_p->free_count += 1;
+}
+
+#endif /* BCMDRIVER */
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.c b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
new file mode 100644
index 0000000000000000000000000000000000000000..4a848d2674de14626b8f84c2b08537be0a139510
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
@@ -0,0 +1,1211 @@
+/*
+ * Misc utility routines used by kernel or app-level code.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that needs wifi-related functionality.
+ *
+ * $Copyright Open Broadcom Corporation$
+ * $Id: bcmwifi_channels.c 309193 2012-01-19 00:03:57Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h> 	/* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
+#endif
+
+/* Definitions for D11AC capable Chanspec type */
+
+/* Chanspec ASCII representation with 802.11ac capability:
+ * [<band> 'g'] <channel> ['/'<bandwidth> [<ctl-sideband>]['/'<1st80channel>'-'<2nd80channel>]]
+ *
+ * <band>:
+ *      (optional) 2, 3, 4, 5 for 2.4GHz, 3GHz, 4GHz, and 5GHz respectively.
+ *      Default value is 2g if channel <= 14, otherwise 5g.
+ * <channel>:
+ *      channel number of the 5MHz, 10MHz, 20MHz channel,
+ *      or primary channel of 40MHz, 80MHz, 160MHz, or 80+80MHz channel.
+ * <bandwidth>:
+ *      (optional) 5, 10, 20, 40, 80, 160, or 80+80. Default value is 20.
+ * <primary-sideband>:
+ *      (only for 2.4GHz band 40MHz) U for upper sideband primary, L for lower.
+ *
+ *      For 2.4GHz band 40MHz channels, the same primary channel may be the
+ *      upper sideband for one 40MHz channel, and the lower sideband for an
+ *      overlapping 40MHz channel.  The U/L disambiguates which 40MHz channel
+ *      is being specified.
+ *
+ *      For 40MHz in the 5GHz band and all channel bandwidths greater than
+ *      40MHz, the U/L specification is not allowed since the channels are
+ *      non-overlapping and the primary sub-band is derived from its
+ *      position in the wide bandwidth channel.
+ *
+ * <1st80Channel>:
+ * <2nd80Channel>:
+ *      Required for 80+80, otherwise not allowed.
+ *      Specifies the center channel of the first and second 80MHz band.
+ *
+ * In its simplest form, it is a 20MHz channel number, with the implied band
+ * of 2.4GHz if channel number <= 14, and 5GHz otherwise.
+ *
+ * To allow for backward compatibility with scripts, the old form for
+ * 40MHz channels is also allowed: <channel><ctl-sideband>
+ *
+ * <channel>:
+ *	primary channel of 40MHz, channel <= 14 is 2GHz, otherwise 5GHz
+ * <ctl-sideband>:
+ * 	"U" for upper, "L" for lower (or lower case "u" "l")
+ *
+ * 5 GHz Examples:
+ *      Chanspec        BW        Center Ch  Channel Range  Primary Ch
+ *      5g8             20MHz     8          -              -
+ *      52              20MHz     52         -              -
+ *      52/40           40MHz     54         52-56          52
+ *      56/40           40MHz     54         52-56          56
+ *      52/80           80MHz     58         52-64          52
+ *      56/80           80MHz     58         52-64          56
+ *      60/80           80MHz     58         52-64          60
+ *      64/80           80MHz     58         52-64          64
+ *      52/160          160MHz    50         36-64          52
+ *      36/160          160MHz    50         36-64          36
+ *      36/80+80/42-106 80+80MHz  42,106     36-48,100-112  36
+ *
+ * 2 GHz Examples:
+ *      Chanspec        BW        Center Ch  Channel Range  Primary Ch
+ *      2g8             20MHz     8          -              -
+ *      8               20MHz     8          -              -
+ *      6               20MHz     6          -              -
+ *      6/40l           40MHz     8          6-10           6
+ *      6l              40MHz     8          6-10           6
+ *      6/40u           40MHz     4          2-6            6
+ *      6u              40MHz     4          2-6            6
+ */
+
+/* bandwidth ASCII string */
+static const char *wf_chspec_bw_str[] =
+{
+	"5",
+	"10",
+	"20",
+	"40",
+	"80",
+	"160",
+	"80+80",
+	"na"
+};
+
+static const uint8 wf_chspec_bw_mhz[] =
+{5, 10, 20, 40, 80, 160, 160};
+
+#define WF_NUM_BW \
+	(sizeof(wf_chspec_bw_mhz)/sizeof(uint8))
+
+/* 40MHz channels in 5GHz band */
+static const uint8 wf_5g_40m_chans[] =
+{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159};
+#define WF_NUM_5G_40M_CHANS \
+	(sizeof(wf_5g_40m_chans)/sizeof(uint8))
+
+/* 80MHz channels in 5GHz band */
+static const uint8 wf_5g_80m_chans[] =
+{42, 58, 106, 122, 138, 155};
+#define WF_NUM_5G_80M_CHANS \
+	(sizeof(wf_5g_80m_chans)/sizeof(uint8))
+
+/* 160MHz channels in 5GHz band */
+static const uint8 wf_5g_160m_chans[] =
+{50, 114};
+#define WF_NUM_5G_160M_CHANS \
+	(sizeof(wf_5g_160m_chans)/sizeof(uint8))
+
+
+/* convert bandwidth from chanspec to MHz */
+static uint
+bw_chspec_to_mhz(chanspec_t chspec)
+{
+	uint bw;
+
+	bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT;
+	return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]);
+}
+
+/* bw in MHz, return the channel count from the center channel to the
+ * channel at the edge of the band
+ */
+static uint8
+center_chan_to_edge(uint bw)
+{
+	/* edge channels separated by BW - 10MHz on each side
+	 * delta from cf to edge is half of that,
+	 * MHz to channel num conversion is 5MHz/channel
+	 */
+	return (uint8)(((bw - 20) / 2) / 5);
+}
+
+/* return channel number of the low edge of the band
+ * given the center channel and BW
+ */
+static uint8
+channel_low_edge(uint center_ch, uint bw)
+{
+	return (uint8)(center_ch - center_chan_to_edge(bw));
+}
+
+/* return side band number given center channel and control channel
+ * return -1 on error
+ */
+static int
+channel_to_sb(uint center_ch, uint ctl_ch, uint bw)
+{
+	uint lowest = channel_low_edge(center_ch, bw);
+	uint sb;
+
+	if ((ctl_ch - lowest) % 4) {
+		/* bad ctl channel, not mult 4 */
+		return -1;
+	}
+
+	sb = ((ctl_ch - lowest) / 4);
+
+	/* sb must be an index to a 20MHz channel in range */
+	if (sb >= (bw / 20)) {
+		/* ctl_ch must have been too high for the center_ch */
+		return -1;
+	}
+
+	return sb;
+}
+
+/* return control channel given center channel and side band */
+static uint8
+channel_to_ctl_chan(uint center_ch, uint bw, uint sb)
+{
+	return (uint8)(channel_low_edge(center_ch, bw) + sb * 4);
+}
+
+/* return index of 80MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_80mhz_to_id(uint ch)
+{
+	uint i;
+	for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
+		if (ch == wf_5g_80m_chans[i])
+			return i;
+	}
+
+	return -1;
+}
+
+/* wrapper function for wf_chspec_ntoa. In case of an error it puts
+ * the original chanspec in the output buffer, prepended with "invalid".
+ * Can be used directly in print routines since it handles the NULL return case.
+ */
+char *
+wf_chspec_ntoa_ex(chanspec_t chspec, char *buf)
+{
+	if (wf_chspec_ntoa(chspec, buf) == NULL)
+		snprintf(buf, CHANSPEC_STR_LEN, "invalid 0x%04x", chspec);
+	return buf;
+}
+
+/* given a chanspec and a string buffer, format the chanspec as a
+ * string, and return the original pointer buf.
+ * Min buffer length must be CHANSPEC_STR_LEN.
+ * On error return NULL
+ */
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+	const char *band;
+	uint ctl_chan;
+
+	if (wf_chspec_malformed(chspec))
+		return NULL;
+
+	band = "";
+
+	/* check for non-default band spec */
+	if ((CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) ||
+	    (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL))
+		band = (CHSPEC_IS2G(chspec)) ? "2g" : "5g";
+
+	/* ctl channel */
+	ctl_chan = wf_chspec_ctlchan(chspec);
+
+	/* bandwidth and ctl sideband */
+	if (CHSPEC_IS20(chspec)) {
+		snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, ctl_chan);
+	} else if (!CHSPEC_IS8080(chspec)) {
+		const char *bw;
+		const char *sb = "";
+
+		bw = wf_chspec_bw_str[(chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT];
+
+#ifdef CHANSPEC_NEW_40MHZ_FORMAT
+		/* ctl sideband string if needed for 2g 40MHz */
+		if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) {
+			sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+		}
+
+		snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, ctl_chan, bw, sb);
+#else
+		/* ctl sideband string instead of BW for 40MHz */
+		if (CHSPEC_IS40(chspec)) {
+			sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+			snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb);
+		} else {
+			snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw);
+		}
+#endif /* CHANSPEC_NEW_40MHZ_FORMAT */
+
+	} else {
+		/* 80+80 */
+		uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT;
+		uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT;
+
+		/* convert to channel number */
+		chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0;
+		chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0;
+
+		/* Outputs a max of CHANSPEC_STR_LEN chars including '\0'  */
+		snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2);
+	}
+
+	return (buf);
+}
+
+static int
+read_uint(const char **p, unsigned int *num)
+{
+	unsigned long val;
+	char *endp = NULL;
+
+	val = strtoul(*p, &endp, 10);
+	/* if endp is the initial pointer value, then a number was not read */
+	if (endp == *p)
+		return 0;
+
+	/* advance the buffer pointer to the end of the integer string */
+	*p = endp;
+	/* return the parsed integer */
+	*num = (unsigned int)val;
+
+	return 1;
+}
+
+/* given a chanspec string, convert to a chanspec.
+ * On error return 0
+ */
+chanspec_t
+wf_chspec_aton(const char *a)
+{
+	chanspec_t chspec;
+	uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb;
+	uint num, ctl_ch;
+	uint ch1, ch2;
+	char c, sb_ul = '\0';
+	int i;
+
+	bw = 20;
+	chspec_sb = 0;
+	chspec_ch = ch1 = ch2 = 0;
+
+	/* parse channel num or band */
+	if (!read_uint(&a, &num))
+		return 0;
+
+	/* if we are looking at a 'g', then the first number was a band */
+	c = tolower((int)a[0]);
+	if (c == 'g') {
+		a ++; /* consume the char */
+
+		/* band must be "2" or "5" */
+		if (num == 2)
+			chspec_band = WL_CHANSPEC_BAND_2G;
+		else if (num == 5)
+			chspec_band = WL_CHANSPEC_BAND_5G;
+		else
+			return 0;
+
+		/* read the channel number */
+		if (!read_uint(&a, &ctl_ch))
+			return 0;
+
+		c = tolower((int)a[0]);
+	}
+	else {
+		/* first number is channel, use default for band */
+		ctl_ch = num;
+		chspec_band = ((ctl_ch <= CH_MAX_2G_CHANNEL) ?
+		               WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+	}
+
+	if (c == '\0') {
+		/* default BW of 20MHz */
+		chspec_bw = WL_CHANSPEC_BW_20;
+		goto done_read;
+	}
+
+	a ++; /* consume the 'u','l', or '/' */
+
+	/* check 'u'/'l' */
+	if (c == 'u' || c == 'l') {
+		sb_ul = c;
+		chspec_bw = WL_CHANSPEC_BW_40;
+		goto done_read;
+	}
+
+	/* next letter must be '/' */
+	if (c != '/')
+		return 0;
+
+	/* read bandwidth */
+	if (!read_uint(&a, &bw))
+		return 0;
+
+	/* convert to chspec value */
+	if (bw == 20) {
+		chspec_bw = WL_CHANSPEC_BW_20;
+	} else if (bw == 40) {
+		chspec_bw = WL_CHANSPEC_BW_40;
+	} else if (bw == 80) {
+		chspec_bw = WL_CHANSPEC_BW_80;
+	} else if (bw == 160) {
+		chspec_bw = WL_CHANSPEC_BW_160;
+	} else {
+		return 0;
+	}
+
+	/* So far we have <band>g<chan>/<bw>
+	 * Can now be followed by u/l if bw = 40,
+	 * or '+80' if bw = 80, to make '80+80' bw.
+	 */
+
+	c = tolower((int)a[0]);
+
+	/* if we have a 2g/40 channel, we should have a l/u spec now */
+	if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
+		if (c == 'u' || c == 'l') {
+			a ++; /* consume the u/l char */
+			sb_ul = c;
+			goto done_read;
+		}
+	}
+
+	/* check for 80+80 */
+	if (c == '+') {
+		/* 80+80 */
+		static const char *plus80 = "80/";
+
+		/* must be looking at '+80/'
+		 * check and consume this string.
+		 */
+		chspec_bw = WL_CHANSPEC_BW_8080;
+
+		a ++; /* consume the char '+' */
+
+		/* consume the '80/' string */
+		for (i = 0; i < 3; i++) {
+			if (*a++ != *plus80++) {
+				return 0;
+			}
+		}
+
+		/* read primary 80MHz channel */
+		if (!read_uint(&a, &ch1))
+			return 0;
+
+		/* must be followed by '-' */
+		if (a[0] != '-')
+			return 0;
+		a ++; /* consume the char */
+
+		/* read secondary 80MHz channel */
+		if (!read_uint(&a, &ch2))
+			return 0;
+	}
+
+done_read:
+	/* skip trailing white space */
+	while (a[0] == ' ') {
+		a ++;
+	}
+
+	/* must be end of string */
+	if (a[0] != '\0')
+		return 0;
+
+	/* Now have all the chanspec string parts read;
+	 * chspec_band, ctl_ch, chspec_bw, sb_ul, ch1, ch2.
+	 * chspec_band and chspec_bw are chanspec values.
+	 * Need to convert ctl_ch, sb_ul, and ch1,ch2 into
+	 * a center channel (or two) and sideband.
+	 */
+
+	/* if a sb u/l string was given, just use that,
+	 * guaranteed to be bw = 40 by the string parse.
+	 */
+	if (sb_ul != '\0') {
+		if (sb_ul == 'l') {
+			chspec_ch = UPPER_20_SB(ctl_ch);
+			chspec_sb = WL_CHANSPEC_CTL_SB_LLL;
+		} else if (sb_ul == 'u') {
+			chspec_ch = LOWER_20_SB(ctl_ch);
+			chspec_sb = WL_CHANSPEC_CTL_SB_LLU;
+		}
+	}
+	/* if the bw is 20, center and sideband are trivial */
+	else if (chspec_bw == WL_CHANSPEC_BW_20) {
+		chspec_ch = ctl_ch;
+		chspec_sb = WL_CHANSPEC_CTL_SB_NONE;
+	}
+	/* if the bw is 40/80/160, not 80+80, a single method
+	 * can be used to find the center and sideband
+	 */
+	else if (chspec_bw != WL_CHANSPEC_BW_8080) {
+		/* figure out ctl sideband based on ctl channel and bandwidth */
+		const uint8 *center_ch = NULL;
+		int num_ch = 0;
+		int sb = -1;
+
+		if (chspec_bw == WL_CHANSPEC_BW_40) {
+			center_ch = wf_5g_40m_chans;
+			num_ch = WF_NUM_5G_40M_CHANS;
+		} else if (chspec_bw == WL_CHANSPEC_BW_80) {
+			center_ch = wf_5g_80m_chans;
+			num_ch = WF_NUM_5G_80M_CHANS;
+		} else if (chspec_bw == WL_CHANSPEC_BW_160) {
+			center_ch = wf_5g_160m_chans;
+			num_ch = WF_NUM_5G_160M_CHANS;
+		} else {
+			return 0;
+		}
+
+		for (i = 0; i < num_ch; i ++) {
+			sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+			if (sb >= 0) {
+				chspec_ch = center_ch[i];
+				chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+				break;
+			}
+		}
+
+		/* check for no matching sb/center */
+		if (sb < 0) {
+			return 0;
+		}
+	}
+	/* Otherwise, bw is 80+80. Figure out channel pair and sb */
+	else {
+		int ch1_id = 0, ch2_id = 0;
+		int sb;
+
+		/* look up the channel ID for the specified channel numbers */
+		ch1_id = channel_80mhz_to_id(ch1);
+		ch2_id = channel_80mhz_to_id(ch2);
+
+		/* validate channels */
+		if (ch1_id < 0 || ch2_id < 0)
+			return 0;
+
+		/* combine 2 channel IDs in channel field of chspec */
+		chspec_ch = (((uint)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) |
+		             ((uint)ch2_id << WL_CHANSPEC_CHAN2_SHIFT));
+
+		/* figure out primary 20 MHz sideband */
+
+		/* is the primary channel contained in the 1st 80MHz channel? */
+		sb = channel_to_sb(ch1, ctl_ch, bw);
+		if (sb < 0) {
+			/* no match for primary channel 'ctl_ch' in segment0 80MHz channel */
+			return 0;
+		}
+
+		chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+	}
+
+	chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb);
+
+	if (wf_chspec_malformed(chspec))
+		return 0;
+
+	return chspec;
+}
+
+/*
+ * Verify the chanspec is using a legal set of parameters, i.e. that the
+ * chanspec specified a band, bw, ctl_sb and channel and that the
+ * combination could be legal given any set of circumstances.
+ * RETURNS: TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+bool
+wf_chspec_malformed(chanspec_t chanspec)
+{
+	uint chspec_bw = CHSPEC_BW(chanspec);
+	uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+	/* must be 2G or 5G band */
+	if (CHSPEC_IS2G(chanspec)) {
+		/* must be valid bandwidth */
+		if (chspec_bw != WL_CHANSPEC_BW_20 &&
+		    chspec_bw != WL_CHANSPEC_BW_40) {
+			return TRUE;
+		}
+	} else if (CHSPEC_IS5G(chanspec)) {
+		if (chspec_bw == WL_CHANSPEC_BW_8080) {
+			uint ch1_id, ch2_id;
+
+			/* channel IDs in 80+80 must be in range */
+			ch1_id = CHSPEC_CHAN1(chanspec);
+			ch2_id = CHSPEC_CHAN2(chanspec);
+			if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS)
+				return TRUE;
+
+		} else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 ||
+		           chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) {
+
+			if (chspec_ch > MAXCHANNEL) {
+				return TRUE;
+			}
+		} else {
+			/* invalid bandwidth */
+			return TRUE;
+		}
+	} else {
+		/* must be 2G or 5G band */
+		return TRUE;
+	}
+
+	/* side band needs to be consistent with bandwidth */
+	if (chspec_bw == WL_CHANSPEC_BW_20) {
+		if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL)
+			return TRUE;
+	} else if (chspec_bw == WL_CHANSPEC_BW_40) {
+		if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LLU)
+			return TRUE;
+	} else if (chspec_bw == WL_CHANSPEC_BW_80 ||
+	           chspec_bw == WL_CHANSPEC_BW_8080) {
+		if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU)
+			return TRUE;
+	}
+	else if (chspec_bw == WL_CHANSPEC_BW_160) {
+		ASSERT(CHSPEC_CTL_SB(chanspec) <= WL_CHANSPEC_CTL_SB_UUU);
+	}
+	return FALSE;
+}
+
+/*
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ * RETURNS: TRUE if the chanspec is a valid 802.11 channel
+ */
+bool
+wf_chspec_valid(chanspec_t chanspec)
+{
+	uint chspec_bw = CHSPEC_BW(chanspec);
+	uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+	if (wf_chspec_malformed(chanspec))
+		return FALSE;
+
+	if (CHSPEC_IS2G(chanspec)) {
+		/* must be valid bandwidth and channel range */
+		if (chspec_bw == WL_CHANSPEC_BW_20) {
+			if (chspec_ch >= 1 && chspec_ch <= 14)
+				return TRUE;
+		} else if (chspec_bw == WL_CHANSPEC_BW_40) {
+			if (chspec_ch >= 3 && chspec_ch <= 11)
+				return TRUE;
+		}
+	} else if (CHSPEC_IS5G(chanspec)) {
+		if (chspec_bw == WL_CHANSPEC_BW_8080) {
+			uint16 ch1, ch2;
+
+			ch1 = wf_5g_80m_chans[CHSPEC_CHAN1(chanspec)];
+			ch2 = wf_5g_80m_chans[CHSPEC_CHAN2(chanspec)];
+
+			/* the two channels must be separated by more than 80MHz by VHT req */
+			if ((ch2 > ch1 + CH_80MHZ_APART) ||
+			    (ch1 > ch2 + CH_80MHZ_APART))
+				return TRUE;
+		} else {
+			const uint8 *center_ch;
+			uint num_ch, i;
+
+			if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40) {
+				center_ch = wf_5g_40m_chans;
+				num_ch = WF_NUM_5G_40M_CHANS;
+			} else if (chspec_bw == WL_CHANSPEC_BW_80) {
+				center_ch = wf_5g_80m_chans;
+				num_ch = WF_NUM_5G_80M_CHANS;
+			} else if (chspec_bw == WL_CHANSPEC_BW_160) {
+				center_ch = wf_5g_160m_chans;
+				num_ch = WF_NUM_5G_160M_CHANS;
+			} else {
+				/* invalid bandwidth */
+				return FALSE;
+			}
+
+			/* check for a valid center channel */
+			if (chspec_bw == WL_CHANSPEC_BW_20) {
+				/* We don't have an array of legal 20MHz 5G channels, but they are
+				 * each side of the legal 40MHz channels.  Check the chanspec
+				 * channel against either side of the 40MHz channels.
+				 */
+				for (i = 0; i < num_ch; i ++) {
+					if (chspec_ch == (uint)LOWER_20_SB(center_ch[i]) ||
+					    chspec_ch == (uint)UPPER_20_SB(center_ch[i]))
+						break; /* match found */
+				}
+
+				if (i == num_ch) {
+					/* check for channel 165 which is not the side band
+					 * of 40MHz 5G channel
+					 */
+					if (chspec_ch == 165)
+						i = 0;
+
+					/* check for legacy JP channels on failure */
+					if (chspec_ch == 34 || chspec_ch == 38 ||
+					    chspec_ch == 42 || chspec_ch == 46)
+						i = 0;
+				}
+			} else {
+				/* check the chanspec channel to each legal channel */
+				for (i = 0; i < num_ch; i ++) {
+					if (chspec_ch == center_ch[i])
+						break; /* match found */
+				}
+			}
+
+			if (i < num_ch) {
+				/* match found */
+				return TRUE;
+			}
+		}
+	}
+
+	return FALSE;
+}
+
+/*
+ * This function returns the channel number that control traffic is being sent on. For 20MHz
+ * channels this is just the channel number; for 40MHz, 80MHz, and 160MHz channels it is the
+ * 20MHz control sideband channel selected by the chanspec.
+ */
+uint8
+wf_chspec_ctlchan(chanspec_t chspec)
+{
+	uint center_chan;
+	uint bw_mhz;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* Is there a sideband ? */
+	if (CHSPEC_IS20(chspec)) {
+		return CHSPEC_CHANNEL(chspec);
+	} else {
+		sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+		if (CHSPEC_IS8080(chspec)) {
+			/* For an 80+80 MHz channel, the sideband 'sb' field is an 80 MHz sideband
+			 * (LL, LU, UL, UU) for the 80 MHz frequency segment 0.
+			 */
+			uint chan_id = CHSPEC_CHAN1(chspec);
+
+			bw_mhz = 80;
+
+			/* convert from channel index to channel number */
+			center_chan = wf_5g_80m_chans[chan_id];
+		}
+		else {
+			bw_mhz = bw_chspec_to_mhz(chspec);
+			center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT;
+		}
+
+		return (channel_to_ctl_chan(center_chan, bw_mhz, sb));
+	}
+}
+
+/* given a chanspec, return the bandwidth string */
+char *
+wf_chspec_to_bw_str(chanspec_t chspec)
+{
+	return (char *)wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)];
+}
+
+/*
+ * This function returns the chanspec of the control channel of a given chanspec
+ */
+chanspec_t
+wf_chspec_ctlchspec(chanspec_t chspec)
+{
+	chanspec_t ctl_chspec = chspec;
+	uint8 ctl_chan;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* Is there a sideband ? */
+	if (!CHSPEC_IS20(chspec)) {
+		ctl_chan = wf_chspec_ctlchan(chspec);
+		ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20;
+		ctl_chspec |= CHSPEC_BAND(chspec);
+	}
+	return ctl_chspec;
+}
+
+/* return chanspec given control channel and bandwidth
+ * return 0 on error
+ */
+uint16
+wf_channel2chspec(uint ctl_ch, uint bw)
+{
+	uint16 chspec;
+	const uint8 *center_ch = NULL;
+	int num_ch = 0;
+	int sb = -1;
+	int i = 0;
+
+	chspec = ((ctl_ch <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+
+	chspec |= bw;
+
+	if (bw == WL_CHANSPEC_BW_40) {
+		center_ch = wf_5g_40m_chans;
+		num_ch = WF_NUM_5G_40M_CHANS;
+		bw = 40;
+	} else if (bw == WL_CHANSPEC_BW_80) {
+		center_ch = wf_5g_80m_chans;
+		num_ch = WF_NUM_5G_80M_CHANS;
+		bw = 80;
+	} else if (bw == WL_CHANSPEC_BW_160) {
+		center_ch = wf_5g_160m_chans;
+		num_ch = WF_NUM_5G_160M_CHANS;
+		bw = 160;
+	} else if (bw == WL_CHANSPEC_BW_20) {
+		chspec |= ctl_ch;
+		return chspec;
+	} else {
+		return 0;
+	}
+
+	for (i = 0; i < num_ch; i ++) {
+		sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+		if (sb >= 0) {
+			chspec |= center_ch[i];
+			chspec |= (sb << WL_CHANSPEC_CTL_SB_SHIFT);
+			break;
+		}
+	}
+
+	/* check for no matching sb/center */
+	if (sb < 0) {
+		return 0;
+	}
+
+	return chspec;
+}
+
+/*
+ * This function returns the chanspec for the primary 40MHz sub-channel of an 80MHz channel.
+ * The control sideband of the returned chanspec specifies the same 20MHz channel that the
+ * 80MHz channel is using as the primary 20MHz channel.
+ */
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec40 = chspec;
+	uint center_chan;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* if the chanspec is > 80MHz, use the helper routine to find the primary 80 MHz channel */
+	if (CHSPEC_IS8080(chspec) || CHSPEC_IS160(chspec)) {
+		chspec = wf_chspec_primary80_chspec(chspec);
+	}
+
+	/* determine primary 40 MHz sub-channel of an 80 MHz chanspec */
+	if (CHSPEC_IS80(chspec)) {
+		center_chan = CHSPEC_CHANNEL(chspec);
+		sb = CHSPEC_CTL_SB(chspec);
+
+		if (sb < WL_CHANSPEC_CTL_SB_UL) {
+			/* Primary 40MHz is on lower side */
+			center_chan -= CH_20MHZ_APART;
+			/* sideband bits are the same for LL/LU and L/U */
+		} else {
+			/* Primary 40MHz is on upper side */
+			center_chan += CH_20MHZ_APART;
+			/* sideband bits need to be adjusted by UL offset */
+			sb -= WL_CHANSPEC_CTL_SB_UL;
+		}
+
+		/* Create primary 40MHz chanspec */
+		chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
+		            sb | center_chan);
+	}
+
+	return chspec40;
+}
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+	int ch = -1;
+	uint base;
+	int offset;
+
+	/* take the default channel start frequency */
+	if (start_factor == 0) {
+		if (freq >= 2400 && freq <= 2500)
+			start_factor = WF_CHAN_FACTOR_2_4_G;
+		else if (freq >= 5000 && freq <= 6000)
+			start_factor = WF_CHAN_FACTOR_5_G;
+	}
+
+	if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+		return 14;
+
+	base = start_factor / 2;
+
+	/* check that the frequency is in 1GHz range of the base */
+	if ((freq < base) || (freq > base + 1000))
+		return -1;
+
+	offset = freq - base;
+	ch = offset / 5;
+
+	/* check that frequency is a 5MHz multiple from the base */
+	if (offset != (ch * 5))
+		return -1;
+
+	/* restricted channel range check for 2.4G */
+	if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+		return -1;
+
+	return ch;
+}
+
+/*
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_4_G, and WF_CHAN_FACTOR_5_G
+ * are defined for 2.4 GHz, 4 GHz, and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814 = 2407 * 2).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+	int freq;
+
+	if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+	    (ch > 200))
+		freq = -1;
+	else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+		freq = 2484;
+	else
+		freq = ch * 5 + start_factor / 2;
+
+	return freq;
+}
+
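+/* Table of the 20MHz control sideband encodings within a wide (up to 160MHz) channel,
+ * used below to search for a chanspec whose primary channel matches.
+ */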
+static const uint16 sidebands[] = {
+	WL_CHANSPEC_CTL_SB_LLL, WL_CHANSPEC_CTL_SB_LLU,
+	WL_CHANSPEC_CTL_SB_LUL, WL_CHANSPEC_CTL_SB_LUU,
+	WL_CHANSPEC_CTL_SB_ULL, WL_CHANSPEC_CTL_SB_ULU,
+	WL_CHANSPEC_CTL_SB_UUL, WL_CHANSPEC_CTL_SB_UUU
+};
+
+/*
+ * Returns the chanspec 80Mhz channel corresponding to the following input
+ * parameters
+ *
+ *	primary_channel - primary 20Mhz channel
+ *	center_channel   - center frequency of the 80MHz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_chspec_80(uint8 center_channel, uint8 primary_channel)
+{
+
+	chanspec_t chanspec = INVCHANSPEC;
+	chanspec_t chanspec_cur;
+	uint i;
+
+	for (i = 0; i < WF_NUM_SIDEBANDS_80MHZ; i++) {
+		chanspec_cur = CH80MHZ_CHSPEC(center_channel, sidebands[i]);
+		if (primary_channel == wf_chspec_ctlchan(chanspec_cur)) {
+			chanspec = chanspec_cur;
+			break;
+		}
+	}
+	/* If the loop ended early, we are good; otherwise we did not
+	 * find an 80MHz chanspec with the given center_channel that had a primary channel
+	 * matching the given primary_channel.
+	 */
+	return chanspec;
+}
+
+/*
+ * Returns the 80+80 chanspec corresponding to the following input parameters
+ *
+ *    primary_20mhz - Primary 20 MHz channel
+ *    chan0 - center channel number of one frequency segment
+ *    chan1 - center channel number of the other frequency segment
+ *
+ * Parameters chan0 and chan1 are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to IEEE802.11ac section 22.3.14 "Channelization".
+ */
+chanspec_t
+wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan0, uint8 chan1)
+{
+	int sb = 0;
+	uint16 chanspec = 0;
+	int chan0_id = 0, chan1_id = 0;
+	int seg0, seg1;
+
+	chan0_id = channel_80mhz_to_id(chan0);
+	chan1_id = channel_80mhz_to_id(chan1);
+
+	/* make sure the channel numbers were valid */
+	if (chan0_id == -1 || chan1_id == -1)
+		return INVCHANSPEC;
+
+	/* does the primary channel fit with the 1st 80MHz channel ? */
+	sb = channel_to_sb(chan0, primary_20mhz, 80);
+	if (sb >= 0) {
+		/* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */
+		seg0 = chan0_id;
+		seg1 = chan1_id;
+	} else {
+		/* no, so does the primary channel fit with the 2nd 80MHz channel ? */
+		sb = channel_to_sb(chan1, primary_20mhz, 80);
+		if (sb < 0) {
+			/* no match for ctl_ch to either 80MHz center channel */
+			return INVCHANSPEC;
+		}
+		/* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */
+		seg0 = chan1_id;
+		seg1 = chan0_id;
+	}
+
+	chanspec = ((seg0 << WL_CHANSPEC_CHAN1_SHIFT) |
+	            (seg1 << WL_CHANSPEC_CHAN2_SHIFT) |
+	            (sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+	            WL_CHANSPEC_BW_8080 |
+	            WL_CHANSPEC_BAND_5G);
+
+	return chanspec;
+}
+
+/*
+ * This function returns the 80Mhz channel for the given id.
+ */
+static uint8
+wf_chspec_get80Mhz_ch(uint8 chan_80Mhz_id)
+{
+	if (chan_80Mhz_id < WF_NUM_5G_80M_CHANS)
+		return wf_5g_80m_chans[chan_80Mhz_id];
+
+	return 0;
+}
+
+/*
+ * Returns the primary 80 Mhz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved
+ *
+ *  returns -1 in case the provided chanspec is a 20/40 MHz chanspec
+ */
+
+uint8
+wf_chspec_primary80_channel(chanspec_t chanspec)
+{
+	uint8 primary80_chan;
+
+	if (CHSPEC_IS80(chanspec))	{
+		primary80_chan = CHSPEC_CHANNEL(chanspec);
+	}
+	else if (CHSPEC_IS8080(chanspec)) {
+		/* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */
+		primary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chanspec));
+	}
+	else if (CHSPEC_IS160(chanspec)) {
+		uint8 center_chan = CHSPEC_CHANNEL(chanspec);
+		uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+		/* based on the sb value the primary 80MHz channel can be retrieved;
+		 * if sb is in range 0 to 3 the lower band is the primary 80MHz band
+		 */
+		if (sb < 4) {
+			primary80_chan = center_chan - CH_40MHZ_APART;
+		}
+		/* if sb is in range 4 to 7 the upper band is the 80Mhz primary band */
+		else
+		{
+			primary80_chan = center_chan + CH_40MHZ_APART;
+		}
+	}
+	else {
+		/* for 20 and 40 Mhz */
+		primary80_chan = -1;
+	}
+	return primary80_chan;
+}
+
+/*
+ * Returns the secondary 80 Mhz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved
+ *
+ *  returns -1 in case the provided chanspec is a 20/40/80 MHz chanspec
+ */
+uint8
+wf_chspec_secondary80_channel(chanspec_t chanspec)
+{
+	uint8 secondary80_chan;
+
+	if (CHSPEC_IS8080(chanspec)) {
+		secondary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chanspec));
+	}
+	else if (CHSPEC_IS160(chanspec)) {
+		uint8 center_chan = CHSPEC_CHANNEL(chanspec);
+		uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+		/* based on the sb value the secondary 80MHz channel can be retrieved;
+		 * if sb is in range 0 to 3 the upper band is the secondary 80MHz band
+		 */
+		if (sb < 4) {
+			secondary80_chan = center_chan + CH_40MHZ_APART;
+		}
+		/* if sb is in range 4 to 7 the lower band is the secondary 80Mhz band */
+		else
+		{
+			secondary80_chan = center_chan - CH_40MHZ_APART;
+		}
+	}
+	else {
+		/* for 20, 40, and 80 Mhz */
+		secondary80_chan = -1;
+	}
+	return secondary80_chan;
+}
+
+/*
+ * This function returns the chanspec for the primary 80MHz of an 160MHz or 80+80 channel.
+ *
+ *    chanspec - Input chanspec for which the primary 80Mhz chanspec has to be retreived
+ *
+ *  returns the input chanspec in case the provided chanspec is an 80 MHz chanspec
+ *  returns INVCHANSPEC in case the provided channel is 20/40 MHz chanspec
+ */
+chanspec_t
+wf_chspec_primary80_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec80;
+	uint center_chan;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+	if (CHSPEC_IS80(chspec)) {
+		chspec80 = chspec;
+	}
+	else if (CHSPEC_IS8080(chspec)) {
+
+		/* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */
+		center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec));
+
+		sb = CHSPEC_CTL_SB(chspec);
+
+		/* Create primary 80MHz chanspec */
+		chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+	}
+	else if (CHSPEC_IS160(chspec)) {
+		center_chan = CHSPEC_CHANNEL(chspec);
+		sb = CHSPEC_CTL_SB(chspec);
+
+		if (sb < WL_CHANSPEC_CTL_SB_ULL) {
+			/* Primary 80MHz is on lower side */
+			center_chan -= CH_40MHZ_APART;
+		}
+		else {
+			/* Primary 80MHz is on upper side */
+			center_chan += CH_40MHZ_APART;
+			sb -= WL_CHANSPEC_CTL_SB_ULL;
+		}
+		/* Create primary 80MHz chanspec */
+		chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+	}
+	else {
+		chspec80 = INVCHANSPEC;
+	}
+
+	return chspec80;
+}
+
+#ifdef WL11AC_80P80
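+/* Return the channel field of a chanspec; for an 80+80 chanspec this is the
+ * center channel of the primary 80MHz frequency segment.
+ */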
+uint8
+wf_chspec_channel(chanspec_t chspec)
+{
+	if (CHSPEC_IS8080(chspec)) {
+		return wf_chspec_primary80_channel(chspec);
+	}
+	else {
+		return ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK));
+	}
+}
+#endif /* WL11AC_80P80 */
diff --git a/drivers/net/wireless/bcmdhd/circularbuf.c b/drivers/net/wireless/bcmdhd/circularbuf.c
new file mode 100644
index 0000000000000000000000000000000000000000..bfb308c34f05d7826ea701b41c1d4c71f5a82872
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/circularbuf.c
@@ -0,0 +1,324 @@
+/** @file circularbuf.c
+ *
+ * PCIe host driver and dongle firmware need to communicate with each other. The mechanism consists
+ * of multiple circular buffers located in (DMA'able) host memory. A circular buffer is either used
+ * for host -> dongle (h2d) or dongle -> host communication. Both host driver and firmware make use
+ * of this source file. This source file contains functions to manage such a set of circular
+ * buffers, but does not contain the code to read or write the data itself into the buffers. It
+ * leaves that up to the software layer that uses this file, which can be implemented either using
+ * pio or DMA transfers. It also leaves the format of the data that is written and read to a higher
+ * layer. Typically the data is in the form of so-called 'message buffers'.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: circularbuf.c 467150 2014-04-02 17:30:43Z $
+ */
+
+#include <circularbuf.h>
+#include <bcmmsgbuf.h>
+#include <osl.h>
+
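+/* CIRCULARBUF_READ_SPACE_AT_END gives the bytes readable without wrapping (from the
+ * pending read pointer up to the write pointer or to the effective end of the buffer).
+ * CIRCULARBUF_READ_SPACE_AVAIL additionally handles the wrap-around case where the
+ * readable data starts back at the beginning of the buffer.
+ */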
+#define CIRCULARBUF_READ_SPACE_AT_END(x)		\
+			((x->w_ptr >= x->rp_ptr) ? (x->w_ptr - x->rp_ptr) : (x->e_ptr - x->rp_ptr))
+
+#define CIRCULARBUF_READ_SPACE_AVAIL(x)		\
+			(((CIRCULARBUF_READ_SPACE_AT_END(x) == 0) && (x->w_ptr < x->rp_ptr)) ? \
+				x->w_ptr : CIRCULARBUF_READ_SPACE_AT_END(x))
+
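+/* default message level: error, trace and inform messages enabled */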
+int cbuf_msg_level = CBUF_ERROR_VAL | CBUF_TRACE_VAL | CBUF_INFORM_VAL;
+
+/* #define CBUF_DEBUG */
+#ifdef CBUF_DEBUG
+#define CBUF_DEBUG_CHECK(x)	x
+#else
+#define CBUF_DEBUG_CHECK(x)
+#endif	/* CBUF_DEBUG */
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function   : circularbuf_init
+ * Description:
+ *	Initialize a circular buffer handle over the caller-provided memory area.
+ *	Read and write pointers are reset; no doorbell callback is registered yet.
+ *
+ * Input Args : buf_base_addr: address of DMA'able host memory provided by caller
+ *		total_buf_len: size of that memory area in bytes
+ *
+ * Return Values : None
+ *
+ * -----------------------------------------------------------------------------
+ */
+void
+circularbuf_init(circularbuf_t *handle, void *buf_base_addr, uint16 total_buf_len)
+{
+	handle->buf_addr = buf_base_addr;
+
+	handle->depth = handle->e_ptr = HTOL32(total_buf_len);
+
+	/* Initialize Read and Write pointers */
+	handle->w_ptr = handle->r_ptr = handle->wp_ptr = handle->rp_ptr = HTOL32(0);
+	handle->mb_ring_bell = NULL;
+	handle->mb_ctx = NULL;
+
+	return;
+}
+
+/**
+ * When an item is added to the circular buffer by the producing party, the consuming party has to
+ * be notified by means of a 'door bell' or 'ring'. This function allows the caller to register a
+ * 'ring' function that will be called when a 'write complete' occurs.
+ */
+void
+circularbuf_register_cb(circularbuf_t *handle, mb_ring_t mb_ring_func, void *ctx)
+{
+	handle->mb_ring_bell = mb_ring_func;
+	handle->mb_ctx = ctx;
+}
+
+#ifdef CBUF_DEBUG
+static void
+circularbuf_check_sanity(circularbuf_t *handle)
+{
+	if ((handle->e_ptr > handle->depth) ||
+	    (handle->r_ptr > handle->e_ptr) ||
+		(handle->rp_ptr > handle->e_ptr) ||
+		(handle->w_ptr > handle->e_ptr))
+	{
+		printf("%s:%d: Pointers are corrupted.\n", __FUNCTION__, __LINE__);
+		circularbuf_debug_print(handle);
+		ASSERT(0);
+	}
+	return;
+}
+#endif /* CBUF_DEBUG */
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function   : circularbuf_reserve_for_write
+ *
+ * Description:
+ * This function reserves N bytes for write in the circular buffer. The circularbuf
+ * implementation will only reserve space in the circular buffer and return
+ * the pointer to the address where the new data can be written.
+ * The actual write implementation (bcopy/dma) is outside the scope of
+ * circularbuf implementation.
+ *
+ * Input Args :
+ *		size - No. of bytes to reserve for write
+ *
+ * Return Values :
+ *		void * : Pointer to the reserved location. This is the address
+ *		          that will be used for write (dma/bcopy)
+ *
+ * -----------------------------------------------------------------------------
+ */
+void * BCMFASTPATH
+circularbuf_reserve_for_write(circularbuf_t *handle, uint16 size)
+{
+	int16 avail_space;
+	void *ret_ptr = NULL;
+
+	CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle));
+	ASSERT(size < handle->depth);
+
+	if (handle->wp_ptr >= handle->r_ptr)
+		avail_space = handle->depth - handle->wp_ptr;
+	else
+		avail_space = handle->r_ptr - handle->wp_ptr;
+
+	ASSERT(avail_space <= handle->depth);
+	if (avail_space > size)
+	{
+		/* Great. We have enough space. */
+		ret_ptr = CIRCULARBUF_START(handle) + handle->wp_ptr;
+
+		/*
+		 * We need to update the wp_ptr for the next guy to write.
+		 *
+		 * Please Note : We are not updating the write pointer here. This can be
+		 * done only after write is complete (In case of DMA, we can only schedule
+		 * the DMA. Actual completion will be known only on DMA complete interrupt).
+		 */
+		handle->wp_ptr += size;
+		return ret_ptr;
+	}
+
+	/*
+	 * If there is no available space, we should check if there is some space left
+	 * in the beginning of the circular buffer.  Wrap-around case, where there is
+	 * not enough space in the end of the circular buffer. But, there might be
+	 * room in the beginning of the buffer.
+	 */
+	if (handle->wp_ptr >= handle->r_ptr)
+	{
+		avail_space = handle->r_ptr;
+		if (avail_space > size)
+		{
+			/* OK. There is room in the beginning. Let's go ahead and use that.
+			 * But, before that, we have left a hole at the end of the circular
+			 * buffer as that was not sufficient to accommodate the requested
+			 * size. Let's make sure this is updated in the circularbuf structure
+			 * so that consumer does not use the hole.
+			 */
+			handle->e_ptr  = handle->wp_ptr;
+			handle->wp_ptr = size;
+
+			return CIRCULARBUF_START(handle);
+		}
+	}
+
+	/* We have tried enough to accommodate the new packet. There is no room for now. */
+	return NULL;
+}
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function   : circularbuf_write_complete
+ *
+ * Description:
+ * This function has to be called by the producer end of circularbuf to indicate to
+ * the circularbuf layer that data has been written and the write pointer can be
+ * updated. In the process, if there was a doorbell callback registered, that
+ * function would also be invoked as to notify the consuming party.
+ *
+ * Input Args :
+ *		dest_addr	  : Address where the data was written. This would be the
+ *					    same address that was reserved earlier.
+ *		bytes_written : Length of data written
+ *
+ * -----------------------------------------------------------------------------
+ */
+void BCMFASTPATH
+circularbuf_write_complete(circularbuf_t *handle, uint16 bytes_written)
+{
+	CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle));
+
+	/* Update the write pointer */
+	if ((handle->w_ptr + bytes_written) >= handle->depth) {
+		OSL_CACHE_FLUSH((void *) CIRCULARBUF_START(handle), bytes_written);
+		handle->w_ptr = bytes_written;
+	} else {
+		OSL_CACHE_FLUSH((void *) (CIRCULARBUF_START(handle) + handle->w_ptr),
+			bytes_written);
+		handle->w_ptr += bytes_written;
+	}
+
+	/* And ring the door bell (mail box interrupt) to indicate to the peer that
+	 * message is available for consumption.
+	 */
+	if (handle->mb_ring_bell)
+		handle->mb_ring_bell(handle->mb_ctx);
+}
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function   : circularbuf_get_read_ptr
+ *
+ * Description:
+ * This function will be called by the consumer of circularbuf for reading data from
+ * the circular buffer. This will typically be invoked when the consumer gets a
+ * doorbell interrupt.
+ * Please note that the function only returns the pointer (and length) from
+ * where the data can be read. Actual read implementation is up to the
+ * consumer. It could be a bcopy or dma.
+ *
+ * Input Args :
+ *		available_len	: out parameter; length of data available for read.
+ *
+ * Return Values :
+ *		void *			: Address from where the data can be read, or NULL if
+ *						  no data is available.
+ *
+ * -----------------------------------------------------------------------------
+ */
+void * BCMFASTPATH
+circularbuf_get_read_ptr(circularbuf_t *handle, uint16 *available_len)
+{
+	uint8 *ret_addr;
+
+	CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle));
+
+	/* First check if there is any data available in the circular buffer */
+	*available_len = CIRCULARBUF_READ_SPACE_AVAIL(handle);
+	if (*available_len == 0)
+		return NULL;
+
+	/*
+	 * Although there might be data in the circular buffer for read, in
+	 * cases of write wrap-around and read still in the end of the circular
+	 * buffer, we might have to wrap around the read pending pointer also.
+	 */
+	if (CIRCULARBUF_READ_SPACE_AT_END(handle) == 0)
+		handle->rp_ptr = 0;
+
+	ret_addr = CIRCULARBUF_START(handle) + handle->rp_ptr;
+
+	/*
+	 * Please note that we do not update the read pointer here. Only
+	 * read pending pointer is updated, so that next reader knows where
+	 * to read data from.
+	 * read pointer can only be updated when the read is complete.
+	 */
+	handle->rp_ptr = (uint16)(ret_addr - CIRCULARBUF_START(handle) + *available_len);
+
+	ASSERT(*available_len <= handle->depth);
+
+	OSL_CACHE_INV((void *) ret_addr, *available_len);
+
+	return ret_addr;
+}
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function   : circularbuf_read_complete
+ * Description:
+ * This function has to be called by the consumer end of circularbuf to indicate
+ * that data has been consumed and the read pointer can be updated, so the producing side
+ * can use the freed space for new entries.
+ *
+ *
+ * Input Args :
+ *		bytes_read : No. of bytes consumed by the consumer. This has to match
+ *					 the length returned by circularbuf_get_read_ptr
+ *
+ * Return Values :
+ *		CIRCULARBUF_SUCCESS		: on success
+ *
+ * -----------------------------------------------------------------------------
+ */
+circularbuf_ret_t BCMFASTPATH
+circularbuf_read_complete(circularbuf_t *handle, uint16 bytes_read)
+{
+	CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle));
+	ASSERT(bytes_read < handle->depth);
+
+	/* Update the read pointer */
+	if ((handle->w_ptr < handle->e_ptr) && (handle->r_ptr + bytes_read) > handle->e_ptr)
+		handle->r_ptr = bytes_read;
+	else
+		handle->r_ptr += bytes_read;
+
+	return CIRCULARBUF_SUCCESS;
+}
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function	: circularbuf_revert_rp_ptr
+ *
+ * Description:
+ * The rp_ptr update during circularbuf_get_read_ptr() is done to reflect the amount of data
+ * that is sent out to be read by the consumer. But the consumer may not always read the
+ * entire data. In such a case, the rp_ptr needs to be reverted back by 'left' bytes, where
+ * 'left' is the no. of bytes left unread.
+ *
+ * Input args:
+ * 	bytes : The no. of bytes left unread by the consumer
+ *
+ * -----------------------------------------------------------------------------
+ */
+circularbuf_ret_t
+circularbuf_revert_rp_ptr(circularbuf_t *handle, uint16 bytes)
+{
+	CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle));
+	ASSERT(bytes < handle->depth);
+
+	handle->rp_ptr -= bytes;
+
+	return CIRCULARBUF_SUCCESS;
+}
diff --git a/drivers/net/wireless/bcmdhd/common/include/devctrl_if/wlioctl_defs.h b/drivers/net/wireless/bcmdhd/common/include/devctrl_if/wlioctl_defs.h
new file mode 100644
index 0000000000000000000000000000000000000000..141427c6f6ad29cf86b232474b8b73c01504f684
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/common/include/devctrl_if/wlioctl_defs.h
@@ -0,0 +1,2104 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wlioctl_defs.h 403826 2013-05-22 16:40:55Z $
+ */
+
+
+#ifndef wlioctl_defs_h
+#define wlioctl_defs_h
+
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef  D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+/* WL_RSPEC defines for rate information */
+#define WL_RSPEC_RATE_MASK      0x000000FF      /* rate or HT MCS value */
+#define WL_RSPEC_VHT_MCS_MASK   0x0000000F      /* VHT MCS value */
+#define WL_RSPEC_VHT_NSS_MASK   0x000000F0      /* VHT Nss value */
+#define WL_RSPEC_VHT_NSS_SHIFT  4               /* VHT Nss value shift */
+#define WL_RSPEC_TXEXP_MASK     0x00000300
+#define WL_RSPEC_TXEXP_SHIFT    8
+#define WL_RSPEC_BW_MASK        0x00070000      /* bandwidth mask */
+#define WL_RSPEC_BW_SHIFT       16              /* bandwidth shift */
+#define WL_RSPEC_STBC           0x00100000      /* STBC encoding, Nsts = 2 x Nss */
+#define WL_RSPEC_TXBF           0x00200000      /* bit indicates TXBF mode */
+#define WL_RSPEC_LDPC           0x00400000      /* bit indicates adv coding in use */
+#define WL_RSPEC_SGI            0x00800000      /* Short GI mode */
+#define WL_RSPEC_ENCODING_MASK  0x03000000      /* Encoding of Rate/MCS field */
+#define WL_RSPEC_OVERRIDE_RATE  0x40000000      /* bit indicates override of mcs only */
+#define WL_RSPEC_OVERRIDE_MODE  0x80000000      /* bit indicates override both rate & mode */
+
+/* WL_RSPEC_ENCODING field defs */
+#define WL_RSPEC_ENCODE_RATE    0x00000000      /* Legacy rate is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_HT      0x01000000      /* HT MCS is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_VHT     0x02000000      /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
+
+/* WL_RSPEC_BW field defs */
+#define WL_RSPEC_BW_UNSPECIFIED 0
+#define WL_RSPEC_BW_20MHZ       0x00010000
+#define WL_RSPEC_BW_40MHZ       0x00020000
+#define WL_RSPEC_BW_80MHZ       0x00030000
+#define WL_RSPEC_BW_160MHZ      0x00040000
+
+/* Legacy defines for the nrate iovar */
+#define OLD_NRATE_MCS_INUSE         0x00000080 /* MCS in use, indicates b0-6 holds an mcs */
+#define OLD_NRATE_RATE_MASK         0x0000007f /* rate/mcs value */
+#define OLD_NRATE_STF_MASK          0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
+#define OLD_NRATE_STF_SHIFT         8          /* stf mode shift */
+#define OLD_NRATE_OVERRIDE          0x80000000 /* bit indicates override both rate & mode */
+#define OLD_NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicates override of mcs only */
+#define OLD_NRATE_SGI               0x00800000 /* sgi mode */
+#define OLD_NRATE_LDPC_CODING       0x00400000 /* bit indicates adv coding in use */
+
+#define OLD_NRATE_STF_SISO	0		/* stf mode SISO */
+#define OLD_NRATE_STF_CDD	1		/* stf mode CDD */
+#define OLD_NRATE_STF_STBC	2		/* stf mode STBC */
+#define OLD_NRATE_STF_SDM	3		/* stf mode SDM */
+
+#define HIGHEST_SINGLE_STREAM_MCS	7 /* MCS values greater than this enable multiple streams */
+
+/* given a proprietary MCS, get number of spatial streams */
+#define GET_PROPRIETARY_11N_MCS_NSS(mcs) (1 + ((mcs) - 85) / 8)
+
+#define GET_11N_MCS_NSS(mcs) ((mcs) < 32 ? (1 + ((mcs) / 8)) \
+				: ((mcs) == 32 ? 1 : GET_PROPRIETARY_11N_MCS_NSS(mcs)))
+
+#define MAX_CCA_CHANNELS 38	/* Max number of 20 Mhz wide channels */
+#define MAX_CCA_SECS	60	/* CCA keeps this many seconds history */
+
+#define IBSS_MED        15	/* Medium in-bss congestion percentage */
+#define IBSS_HI         25	/* Hi in-bss congestion percentage */
+#define OBSS_MED        12
+#define OBSS_HI         25
+#define INTERFER_MED    5
+#define INTERFER_HI     10
+
+#define  CCA_FLAG_2G_ONLY		0x01	/* Return a channel from 2.4 GHz band */
+#define  CCA_FLAG_5G_ONLY		0x02	/* Return a channel from 5 GHz band */
+#define  CCA_FLAG_IGNORE_DURATION	0x04	/* Ignore dwell time for each channel */
+#define  CCA_FLAGS_PREFER_1_6_11	0x10
+#define  CCA_FLAG_IGNORE_INTERFER 	0x20 /* do not exclude channel based on interference level */
+
+#define CCA_ERRNO_BAND 		1	/* After filtering for band pref, no choices left */
+#define CCA_ERRNO_DURATION	2	/* After filtering for duration, no choices left */
+#define CCA_ERRNO_PREF_CHAN	3	/* After filtering for chan pref, no choices left */
+#define CCA_ERRNO_INTERFER	4	/* After filtering for interference, no choices left */
+#define CCA_ERRNO_TOO_FEW	5	/* Only 1 channel was input */
+
+#define WL_STA_AID(a)		((a) &~ 0xc000)
+
+/* Flags for sta_info_t indicating properties of STA */
+#define WL_STA_BRCM		0x00000001	/* Running a Broadcom driver */
+#define WL_STA_WME		0x00000002	/* WMM association */
+#define WL_STA_NONERP		0x00000004	/* No ERP */
+#define WL_STA_AUTHE		0x00000008	/* Authenticated */
+#define WL_STA_ASSOC		0x00000010	/* Associated */
+#define WL_STA_AUTHO		0x00000020	/* Authorized */
+#define WL_STA_WDS		0x00000040	/* Wireless Distribution System */
+#define WL_STA_WDS_LINKUP	0x00000080	/* WDS traffic/probes flowing properly */
+#define WL_STA_PS		0x00000100	/* STA is in power save mode from AP's viewpoint */
+#define WL_STA_APSD_BE		0x00000200	/* APSD delv/trigger for AC_BE is default enabled */
+#define WL_STA_APSD_BK		0x00000400	/* APSD delv/trigger for AC_BK is default enabled */
+#define WL_STA_APSD_VI		0x00000800	/* APSD delv/trigger for AC_VI is default enabled */
+#define WL_STA_APSD_VO		0x00001000	/* APSD delv/trigger for AC_VO is default enabled */
+#define WL_STA_N_CAP		0x00002000	/* STA 802.11n capable */
+#define WL_STA_SCBSTATS		0x00004000	/* Per STA debug stats */
+#define WL_STA_AMPDU_CAP	0x00008000	/* STA AMPDU capable */
+#define WL_STA_AMSDU_CAP	0x00010000	/* STA AMSDU capable */
+#define WL_STA_MIMO_PS		0x00020000	/* mimo ps mode is enabled */
+#define WL_STA_MIMO_RTS		0x00040000	/* send rts in mimo ps mode */
+#define WL_STA_RIFS_CAP		0x00080000	/* rifs enabled */
+#define WL_STA_VHT_CAP		0x00100000	/* STA VHT(11ac) capable */
+#define WL_STA_WPS		0x00200000	/* WPS state */
+
+#define WL_WDS_LINKUP		WL_STA_WDS_LINKUP	/* deprecated */
+
+/* STA HT cap fields */
+#define WL_STA_CAP_LDPC_CODING		0x0001	/* Support for rx of LDPC coded pkts */
+#define WL_STA_CAP_40MHZ		0x0002  /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define WL_STA_CAP_MIMO_PS_MASK		0x000C  /* Mimo PS mask */
+#define WL_STA_CAP_MIMO_PS_SHIFT	0x0002	/* Mimo PS shift */
+#define WL_STA_CAP_MIMO_PS_OFF		0x0003	/* Mimo PS, no restriction */
+#define WL_STA_CAP_MIMO_PS_RTS		0x0001	/* Mimo PS, send RTS/CTS around MIMO frames */
+#define WL_STA_CAP_MIMO_PS_ON		0x0000	/* Mimo PS, MIMO disallowed */
+#define WL_STA_CAP_GF			0x0010	/* Greenfield preamble support */
+#define WL_STA_CAP_SHORT_GI_20		0x0020	/* 20MHZ short guard interval support */
+#define WL_STA_CAP_SHORT_GI_40		0x0040	/* 40Mhz short guard interval support */
+#define WL_STA_CAP_TX_STBC		0x0080	/* Tx STBC support */
+#define WL_STA_CAP_RX_STBC_MASK		0x0300	/* Rx STBC mask */
+#define WL_STA_CAP_RX_STBC_SHIFT	8	/* Rx STBC shift */
+#define WL_STA_CAP_DELAYED_BA		0x0400	/* delayed BA support */
+#define WL_STA_CAP_MAX_AMSDU		0x0800	/* Max AMSDU size in bytes, 0=3839, 1=7935 */
+#define WL_STA_CAP_DSSS_CCK		0x1000	/* DSSS/CCK supported by the BSS */
+#define WL_STA_CAP_PSMP			0x2000	/* Power Save Multi Poll support */
+#define WL_STA_CAP_40MHZ_INTOLERANT	0x4000	/* 40MHz Intolerant */
+#define WL_STA_CAP_LSIG_TXOP		0x8000	/* L-SIG TXOP protection support */
+
+#define WL_STA_CAP_RX_STBC_NO		0x0	/* no rx STBC support */
+#define WL_STA_CAP_RX_STBC_ONE_STREAM	0x1	/* rx STBC support of 1 spatial stream */
+#define WL_STA_CAP_RX_STBC_TWO_STREAM	0x2	/* rx STBC support of 1-2 spatial streams */
+#define WL_STA_CAP_RX_STBC_THREE_STREAM	0x3	/* rx STBC support of 1-3 spatial streams */
+
+/* scb vht flags */
+#define WL_STA_VHT_LDPCCAP	0x0001
+#define WL_STA_SGI80		0x0002
+#define WL_STA_SGI160		0x0004
+#define WL_STA_VHT_TX_STBCCAP	0x0008
+#define WL_STA_VHT_RX_STBCCAP	0x0010
+#define WL_STA_SU_BEAMFORMER	0x0020
+#define WL_STA_SU_BEAMFORMEE	0x0040
+#define WL_STA_MU_BEAMFORMER	0x0080
+#define WL_STA_MU_BEAMFORMEE	0x0100
+#define WL_STA_VHT_TXOP_PS	0x0200
+#define WL_STA_HTC_VHT_CAP	0x0400
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED  0
+#define WLC_TXFILTER_OVERRIDE_ENABLED   1
+
+#define WL_IOCTL_ACTION_GET				0x0
+#define WL_IOCTL_ACTION_SET				0x1
+#define WL_IOCTL_ACTION_OVL_IDX_MASK	0x1e
+#define WL_IOCTL_ACTION_OVL_RSV			0x20
+#define WL_IOCTL_ACTION_OVL				0x40
+#define WL_IOCTL_ACTION_MASK			0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT		1
+
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_ANY   2
+
+/* Bitmask for scan_type */
+#define WL_SCANFLAGS_PASSIVE	0x01	/* force passive scan */
+#define WL_SCANFLAGS_RESERVED	0x02	/* Reserved */
+#define WL_SCANFLAGS_PROHIBITED	0x04	/* allow scanning prohibited channels */
+#define WL_SCANFLAGS_OFFCHAN	0x08	/* allow scanning/reporting off-channel APs */
+#define WL_SCANFLAGS_HOTSPOT	0x10	/* automatic ANQP to hotspot APs */
+#define WL_SCANFLAGS_SWTCHAN	0x20	/* Force channel switch for different bandwidth */
+
+/* wl_iscan_results status values */
+#define WL_SCAN_RESULTS_SUCCESS	0
+#define WL_SCAN_RESULTS_PARTIAL	1
+#define WL_SCAN_RESULTS_PENDING	2
+#define WL_SCAN_RESULTS_ABORTED	3
+#define WL_SCAN_RESULTS_NO_MEM  4
+
+#define SCANOL_ENABLED			(1 << 0)
+#define SCANOL_BCAST_SSID		(1 << 1)
+#define SCANOL_NOTIFY_BCAST_SSID	(1 << 2)
+#define SCANOL_RESULTS_PER_CYCLE	(1 << 3)
+
+/* scan times in milliseconds */
+#define SCANOL_HOME_TIME		45	/* for home channel processing */
+#define SCANOL_ASSOC_TIME		20	/* dwell on a channel while associated */
+#define SCANOL_UNASSOC_TIME		40	/* dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME		110	/* listen on a channel for passive scan */
+#define SCANOL_AWAY_LIMIT		100	/* max time to be away from home channel */
+#define SCANOL_IDLE_REST_TIME		40
+#define SCANOL_IDLE_REST_MULTIPLIER	0
+#define SCANOL_ACTIVE_REST_TIME		20
+#define SCANOL_ACTIVE_REST_MULTIPLIER	0
+#define SCANOL_CYCLE_IDLE_REST_TIME	300000	/* Idle Rest Time between Scan Cycle (msec) */
+#define SCANOL_CYCLE_IDLE_REST_MULTIPLIER	0	/* Idle Rest Time Multiplier */
+#define SCANOL_CYCLE_ACTIVE_REST_TIME	200
+#define SCANOL_CYCLE_ACTIVE_REST_MULTIPLIER	0
+#define SCANOL_MAX_REST_TIME		3600000	/* max rest time between scan cycle (msec) */
+#define SCANOL_CYCLE_DEFAULT		0	/* default for Max Scan Cycle, 0 = forever */
+#define SCANOL_CYCLE_MAX		864000	/* Max Scan Cycle */
+						/* 10 sec/scan cycle => 100 days */
+#define SCANOL_NPROBES			2	/* for Active scan; send n probes on each channel */
+#define SCANOL_NPROBES_MAX		5	/* for Active scan; send n probes on each channel */
+#define SCANOL_SCAN_START_DLY		10	/* delay start of offload scan (sec) */
+#define SCANOL_SCAN_START_DLY_MAX	240	/* delay start of offload scan (sec) */
+#define SCANOL_MULTIPLIER_MAX		10	/* Max Multiplier */
+#define SCANOL_UNASSOC_TIME_MAX		100	/* max dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME_MAX		500	/* max listen on a channel for passive scan */
+#define SCANOL_SSID_MAX			16	/* max supported preferred SSID */
+
+/* masks for channel and ssid count */
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define WL_SCAN_ACTION_START      1
+#define WL_SCAN_ACTION_CONTINUE   2
+#define WL_SCAN_ACTION_ABORT      3
+
+
+#define ANTENNA_NUM_1	1		/* total number of antennas to be used */
+#define ANTENNA_NUM_2	2
+#define ANTENNA_NUM_3	3
+#define ANTENNA_NUM_4	4
+
+#define ANT_SELCFG_AUTO		0x80	/* bit indicates antenna sel AUTO */
+#define ANT_SELCFG_MASK		0x33	/* antenna configuration mask */
+#define ANT_SELCFG_TX_UNICAST	0	/* unicast tx antenna configuration */
+#define ANT_SELCFG_RX_UNICAST	1	/* unicast rx antenna configuration */
+#define ANT_SELCFG_TX_DEF	2	/* default tx antenna configuration */
+#define ANT_SELCFG_RX_DEF	3	/* default rx antenna configuration */
+
+/* interference source detection and identification mode */
+#define ITFR_MODE_DISABLE	0	/* disable feature */
+#define ITFR_MODE_MANUAL_ENABLE	1	/* enable manual detection */
+#define ITFR_MODE_AUTO_ENABLE	2	/* enable auto detection */
+
+/* bit definitions for flags in interference source report */
+#define ITFR_INTERFERENCED	1	/* interference detected */
+#define ITFR_HOME_CHANNEL	2	/* home channel has interference */
+#define ITFR_NOISY_ENVIRONMENT	4	/* noisy environment so feature stopped */
+
+#define WL_NUM_RPI_BINS		8
+#define WL_RM_TYPE_BASIC	1
+#define WL_RM_TYPE_CCA		2
+#define WL_RM_TYPE_RPI		3
+#define WL_RM_TYPE_ABORT	-1	/* ABORT any in-progress RM request */
+
+#define WL_RM_FLAG_PARALLEL	(1<<0)
+
+#define WL_RM_FLAG_LATE		(1<<1)
+#define WL_RM_FLAG_INCAPABLE	(1<<2)
+#define WL_RM_FLAG_REFUSED	(1<<3)
+
+/* flags */
+#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */
+
+#define WLC_CIS_DEFAULT	0	/* built-in default */
+#define WLC_CIS_SROM	1	/* source is sprom */
+#define WLC_CIS_OTP	2	/* source is otp */
+
+/* PCL - Power Control Loop */
+/* current gain setting is replaced by user input */
+#define WL_ATTEN_APP_INPUT_PCL_OFF	0	/* turn off PCL, apply supplied input */
+#define WL_ATTEN_PCL_ON			1	/* turn on PCL */
+/* current gain setting is maintained */
+#define WL_ATTEN_PCL_OFF		2	/* turn off PCL. */
+
+#define	PLC_CMD_FAILOVER	1
+#define	PLC_CMD_MAC_COST	2
+#define	PLC_CMD_LINK_COST	3
+#define	PLC_CMD_NODE_LIST	4
+
+#define NODE_TYPE_UNKNOWN	0	/* Unknown link */
+#define NODE_TYPE_WIFI_ONLY	1	/* Pure Wireless STA node */
+#define NODE_TYPE_PLC_ONLY	2	/* Pure PLC only node */
+#define NODE_TYPE_WIFI_PLC	3	/* WiFi PLC capable node */
+
+/* defines used by poweridx iovar - it controls power in a-band */
+/* current gain setting is maintained */
+#define WL_PWRIDX_PCL_OFF	-2	/* turn off PCL.  */
+#define WL_PWRIDX_PCL_ON	-1	/* turn on PCL */
+#define WL_PWRIDX_LOWER_LIMIT	-2	/* lower limit */
+#define WL_PWRIDX_UPPER_LIMIT	63	/* upper limit */
+/* value >= 0 causes
+ *	- input to be set to that value
+ *	- PCL to be off
+ */
+
+#define BCM_MAC_STATUS_INDICATION	(0x40010200L)
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED  0
+#define WLC_TXFILTER_OVERRIDE_ENABLED   1
+
+/* magic pattern used for mismatch driver and wl */
+#define WL_TXFIFO_SZ_MAGIC	0xa5a5
+
+/* check this magic number */
+#define WLC_IOCTL_MAGIC		0x14e46c77
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* bss_info_cap_t flags */
+#define WL_BSS_FLAGS_FROM_BEACON	0x01	/* bss_info derived from beacon */
+#define WL_BSS_FLAGS_FROM_CACHE		0x02	/* bss_info collected from cache */
+#define WL_BSS_FLAGS_RSSI_ONCHANNEL	0x04	/* rssi info received on channel (vs offchannel) */
+#define WL_BSS_FLAGS_HS20		0x08	/* hotspot 2.0 capable */
+#define WL_BSS_FLAGS_RSSI_INVALID	0x10	/* BSS contains invalid RSSI */
+#define WL_BSS_FLAGS_RSSI_INACCURATE	0x20	/* BSS contains inaccurate RSSI */
+#define WL_BSS_FLAGS_SNR_INVALID	0x40	/* BSS contains invalid SNR */
+#define WL_BSS_FLAGS_NF_INVALID		0x80	/* BSS contains invalid noise floor */
+
+/* bssinfo flag for nbss_cap */
+#define VHT_BI_SGI_80MHZ			0x00000100
+#define VHT_BI_80MHZ			    0x00000200
+#define VHT_BI_160MHZ			    0x00000400
+#define VHT_BI_8080MHZ			    0x00000800
+
+/* reference to wl_ioctl_t struct used by usermode driver */
+#define ioctl_subtype	set		/* subtype param */
+#define ioctl_pid	used		/* pid param */
+#define ioctl_status	needed		/* status param */
+
+
+/* Enumerate crypto algorithms */
+#define	CRYPTO_ALGO_OFF			0
+#define	CRYPTO_ALGO_WEP1		1
+#define	CRYPTO_ALGO_TKIP		2
+#define	CRYPTO_ALGO_WEP128		3
+#define CRYPTO_ALGO_AES_CCM		4
+#define CRYPTO_ALGO_AES_OCB_MSDU	5
+#define CRYPTO_ALGO_AES_OCB_MPDU	6
+#if !defined(BCMCCX) && !defined(BCMEXTCCX)
+#define CRYPTO_ALGO_NALG		7
+#else
+#define CRYPTO_ALGO_CKIP		7
+#define CRYPTO_ALGO_CKIP_MMH	8
+#define CRYPTO_ALGO_WEP_MMH		9
+#define CRYPTO_ALGO_NALG		10
+#endif /* !BCMCCX && !BCMEXTCCX */
+
+#define CRYPTO_ALGO_SMS4		11
+#define CRYPTO_ALGO_PMK			12	/* for 802.1x supp to set PMK before 4-way */
+#define CRYPTO_ALGO_BIP			13  /* 802.11w BIP (aes cmac) */
+
+#define CRYPTO_ALGO_AES_GCM     14  /* 128 bit GCM */
+#define CRYPTO_ALGO_AES_CCM256  15  /* 256 bit CCM */
+#define CRYPTO_ALGO_AES_GCM256  16  /* 256 bit GCM */
+#define CRYPTO_ALGO_BIP_CMAC256 17  /* 256 bit BIP CMAC */
+#define CRYPTO_ALGO_BIP_GMAC    18  /* 128 bit BIP GMAC */
+#define CRYPTO_ALGO_BIP_GMAC256 19  /* 256 bit BIP GMAC */
+
+#define CRYPTO_ALGO_NONE        CRYPTO_ALGO_OFF
+
+#define WSEC_GEN_MIC_ERROR	0x0001
+#define WSEC_GEN_REPLAY		0x0002
+#define WSEC_GEN_ICV_ERROR	0x0004
+#define WSEC_GEN_MFP_ACT_ERROR	0x0008
+#define WSEC_GEN_MFP_DISASSOC_ERROR	0x0010
+#define WSEC_GEN_MFP_DEAUTH_ERROR	0x0020
+
+#define WL_SOFT_KEY	(1 << 0)	/* Indicates this key is using soft encrypt */
+#define WL_PRIMARY_KEY	(1 << 1)	/* Indicates this key is the primary (ie tx) key */
+#if defined(BCMCCX) || defined(BCMEXTCCX)
+#define WL_CKIP_KP	(1 << 4)	/* CMIC */
+#define WL_CKIP_MMH	(1 << 5)	/* CKIP */
+#else
+#define WL_KF_RES_4	(1 << 4)	/* Reserved for backward compat */
+#define WL_KF_RES_5	(1 << 5)	/* Reserved for backward compat */
+#endif /* BCMCCX || BCMEXTCCX */
+#define WL_IBSS_PEER_GROUP_KEY	(1 << 6)	/* Indicates a group key for a IBSS PEER */
+
+/* wireless security bitvec */
+#define WEP_ENABLED		0x0001
+#define TKIP_ENABLED		0x0002
+#define AES_ENABLED		0x0004
+#define WSEC_SWFLAG		0x0008
+#ifdef BCMCCX
+#define CKIP_KP_ENABLED		0x0010
+#define CKIP_MIC_ENABLED	0x0020
+#endif /* BCMCCX */
+#define SES_OW_ENABLED		0x0040	/* to go into transition mode without setting wep */
+#ifdef BCMWAPI_WPI
+#define SMS4_ENABLED		0x0100
+#endif /* BCMWAPI_WPI */
+
+/* wsec macros for operating on the above definitions */
+#define WSEC_WEP_ENABLED(wsec)	((wsec) & WEP_ENABLED)
+#define WSEC_TKIP_ENABLED(wsec)	((wsec) & TKIP_ENABLED)
+#define WSEC_AES_ENABLED(wsec)	((wsec) & AES_ENABLED)
+
+#ifdef BCMCCX
+#define WSEC_CKIP_KP_ENABLED(wsec)	((wsec) & CKIP_KP_ENABLED)
+#define WSEC_CKIP_MIC_ENABLED(wsec)	((wsec) & CKIP_MIC_ENABLED)
+#define WSEC_CKIP_ENABLED(wsec)	((wsec) & (CKIP_KP_ENABLED|CKIP_MIC_ENABLED))
+
+#ifdef BCMWAPI_WPI
+#define WSEC_ENABLED(wsec) \
+	((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED |	\
+	  CKIP_MIC_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define WSEC_ENABLED(wsec) \
+		((wsec) & \
+		 (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | CKIP_MIC_ENABLED))
+#endif /* BCMWAPI_WPI */
+#else /* defined BCMCCX */
+#ifdef BCMWAPI_WPI
+#define WSEC_ENABLED(wsec)	((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define WSEC_ENABLED(wsec)	((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+#endif /* BCMCCX */
+#define WSEC_SES_OW_ENABLED(wsec)	((wsec) & SES_OW_ENABLED)
+#ifdef BCMWAPI_WAI
+#define WSEC_SMS4_ENABLED(wsec)	((wsec) & SMS4_ENABLED)
+#endif /* BCMWAPI_WAI */
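+/*
+ * Illustrative usage (not part of the original header; the wsec variable is
+ * hypothetical): testing a wireless-security bitvec with the macros above.
+ */
+#if 0	/* example only */
+	if (WSEC_ENABLED(wsec)) {
+		if (WSEC_AES_ENABLED(wsec))
+			; /* provision AES (CCMP) keys */
+		else if (WSEC_TKIP_ENABLED(wsec))
+			; /* provision TKIP keys */
+		else if (WSEC_WEP_ENABLED(wsec))
+			; /* legacy WEP */
+	}
+#endif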
+
+#define MFP_CAPABLE		0x0200
+#define MFP_REQUIRED	0x0400
+#define MFP_SHA256		0x0800 /* a special configuration for STA for WIFI test tool */
+
+/* WPA authentication mode bitvec */
+#define WPA_AUTH_DISABLED	0x0000	/* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE		0x0001	/* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED	0x0002	/* over 802.1x */
+#define WPA_AUTH_PSK		0x0004	/* Pre-shared key */
+#if defined(BCMCCX) || defined(BCMEXTCCX)
+#define WPA_AUTH_CCKM		0x0008	/* CCKM */
+#define WPA2_AUTH_CCKM		0x0010	/* CCKM2 */
+#endif	/* BCMCCX || BCMEXTCCX */
+/* #define WPA_AUTH_8021X 0x0020 */	/* 802.1x, reserved */
+#define WPA2_AUTH_UNSPECIFIED	0x0040	/* over 802.1x */
+#define WPA2_AUTH_PSK		0x0080	/* Pre-shared key */
+#define BRCM_AUTH_PSK           0x0100  /* BRCM specific PSK */
+#define BRCM_AUTH_DPT		0x0200	/* DPT PSK without group keys */
+#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
+#define WPA_AUTH_WAPI           0x0400
+#define WAPI_AUTH_NONE		WPA_AUTH_NONE	/* none (IBSS) */
+#define WAPI_AUTH_UNSPECIFIED	0x0400	/* over AS */
+#define WAPI_AUTH_PSK		0x0800	/* Pre-shared key */
+#endif /* BCMWAPI_WAI || BCMWAPI_WPI */
+#define WPA2_AUTH_MFP           0x1000  /* MFP (11w) in contrast to CCX */
+#define WPA2_AUTH_TPK		0x2000 	/* TDLS Peer Key */
+#define WPA2_AUTH_FT		0x4000 	/* Fast Transition. */
+#define WPA_AUTH_PFN_ANY	0xffffffff	/* for PFN, match only ssid */
+
+/* pmkid */
+#define	MAXPMKID		16
+
+#ifdef SROM12
+#define	WLC_IOCTL_MAXLEN		10000	/* max length ioctl buffer required */
+#else
+#define	WLC_IOCTL_MAXLEN		8192	/* max length ioctl buffer required */
+#endif /* SROM12 */
+
+#define	WLC_IOCTL_SMLEN			256	/* "small" length ioctl buffer required */
+#define WLC_IOCTL_MEDLEN		1536    /* "med" length ioctl buffer required */
+#if defined(LCNCONF) || defined(LCN40CONF)
+#define WLC_SAMPLECOLLECT_MAXLEN	1024	/* Max Sample Collect buffer */
+#else
+#define WLC_SAMPLECOLLECT_MAXLEN	10240	/* Max Sample Collect buffer for two cores */
+#endif
+#define WLC_SAMPLECOLLECT_MAXLEN_LCN40  8192
+
+/* common ioctl definitions */
+#define WLC_GET_MAGIC				0
+#define WLC_GET_VERSION				1
+#define WLC_UP					2
+#define WLC_DOWN				3
+#define WLC_GET_LOOP				4
+#define WLC_SET_LOOP				5
+#define WLC_DUMP				6
+#define WLC_GET_MSGLEVEL			7
+#define WLC_SET_MSGLEVEL			8
+#define WLC_GET_PROMISC				9
+#define WLC_SET_PROMISC				10
+/* #define WLC_OVERLAY_IOCTL			11 */ /* not supported */
+#define WLC_GET_RATE				12
+#define WLC_GET_MAX_RATE			13
+#define WLC_GET_INSTANCE			14
+/* #define WLC_GET_FRAG				15 */ /* no longer supported */
+/* #define WLC_SET_FRAG				16 */ /* no longer supported */
+/* #define WLC_GET_RTS				17 */ /* no longer supported */
+/* #define WLC_SET_RTS				18 */ /* no longer supported */
+#define WLC_GET_INFRA				19
+#define WLC_SET_INFRA				20
+#define WLC_GET_AUTH				21
+#define WLC_SET_AUTH				22
+#define WLC_GET_BSSID				23
+#define WLC_SET_BSSID				24
+#define WLC_GET_SSID				25
+#define WLC_SET_SSID				26
+#define WLC_RESTART				27
+#define WLC_TERMINATED				28
+/* #define WLC_DUMP_SCB				28 */ /* no longer supported */
+#define WLC_GET_CHANNEL				29
+#define WLC_SET_CHANNEL				30
+#define WLC_GET_SRL				31
+#define WLC_SET_SRL				32
+#define WLC_GET_LRL				33
+#define WLC_SET_LRL				34
+#define WLC_GET_PLCPHDR				35
+#define WLC_SET_PLCPHDR				36
+#define WLC_GET_RADIO				37
+#define WLC_SET_RADIO				38
+#define WLC_GET_PHYTYPE				39
+#define WLC_DUMP_RATE				40
+#define WLC_SET_RATE_PARAMS			41
+#define WLC_GET_FIXRATE				42
+#define WLC_SET_FIXRATE				43
+/* #define WLC_GET_WEP				42 */ /* no longer supported */
+/* #define WLC_SET_WEP				43 */ /* no longer supported */
+#define WLC_GET_KEY				44
+#define WLC_SET_KEY				45
+#define WLC_GET_REGULATORY			46
+#define WLC_SET_REGULATORY			47
+#define WLC_GET_PASSIVE_SCAN			48
+#define WLC_SET_PASSIVE_SCAN			49
+#define WLC_SCAN				50
+#define WLC_SCAN_RESULTS			51
+#define WLC_DISASSOC				52
+#define WLC_REASSOC				53
+#define WLC_GET_ROAM_TRIGGER			54
+#define WLC_SET_ROAM_TRIGGER			55
+#define WLC_GET_ROAM_DELTA			56
+#define WLC_SET_ROAM_DELTA			57
+#define WLC_GET_ROAM_SCAN_PERIOD		58
+#define WLC_SET_ROAM_SCAN_PERIOD		59
+#define WLC_EVM					60	/* diag */
+#define WLC_GET_TXANT				61
+#define WLC_SET_TXANT				62
+#define WLC_GET_ANTDIV				63
+#define WLC_SET_ANTDIV				64
+/* #define WLC_GET_TXPWR			65 */ /* no longer supported */
+/* #define WLC_SET_TXPWR			66 */ /* no longer supported */
+#define WLC_GET_CLOSED				67
+#define WLC_SET_CLOSED				68
+#define WLC_GET_MACLIST				69
+#define WLC_SET_MACLIST				70
+#define WLC_GET_RATESET				71
+#define WLC_SET_RATESET				72
+/* #define WLC_GET_LOCALE			73 */ /* no longer supported */
+#define WLC_LONGTRAIN				74
+#define WLC_GET_BCNPRD				75
+#define WLC_SET_BCNPRD				76
+#define WLC_GET_DTIMPRD				77
+#define WLC_SET_DTIMPRD				78
+#define WLC_GET_SROM				79
+#define WLC_SET_SROM				80
+#define WLC_GET_WEP_RESTRICT			81
+#define WLC_SET_WEP_RESTRICT			82
+#define WLC_GET_COUNTRY				83
+#define WLC_SET_COUNTRY				84
+#define WLC_GET_PM				85
+#define WLC_SET_PM				86
+#define WLC_GET_WAKE				87
+#define WLC_SET_WAKE				88
+/* #define WLC_GET_D11CNTS			89 */ /* -> "counters" iovar */
+#define WLC_GET_FORCELINK			90	/* ndis only */
+#define WLC_SET_FORCELINK			91	/* ndis only */
+#define WLC_FREQ_ACCURACY			92	/* diag */
+#define WLC_CARRIER_SUPPRESS			93	/* diag */
+#define WLC_GET_PHYREG				94
+#define WLC_SET_PHYREG				95
+#define WLC_GET_RADIOREG			96
+#define WLC_SET_RADIOREG			97
+#define WLC_GET_REVINFO				98
+#define WLC_GET_UCANTDIV			99
+#define WLC_SET_UCANTDIV			100
+#define WLC_R_REG				101
+#define WLC_W_REG				102
+/* #define WLC_DIAG_LOOPBACK			103	old tray diag */
+/* #define WLC_RESET_D11CNTS			104 */ /* -> "reset_d11cnts" iovar */
+#define WLC_GET_MACMODE				105
+#define WLC_SET_MACMODE				106
+#define WLC_GET_MONITOR				107
+#define WLC_SET_MONITOR				108
+#define WLC_GET_GMODE				109
+#define WLC_SET_GMODE				110
+#define WLC_GET_LEGACY_ERP			111
+#define WLC_SET_LEGACY_ERP			112
+#define WLC_GET_RX_ANT				113
+#define WLC_GET_CURR_RATESET			114	/* current rateset */
+#define WLC_GET_SCANSUPPRESS			115
+#define WLC_SET_SCANSUPPRESS			116
+#define WLC_GET_AP				117
+#define WLC_SET_AP				118
+#define WLC_GET_EAP_RESTRICT			119
+#define WLC_SET_EAP_RESTRICT			120
+#define WLC_SCB_AUTHORIZE			121
+#define WLC_SCB_DEAUTHORIZE			122
+#define WLC_GET_WDSLIST				123
+#define WLC_SET_WDSLIST				124
+#define WLC_GET_ATIM				125
+#define WLC_SET_ATIM				126
+#define WLC_GET_RSSI				127
+#define WLC_GET_PHYANTDIV			128
+#define WLC_SET_PHYANTDIV			129
+#define WLC_AP_RX_ONLY				130
+#define WLC_GET_TX_PATH_PWR			131
+#define WLC_SET_TX_PATH_PWR			132
+#define WLC_GET_WSEC				133
+#define WLC_SET_WSEC				134
+#define WLC_GET_PHY_NOISE			135
+#define WLC_GET_BSS_INFO			136
+#define WLC_GET_PKTCNTS				137
+#define WLC_GET_LAZYWDS				138
+#define WLC_SET_LAZYWDS				139
+#define WLC_GET_BANDLIST			140
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_GET_BAND				141
+#define WLC_SET_BAND				142
+#define WLC_SCB_DEAUTHENTICATE			143
+#define WLC_GET_SHORTSLOT			144
+#define WLC_GET_SHORTSLOT_OVERRIDE		145
+#define WLC_SET_SHORTSLOT_OVERRIDE		146
+#define WLC_GET_SHORTSLOT_RESTRICT		147
+#define WLC_SET_SHORTSLOT_RESTRICT		148
+#define WLC_GET_GMODE_PROTECTION		149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE	150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE	151
+#define WLC_UPGRADE				152
+/* #define WLC_GET_MRATE			153 */ /* no longer supported */
+/* #define WLC_SET_MRATE			154 */ /* no longer supported */
+#define WLC_GET_IGNORE_BCNS			155
+#define WLC_SET_IGNORE_BCNS			156
+#define WLC_GET_SCB_TIMEOUT			157
+#define WLC_SET_SCB_TIMEOUT			158
+#define WLC_GET_ASSOCLIST			159
+#define WLC_GET_CLK				160
+#define WLC_SET_CLK				161
+#define WLC_GET_UP				162
+#define WLC_OUT					163
+#define WLC_GET_WPA_AUTH			164
+#define WLC_SET_WPA_AUTH			165
+#define WLC_GET_UCFLAGS				166
+#define WLC_SET_UCFLAGS				167
+#define WLC_GET_PWRIDX				168
+#define WLC_SET_PWRIDX				169
+#define WLC_GET_TSSI				170
+#define WLC_GET_SUP_RATESET_OVERRIDE		171
+#define WLC_SET_SUP_RATESET_OVERRIDE		172
+/* #define WLC_SET_FAST_TIMER			173 */ /* no longer supported */
+/* #define WLC_GET_FAST_TIMER			174 */ /* no longer supported */
+/* #define WLC_SET_SLOW_TIMER			175 */ /* no longer supported */
+/* #define WLC_GET_SLOW_TIMER			176 */ /* no longer supported */
+/* #define WLC_DUMP_PHYREGS			177 */ /* no longer supported */
+#define WLC_GET_PROTECTION_CONTROL		178
+#define WLC_SET_PROTECTION_CONTROL		179
+#endif /* LINUX_POSTMOGRIFY_REMOVAL  */
+#define WLC_GET_PHYLIST				180
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_ENCRYPT_STRENGTH			181	/* ndis only */
+#define WLC_DECRYPT_STATUS			182	/* ndis only */
+#define WLC_GET_KEY_SEQ				183
+#define WLC_GET_SCAN_CHANNEL_TIME		184
+#define WLC_SET_SCAN_CHANNEL_TIME		185
+#define WLC_GET_SCAN_UNASSOC_TIME		186
+#define WLC_SET_SCAN_UNASSOC_TIME		187
+#define WLC_GET_SCAN_HOME_TIME			188
+#define WLC_SET_SCAN_HOME_TIME			189
+#define WLC_GET_SCAN_NPROBES			190
+#define WLC_SET_SCAN_NPROBES			191
+#define WLC_GET_PRB_RESP_TIMEOUT		192
+#define WLC_SET_PRB_RESP_TIMEOUT		193
+#define WLC_GET_ATTEN				194
+#define WLC_SET_ATTEN				195
+#define WLC_GET_SHMEM				196	/* diag */
+#define WLC_SET_SHMEM				197	/* diag */
+/* #define WLC_GET_GMODE_PROTECTION_CTS		198 */ /* no longer supported */
+/* #define WLC_SET_GMODE_PROTECTION_CTS		199 */ /* no longer supported */
+#define WLC_SET_WSEC_TEST			200
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON	201
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_TKIP_COUNTERMEASURES		202
+#define WLC_GET_PIOMODE				203
+#define WLC_SET_PIOMODE				204
+#define WLC_SET_ASSOC_PREFER			205
+#define WLC_GET_ASSOC_PREFER			206
+#define WLC_SET_ROAM_PREFER			207
+#define WLC_GET_ROAM_PREFER			208
+#define WLC_SET_LED				209
+#define WLC_GET_LED				210
+#define WLC_GET_INTERFERENCE_MODE		211
+#define WLC_SET_INTERFERENCE_MODE		212
+#define WLC_GET_CHANNEL_QA			213
+#define WLC_START_CHANNEL_QA			214
+#define WLC_GET_CHANNEL_SEL			215
+#define WLC_START_CHANNEL_SEL			216
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_GET_VALID_CHANNELS			217
+#define WLC_GET_FAKEFRAG			218
+#define WLC_SET_FAKEFRAG			219
+#define WLC_GET_PWROUT_PERCENTAGE		220
+#define WLC_SET_PWROUT_PERCENTAGE		221
+#define WLC_SET_BAD_FRAME_PREEMPT		222
+#define WLC_GET_BAD_FRAME_PREEMPT		223
+#define WLC_SET_LEAP_LIST			224
+#define WLC_GET_LEAP_LIST			225
+#define WLC_GET_CWMIN				226
+#define WLC_SET_CWMIN				227
+#define WLC_GET_CWMAX				228
+#define WLC_SET_CWMAX				229
+#define WLC_GET_WET				230
+#define WLC_SET_WET				231
+#define WLC_GET_PUB				232
+/* #define WLC_SET_GLACIAL_TIMER		233 */ /* no longer supported */
+/* #define WLC_GET_GLACIAL_TIMER		234 */ /* no longer supported */
+#define WLC_GET_KEY_PRIMARY			235
+#define WLC_SET_KEY_PRIMARY			236
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+/* #define WLC_DUMP_RADIOREGS			237 */ /* no longer supported */
+#define WLC_GET_ACI_ARGS			238
+#define WLC_SET_ACI_ARGS			239
+#define WLC_UNSET_CALLBACK			240
+#define WLC_SET_CALLBACK			241
+#define WLC_GET_RADAR				242
+#define WLC_SET_RADAR				243
+#define WLC_SET_SPECT_MANAGMENT			244
+#define WLC_GET_SPECT_MANAGMENT			245
+#define WLC_WDS_GET_REMOTE_HWADDR		246	/* handled in wl_linux.c/wl_vx.c */
+#define WLC_WDS_GET_WPA_SUP			247
+#define WLC_SET_CS_SCAN_TIMER			248
+#define WLC_GET_CS_SCAN_TIMER			249
+#define WLC_MEASURE_REQUEST			250
+#define WLC_INIT				251
+#define WLC_SEND_QUIET				252
+#define WLC_KEEPALIVE			253
+#define WLC_SEND_PWR_CONSTRAINT			254
+#define WLC_UPGRADE_STATUS			255
+#define WLC_CURRENT_PWR				256
+#define WLC_GET_SCAN_PASSIVE_TIME		257
+#define WLC_SET_SCAN_PASSIVE_TIME		258
+#define WLC_LEGACY_LINK_BEHAVIOR		259
+#define WLC_GET_CHANNELS_IN_COUNTRY		260
+#define WLC_GET_COUNTRY_LIST			261
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_GET_VAR				262	/* get value of named variable */
+#define WLC_SET_VAR				263	/* set named variable to value */
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_NVRAM_GET				264	/* deprecated */
+#define WLC_NVRAM_SET				265
+#define WLC_NVRAM_DUMP				266
+#define WLC_REBOOT				267
+#endif /* !LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_SET_WSEC_PMK			268
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_GET_AUTH_MODE			269
+#define WLC_SET_AUTH_MODE			270
+#define WLC_GET_WAKEENTRY			271
+#define WLC_SET_WAKEENTRY			272
+#define WLC_NDCONFIG_ITEM			273	/* currently handled in wl_oid.c */
+#define WLC_NVOTPW				274
+#define WLC_OTPW				275
+#define WLC_IOV_BLOCK_GET			276
+#define WLC_IOV_MODULES_GET			277
+#define WLC_SOFT_RESET				278
+#define WLC_GET_ALLOW_MODE			279
+#define WLC_SET_ALLOW_MODE			280
+#define WLC_GET_DESIRED_BSSID			281
+#define WLC_SET_DESIRED_BSSID			282
+#define	WLC_DISASSOC_MYAP			283
+#define WLC_GET_NBANDS				284	/* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES			285	/* for Dongle EXT_STA support */
+#define WLC_GET_WLC_BSS_INFO			286	/* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_INFO			287	/* for Dongle EXT_STA support */
+#define WLC_GET_OID_PHY				288	/* for Dongle EXT_STA support */
+#define WLC_SET_OID_PHY				289	/* for Dongle EXT_STA support */
+#define WLC_SET_ASSOC_TIME			290	/* for Dongle EXT_STA support */
+#define WLC_GET_DESIRED_SSID			291	/* for Dongle EXT_STA support */
+#define WLC_GET_CHANSPEC			292	/* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_STATE			293	/* for Dongle EXT_STA support */
+#define WLC_SET_PHY_STATE			294	/* for Dongle EXT_STA support */
+#define WLC_GET_SCAN_PENDING			295	/* for Dongle EXT_STA support */
+#define WLC_GET_SCANREQ_PENDING			296	/* for Dongle EXT_STA support */
+#define WLC_GET_PREV_ROAM_REASON		297	/* for Dongle EXT_STA support */
+#define WLC_SET_PREV_ROAM_REASON		298	/* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES_PI			299	/* for Dongle EXT_STA support */
+#define WLC_GET_PHY_STATE			300	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA_RSN			301	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA2_RSN			302	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_BCN_TS			303	/* for Dongle EXT_STA support */
+#define WLC_GET_INT_DISASSOC			304	/* for Dongle EXT_STA support */
+#define WLC_SET_NUM_PEERS			305     /* for Dongle EXT_STA support */
+#define WLC_GET_NUM_BSS				306	/* for Dongle EXT_STA support */
+#define WLC_PHY_SAMPLE_COLLECT			307	/* phy sample collect mode */
+/* #define WLC_UM_PRIV				308 */	/* Deprecated: usermode driver */
+#define WLC_GET_CMD				309
+/* #define WLC_LAST				310 */	/* Never used - can be reused */
+#define WLC_SET_INTERFERENCE_OVERRIDE_MODE	311	/* set inter mode override */
+#define WLC_GET_INTERFERENCE_OVERRIDE_MODE	312	/* get inter mode override */
+/* #define WLC_GET_WAI_RESTRICT			313 */	/* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_RESTRICT			314 */	/* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_REKEY			315 */	/* for WAPI, deprecated use iovar instead */
+#define WLC_SET_NAT_CONFIG			316	/* for configuring NAT filter driver */
+#define WLC_GET_NAT_STATE			317
+#define WLC_GET_TXBF_RATESET			318
+#define WLC_SET_TXBF_RATESET			319
+#define WLC_SCAN_CQ				320
+#define WLC_GET_RSSI_QDB			321 /* qdB portion of the RSSI */
+#define WLC_DUMP_RATESET			322
+#define WLC_ECHO				323
+#define WLC_LAST				324
+#ifndef EPICTRL_COOKIE
+#define EPICTRL_COOKIE		0xABADCEDE
+#endif
+
+/* vx wlc ioctl's offset */
+#define CMN_IOCTL_OFF 0x180
+
+/*
+ * custom OID support
+ *
+ * 0xFF - implementation specific OID
+ * 0xE4 - first byte of Broadcom PCI vendor ID
+ * 0x14 - second byte of Broadcom PCI vendor ID
+ * 0xXX - the custom OID number
+ */
+
+/* begin 0x1f values beyond the start of the ET driver range. */
+#define WL_OID_BASE		0xFFE41420
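+/*
+ * Illustrative decomposition (not part of the original header):
+ * 0xFFE41420 = 0xFF (implementation specific), 0xE4/0x14 (Broadcom PCI
+ * vendor ID 0x14E4), 0x20 (start of the wl OID number range), so e.g.
+ * OID_WL_GETINSTANCE == WL_OID_BASE + WLC_GET_INSTANCE == 0xFFE4142E.
+ */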
+
+/* NDIS overrides */
+#define OID_WL_GETINSTANCE	(WL_OID_BASE + WLC_GET_INSTANCE)
+#define OID_WL_GET_FORCELINK	(WL_OID_BASE + WLC_GET_FORCELINK)
+#define OID_WL_SET_FORCELINK	(WL_OID_BASE + WLC_SET_FORCELINK)
+#define	OID_WL_ENCRYPT_STRENGTH	(WL_OID_BASE + WLC_ENCRYPT_STRENGTH)
+#define OID_WL_DECRYPT_STATUS	(WL_OID_BASE + WLC_DECRYPT_STATUS)
+#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR)
+#define OID_WL_NDCONFIG_ITEM	(WL_OID_BASE + WLC_NDCONFIG_ITEM)
+
+/* EXT_STA Dongle support */
+#define OID_STA_CHANSPEC	(WL_OID_BASE + WLC_GET_CHANSPEC)
+#define OID_STA_NBANDS		(WL_OID_BASE + WLC_GET_NBANDS)
+#define OID_STA_GET_PHY		(WL_OID_BASE + WLC_GET_OID_PHY)
+#define OID_STA_SET_PHY		(WL_OID_BASE + WLC_SET_OID_PHY)
+#define OID_STA_ASSOC_TIME	(WL_OID_BASE + WLC_SET_ASSOC_TIME)
+#define OID_STA_DESIRED_SSID	(WL_OID_BASE + WLC_GET_DESIRED_SSID)
+#define OID_STA_SET_PHY_STATE	(WL_OID_BASE + WLC_SET_PHY_STATE)
+#define OID_STA_SCAN_PENDING	(WL_OID_BASE + WLC_GET_SCAN_PENDING)
+#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING)
+#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON)
+#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON)
+#define OID_STA_GET_PHY_STATE	(WL_OID_BASE + WLC_GET_PHY_STATE)
+#define OID_STA_INT_DISASSOC	(WL_OID_BASE + WLC_GET_INT_DISASSOC)
+#define OID_STA_SET_NUM_PEERS	(WL_OID_BASE + WLC_SET_NUM_PEERS)
+#define OID_STA_GET_NUM_BSS	(WL_OID_BASE + WLC_GET_NUM_BSS)
+
+/* NAT filter driver support */
+#define OID_NAT_SET_CONFIG	(WL_OID_BASE + WLC_SET_NAT_CONFIG)
+#define OID_NAT_GET_STATE	(WL_OID_BASE + WLC_GET_NAT_STATE)
+
+#define WL_DECRYPT_STATUS_SUCCESS	1
+#define WL_DECRYPT_STATUS_FAILURE	2
+#define WL_DECRYPT_STATUS_UNKNOWN	3
+
+/* allows user-mode app to poll the status of USB image upgrade */
+#define WLC_UPGRADE_SUCCESS			0
+#define WLC_UPGRADE_PENDING			1
+
+/* WLC_GET_AUTH, WLC_SET_AUTH values */
+#define WL_AUTH_OPEN_SYSTEM		0	/* d11 open authentication */
+#define WL_AUTH_SHARED_KEY		1	/* d11 shared authentication */
+#define WL_AUTH_OPEN_SHARED		2	/* try open, then shared if open failed w/rc 13 */
+
+/* a large TX Power as an init value to factor out of MIN() calculations,
+ * keep low enough to fit in an int8, units are .25 dBm
+ */
+#define WLC_TXPWR_MAX		(127)	/* ~32 dBm = 1,500 mW */
+
+/* "diag" iovar argument and error code */
+#define WL_DIAG_INTERRUPT			1	/* d11 loopback interrupt test */
+#define WL_DIAG_LOOPBACK			2	/* d11 loopback data test */
+#define WL_DIAG_MEMORY				3	/* d11 memory test */
+#define WL_DIAG_LED				4	/* LED test */
+#define WL_DIAG_REG				5	/* d11/phy register test */
+#define WL_DIAG_SROM				6	/* srom read/crc test */
+#define WL_DIAG_DMA				7	/* DMA test */
+#define WL_DIAG_LOOPBACK_EXT			8	/* enhanced d11 loopback data test */
+
+#define WL_DIAGERR_SUCCESS			0
+#define WL_DIAGERR_FAIL_TO_RUN			1	/* unable to run requested diag */
+#define WL_DIAGERR_NOT_SUPPORTED		2	/* diag requested is not supported */
+#define WL_DIAGERR_INTERRUPT_FAIL		3	/* loopback interrupt test failed */
+#define WL_DIAGERR_LOOPBACK_FAIL		4	/* loopback data test failed */
+#define WL_DIAGERR_SROM_FAIL			5	/* srom read failed */
+#define WL_DIAGERR_SROM_BADCRC			6	/* srom crc failed */
+#define WL_DIAGERR_REG_FAIL			7	/* d11/phy register test failed */
+#define WL_DIAGERR_MEMORY_FAIL			8	/* d11 memory test failed */
+#define WL_DIAGERR_NOMEM			9	/* diag test failed due to no memory */
+#define WL_DIAGERR_DMA_FAIL			10	/* DMA test failed */
+
+#define WL_DIAGERR_MEMORY_TIMEOUT		11	/* d11 memory test didn't finish in time */
+#define WL_DIAGERR_MEMORY_BADPATTERN		12	/* d11 memory test result in bad pattern */
+
+/* band types */
+#define	WLC_BAND_AUTO		0	/* auto-select */
+#define	WLC_BAND_5G		1	/* 5 GHz */
+#define	WLC_BAND_2G		2	/* 2.4 GHz */
+#define	WLC_BAND_ALL		3	/* all bands */
+
+/* band range returned by band_range iovar */
+#define WL_CHAN_FREQ_RANGE_2G      0
+#define WL_CHAN_FREQ_RANGE_5GL     1
+#define WL_CHAN_FREQ_RANGE_5GM     2
+#define WL_CHAN_FREQ_RANGE_5GH     3
+
+#define WL_CHAN_FREQ_RANGE_5GLL_5BAND    4
+#define WL_CHAN_FREQ_RANGE_5GLH_5BAND    5
+#define WL_CHAN_FREQ_RANGE_5GML_5BAND    6
+#define WL_CHAN_FREQ_RANGE_5GMH_5BAND    7
+#define WL_CHAN_FREQ_RANGE_5GH_5BAND     8
+
+#define WL_CHAN_FREQ_RANGE_5G_BAND0     1
+#define WL_CHAN_FREQ_RANGE_5G_BAND1     2
+#define WL_CHAN_FREQ_RANGE_5G_BAND2     3
+#define WL_CHAN_FREQ_RANGE_5G_BAND3     4
+
+#ifdef SROM12
+#define WL_CHAN_FREQ_RANGE_5G_BAND4 5
+#define WL_CHAN_FREQ_RANGE_2G_40 6
+#define WL_CHAN_FREQ_RANGE_5G_BAND0_40 7
+#define WL_CHAN_FREQ_RANGE_5G_BAND1_40 8
+#define WL_CHAN_FREQ_RANGE_5G_BAND2_40 9
+#define WL_CHAN_FREQ_RANGE_5G_BAND3_40 10
+#define WL_CHAN_FREQ_RANGE_5G_BAND4_40 11
+#define WL_CHAN_FREQ_RANGE_5G_BAND0_80 12
+#define WL_CHAN_FREQ_RANGE_5G_BAND1_80 13
+#define WL_CHAN_FREQ_RANGE_5G_BAND2_80 14
+#define WL_CHAN_FREQ_RANGE_5G_BAND3_80 15
+#define WL_CHAN_FREQ_RANGE_5G_BAND4_80 16
+
+#define WL_CHAN_FREQ_RANGE_5G_4BAND	17
+#define WL_CHAN_FREQ_RANGE_5G_5BAND	18
+#define WL_CHAN_FREQ_RANGE_5G_5BAND_40	19
+#define WL_CHAN_FREQ_RANGE_5G_5BAND_80	20
+#else
+#define WL_CHAN_FREQ_RANGE_5G_4BAND	5
+#endif /* SROM12 */
+/* MAC list modes */
+#define WLC_MACMODE_DISABLED	0	/* MAC list disabled */
+#define WLC_MACMODE_DENY	1	/* Deny specified (i.e. allow unspecified) */
+#define WLC_MACMODE_ALLOW	2	/* Allow specified (i.e. deny unspecified) */
+
+/*
+ * 54g modes (basic bits may still be overridden)
+ *
+ * GMODE_LEGACY_B			Rateset: 1b, 2b, 5.5, 11
+ *					Preamble: Long
+ *					Shortslot: Off
+ * GMODE_AUTO				Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ *					Extended Rateset: 6, 9, 12, 48
+ *					Preamble: Long
+ *					Shortslot: Auto
+ * GMODE_ONLY				Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
+ *					Extended Rateset: 6b, 9, 12b, 48
+ *					Preamble: Short required
+ *					Shortslot: Auto
+ * GMODE_B_DEFERRED			Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ *					Extended Rateset: 6, 9, 12, 48
+ *					Preamble: Long
+ *					Shortslot: On
+ * GMODE_PERFORMANCE			Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
+ *					Preamble: Short required
+ *					Shortslot: On and required
+ * GMODE_LRS				Rateset: 1b, 2b, 5.5b, 11b
+ *					Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
+ *					Preamble: Long
+ *					Shortslot: Auto
+ */
+#define GMODE_LEGACY_B		0
+#define GMODE_AUTO		1
+#define GMODE_ONLY		2
+#define GMODE_B_DEFERRED	3
+#define GMODE_PERFORMANCE	4
+#define GMODE_LRS		5
+#define GMODE_MAX		6
+
+/* values for PLCPHdr_override */
+#define WLC_PLCP_AUTO	-1
+#define WLC_PLCP_SHORT	0
+#define WLC_PLCP_LONG	1
+
+/* values for g_protection_override and n_protection_override */
+#define WLC_PROTECTION_AUTO		-1
+#define WLC_PROTECTION_OFF		0
+#define WLC_PROTECTION_ON		1
+#define WLC_PROTECTION_MMHDR_ONLY	2
+#define WLC_PROTECTION_CTS_ONLY		3
+
+/* values for g_protection_control and n_protection_control */
+#define WLC_PROTECTION_CTL_OFF		0
+#define WLC_PROTECTION_CTL_LOCAL	1
+#define WLC_PROTECTION_CTL_OVERLAP	2
+
+/* values for n_protection */
+#define WLC_N_PROTECTION_OFF		0
+#define WLC_N_PROTECTION_OPTIONAL	1
+#define WLC_N_PROTECTION_20IN40		2
+#define WLC_N_PROTECTION_MIXEDMODE	3
+
+/* values for n_preamble_type */
+#define WLC_N_PREAMBLE_MIXEDMODE	0
+#define WLC_N_PREAMBLE_GF		1
+#define WLC_N_PREAMBLE_GF_BRCM          2
+
+/* values for band specific 40MHz capabilities (deprecated) */
+#define WLC_N_BW_20ALL			0
+#define WLC_N_BW_40ALL			1
+#define WLC_N_BW_20IN2G_40IN5G		2
+
+#define WLC_BW_20MHZ_BIT		(1<<0)
+#define WLC_BW_40MHZ_BIT		(1<<1)
+#define WLC_BW_80MHZ_BIT		(1<<2)
+#define WLC_BW_160MHZ_BIT		(1<<3)
+
+/* Bandwidth capabilities */
+#define WLC_BW_CAP_20MHZ		(WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_40MHZ		(WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_80MHZ		(WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_160MHZ		(WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
+	WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_UNRESTRICTED		0xFF
+
+#define WL_BW_CAP_20MHZ(bw_cap)	(((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_40MHZ(bw_cap)	(((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_80MHZ(bw_cap)	(((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_160MHZ(bw_cap)(((bw_cap) & WLC_BW_160MHZ_BIT) ? TRUE : FALSE)
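+/*
+ * Illustrative usage (not part of the original header; variable names are
+ * hypothetical): mapping a per-band bw_cap value to the widest bandwidth it
+ * advertises.
+ */
+#if 0	/* example only */
+	if (WL_BW_CAP_160MHZ(bw_cap))
+		max_bw_mhz = 160;
+	else if (WL_BW_CAP_80MHZ(bw_cap))
+		max_bw_mhz = 80;
+	else if (WL_BW_CAP_40MHZ(bw_cap))
+		max_bw_mhz = 40;
+	else
+		max_bw_mhz = 20;
+#endif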
+
+/* values to force tx/rx chain */
+#define WLC_N_TXRX_CHAIN0		0
+#define WLC_N_TXRX_CHAIN1		1
+
+/* bitflags for SGI support (sgi_rx iovar) */
+#define WLC_N_SGI_20			0x01
+#define WLC_N_SGI_40			0x02
+#define WLC_VHT_SGI_80			0x04
+
+/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */
+#define WLC_SGI_ALL				0x02
+
+#define LISTEN_INTERVAL			10
+/* interference mitigation options */
+#define	INTERFERE_OVRRIDE_OFF	-1	/* interference override off */
+#define	INTERFERE_NONE	0	/* off */
+#define	NON_WLAN	1	/* foreign/non 802.11 interference, no auto detect */
+#define	WLAN_MANUAL	2	/* ACI: no auto detection */
+#define	WLAN_AUTO	3	/* ACI: auto detect */
+#define	WLAN_AUTO_W_NOISE	4	/* ACI: auto - detect and non 802.11 interference */
+#define AUTO_ACTIVE	(1 << 7) /* Auto is currently active */
+
+/* interference mode bit-masks (ACPHY) */
+#define ACPHY_ACI_GLITCHBASED_DESENSE 1   /* bit 0 */
+#define ACPHY_ACI_HWACI_PKTGAINLMT 2      /* bit 1 */
+#define ACPHY_ACI_W2NB_PKTGAINLMT 4       /* bit 2 */
+#define ACPHY_ACI_PREEMPTION 8            /* bit 3 */
+#define ACPHY_HWACI_MITIGATION 16            /* bit 4 */
+#define ACPHY_ACI_MAX_MODE 31
+
+/* AP environment */
+#define AP_ENV_DETECT_NOT_USED		0 /* We aren't using AP environment detection */
+#define AP_ENV_DENSE			1 /* "Corporate" or other AP dense environment */
+#define AP_ENV_SPARSE			2 /* "Home" or other sparse environment */
+#define AP_ENV_INDETERMINATE		3 /* AP environment hasn't been identified */
+
+#define TRIGGER_NOW				0
+#define TRIGGER_CRS				0x01
+#define TRIGGER_CRSDEASSERT			0x02
+#define TRIGGER_GOODFCS				0x04
+#define TRIGGER_BADFCS				0x08
+#define TRIGGER_BADPLCP				0x10
+#define TRIGGER_CRSGLITCH			0x20
+
+#define	WL_SAMPLEDATA_HEADER_TYPE	1
+#define WL_SAMPLEDATA_HEADER_SIZE	80	/* sample collect header size (bytes) */
+#define	WL_SAMPLEDATA_TYPE		2
+#define	WL_SAMPLEDATA_SEQ		0xff	/* sequence # */
+#define	WL_SAMPLEDATA_MORE_DATA		0x100	/* more data mask */
+
+/* WL_OTA START */
+#define WL_OTA_ARG_PARSE_BLK_SIZE	1200
+#define WL_OTA_TEST_MAX_NUM_RATE	30
+#define WL_OTA_TEST_MAX_NUM_SEQ		100
+
+#define WL_THRESHOLD_LO_BAND	70	/* range from 5250MHz - 5350MHz */
+
+/* radar iovar SET defines */
+#define WL_RADAR_DETECTOR_OFF		0	/* radar detector off */
+#define WL_RADAR_DETECTOR_ON		1	/* radar detector on */
+#define WL_RADAR_SIMULATED		2	/* force radar detector to declare
+						 * detection once
+						 */
+#define WL_RSSI_ANT_VERSION	1	/* current version of wl_rssi_ant_t */
+#define WL_ANT_RX_MAX		2	/* max 2 receive antennas */
+#define WL_ANT_HT_RX_MAX	3	/* max 3 receive antennas/cores */
+#define WL_ANT_IDX_1		0	/* antenna index 1 */
+#define WL_ANT_IDX_2		1	/* antenna index 2 */
+
+#ifndef WL_RSSI_ANT_MAX
+#define WL_RSSI_ANT_MAX		4	/* max possible rx antennas */
+#elif WL_RSSI_ANT_MAX != 4
+#error "WL_RSSI_ANT_MAX does not match"
+#endif
+
+/* dfs_status iovar-related defines */
+
+/* cac - channel availability check,
+ * ism - in-service monitoring
+ * csa - channel switching announcement
+ */
+
+/* cac state values */
+#define WL_DFS_CACSTATE_IDLE		0	/* state for operating in non-radar channel */
+#define	WL_DFS_CACSTATE_PREISM_CAC	1	/* CAC in progress */
+#define WL_DFS_CACSTATE_ISM		2	/* ISM in progress */
+#define WL_DFS_CACSTATE_CSA		3	/* csa */
+#define WL_DFS_CACSTATE_POSTISM_CAC	4	/* ISM CAC */
+#define WL_DFS_CACSTATE_PREISM_OOC	5	/* PREISM OOC */
+#define WL_DFS_CACSTATE_POSTISM_OOC	6	/* POSTISM OOC */
+#define WL_DFS_CACSTATES		7	/* this many states exist */
+
+/* Defines used with channel_bandwidth for curpower */
+#define WL_BW_20MHZ		0
+#define WL_BW_40MHZ		1
+#define WL_BW_80MHZ		2
+#define WL_BW_160MHZ		3
+#define WL_BW_8080MHZ		4
+
+/* tx_power_t.flags bits */
+#define WL_TX_POWER_F_ENABLED	1
+#define WL_TX_POWER_F_HW		2
+#define WL_TX_POWER_F_MIMO		4
+#define WL_TX_POWER_F_SISO		8
+#define WL_TX_POWER_F_HT		0x10
+#define WL_TX_POWER_F_VHT		0x20
+#define WL_TX_POWER_F_OPENLOOP		0x40
+
+/* Message levels */
+#define WL_ERROR_VAL		0x00000001
+#define WL_TRACE_VAL		0x00000002
+#define WL_PRHDRS_VAL		0x00000004
+#define WL_PRPKT_VAL		0x00000008
+#define WL_INFORM_VAL		0x00000010
+#define WL_TMP_VAL		0x00000020
+#define WL_OID_VAL		0x00000040
+#define WL_RATE_VAL		0x00000080
+#define WL_ASSOC_VAL		0x00000100
+#define WL_PRUSR_VAL		0x00000200
+#define WL_PS_VAL		0x00000400
+#define WL_TXPWR_VAL		0x00000800	/* retired in TOT on 6/10/2009 */
+#define WL_MODE_SWITCH_VAL	0x00000800 /* Using retired TXPWR val */
+#define WL_PORT_VAL		0x00001000
+#define WL_DUAL_VAL		0x00002000
+#define WL_WSEC_VAL		0x00004000
+#define WL_WSEC_DUMP_VAL	0x00008000
+#define WL_LOG_VAL		0x00010000
+#define WL_NRSSI_VAL		0x00020000	/* retired in TOT on 6/10/2009 */
+#define WL_LOFT_VAL		0x00040000	/* retired in TOT on 6/10/2009 */
+#define WL_REGULATORY_VAL	0x00080000
+#define WL_TAF_VAL		0x00100000
+#define WL_RADAR_VAL		0x00200000	/* retired in TOT on 6/10/2009 */
+#define WL_MPC_VAL		0x00400000
+#define WL_APSTA_VAL		0x00800000
+#define WL_DFS_VAL		0x01000000
+#define WL_BA_VAL		0x02000000	/* retired in TOT on 6/14/2010 */
+#define WL_ACI_VAL		0x04000000
+#define WL_PRMAC_VAL		0x04000000
+#define WL_MBSS_VAL		0x04000000
+#define WL_CAC_VAL		0x08000000
+#define WL_AMSDU_VAL		0x10000000
+#define WL_AMPDU_VAL		0x20000000
+#define WL_FFPLD_VAL		0x40000000
+
+/* wl_msg_level is full. For new bits take the next one and AND with
+ * wl_msg_level2 in wl_dbg.h
+ */
+#define WL_DPT_VAL		0x00000001
+#define WL_SCAN_VAL		0x00000002
+#define WL_WOWL_VAL		0x00000004
+#define WL_COEX_VAL		0x00000008
+#define WL_RTDC_VAL		0x00000010
+#define WL_PROTO_VAL		0x00000020
+#define WL_BTA_VAL		0x00000040
+#define WL_CHANINT_VAL		0x00000080
+#define WL_WMF_VAL		0x00000100
+#define WL_P2P_VAL		0x00000200
+#define WL_ITFR_VAL		0x00000400
+#define WL_MCHAN_VAL		0x00000800
+#define WL_TDLS_VAL		0x00001000
+#define WL_MCNX_VAL		0x00002000
+#define WL_PROT_VAL		0x00004000
+#define WL_PSTA_VAL		0x00008000
+#define WL_TSO_VAL		0x00010000
+#define WL_TRF_MGMT_VAL		0x00020000
+#define WL_LPC_VAL	        0x00040000
+#define WL_L2FILTER_VAL		0x00080000
+#define WL_TXBF_VAL		0x00100000
+#define WL_P2PO_VAL		0x00200000
+#define WL_TBTT_VAL		0x00400000
+#define WL_MQ_VAL		0x01000000
+
+/* This level is currently used in Phoenix2 only */
+#define WL_SRSCAN_VAL		0x02000000
+
+#define WL_WNM_VAL		0x04000000
+#define WL_PWRSEL_VAL		0x10000000
+#define WL_NET_DETECT_VAL	0x20000000
+#define WL_PCIE_VAL		0x40000000
+
+/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier
+ * rather than a message-type of its own
+ */
+#define WL_TIMESTAMP_VAL        0x80000000
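+/*
+ * Illustrative usage (an assumption, not stated in this header): the
+ * timestamp bit is a modifier OR-ed into a level word together with
+ * ordinary message-type bits from the same word, e.g.
+ * (WL_SCAN_VAL | WL_TIMESTAMP_VAL), rather than selecting output on its own.
+ */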
+
+/* max # of leds supported by GPIO (gpio pin# == led index#) */
+#define	WL_LED_NUMGPIO		32	/* gpio 0-31 */
+
+/* led per-pin behaviors */
+#define	WL_LED_OFF		0		/* always off */
+#define	WL_LED_ON		1		/* always on */
+#define	WL_LED_ACTIVITY		2		/* activity */
+#define	WL_LED_RADIO		3		/* radio enabled */
+#define	WL_LED_ARADIO		4		/* 5 GHz radio enabled */
+#define	WL_LED_BRADIO		5		/* 2.4 GHz radio enabled */
+#define	WL_LED_BGMODE		6		/* on if gmode, off if bmode */
+#define	WL_LED_WI1		7
+#define	WL_LED_WI2		8
+#define	WL_LED_WI3		9
+#define	WL_LED_ASSOC		10		/* associated state indicator */
+#define	WL_LED_INACTIVE		11		/* null behavior (clears default behavior) */
+#define	WL_LED_ASSOCACT		12		/* on when associated; blink fast for activity */
+#define WL_LED_WI4		13
+#define WL_LED_WI5		14
+#define	WL_LED_BLINKSLOW	15		/* blink slow */
+#define	WL_LED_BLINKMED		16		/* blink med */
+#define	WL_LED_BLINKFAST	17		/* blink fast */
+#define	WL_LED_BLINKCUSTOM	18		/* blink custom */
+#define	WL_LED_BLINKPERIODIC	19		/* blink periodic (custom 1000ms / off 400ms) */
+#define WL_LED_ASSOC_WITH_SEC	20		/* when connected with security */
+						/* keep on for 300 sec */
+#define WL_LED_START_OFF	21		/* off upon boot, could be turned on later */
+#define WL_LED_WI6		22
+#define WL_LED_WI7		23
+#define WL_LED_WI8		24
+#define	WL_LED_NUMBEHAVIOR	25
+
+/* led behavior numeric value format */
+#define	WL_LED_BEH_MASK		0x7f		/* behavior mask */
+#define	WL_LED_AL_MASK		0x80		/* activelow (polarity) bit */
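+/*
+ * Illustrative encoding (not part of the original header): a per-GPIO LED
+ * behavior value carries the behavior number in the low 7 bits, optionally
+ * OR-ed with the active-low bit, e.g. (WL_LED_ACTIVITY | WL_LED_AL_MASK)
+ * for an active-low activity LED.
+ */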
+
+/* number of bytes needed to define a proper bit mask for MAC event reporting */
+#define BCMIO_ROUNDUP(x, y)	((((x) + ((y) - 1)) / (y)) * (y))
+#define BCMIO_NBBY		8
+#define WL_EVENTING_MASK_LEN	16
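+/*
+ * Illustrative arithmetic (not part of the original header):
+ * BCMIO_ROUNDUP(128, BCMIO_NBBY) / BCMIO_NBBY == 16 == WL_EVENTING_MASK_LEN,
+ * i.e. the 16-byte mask holds one bit per event code for codes 0..127.
+ */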
+
+
+/* join preference types */
+#define WL_JOIN_PREF_RSSI	1	/* by RSSI */
+#define WL_JOIN_PREF_WPA	2	/* by akm and ciphers */
+#define WL_JOIN_PREF_BAND	3	/* by 802.11 band */
+#define WL_JOIN_PREF_RSSI_DELTA	4	/* by 802.11 band only if RSSI delta condition matches */
+#define WL_JOIN_PREF_TRANS_PREF	5	/* defined by requesting AP */
+
+/* band preference */
+#define WLJP_BAND_ASSOC_PREF	255	/* use what WLC_SET_ASSOC_PREFER ioctl specifies */
+
+/* any multicast cipher suite */
+#define WL_WPA_ACP_MCS_ANY	"\x00\x00\x00\x00"
+
+/* 802.11h measurement types */
+#define WLC_MEASURE_TPC			1
+#define WLC_MEASURE_CHANNEL_BASIC	2
+#define WLC_MEASURE_CHANNEL_CCA		3
+#define WLC_MEASURE_CHANNEL_RPI		4
+
+/* regulatory enforcement levels */
+#define SPECT_MNGMT_OFF			0		/* both 11h and 11d disabled */
+#define SPECT_MNGMT_LOOSE_11H		1		/* allow non-11h APs in scan lists */
+#define SPECT_MNGMT_STRICT_11H		2		/* prune out non-11h APs from scan list */
+#define SPECT_MNGMT_STRICT_11D		3		/* switch to 802.11D mode */
+/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE
+ * adoption is done regardless of capability spectrum_management
+ */
+#define SPECT_MNGMT_LOOSE_11H_D		4		/* operation defined above */
+
+#define WL_CHAN_VALID_HW	(1 << 0)	/* valid with current HW */
+#define WL_CHAN_VALID_SW	(1 << 1)	/* valid with current country setting */
+#define WL_CHAN_BAND_5G		(1 << 2)	/* 5GHz-band channel */
+#define WL_CHAN_RADAR		(1 << 3)	/* radar sensitive  channel */
+#define WL_CHAN_INACTIVE	(1 << 4)	/* temporarily inactive due to radar */
+#define WL_CHAN_PASSIVE		(1 << 5)	/* channel is in passive mode */
+#define WL_CHAN_RESTRICTED	(1 << 6)	/* restricted use channel */
+
+/* BTC mode used by "btc_mode" iovar */
+#define	WL_BTC_DISABLE		0	/* disable BT coexistence */
+#define WL_BTC_FULLTDM      1	/* full TDM COEX */
+#define WL_BTC_ENABLE       1	/* full TDM COEX to maintain backward compatibility */
+#define WL_BTC_PREMPT      2    /* full TDM COEX with preemption */
+#define WL_BTC_LITE        3	/* lightweight coex for large isolation platform */
+#define WL_BTC_PARALLEL		4   /* BT and WLAN run in parallel with separate antenna  */
+#define WL_BTC_HYBRID		5   /* hybrid coex, only ack is allowed to transmit in BT slot */
+#define WL_BTC_DEFAULT		8	/* set the default mode for the device */
+#define WL_INF_BTC_DISABLE      0
+#define WL_INF_BTC_ENABLE       1
+#define WL_INF_BTC_AUTO         3
+
+/* BTC wire used by "btc_wire" iovar */
+#define	WL_BTC_DEFWIRE		0	/* use default wire setting */
+#define WL_BTC_2WIRE		2	/* use 2-wire BTC */
+#define WL_BTC_3WIRE		3	/* use 3-wire BTC */
+#define WL_BTC_4WIRE		4	/* use 4-wire BTC */
+
+/* BTC flags: BTC configuration that can be set by host */
+#define WL_BTC_FLAG_PREMPT               (1 << 0)
+#define WL_BTC_FLAG_BT_DEF               (1 << 1)
+#define WL_BTC_FLAG_ACTIVE_PROT          (1 << 2)
+#define WL_BTC_FLAG_SIM_RSP              (1 << 3)
+#define WL_BTC_FLAG_PS_PROTECT           (1 << 4)
+#define WL_BTC_FLAG_SIM_TX_LP	         (1 << 5)
+#define WL_BTC_FLAG_ECI                  (1 << 6)
+#define WL_BTC_FLAG_LIGHT                (1 << 7)
+#define WL_BTC_FLAG_PARALLEL             (1 << 8)
+
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS		64
+
+/* max number of chanspecs (used by the iovar to calc. buf space) */
+#ifdef WL11AC_80P80
+#define WL_NUMCHANSPECS 206
+#else
+#define WL_NUMCHANSPECS 110
+#endif
+
+
+/* WDS link local endpoint WPA role */
+#define WL_WDS_WPA_ROLE_AUTH	0	/* authenticator */
+#define WL_WDS_WPA_ROLE_SUP	1	/* supplicant */
+#define WL_WDS_WPA_ROLE_AUTO	255	/* auto, based on mac addr value */
+
+/* Base offset values */
+#define WL_PKT_FILTER_BASE_PKT   0
+#define WL_PKT_FILTER_BASE_END   1
+#define WL_PKT_FILTER_BASE_D11_H 2 /* May be removed */
+#define WL_PKT_FILTER_BASE_D11_D 3 /* May be removed */
+#define WL_PKT_FILTER_BASE_ETH_H 4
+#define WL_PKT_FILTER_BASE_ETH_D 5
+#define WL_PKT_FILTER_BASE_ARP_H 6
+#define WL_PKT_FILTER_BASE_ARP_D 7 /* May be removed */
+#define WL_PKT_FILTER_BASE_IP4_H 8
+#define WL_PKT_FILTER_BASE_IP4_D 9
+#define WL_PKT_FILTER_BASE_IP6_H 10
+#define WL_PKT_FILTER_BASE_IP6_D 11
+#define WL_PKT_FILTER_BASE_TCP_H 12
+#define WL_PKT_FILTER_BASE_TCP_D 13 /* May be removed */
+#define WL_PKT_FILTER_BASE_UDP_H 14
+#define WL_PKT_FILTER_BASE_UDP_D 15
+#define WL_PKT_FILTER_BASE_IP6_P 16
+#define WL_PKT_FILTER_BASE_COUNT 17 /* May be removed */
+
+/* String mapping for bases that may be used by applications or debug */
+#define WL_PKT_FILTER_BASE_NAMES \
+	{ "START", WL_PKT_FILTER_BASE_PKT },   \
+	{ "END",   WL_PKT_FILTER_BASE_END },   \
+	{ "ETH_H", WL_PKT_FILTER_BASE_ETH_H }, \
+	{ "ETH_D", WL_PKT_FILTER_BASE_ETH_D }, \
+	{ "D11_H", WL_PKT_FILTER_BASE_D11_H }, \
+	{ "D11_D", WL_PKT_FILTER_BASE_D11_D }, \
+	{ "ARP_H", WL_PKT_FILTER_BASE_ARP_H }, \
+	{ "IP4_H", WL_PKT_FILTER_BASE_IP4_H }, \
+	{ "IP4_D", WL_PKT_FILTER_BASE_IP4_D }, \
+	{ "IP6_H", WL_PKT_FILTER_BASE_IP6_H }, \
+	{ "IP6_D", WL_PKT_FILTER_BASE_IP6_D }, \
+	{ "IP6_P", WL_PKT_FILTER_BASE_IP6_P }, \
+	{ "TCP_H", WL_PKT_FILTER_BASE_TCP_H }, \
+	{ "TCP_D", WL_PKT_FILTER_BASE_TCP_D }, \
+	{ "UDP_H", WL_PKT_FILTER_BASE_UDP_H }, \
+	{ "UDP_D", WL_PKT_FILTER_BASE_UDP_D }
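+/*
+ * Illustrative usage (not part of the original header; the table name is
+ * hypothetical): the initializer list above can populate a name/value table,
+ * e.g. for a debug utility that prints filter bases by name.
+ */
+#if 0	/* example only */
+static const struct {
+	const char *name;
+	int base;
+} pkt_filter_base_names[] = {
+	WL_PKT_FILTER_BASE_NAMES
+};
+#endif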
+
+/* Flags for a pattern list element */
+#define WL_PKT_FILTER_MFLAG_NEG 0x0001
+
+/*
+ * Packet engine interface
+ */
+
+#define WL_PKTENG_PER_TX_START			0x01
+#define WL_PKTENG_PER_TX_STOP			0x02
+#define WL_PKTENG_PER_RX_START			0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START		0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START		0x06
+#define WL_PKTENG_PER_RX_STOP			0x08
+#define WL_PKTENG_PER_MASK			0xff
+
+#define WL_PKTENG_SYNCHRONOUS			0x100	/* synchronous flag */
+
+#define WL_PKTENG_MAXPKTSZ				16384	/* max pktsz limit for pkteng */
+
+#define NUM_80211b_RATES	4
+#define NUM_80211ag_RATES	8
+#define NUM_80211n_RATES	32
+#define NUM_80211_RATES		(NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+
+/*
+ * WOWL capability/override settings
+ */
+#define WL_WOWL_MAGIC           (1 << 0)    /* Wakeup on Magic packet */
+#define WL_WOWL_NET             (1 << 1)    /* Wakeup on Netpattern */
+#define WL_WOWL_DIS             (1 << 2)    /* Wakeup on loss-of-link due to Disassoc/Deauth */
+#define WL_WOWL_RETR            (1 << 3)    /* Wakeup on retrograde TSF */
+#define WL_WOWL_BCN             (1 << 4)    /* Wakeup on loss of beacon */
+#define WL_WOWL_TST             (1 << 5)    /* Wakeup after test */
+#define WL_WOWL_M1              (1 << 6)    /* Wakeup after PTK refresh */
+#define WL_WOWL_EAPID           (1 << 7)    /* Wakeup after receipt of EAP-Identity Req */
+#define WL_WOWL_PME_GPIO        (1 << 8)    /* Wakeind via PME(0) or GPIO(1) */
+#define WL_WOWL_NEEDTKIP1       (1 << 9)    /* need tkip phase 1 key to be updated by the driver */
+#define WL_WOWL_GTK_FAILURE     (1 << 10)   /* enable wakeup if GTK fails */
+#define WL_WOWL_EXTMAGPAT       (1 << 11)   /* support extended magic packets */
+#define WL_WOWL_ARPOFFLOAD      (1 << 12)   /* support ARP/NS/keepalive offloading */
+#define WL_WOWL_WPA2            (1 << 13)   /* read protocol version for EAPOL frames */
+#define WL_WOWL_KEYROT          (1 << 14)   /* If the bit is set, use key rotation */
+#define WL_WOWL_BCAST           (1 << 15)   /* If the bit is set, frm received was bcast frame */
+#define WL_WOWL_SCANOL          (1 << 16)   /* If the bit is set, scan offload is enabled */
+#define WL_WOWL_TCPKEEP_TIME    (1 << 17)   /* Wakeup on tcpkeep alive timeout */
+#define WL_WOWL_MDNS_CONFLICT   (1 << 18)   /* Wakeup on mDNS Conflict Resolution */
+#define WL_WOWL_MDNS_SERVICE    (1 << 19)   /* Wakeup on mDNS Service Connect */
+#define WL_WOWL_TCPKEEP_DATA    (1 << 20)   /* tcp keepalive got data */
+#define WL_WOWL_FW_HALT         (1 << 21)   /* Firmware died in wowl mode */
+#define WL_WOWL_ENAB_HWRADIO    (1 << 22)   /* Enable detection of radio button changes */
+#define WL_WOWL_MIC_FAIL        (1 << 23)   /* Offloads detected MIC failure(s) */
+#define WL_WOWL_UNASSOC         (1 << 24)   /* Wakeup in Unassociated state (Net/Magic Pattern) */
+#define WL_WOWL_SECURE          (1 << 25)   /* Wakeup if received matched secured pattern */
+#define WL_WOWL_LINKDOWN        (1 << 31)   /* Link Down indication in WoWL mode */
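+/*
+ * Illustrative usage (not part of the original header; the variable name is
+ * hypothetical): the wake conditions are independent bits and are OR-ed
+ * together by the host, e.g.
+ * wowl_flags = WL_WOWL_MAGIC | WL_WOWL_NET | WL_WOWL_DIS | WL_WOWL_BCN;
+ */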
+
+#define WL_WOWL_TCPKEEP         (1 << 20)   /* temp copy to satisfy automerger */
+#define MAGIC_PKT_MINLEN 102    /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */
+
+#define WOWL_PATTEN_TYPE_ARP	(1 << 0)	/* ARP offload Pattern */
+#define WOWL_PATTEN_TYPE_NA	(1 << 1)	/* NA offload Pattern */
+
+#define MAGIC_PKT_MINLEN	102    /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */
+#define MAGIC_PKT_NUM_MAC_ADDRS	16
+
+
+/* Overlap BSS Scan parameters default, minimum, maximum */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT		20	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN			5	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX			1000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT		10	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN			10	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX			1000	/* unit TU */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT	300	/* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN		10	/* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX		900	/* unit Sec */
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT	5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN	5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX	100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT	200	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN	200	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX	10000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT	20	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN	20	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX	10000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT	25	/* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN		0	/* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX		100	/* unit percent */
+
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7	/* minimum number of arguments required for OBSS Scan */
+
+#define WL_COEX_INFO_MASK		0x07
+#define WL_COEX_INFO_REQ		0x01
+#define	WL_COEX_40MHZ_INTOLERANT	0x02
+#define	WL_COEX_WIDTH20			0x04
+
+#define	WLC_RSSI_INVALID	 0	/* invalid RSSI value */
+
+#define MAX_RSSI_LEVELS 8
+
+/* **** EXTLOG **** */
+#define EXTLOG_CUR_VER		0x0100
+
+#define MAX_ARGSTR_LEN		18 /* At least big enough for storing ETHER_ADDR_STR_LEN */
+
+/* log modules (bitmap) */
+#define LOG_MODULE_COMMON	0x0001
+#define LOG_MODULE_ASSOC	0x0002
+#define LOG_MODULE_EVENT	0x0004
+#define LOG_MODULE_MAX		3			/* Update when adding module */
+
+/* log levels */
+#define WL_LOG_LEVEL_DISABLE	0
+#define WL_LOG_LEVEL_ERR	1
+#define WL_LOG_LEVEL_WARN	2
+#define WL_LOG_LEVEL_INFO	3
+#define WL_LOG_LEVEL_MAX	WL_LOG_LEVEL_INFO	/* Update when adding level */
+
+/* flag */
+#define LOG_FLAG_EVENT		1
+
+/* log arg_type */
+#define LOG_ARGTYPE_NULL	0
+#define LOG_ARGTYPE_STR		1	/* %s */
+#define LOG_ARGTYPE_INT		2	/* %d */
+#define LOG_ARGTYPE_INT_STR	3	/* %d...%s */
+#define LOG_ARGTYPE_STR_INT	4	/* %s...%d */
+
+/* 802.11 Mgmt Packet flags */
+#define VNDR_IE_BEACON_FLAG	0x1
+#define VNDR_IE_PRBRSP_FLAG	0x2
+#define VNDR_IE_ASSOCRSP_FLAG	0x4
+#define VNDR_IE_AUTHRSP_FLAG	0x8
+#define VNDR_IE_PRBREQ_FLAG	0x10
+#define VNDR_IE_ASSOCREQ_FLAG	0x20
+#define VNDR_IE_IWAPID_FLAG	0x40 /* vendor IE in IW advertisement protocol ID field */
+#define VNDR_IE_CUSTOM_FLAG	0x100 /* allow custom IE id */
+
+#if defined(WLP2P)
+/* P2P Action Frames flags (spec ordered) */
+#define VNDR_IE_GONREQ_FLAG     0x001000
+#define VNDR_IE_GONRSP_FLAG     0x002000
+#define VNDR_IE_GONCFM_FLAG     0x004000
+#define VNDR_IE_INVREQ_FLAG     0x008000
+#define VNDR_IE_INVRSP_FLAG     0x010000
+#define VNDR_IE_DISREQ_FLAG     0x020000
+#define VNDR_IE_DISRSP_FLAG     0x040000
+#define VNDR_IE_PRDREQ_FLAG     0x080000
+#define VNDR_IE_PRDRSP_FLAG     0x100000
+
+#define VNDR_IE_P2PAF_SHIFT	12
+#endif /* WLP2P */
+
+/* channel interference measurement (chanim) related defines */
+
+/* chanim mode */
+#define CHANIM_DISABLE	0	/* disabled */
+#define CHANIM_DETECT	1	/* detection only */
+#define CHANIM_EXT		2	/* external state machine */
+#define CHANIM_ACT		3	/* full internal state machine, detect + act */
+#define CHANIM_MODE_MAX 4
+
+/* define for apcs reason code */
+#define APCS_INIT		0
+#define APCS_IOCTL		1
+#define APCS_CHANIM		2
+#define APCS_CSTIMER		3
+#define APCS_BTA		4
+#define APCS_TXDLY		5
+#define APCS_NONACSD		6
+#define APCS_DFS_REENTRY	7
+#define APCS_TXFAIL		8
+#define APCS_MAX		9
+
+/* number of ACS record entries */
+#define CHANIM_ACS_RECORD			10
+
+/* CHANIM */
+#define CCASTATS_TXDUR  0
+#define CCASTATS_INBSS  1
+#define CCASTATS_OBSS   2
+#define CCASTATS_NOCTG  3
+#define CCASTATS_NOPKT  4
+#define CCASTATS_DOZE   5
+#define CCASTATS_TXOP	6
+#define CCASTATS_GDTXDUR        7
+#define CCASTATS_BDTXDUR        8
+#define CCASTATS_MAX    9
+
+#define WL_CHANIM_COUNT_ALL	0xff
+#define WL_CHANIM_COUNT_ONE	0x1
+
+/* ap tpc modes */
+#define	AP_TPC_OFF		0
+#define	AP_TPC_BSS_PWR		1	/* BSS power control */
+#define AP_TPC_AP_PWR		2	/* AP power control */
+#define	AP_TPC_AP_BSS_PWR	3	/* Both AP and BSS power control */
+#define AP_TPC_MAX_LINK_MARGIN	127
+
+
+/* state */
+#define WL_P2P_DISC_ST_SCAN	0
+#define WL_P2P_DISC_ST_LISTEN	1
+#define WL_P2P_DISC_ST_SEARCH	2
+
+/* i/f type */
+#define WL_P2P_IF_CLIENT	0
+#define WL_P2P_IF_GO		1
+#define WL_P2P_IF_DYNBCN_GO	2
+#define WL_P2P_IF_DEV		3
+
+/* count */
+#define WL_P2P_SCHED_RSVD	0
+#define WL_P2P_SCHED_REPEAT	255	/* anything > 255 will be treated as 255 */
+
+#define WL_P2P_SCHED_FIXED_LEN		3
+
+/* schedule type */
+#define WL_P2P_SCHED_TYPE_ABS		0	/* Scheduled Absence */
+#define WL_P2P_SCHED_TYPE_REQ_ABS	1	/* Requested Absence */
+
+/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */
+#define WL_P2P_SCHED_ACTION_NONE	0	/* no action */
+#define WL_P2P_SCHED_ACTION_DOZE	1	/* doze */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_ACTION_GOOFF	2	/* turn off GO beacon/prbrsp functions */
+/* schedule option - WL_P2P_SCHED_TYPE_XXX */
+#define WL_P2P_SCHED_ACTION_RESET	255	/* reset */
+
+/* schedule option - WL_P2P_SCHED_TYPE_ABS */
+#define WL_P2P_SCHED_OPTION_NORMAL	0	/* normal start/interval/duration/count */
+#define WL_P2P_SCHED_OPTION_BCNPCT	1	/* percentage of beacon interval */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_OPTION_TSFOFS	2	/* normal start/interval/duration/count with
+						 * start being an offset of the 'current' TSF
+						 */
+
+/* feature flags */
+#define WL_P2P_FEAT_GO_CSA	(1 << 0)	/* GO moves with the STA using CSA method */
+#define WL_P2P_FEAT_GO_NOLEGACY	(1 << 1)	/* GO does not probe respond to non-p2p probe
+						 * requests
+						 */
+#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2)	/* Restrict p2p dev interface from responding */
+
+/* n-mode support capability */
+/* 2x2 includes both 1x1 & 2x2 devices
+ * reserved #define 2 for future when we want to separate 1x1 & 2x2 and
+ * control it independently
+ */
+#define WL_11N_2x2			1
+#define WL_11N_3x3			3
+#define WL_11N_4x4			4
+
+/* define 11n feature disable flags */
+#define WLFEATURE_DISABLE_11N		0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX	0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX	0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX	0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX	0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX	0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX	0x00000040
+#define WLFEATURE_DISABLE_11N_GF	0x00000080
+
+/* Proxy STA modes */
+#define PSTA_MODE_DISABLED		0
+#define PSTA_MODE_PROXY			1
+#define PSTA_MODE_REPEATER		2
+
+/* op code in nat_cfg */
+#define NAT_OP_ENABLE		1	/* enable NAT on given interface */
+#define NAT_OP_DISABLE		2	/* disable NAT on given interface */
+#define NAT_OP_DISABLE_ALL	3	/* disable NAT on all interfaces */
+
+/* NAT state */
+#define NAT_STATE_ENABLED	1	/* NAT is enabled */
+#define NAT_STATE_DISABLED	2	/* NAT is disabled */
+
+#define CHANNEL_5G_LOW_START	36	/* 5G low (36..48) CDD enable/disable bit mask */
+#define CHANNEL_5G_MID_START	52	/* 5G mid (52..64) CDD enable/disable bit mask */
+#define CHANNEL_5G_HIGH_START	100	/* 5G high (100..140) CDD enable/disable bit mask */
+#define CHANNEL_5G_UPPER_START	149	/* 5G upper (149..161) CDD enable/disable bit mask */
+
+/* D0 Coalescing */
+#define IPV4_ARP_FILTER		0x0001
+#define IPV4_NETBT_FILTER	0x0002
+#define IPV4_LLMNR_FILTER	0x0004
+#define IPV4_SSDP_FILTER	0x0008
+#define IPV4_WSD_FILTER		0x0010
+#define IPV6_NETBT_FILTER	0x0200
+#define IPV6_LLMNR_FILTER	0x0400
+#define IPV6_SSDP_FILTER	0x0800
+#define IPV6_WSD_FILTER		0x1000
+
+/* Network Offload Engine */
+#define NWOE_OL_ENABLE		0x00000001
+
+/*
+ * Traffic management structures/defines.
+ */
+
+/* Traffic management bandwidth parameters */
+#define TRF_MGMT_MAX_PRIORITIES                 3
+
+#define TRF_MGMT_FLAG_ADD_DSCP                  0x0001  /* Add DSCP to IP TOS field */
+#define TRF_MGMT_FLAG_DISABLE_SHAPING           0x0002  /* Don't shape traffic */
+#define TRF_MGMT_FLAG_MANAGE_LOCAL_TRAFFIC      0x0008  /* Manage traffic over our local subnet */
+#define TRF_MGMT_FLAG_FILTER_ON_MACADDR         0x0010  /* filter on MAC address */
+#define TRF_MGMT_FLAG_NO_RX                     0x0020  /* do not apply filters to rx packets */
+
+#define TRF_FILTER_MAC_ADDR              0x0001 /* L2 filter use dst mac address for filtering */
+#define TRF_FILTER_IP_ADDR               0x0002 /* L3 filter use ip address for filtering */
+#define TRF_FILTER_L4                    0x0004 /* L4 filter use tcp/udp for filtering */
+#define TRF_FILTER_DWM                   0x0008 /* L3 filter use DSCP for filtering */
+#define TRF_FILTER_FAVORED               0x0010 /* Tag the packet FAVORED */
+
+/* WNM/NPS subfeatures mask */
+#define WL_WNM_BSSTRANS		0x00000001
+#define WL_WNM_PROXYARP		0x00000002
+#define WL_WNM_MAXIDLE		0x00000004
+#define WL_WNM_TIMBC		0x00000008
+#define WL_WNM_TFS		0x00000010
+#define WL_WNM_SLEEP		0x00000020
+#define WL_WNM_DMS		0x00000040
+#define WL_WNM_FMS		0x00000080
+#define WL_WNM_NOTIF		0x00000100
+#define WL_WNM_MAX		0x00000200
+
+#ifndef ETHER_MAX_DATA
+#define ETHER_MAX_DATA	1500
+#endif /* ETHER_MAX_DATA */
+
+/* Different discovery modes for dpt */
+#define	DPT_DISCOVERY_MANUAL	0x01	/* manual discovery mode */
+#define	DPT_DISCOVERY_AUTO	0x02	/* auto discovery mode */
+#define	DPT_DISCOVERY_SCAN	0x04	/* scan-based discovery mode */
+
+/* different path selection values */
+#define DPT_PATHSEL_AUTO	0	/* auto mode for path selection */
+#define DPT_PATHSEL_DIRECT	1	/* always use direct DPT path */
+#define DPT_PATHSEL_APPATH	2	/* always use AP path */
+
+/* different ops for deny list */
+#define DPT_DENY_LIST_ADD	1	/* add to dpt deny list */
+#define DPT_DENY_LIST_REMOVE	2	/* remove from dpt deny list */
+
+/* different ops for manual end point */
+#define DPT_MANUAL_EP_CREATE	1	/* create manual dpt endpoint */
+#define DPT_MANUAL_EP_MODIFY	2	/* modify manual dpt endpoint */
+#define DPT_MANUAL_EP_DELETE	3	/* delete manual dpt endpoint */
+
+/* flags to indicate DPT status */
+#define	DPT_STATUS_ACTIVE	0x01	/* link active (though may be suspended) */
+#define	DPT_STATUS_AES		0x02	/* link secured through AES encryption */
+#define	DPT_STATUS_FAILED	0x04	/* DPT link failed */
+
+#ifdef WLTDLS
+/* different ops for manual end point */
+#define TDLS_MANUAL_EP_CREATE	1	/* create manual dpt endpoint */
+#define TDLS_MANUAL_EP_MODIFY	2	/* modify manual dpt endpoint */
+#define TDLS_MANUAL_EP_DELETE	3	/* delete manual dpt endpoint */
+#define TDLS_MANUAL_EP_PM		4	/*  put dpt endpoint in PM mode */
+#define TDLS_MANUAL_EP_WAKE		5	/* wake up dpt endpoint from PM */
+#define TDLS_MANUAL_EP_DISCOVERY	6	/* discover if endpoint is TDLS capable */
+#define TDLS_MANUAL_EP_CHSW		7	/* channel switch */
+#define TDLS_MANUAL_EP_WFD_TPQ	8	/* WiFi-Display Tunneled Probe reQuest */
+
+/* modes */
+#define TDLS_WFD_IE_TX			0
+#define TDLS_WFD_IE_RX			1
+#define TDLS_WFD_PROBE_IE_TX	2
+#define TDLS_WFD_PROBE_IE_RX	3
+#endif /* WLTDLS */
+
+/* define for flag */
+#define TSPEC_PENDING		0	/* TSPEC pending */
+#define TSPEC_ACCEPTED		1	/* TSPEC accepted */
+#define TSPEC_REJECTED		2	/* TSPEC rejected */
+#define TSPEC_UNKNOWN		3	/* TSPEC unknown */
+#define TSPEC_STATUS_MASK	7	/* TSPEC status mask */
+
+#ifdef BCMCCX
+/* "wlan_reason" iovar interface */
+#define WL_WLAN_ASSOC_REASON_NORMAL_NETWORK	0 /* normal WLAN network setup */
+#define WL_WLAN_ASSOC_REASON_ROAM_FROM_CELLULAR_NETWORK	1 /* roam from Cellular network */
+#define WL_WLAN_ASSOC_REASON_ROAM_FROM_LAN	2 /* roam from LAN */
+#define WL_WLAN_ASSOC_REASON_MAX		2 /* largest value allowed */
+#endif /* BCMCCX */
+
+/* Software feature flag defines used by wlfeatureflag */
+#ifdef WLAFTERBURNER
+#define WL_SWFL_ABBFL       0x0001 /* Allow Afterburner on systems w/o hardware BFL */
+#define WL_SWFL_ABENCORE    0x0002 /* Allow AB on non-4318E chips */
+#endif /* WLAFTERBURNER */
+#define WL_SWFL_NOHWRADIO	0x0004
+#define WL_SWFL_FLOWCONTROL     0x0008 /* Enable backpressure to OS stack */
+#define WL_SWFL_WLBSSSORT	0x0010 /* Per-port supports sorting of BSS */
+
+#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */
+
+#define CSA_BROADCAST_ACTION_FRAME	0	/* csa broadcast action frame */
+#define CSA_UNICAST_ACTION_FRAME	  1 /* csa unicast action frame */
+
+/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER.
+ *
+ * (-100 < value < 0)   value is used directly as a roaming trigger in dBm
+ * (0 <= value) value specifies a logical roaming trigger level from
+ *                      the list below
+ *
+ * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never
+ * the logical roam trigger value.
+ */
+#define WLC_ROAM_TRIGGER_DEFAULT	0 /* default roaming trigger */
+#define WLC_ROAM_TRIGGER_BANDWIDTH	1 /* optimize for bandwidth roaming trigger */
+#define WLC_ROAM_TRIGGER_DISTANCE	2 /* optimize for distance roaming trigger */
+#define WLC_ROAM_TRIGGER_AUTO		3 /* auto-detect environment */
+#define WLC_ROAM_TRIGGER_MAX_VALUE	3 /* max. valid value */
+
+#define WLC_ROAM_NEVER_ROAM_TRIGGER	(-100) /* Avoid Roaming by setting a large value */
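The roam-trigger comment above describes two encodings: a value in the open range (-100, 0) is used directly as a dBm threshold, while a non-negative value selects one of the logical levels. A minimal sketch of how a caller might pick the value handed to WLC_SET_ROAM_TRIGGER follows; pick_roam_trigger() is a hypothetical helper added for illustration, not part of this patch.

/* Illustrative only (editorial annotation, not part of this patch). */
static int pick_roam_trigger(int rssi_dbm, int use_logical_level)
{
	if (use_logical_level)
		return WLC_ROAM_TRIGGER_AUTO;	/* let the firmware pick based on environment */
	if (rssi_dbm > -100 && rssi_dbm < 0)
		return rssi_dbm;		/* direct dBm roaming trigger */
	return WLC_ROAM_TRIGGER_DEFAULT;	/* out of range: fall back to the default level */
}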
+
+/* Preferred Network Offload (PNO, formerly PFN) defines */
+#define WPA_AUTH_PFN_ANY	0xffffffff	/* for PFN, match only ssid */
+
+#define SORT_CRITERIA_BIT		0
+#define AUTO_NET_SWITCH_BIT		1
+#define ENABLE_BKGRD_SCAN_BIT		2
+#define IMMEDIATE_SCAN_BIT		3
+#define	AUTO_CONNECT_BIT		4
+#define	ENABLE_BD_SCAN_BIT		5
+#define ENABLE_ADAPTSCAN_BIT		6
+#define IMMEDIATE_EVENT_BIT		8
+#define SUPPRESS_SSID_BIT		9
+#define ENABLE_NET_OFFLOAD_BIT		10
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_BIT		11
+#define BESTN_BSSID_ONLY_BIT		12
+
+#define SORT_CRITERIA_MASK		0x0001
+#define AUTO_NET_SWITCH_MASK		0x0002
+#define ENABLE_BKGRD_SCAN_MASK		0x0004
+#define IMMEDIATE_SCAN_MASK		0x0008
+#define	AUTO_CONNECT_MASK		0x0010
+
+#define ENABLE_BD_SCAN_MASK		0x0020
+#define ENABLE_ADAPTSCAN_MASK		0x00c0
+#define IMMEDIATE_EVENT_MASK		0x0100
+#define SUPPRESS_SSID_MASK		0x0200
+#define ENABLE_NET_OFFLOAD_MASK		0x0400
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_MASK		0x0800
+#define BESTN_BSSID_ONLY_MASK		0x1000
+
+#define PFN_VERSION			2
+#define PFN_SCANRESULT_VERSION		1
+#define MAX_PFN_LIST_COUNT		16
+
+#define PFN_COMPLETE			1
+#define PFN_INCOMPLETE			0
+
+#define DEFAULT_BESTN			2
+#define DEFAULT_MSCAN			0
+#define DEFAULT_REPEAT			10
+#define DEFAULT_EXP				2
+
+#define PFN_PARTIAL_SCAN_BIT		0
+#define PFN_PARTIAL_SCAN_MASK		1
+
+#define WL_PFN_SUPPRESSFOUND_MASK	0x08
+#define WL_PFN_SUPPRESSLOST_MASK	0x10
+#define WL_PFN_RSSI_MASK		0xff00
+#define WL_PFN_RSSI_SHIFT		8
+
+#define WL_PFN_REPORT_ALLNET    0
+#define WL_PFN_REPORT_SSIDNET   1
+#define WL_PFN_REPORT_BSSIDNET  2
+
+#define WL_PFN_CFG_FLAGS_PROHIBITED	0x00000001	/* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_HISTORY_OFF	0x00000002	/* Scan history suppressed */
+
+#define WL_PFN_HIDDEN_BIT		2
+#define PNO_SCAN_MAX_FW			(508 * 1000)	/* max scan time in msec */
+#define PNO_SCAN_MAX_FW_SEC		(PNO_SCAN_MAX_FW / 1000) /* max scan time in sec */
+#define PNO_SCAN_MIN_FW_SEC		10			/* min scan time in sec */
+#define WL_PFN_HIDDEN_MASK		0x4
+
+#ifndef BESTN_MAX
+#define BESTN_MAX			8
+#endif
+
+#ifndef MSCAN_MAX
+#define MSCAN_MAX			32
+#endif
+
+/* TCP Checksum Offload error injection for testing */
+#define TOE_ERRTEST_TX_CSUM	0x00000001
+#define TOE_ERRTEST_RX_CSUM	0x00000002
+#define TOE_ERRTEST_RX_CSUM2	0x00000004
+
+/* ARP Offload feature flags for arp_ol iovar */
+#define ARP_OL_AGENT		0x00000001
+#define ARP_OL_SNOOP		0x00000002
+#define ARP_OL_HOST_AUTO_REPLY	0x00000004
+#define ARP_OL_PEER_AUTO_REPLY	0x00000008
+
+/* ARP Offload error injection */
+#define ARP_ERRTEST_REPLY_PEER	0x1
+#define ARP_ERRTEST_REPLY_HOST	0x2
+
+#define ARP_MULTIHOMING_MAX	8	/* Maximum local host IP addresses */
+#define ND_MULTIHOMING_MAX 10	/* Maximum local host IP addresses */
+#define ND_REQUEST_MAX		5	/* Max set of offload params */
+
+
+/* AOAC wake event flag */
+#define WAKE_EVENT_NLO_DISCOVERY_BIT		1
+#define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT	2
+#define WAKE_EVENT_GTK_HANDSHAKE_ERROR_BIT 4
+#define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8
+
+
+#define MAX_NUM_WOL_PATTERN	22 /* LOGO requirements min 22 */
+
+
+/* Packet filter operation mode */
+/* True: 1; False: 0 */
+#define PKT_FILTER_MODE_FORWARD_ON_MATCH		1
+/* Enable and disable pkt_filter as a whole */
+#define PKT_FILTER_MODE_DISABLE					2
+/* Cache first matched rx pkt(be queried by host later) */
+#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH		4
+/* If pkt_filter is enabled and no filter is set, don't forward anything */
+#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8
+
+#ifdef DONGLEOVERLAYS
+#define OVERLAY_IDX_MASK		0x000000ff
+#define OVERLAY_IDX_SHIFT		0
+#define OVERLAY_FLAGS_MASK		0xffffff00
+#define OVERLAY_FLAGS_SHIFT		8
+/* overlay written to device memory immediately after loading the base image */
+#define OVERLAY_FLAG_POSTLOAD	0x100
+/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */
+#define OVERLAY_FLAG_DEFER_DL	0x200
+/* overlay downloaded prior to the host going to sleep */
+#define OVERLAY_FLAG_PRESLEEP	0x400
+#define OVERLAY_DOWNLOAD_CHUNKSIZE	1024
+#endif /* DONGLEOVERLAYS */
+
+/* reuse two number in the sc/rc space */
+#define	SMFS_CODE_MALFORMED 0xFFFE
+#define SMFS_CODE_IGNORED	0xFFFD
+
+/* RFAWARE def */
+#define BCM_ACTION_RFAWARE		0x77
+#define BCM_ACTION_RFAWARE_DCS  0x01
+
+/* DCS reason code define */
+#define BCM_DCS_IOVAR		0x1
+#define BCM_DCS_UNKNOWN		0xFF
+
+
+#ifdef PROP_TXSTATUS
+/* Bit definitions for tlv iovar */
+/*
+ * enable RSSI signals:
+ * WLFC_CTL_TYPE_RSSI
+ */
+#define WLFC_FLAGS_RSSI_SIGNALS			0x0001
+
+/* enable (if/mac_open, if/mac_close, mac_add, mac_del) signals:
+ *
+ * WLFC_CTL_TYPE_MAC_OPEN
+ * WLFC_CTL_TYPE_MAC_CLOSE
+ *
+ * WLFC_CTL_TYPE_INTERFACE_OPEN
+ * WLFC_CTL_TYPE_INTERFACE_CLOSE
+ *
+ * WLFC_CTL_TYPE_MACDESC_ADD
+ * WLFC_CTL_TYPE_MACDESC_DEL
+ *
+ */
+#define WLFC_FLAGS_XONXOFF_SIGNALS		0x0002
+
+/* enable (status, fifo_credit, mac_credit) signals
+ * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT
+ * WLFC_CTL_TYPE_TXSTATUS
+ * WLFC_CTL_TYPE_FIFO_CREDITBACK
+ */
+#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS	0x0004
+
+#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE	0x0008
+#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE	0x0010
+#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE	0x0020
+#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE	0x0040
+#define WLFC_FLAGS_PKT_STAMP_SIGNALS		0x0080
+
+#endif /* PROP_TXSTATUS */
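Since the WLFC_FLAGS_* values above are independent bits of the proptxstatus "tlv" configuration, a setting is normally built by OR-ing the desired signal groups. The sketch below is illustrative only and assumes a PROP_TXSTATUS build; the particular combination shown is an example, not a recommended default.

/* Illustrative only; assumes PROP_TXSTATUS is defined so the flags exist. */
static unsigned int wlfc_signals_example(void)
{
	return WLFC_FLAGS_RSSI_SIGNALS |		/* RSSI signals */
	       WLFC_FLAGS_XONXOFF_SIGNALS |		/* interface/mac open-close signals */
	       WLFC_FLAGS_CREDIT_STATUS_SIGNALS;	/* status and credit signals */
}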
+
+#define WL_TIMBC_STATUS_AP_UNKNOWN	255	/* AP status for internal use only */
+
+#define WL_DFRTS_LOGIC_OFF	0	/* Feature is disabled */
+#define WL_DFRTS_LOGIC_OR	1	/* OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND	2	/* AND all non-zero threshold conditions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RELMCAST_MAX_CLIENT		32
+#define WL_RELMCAST_FLAG_INBLACKLIST	1
+#define WL_RELMCAST_FLAG_ACTIVEACKER	2
+#define WL_RELMCAST_FLAG_RELMCAST	4
+
+/* structures for proximity detection device role */
+#define WL_PROXD_MODE_DISABLE	0
+#define WL_PROXD_MODE_NEUTRAL	1
+#define WL_PROXD_MODE_INITIATOR	2
+#define WL_PROXD_MODE_TARGET	3
+#define WL_PROXD_RANDOM_WAKEUP	0x8000
+
+
+#ifdef NET_DETECT
+#define NET_DETECT_MAX_WAKE_DATA_SIZE	2048
+#define NET_DETECT_MAX_PROFILES		16
+#define NET_DETECT_MAX_CHANNELS		50
+#endif /* NET_DETECT */
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
+#define WL_RADIO_SW_DISABLE		(1<<0)
+#define WL_RADIO_HW_DISABLE		(1<<1)
+#define WL_RADIO_MPC_DISABLE		(1<<2)
+#define WL_RADIO_COUNTRY_DISABLE	(1<<3)	/* some countries don't support any channel */
+
+#define	WL_SPURAVOID_OFF	0
+#define	WL_SPURAVOID_ON1	1
+#define	WL_SPURAVOID_ON2	2
+
+
+#define WL_4335_SPURAVOID_ON1	1
+#define WL_4335_SPURAVOID_ON2	2
+#define WL_4335_SPURAVOID_ON3	3
+#define WL_4335_SPURAVOID_ON4	4
+#define WL_4335_SPURAVOID_ON5	5
+#define WL_4335_SPURAVOID_ON6	6
+#define WL_4335_SPURAVOID_ON7	7
+#define WL_4335_SPURAVOID_ON8	8
+#define WL_4335_SPURAVOID_ON9	9
+
+/* Override bit for WLC_SET_TXPWR.  if set, ignore other level limits */
+#define WL_TXPWR_OVERRIDE	(1U<<31)
+#define WL_TXPWR_NEG   (1U<<30)
+
+
+/* phy types (returned by WLC_GET_PHYTYPE) */
+#define	WLC_PHY_TYPE_A		0
+#define	WLC_PHY_TYPE_B		1
+#define	WLC_PHY_TYPE_G		2
+#define	WLC_PHY_TYPE_N		4
+#define	WLC_PHY_TYPE_LP		5
+#define	WLC_PHY_TYPE_SSN	6
+#define	WLC_PHY_TYPE_HT		7
+#define	WLC_PHY_TYPE_LCN	8
+#define	WLC_PHY_TYPE_LCN40	10
+#define WLC_PHY_TYPE_AC		11
+#define	WLC_PHY_TYPE_NULL	0xf
+
+/* Values for PM */
+#define PM_OFF	0
+#define PM_MAX	1
+#define PM_FAST 2
+#define PM_FORCE_OFF 3		/* use this bit to force PM off even when BT is active */
+
+#define WL_WME_CNT_VERSION	1	/* current version of wl_wme_cnt_t */
+
+/* fbt_cap: FBT assoc / reassoc modes. */
+#define WLC_FBT_CAP_DRV_4WAY_AND_REASSOC  1 /* Driver 4-way handshake & reassoc (WLFBT). */
+
+/* monitor_promisc_level bits */
+#define WL_MONPROMISC_PROMISC 0x0001
+#define WL_MONPROMISC_CTRL 0x0002
+#define WL_MONPROMISC_FCS 0x0004
+
+/* TCP Checksum Offload defines */
+#define TOE_TX_CSUM_OL		0x00000001
+#define TOE_RX_CSUM_OL		0x00000002
+
+/* Wi-Fi Display Services (WFDS) */
+#define WL_P2P_SOCIAL_CHANNELS_MAX  WL_NUMCHANNELS
+#define MAX_WFDS_SEEK_SVC 4	/* Max # of wfds services to seek */
+#define MAX_WFDS_ADVERT_SVC 4	/* Max # of wfds services to advertise */
+#define MAX_WFDS_SVC_NAME_LEN 200	/* maximum service_name length */
+#define MAX_WFDS_ADV_SVC_INFO_LEN 65000	/* maximum adv service_info length */
+#define P2P_WFDS_HASH_LEN 6		/* Length of a WFDS service hash */
+#define MAX_WFDS_SEEK_SVC_INFO_LEN 255	/* maximum seek service_info req length */
+#define MAX_WFDS_SEEK_SVC_NAME_LEN 200	/* maximum service_name length */
+
+/* ap_isolate bitmaps */
+#define AP_ISOLATE_DISABLED		0x0
+#define AP_ISOLATE_SENDUP_ALL		0x01
+#define AP_ISOLATE_SENDUP_MCAST		0x02
+
+#endif /* wlioctl_defs_h */
diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d7297f9b6e31fcf69c34ba4dee9a16ea2d58370
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd.h
@@ -0,0 +1,1030 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd.h 491170 2014-07-15 06:23:58Z $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+/* The kernel threading is sdio-specific */
+struct task_struct;
+struct sched_param;
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+int get_scheduler_policy(struct task_struct *p);
+#define MAX_EVENT	16
+
+#define ALL_INTERFACES	0xff
+
+#include <wlioctl.h>
+#include <wlfc_proto.h>
+
+#if defined(BCMWDF)
+#include <wdf.h>
+#include <WdfMiniport.h>
+#endif /* (BCMWDF)  */
+
+#if defined(WL11U) && !defined(MFP)
+#define MFP /* Applying interaction with MFP by spec HS2.0 REL2 */
+#endif /* WL11U */
+
+#if defined(KEEP_ALIVE)
+/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR	"null_pkt"
+#endif /* KEEP_ALIVE */
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+struct dhd_ioctl;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+	DHD_BUS_DOWN,		/* Not ready for frame transfers */
+	DHD_BUS_LOAD,		/* Download access only (CPU reset) */
+	DHD_BUS_DATA,		/* Ready for frame transfers */
+	DHD_BUS_SUSPEND,	/* Bus has been suspended */
+};
+
+#if defined(NDISVER) && (NDISVER >= 0x0600)
+/* Firmware requested operation mode */
+#define STA_MASK			0x0001
+#define HOSTAPD_MASK		0x0002
+#define WFD_MASK			0x0004
+#define SOFTAP_FW_MASK	0x0008
+#define P2P_GO_ENABLED		0x0010
+#define P2P_GC_ENABLED		0x0020
+#define CONCURENT_MASK		0x00F0
+#endif /* (NDISVER >= 0x0600)  */
+
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS	16
+#define DHD_DEL_IF	-0xE
+#define DHD_BAD_IF	-0xF
+
+enum dhd_op_flags {
+/* Firmware requested operation mode */
+	DHD_FLAG_STA_MODE				= (1 << (0)), /* STA only */
+	DHD_FLAG_HOSTAP_MODE				= (1 << (1)), /* SOFTAP only */
+	DHD_FLAG_P2P_MODE				= (1 << (2)), /* P2P Only */
+	/* STA + P2P */
+	DHD_FLAG_CONCURR_SINGLE_CHAN_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_P2P_MODE),
+	DHD_FLAG_CONCURR_MULTI_CHAN_MODE		= (1 << (4)), /* STA + P2P */
+	/* Current P2P mode for P2P connection */
+	DHD_FLAG_P2P_GC_MODE				= (1 << (5)),
+	DHD_FLAG_P2P_GO_MODE				= (1 << (6)),
+	DHD_FLAG_MBSS_MODE				= (1 << (7)), /* MBSS in future */
+	DHD_FLAG_IBSS_MODE				= (1 << (8)),
+	DHD_FLAG_MFG_MODE				= (1 << (9))
+};
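The flags above are combined in the op_mode bitmask of dhd_pub_t (declared later in this header), so roles are tested by masking rather than by comparing for equality. A hypothetical check is sketched below; is_p2p_group_owner() is invented for illustration and is not a driver function.

/* Illustrative only: op_mode is a bitmask of the DHD_FLAG_* values above. */
static int is_p2p_group_owner(int op_mode)
{
	return (op_mode & DHD_FLAG_P2P_GO_MODE) != 0;
}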
+
+/* Max sequential TX/RX Control timeouts to set HANG event */
+#ifndef MAX_CNTL_TX_TIMEOUT
+#define MAX_CNTL_TX_TIMEOUT 2
+#endif /* MAX_CNTL_TX_TIMEOUT */
+#ifndef MAX_CNTL_RX_TIMEOUT
+#define MAX_CNTL_RX_TIMEOUT 1
+#endif /* MAX_CNTL_RX_TIMEOUT */
+
+#define DHD_SCAN_ASSOC_ACTIVE_TIME	40 /* ms: Embedded default Active setting from DHD */
+#define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. Unassoc Active setting from DHD */
+#define DHD_SCAN_PASSIVE_TIME		130 /* ms: Embedded default Passive setting from DHD */
+
+#ifndef POWERUP_MAX_RETRY
+#define POWERUP_MAX_RETRY	3 /* how many times we retry to power up the chip */
+#endif
+#ifndef POWERUP_WAIT_MS
+#define POWERUP_WAIT_MS		2000 /* ms: timeout waiting for wifi to come up */
+#endif
+
+enum dhd_bus_wake_state {
+	WAKE_LOCK_OFF,
+	WAKE_LOCK_PRIV,
+	WAKE_LOCK_DPC,
+	WAKE_LOCK_IOCTL,
+	WAKE_LOCK_DOWNLOAD,
+	WAKE_LOCK_TMOUT,
+	WAKE_LOCK_WATCHDOG,
+	WAKE_LOCK_LINK_DOWN_TMOUT,
+	WAKE_LOCK_PNO_FIND_TMOUT,
+	WAKE_LOCK_SOFTAP_SET,
+	WAKE_LOCK_SOFTAP_STOP,
+	WAKE_LOCK_SOFTAP_START,
+	WAKE_LOCK_SOFTAP_THREAD
+};
+
+enum dhd_prealloc_index {
+	DHD_PREALLOC_PROT = 0,
+	DHD_PREALLOC_RXBUF,
+	DHD_PREALLOC_DATABUF,
+	DHD_PREALLOC_OSL_BUF,
+#if defined(STATIC_WL_PRIV_STRUCT)
+	DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+#endif /* STATIC_WL_PRIV_STRUCT */
+	DHD_PREALLOC_DHD_INFO = 7
+};
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+
+/* host packet reordering logic */
+/* structure to hold the reorder buffers (void **p) */
+typedef struct reorder_info {
+	void **p;
+	uint8 flow_id;
+	uint8 cur_idx;
+	uint8 exp_idx;
+	uint8 max_idx;
+	uint8 pend_pkts;
+} reorder_info_t;
+
+#ifdef DHDTCPACK_SUPPRESS
+
+enum {
+	/* TCPACK suppress off */
+	TCPACK_SUP_OFF,
+	/* Replace TCPACK in txq when new coming one has higher ACK number. */
+	TCPACK_SUP_REPLACE,
+	/* TCPACK_SUP_REPLACE + delayed TCPACK TX unless it ACKs PSH DATA.
+	 * This benefits a half-duplex bus interface (e.g. SDIO) in two ways:
+	 * 1. TCP DATA packets can be read first from the bus.
+	 * 2. TCP ACKs that are not urgent stay longer in the TXQ, so they can be suppressed.
+	 */
+	TCPACK_SUP_DELAYTX,
+	TCPACK_SUP_LAST_MODE
+};
+#endif /* DHDTCPACK_SUPPRESS */
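The TCPACK_SUP_REPLACE mode above relies on TCP ACKs being cumulative: a pure ACK still sitting in the txq can be dropped once a newer ACK for the same flow carries a higher acknowledgment number. The sketch below models only that decision; the struct and function names are invented for illustration and do not reflect the driver's actual bookkeeping.

/* Simplified, hypothetical model of the "replace" policy described above. */
struct tcpack_slot_example {
	unsigned int ack_no;	/* highest ACK number currently queued for this flow */
	int in_use;		/* non-zero when an ACK is queued */
};

static int should_replace_queued_ack(const struct tcpack_slot_example *slot,
	unsigned int new_ack_no)
{
	if (!slot->in_use)
		return 0;	/* nothing to replace; the new ACK is simply queued */
	/* serial-number comparison: replace only when the new ACK supersedes the old one */
	return (int)(new_ack_no - slot->ack_no) > 0;
}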
+
+
+/* DMA'ing r/w indices for rings supported */
+#ifdef BCM_INDX_TCM /* FW gets r/w indices in TCM */
+#define DMA_INDX_ENAB(dma_indxsup)	0
+#elif defined BCM_INDX_DMA  /* FW gets r/w indices from Host memory */
+#define DMA_INDX_ENAB(dma_indxsup)	1
+#else	/* r/w indices in TCM or host memory based on FW/Host agreement */
+#define DMA_INDX_ENAB(dma_indxsup)	dma_indxsup
+#endif	/* BCM_INDX_TCM */
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+struct tdls_peer_node {
+	uint8 addr[ETHER_ADDR_LEN];
+	struct tdls_peer_node *next;
+};
+typedef struct tdls_peer_node tdls_peer_node_t;
+typedef struct {
+	tdls_peer_node_t *node;
+	uint8 tdls_peer_count;
+} tdls_peer_tbl_t;
+#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
+
+/* Common structure for module and instance linkage */
+typedef struct dhd_pub {
+	/* Linkage pointers */
+	osl_t *osh;		/* OSL handle */
+	struct dhd_bus *bus;	/* Bus module handle */
+	struct dhd_prot *prot;	/* Protocol module handle */
+	struct dhd_info  *info; /* Info module handle */
+
+	/* To NDIS developers: the structure dhd_common is redundant,
+	 * please do NOT merge it back from other branches !!!
+	 */
+
+
+	/* Internal dhd items */
+	bool up;		/* Driver up/down (to OS) */
+	bool txoff;		/* Transmit flow-controlled */
+	bool dongle_reset;  /* TRUE = DEVRESET put dongle into reset */
+	enum dhd_bus_state busstate;
+	uint hdrlen;		/* Total DHD header length (proto + bus) */
+	uint maxctl;		/* Max size rxctl request from proto to bus */
+	uint rxsz;		/* Rx buffer size bus module should use */
+	uint8 wme_dp;	/* wme discard priority */
+
+	/* Dongle media info */
+	bool iswl;		/* Dongle-resident driver is wl */
+	ulong drv_version;	/* Version of dongle-resident driver */
+	struct ether_addr mac;	/* MAC address obtained from dongle */
+	dngl_stats_t dstats;	/* Stats for dongle-based data */
+
+	/* Additional stats for the bus level */
+	ulong tx_packets;	/* Data packets sent to dongle */
+	ulong tx_dropped;	/* Data packets dropped in dhd */
+	ulong tx_multicast;	/* Multicast data packets sent to dongle */
+	ulong tx_errors;	/* Errors in sending data to dongle */
+	ulong tx_ctlpkts;	/* Control packets sent to dongle */
+	ulong tx_ctlerrs;	/* Errors sending control frames to dongle */
+	ulong rx_packets;	/* Packets sent up the network interface */
+	ulong rx_multicast;	/* Multicast packets sent up the network interface */
+	ulong rx_errors;	/* Errors processing rx data packets */
+	ulong rx_ctlpkts;	/* Control frames processed from dongle */
+	ulong rx_ctlerrs;	/* Errors in processing rx control frames */
+	ulong rx_dropped;	/* Packets dropped locally (no memory) */
+	ulong rx_flushed;  /* Packets flushed due to unscheduled sendup thread */
+	ulong wd_dpc_sched;   /* Number of times dhd dpc scheduled by watchdog timer */
+
+	ulong rx_readahead_cnt;	/* Number of packets where header read-ahead was used. */
+	ulong tx_realloc;	/* Number of tx packets we had to realloc for headroom */
+	ulong fc_packets;       /* Number of flow control pkts recvd */
+
+	/* Last error return */
+	int bcmerror;
+	uint tickcnt;
+
+	/* Last error from dongle */
+	int dongle_error;
+
+	uint8 country_code[WLC_CNTRY_BUF_SZ];
+
+	/* Suspend disable flag and "in suspend" flag */
+	int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
+	int in_suspend;			/* flag set to 1 when early suspend called */
+#ifdef PNO_SUPPORT
+	int pno_enable;			/* pno status : "1" is pno enable */
+	int pno_suspend;		/* pno suspend status : "1" is pno suspended */
+#endif /* PNO_SUPPORT */
+	/* DTIM skip value: the default 0 (or 1) means wake at each DTIM;
+	 * 3 means skip 2 DTIMs and wake at the 3rd DTIM (9th beacon when AP DTIM is 3)
+	 */
+	int suspend_bcn_li_dtim;         /* bcn_li_dtim value in suspend mode */
+#ifdef PKT_FILTER_SUPPORT
+	int early_suspended;	/* Early suspend status */
+	int dhcp_in_progress;	/* DHCP period */
+#endif
+
+	/* Pkt filter definition */
+	char * pktfilter[100];
+	int pktfilter_count;
+
+	wl_country_t dhd_cspec;		/* Current Locale info */
+	char eventmask[WL_EVENTING_MASK_LEN];
+	int	op_mode;				/* STA, HostAPD, WFD, SoftAP */
+
+/* Set this to 1 to use a separate interface (p2p0) for p2p operations.
+ *  For ICS MR1 releases it should be disabled to be compatible with the ICS MR1 Framework;
+ *  see target dhd-cdc-sdmmc-panda-cfg80211-icsmr1-gpl-debug in Makefile
+ */
+/* #define WL_ENABLE_P2P_IF		1 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	struct mutex 	wl_start_stop_lock; /* lock/unlock for Android start/stop */
+	struct mutex 	wl_softap_lock;		 /* lock/unlock for any SoftAP/STA settings */
+#endif 
+
+#ifdef WLBTAMP
+	uint16	maxdatablks;
+#endif /* WLBTAMP */
+#ifdef PROP_TXSTATUS
+	bool	wlfc_enabled;
+	int	wlfc_mode;
+	void*	wlfc_state;
+	/*
+	Mode in which the dhd flow control shall operate. Must be set before
+	traffic starts to the device.
+	0 - Do not do any proptxstatus flow control
+	1 - Use implied credit from a packet status
+	2 - Use explicit credit
+	3 - Only AMPDU hostreorder used. no wlfc.
+	*/
+	uint8	proptxstatus_mode;
+	bool	proptxstatus_txoff;
+	bool	proptxstatus_module_ignore;
+	bool	proptxstatus_credit_ignore;
+	bool	proptxstatus_txstatus_ignore;
+
+	bool	wlfc_rxpkt_chk;
+	/*
+	 * Implement the functions below in each platform if needed.
+	 */
+	/* platform specific function whether to skip flow control */
+	bool (*skip_fc)(void);
+	/* platform specific function for wlfc_enable and wlfc_deinit */
+	void (*plat_init)(void *dhd);
+	void (*plat_deinit)(void *dhd);
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+	void *pno_state;
+#endif
+	bool	dongle_isolation;
+	bool	dongle_trap_occured;	/* flag for sending HANG event to upper layer */
+	int   hang_was_sent;
+	int   rxcnt_timeout;		/* rx control timeout counter before sending HANG */
+	int   txcnt_timeout;		/* tx control timeout counter before sending HANG */
+	bool hang_report;		/* enable hang report by default */
+#ifdef WLMEDIA_HTSF
+	uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */
+#endif
+#ifdef WLTDLS
+	bool tdls_enable;
+#endif
+	struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
+	char  fw_capabilities[WLC_IOCTL_SMLEN];
+	#define MAXSKBPEND 1024
+	void *skbbuf[MAXSKBPEND];
+	uint32 store_idx;
+	uint32 sent_idx;
+#ifdef DHDTCPACK_SUPPRESS
+	uint8 tcpack_sup_mode;		/* TCPACK suppress mode */
+	void *tcpack_sup_module;	/* TCPACK suppress module */
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(ARP_OFFLOAD_SUPPORT)
+	uint32 arp_version;
+#endif
+#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
+	bool fw_4way_handshake;		/* Whether firmware will do the 4-way handshake. */
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+	struct task_struct * current_dpc;
+	struct task_struct * current_rxf;
+	int chan_isvht80;
+#endif /* CUSTOM_SET_CPUCORE */
+
+
+	void    *sta_pool;          /* pre-allocated pool of sta objects */
+	void    *staid_allocator;   /* allocator of sta indexes */
+
+	void    *flowid_allocator;  /* unique flowid allocator */
+	void	*flow_ring_table;   /* flow ring table, include prot and bus info */
+	void	*if_flow_lkup;      /* per-interface flowid lookup hash table */
+	uint32  num_flow_rings;
+	uint8  flow_prio_map[NUMPRIO];
+	uint8	flow_prio_map_type;
+	char enable_log[MAX_EVENT];
+	bool dma_d2h_ring_upd_support;
+	bool dma_h2d_ring_upd_support;
+#ifdef DHD_WMF
+	bool wmf_ucast_igmp;
+#ifdef DHD_IGMP_UCQUERY
+	bool wmf_ucast_igmp_query;
+#endif
+#ifdef DHD_UCAST_UPNP
+	bool wmf_ucast_upnp;
+#endif
+#endif /* DHD_WMF */
+#ifdef DHD_UNICAST_DHCP
+	bool dhcp_unicast;
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+	bool block_ping;
+#endif
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+	tdls_peer_tbl_t peer_tbl;
+#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
+} dhd_pub_t;
+
+#if defined(BCMWDF)
+typedef struct {
+	dhd_pub_t *dhd_pub;
+} dhd_workitem_context_t;
+
+WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context)
+#endif /* (BCMWDF)  */
+
+	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+
+	#define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define _DHD_PM_RESUME_WAIT(a, b) do {\
+			int retry = 0; \
+			SMP_RD_BARRIER_DEPENDS(); \
+			while (dhd_mmc_suspend && retry++ != b) { \
+				SMP_RD_BARRIER_DEPENDS(); \
+				wait_event_interruptible_timeout(a, !dhd_mmc_suspend, 1); \
+			} \
+		} 	while (0)
+	#define DHD_PM_RESUME_WAIT(a) 		_DHD_PM_RESUME_WAIT(a, 200)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a) 	_DHD_PM_RESUME_WAIT(a, ~0)
+	#ifdef CUSTOMER_HW4
+		#define DHD_PM_RESUME_RETURN_ERROR(a)   do { \
+				if (dhd_mmc_suspend) { \
+					printf("%s[%d]: mmc is still in suspend state!!!\n", \
+							__FUNCTION__, __LINE__); \
+					return a; \
+				} \
+			} while (0)
+	#else
+		#define DHD_PM_RESUME_RETURN_ERROR(a)	do { \
+			if (dhd_mmc_suspend) return a; } while (0)
+	#endif 
+	#define DHD_PM_RESUME_RETURN		do { if (dhd_mmc_suspend) return; } while (0)
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define SPINWAIT_SLEEP(a, exp, us) do { \
+		uint countdown = (us) + 9999; \
+		while ((exp) && (countdown >= 10000)) { \
+			wait_event_interruptible_timeout(a, FALSE, 1); \
+			countdown -= 10000; \
+		} \
+	} while (0)
+
+	#else
+
+	#define DHD_PM_RESUME_WAIT_INIT(a)
+	#define DHD_PM_RESUME_WAIT(a)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a)
+	#define DHD_PM_RESUME_RETURN_ERROR(a)
+	#define DHD_PM_RESUME_RETURN
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a)
+	#define SPINWAIT_SLEEP(a, exp, us)  do { \
+		uint countdown = (us) + 9; \
+		while ((exp) && (countdown >= 10)) { \
+			OSL_DELAY(10);  \
+			countdown -= 10;  \
+		} \
+	} while (0)
+
+	#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#ifndef OSL_SLEEP
+#define OSL_SLEEP(ms)		OSL_DELAY((ms) * 1000)
+#endif /* OSL_SLEEP */
+
+#define DHD_IF_VIF	0x01	/* Virtual IF (Hidden from user) */
+
+#ifdef PNO_SUPPORT
+int dhd_pno_clean(dhd_pub_t *dhd);
+#endif /* PNO_SUPPORT */
+/*
+ *  Wake locks are an Android power management concept. They are used by applications and services
+ *  to request CPU resources.
+ */
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_waive(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_restore(dhd_pub_t *pub);
+
+inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	mutex_init(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	mutex_lock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	mutex_unlock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+#define DHD_OS_WAKE_LOCK(pub)			dhd_os_wake_lock(pub)
+#define DHD_OS_WAKE_UNLOCK(pub)		dhd_os_wake_unlock(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub)		dhd_os_wake_lock_timeout(pub)
+#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
+	dhd_os_wake_lock_rx_timeout_enable(pub, val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
+	dhd_os_wake_lock_ctrl_timeout_enable(pub, val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
+	dhd_os_wake_lock_ctrl_timeout_cancel(pub)
+#define DHD_OS_WAKE_LOCK_WAIVE(pub)             dhd_os_wake_lock_waive(pub)
+#define DHD_OS_WAKE_LOCK_RESTORE(pub)           dhd_os_wake_lock_restore(pub)
+
+#define DHD_OS_WD_WAKE_LOCK(pub)		dhd_os_wd_wake_lock(pub)
+#define DHD_OS_WD_WAKE_UNLOCK(pub)		dhd_os_wd_wake_unlock(pub)
+#define DHD_PACKET_TIMEOUT_MS	500
+#define DHD_EVENT_TIMEOUT_MS	1500
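The wake-lock macros defined above are intended to bracket work that must not be cut short by a system suspend. A minimal usage sketch follows; dhd_with_wake_lock() is a hypothetical wrapper written for illustration, not an interface of this header.

/* Illustrative only: hold a wake lock around a caller-supplied work item. */
static void dhd_with_wake_lock(dhd_pub_t *dhdp, void (*work)(dhd_pub_t *))
{
	DHD_OS_WAKE_LOCK(dhdp);		/* keep the host awake while the work runs */
	work(dhdp);
	DHD_OS_WAKE_UNLOCK(dhdp);	/* allow suspend again */
}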
+
+
+/* interface operations (register, remove) should be atomic, use this lock to prevent race
+ * condition among wifi on/off and interface operation functions
+ */
+void dhd_net_if_lock(struct net_device *dev);
+void dhd_net_if_unlock(struct net_device *dev);
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
+extern struct mutex _dhd_sdio_mutex_lock_;
+#endif
+#endif /* MULTIPLE_SUPPLICANT */
+
+typedef enum dhd_attach_states
+{
+	DHD_ATTACH_STATE_INIT = 0x0,
+	DHD_ATTACH_STATE_NET_ALLOC = 0x1,
+	DHD_ATTACH_STATE_DHD_ALLOC = 0x2,
+	DHD_ATTACH_STATE_ADD_IF = 0x4,
+	DHD_ATTACH_STATE_PROT_ATTACH = 0x8,
+	DHD_ATTACH_STATE_WL_ATTACH = 0x10,
+	DHD_ATTACH_STATE_THREADS_CREATED = 0x20,
+	DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40,
+	DHD_ATTACH_STATE_CFG80211 = 0x80,
+	DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100,
+	DHD_ATTACH_STATE_DONE = 0x200
+} dhd_attach_states_t;
+
+/* Value -1 means we are unsuccessful in creating the kthread. */
+#define DHD_PID_KT_INVALID 	-1
+/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */
+#define DHD_PID_KT_TL_INVALID	-2
+
+/*
+ * Exported from dhd OS modules (dhd_linux/dhd_ndis)
+ */
+
+/* Indication from bus module regarding presence/insertion of dongle.
+ * Return dhd_pub_t pointer, used as handle to OS module in later calls.
+ * Returned structure should have bus and prot pointers filled in.
+ * bus_hdrlen specifies required headroom for bus module header.
+ */
+extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+#if defined(WLP2P) && defined(WL_CFG80211)
+/* To allow attach/detach calls corresponding to p2p0 interface  */
+extern int dhd_attach_p2p(dhd_pub_t *);
+extern int dhd_detach_p2p(dhd_pub_t *);
+#endif /* WLP2P && WL_CFG80211 */
+extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void dhd_detach(dhd_pub_t *dhdp);
+extern void dhd_free(dhd_pub_t *dhdp);
+
+/* Indication from bus module to change flow-control state */
+extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+extern void dhd_store_conn_status(uint32 event, uint32 status, uint32 reason);
+
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+
+/* Receive frame for delivery to OS.  Callee disposes of rxp. */
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan);
+
+/* Return pointer to interface name */
+extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
+
+/* Request scheduling of the bus dpc */
+extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+
+/* Notify tx completion */
+extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+
+/* OS independent layer functions */
+extern int dhd_os_proto_block(dhd_pub_t * pub);
+extern int dhd_os_proto_unblock(dhd_pub_t * pub);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending);
+extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
+extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
+extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+#if 0 && (NDISVER >= 0x0600)
+#define dhd_os_open_image(a) wl_os_open_image(a)
+#define dhd_os_close_image(a) wl_os_close_image(a)
+#define dhd_os_get_image_block(a, b, c) wl_os_get_image_block(a, b, c)
+#endif /* (NDISVER >= 0x0600)  */
+
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern void * dhd_os_open_image(char * filename);
+extern void dhd_os_close_image(void * image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+#ifdef DHDTCPACK_SUPPRESS
+extern void dhd_os_tcpacklock(dhd_pub_t *pub);
+extern void dhd_os_tcpackunlock(dhd_pub_t *pub);
+#endif /* DHDTCPACK_SUPPRESS */
+
+extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr);
+extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff);
+extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf);
+extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec);
+extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
+extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret);
+extern int dhd_os_send_hang_message(dhd_pub_t *dhdp);
+extern void dhd_set_version_info(dhd_pub_t *pub, char *fw);
+extern bool dhd_os_check_if_up(dhd_pub_t *pub);
+extern int dhd_os_check_wakelock(dhd_pub_t *pub);
+extern int dhd_get_instance(dhd_pub_t *pub);
+#ifdef CUSTOM_SET_CPUCORE
+extern void dhd_set_cpucore(dhd_pub_t *dhd, int set);
+#endif /* CUSTOM_SET_CPUCORE */
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+
+#ifdef PKT_FILTER_SUPPORT
+#define DHD_UNICAST_FILTER_NUM		0
+#define DHD_BROADCAST_FILTER_NUM	1
+#define DHD_MULTICAST4_FILTER_NUM	2
+#define DHD_MULTICAST6_FILTER_NUM	3
+#define DHD_MDNS_FILTER_NUM		4
+#define DHD_ARP_FILTER_NUM		5
+extern int 	dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val);
+extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd);
+extern int net_os_enable_packet_filter(struct net_device *dev, int val);
+extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
+#endif /* PKT_FILTER_SUPPORT */
+
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+extern bool dhd_support_sta_mode(dhd_pub_t *dhd);
+
+#ifdef DHD_DEBUG
+extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
+#endif /* DHD_DEBUG */
+
+typedef struct {
+	uint32 limit;		/* Expiration time (usec) */
+	uint32 increment;	/* Current expiration increment (usec) */
+	uint32 elapsed;		/* Current elapsed time (usec) */
+	uint32 tick;		/* O/S tick time (usec) */
+} dhd_timeout_t;
+
+#ifdef SHOW_LOGTRACE
+typedef struct {
+	int  num_fmts;
+	char **fmts;
+	char *raw_fmts;
+} dhd_event_log_t;
+#endif /* SHOW_LOGTRACE */
+
+extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
+extern int dhd_timeout_expired(dhd_timeout_t *tmo);
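dhd_timeout_start() arms the soft timeout and dhd_timeout_expired() is then polled until either the awaited condition becomes true or the limit (in microseconds) elapses. The loop below is a hedged sketch of that pattern; wait_for_flag_example() is not part of the driver.

/* Illustrative only: poll a flag until it is set or the timeout expires. */
static int wait_for_flag_example(volatile int *flag, uint limit_usec)
{
	dhd_timeout_t tmo;

	dhd_timeout_start(&tmo, limit_usec);
	while (*flag == 0) {
		if (dhd_timeout_expired(&tmo))
			return 0;	/* timed out */
	}
	return 1;			/* condition met within the limit */
}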
+
+extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+extern int dhd_ifidx2hostidx(struct dhd_info *dhd, int ifidx);
+extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net);
+extern struct net_device * dhd_idx2net(void *pub, int ifidx);
+extern int net_os_send_hang_message(struct net_device *dev);
+extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata,
+                         wl_event_msg_t *, void **data_ptr,  void *);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+
+extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
+extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
+                            int ifindex);
+extern void dhd_common_init(osl_t *osh);
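dhd_wl_ioctl_cmd() above wraps a single wl ioctl against a given interface index. The sketch below shows the calling convention only; WLC_SET_ROAM_TRIGGER comes from wlioctl.h and is referenced in the roam-trigger comments earlier in this patch, but the exact payload layout expected by the firmware is assumed here and shown as a single int for brevity.

/* Illustrative only: the payload format is an assumption, not the firmware's contract. */
static int set_roam_trigger_example(dhd_pub_t *dhd_pub, int trigger_dbm)
{
	int val = trigger_dbm;

	/* arguments: command, buffer, buffer length, set (vs. get), interface index */
	return dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_ROAM_TRIGGER, &val, sizeof(val), TRUE, 0);
}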
+
+extern int dhd_do_driver_init(struct net_device *net);
+extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+	char *name, uint8 *mac);
+extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+	char *name, uint8 *mac);
+extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx, bool need_rtnl_lock);
+extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock);
+extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
+extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
+extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
+extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* send up locally generated event */
+extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+#ifdef LOG_INTO_TCPDUMP
+extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len);
+#endif /* LOG_INTO_TCPDUMP */
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int  dhd_bus_start(dhd_pub_t *dhdp);
+extern int dhd_bus_suspend(dhd_pub_t *dhdpub);
+extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage);
+extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
+extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+extern bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval);
+#if defined(BCMSDIO) || defined(BCMPCIE)
+extern uint dhd_bus_chip_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp);
+#endif /* defined(BCMSDIO) || defined(BCMPCIE) */
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+/* OS spin lock API */
+extern void *dhd_os_spin_lock_init(osl_t *osh);
+extern void dhd_os_spin_lock_deinit(osl_t *osh, void *lock);
+extern unsigned long dhd_os_spin_lock(void *lock);
+void dhd_os_spin_unlock(void *lock, unsigned long flags);
+
+/*
+ * Manage sta objects in an interface. An interface is identified by an ifindex and
+ * sta(s) within an interface are managed using the MAC address of the sta.
+ */
+struct dhd_sta;
+extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea);
+extern void dhd_del_sta(void *pub, int ifidx, void *ea);
+extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx);
+
+extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd);
+extern int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set);
+typedef enum cust_gpio_modes {
+	WLAN_RESET_ON,
+	WLAN_RESET_OFF,
+	WLAN_POWER_ON,
+	WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+
+#if defined(DHD_DEBUG)
+/* Console output poll interval */
+extern uint dhd_console_ms;
+extern uint wl_msg_level;
+#endif /* defined(DHD_DEBUG) */
+
+extern uint dhd_slpauto;
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* ARP offload enable */
+extern uint dhd_arp_enable;
+
+/* Pkt filter enable control */
+extern uint dhd_pkt_filter_enable;
+
+/*  Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam_disable;
+
+/* Radio state control */
+extern uint dhd_radio_up;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#ifdef DHD_USE_IDLECOUNT
+#define DHD_IDLETIME_TICKS 5
+#else
+#define DHD_IDLETIME_TICKS 1
+#endif /* DHD_USE_IDLECOUNT */
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
+#define DEFAULT_KEEP_ALIVE_VALUE 	55000 /* msec */
+#ifndef CUSTOM_KEEP_ALIVE_SETTING
+#define CUSTOM_KEEP_ALIVE_SETTING 	DEFAULT_KEEP_ALIVE_VALUE
+#endif /* CUSTOM_KEEP_ALIVE_SETTING */
+
+#define NULL_PKT_STR	"null_pkt"
+
+/* hooks for custom glom setting option via Makefile */
+#define DEFAULT_GLOM_VALUE 	-1
+#ifndef CUSTOM_GLOM_SETTING
+#define CUSTOM_GLOM_SETTING 	DEFAULT_GLOM_VALUE
+#endif
+#define WL_AUTO_ROAM_TRIGGER -75
+/* hooks for custom Roaming Trigger  setting via Makefile */
+#define DEFAULT_ROAM_TRIGGER_VALUE -75 /* dBm default roam trigger all band */
+#define DEFAULT_ROAM_TRIGGER_SETTING 	-1
+#ifndef CUSTOM_ROAM_TRIGGER_SETTING
+#define CUSTOM_ROAM_TRIGGER_SETTING 	DEFAULT_ROAM_TRIGGER_VALUE
+#endif
+
+/* hooks for custom Roaming Delta setting via Makefile */
+#define DEFAULT_ROAM_DELTA_VALUE  10 /* dBm default roam delta all band */
+#define DEFAULT_ROAM_DELTA_SETTING 	-1
+#ifndef CUSTOM_ROAM_DELTA_SETTING
+#define CUSTOM_ROAM_DELTA_SETTING 	DEFAULT_ROAM_DELTA_VALUE
+#endif
+
+/* hooks for custom PNO Event wake lock to guarantee enough time
+	for the platform to detect the event before the system is suspended
+*/
+#define DEFAULT_PNO_EVENT_LOCK_xTIME 	2 	/* multiple of DHD_PACKET_TIMEOUT_MS */
+#ifndef CUSTOM_PNO_EVENT_LOCK_xTIME
+#define CUSTOM_PNO_EVENT_LOCK_xTIME	 DEFAULT_PNO_EVENT_LOCK_xTIME
+#endif
+/* hooks for custom dhd_dpc_prio setting option via Makefile */
+#define DEFAULT_DHP_DPC_PRIO  1
+#ifndef CUSTOM_DPC_PRIO_SETTING
+#define CUSTOM_DPC_PRIO_SETTING 	DEFAULT_DHP_DPC_PRIO
+#endif
+
+#ifndef CUSTOM_LISTEN_INTERVAL
+#define CUSTOM_LISTEN_INTERVAL 		LISTEN_INTERVAL
+#endif /* CUSTOM_LISTEN_INTERVAL */
+
+#define DEFAULT_SUSPEND_BCN_LI_DTIM		3
+#ifndef CUSTOM_SUSPEND_BCN_LI_DTIM
+#define CUSTOM_SUSPEND_BCN_LI_DTIM		DEFAULT_SUSPEND_BCN_LI_DTIM
+#endif
+
+#ifndef CUSTOM_RXF_PRIO_SETTING
+#define CUSTOM_RXF_PRIO_SETTING		MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1)
+#endif
+
+#define DEFAULT_WIFI_TURNOFF_DELAY		0
+#ifndef WIFI_TURNOFF_DELAY
+#define WIFI_TURNOFF_DELAY		DEFAULT_WIFI_TURNOFF_DELAY
+#endif /* WIFI_TURNOFF_DELAY */
+
+#define DEFAULT_WIFI_TURNON_DELAY		200
+#ifndef WIFI_TURNON_DELAY
+#define WIFI_TURNON_DELAY		DEFAULT_WIFI_TURNON_DELAY
+#endif /* WIFI_TURNON_DELAY */
+
+#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS	10 /* msec */
+#ifndef CUSTOM_DHD_WATCHDOG_MS
+#define CUSTOM_DHD_WATCHDOG_MS			DEFAULT_DHD_WATCHDOG_INTERVAL_MS
+#endif /* CUSTOM_DHD_WATCHDOG_MS */
+
+#ifdef WLTDLS
+#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING
+#define CUSTOM_TDLS_IDLE_MODE_SETTING  60000 /* 60 sec to tear down TDLS if not active */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
+#define CUSTOM_TDLS_RSSI_THRESHOLD_HIGH -70 /* rssi threshold for establishing TDLS link */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_LOW
+#define CUSTOM_TDLS_RSSI_THRESHOLD_LOW -80 /* rssi threshold for tearing down TDLS link */
+#endif
+#endif /* WLTDLS */
+
+
+#define MAX_DTIM_SKIP_BEACON_INTERVAL	100 /* max allowed associated AP beacon interval for DTIM skip */
+#ifndef MAX_DTIM_ALLOWED_INTERVAL
+#define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */
+#endif
+#define NO_DTIM_SKIP 1
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN	2048
+#define MOD_PARAM_INFOLEN	512
+
+#ifdef SOFTAP
+extern char fw_path2[MOD_PARAM_PATHLEN];
+#endif
+
+/* Flag to indicate if we should download firmware on driver load */
+extern uint dhd_download_fw_on_driverload;
+
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
+
+#define IFLOCK_INIT(lock)       *lock = 0
+#define IFLOCK(lock)    while (InterlockedCompareExchange((lock), 1, 0))	\
+	NdisStallExecution(1);
+#define IFUNLOCK(lock)  InterlockedExchange((lock), 0)
+#define IFLOCK_FREE(lock)
+#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, #capa) != NULL))
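FW_SUPPORTED() stringizes its second argument and searches the capability string the dongle reported into fw_capabilities. A small usage sketch follows; the "p2p" capability name is only an example, and the check assumes the usual kernel string helpers are available through the headers already included here.

/* Illustrative only: test whether the dongle advertised a given capability. */
static int fw_has_p2p_example(dhd_pub_t *dhd)
{
	/* expands to: strstr(dhd->fw_capabilities, "p2p") != NULL */
	return FW_SUPPORTED(dhd, p2p);
}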
+#ifdef ARP_OFFLOAD_SUPPORT
+#define MAX_IPV4_ENTRIES	8
+void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode);
+void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable);
+
+/* dhd_common ARP offload wrappers */
+void dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx);
+void dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx);
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef WLTDLS
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac);
+#ifdef PCIE_FULL_DONGLE
+void dhd_tdls_update_peer_info(struct net_device *dev, bool connect_disconnect, uint8 *addr);
+#endif /* PCIE_FULL_DONGLE */
+#endif /* WLTDLS */
+/* Neighbor Discovery Offload Support */
+int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable);
+int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx);
+int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx);
+/* ioctl processing for nl80211 */
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf);
+
+void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path);
+void dhd_set_bus_state(void *bus, uint32 state);
+
+/* Remove proper pkts (either one non-fragmented pkt or a whole set of fragmented pkts) */
+typedef int (*f_droppkt_t)(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ);
+extern bool dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn);
+
+#ifdef PROP_TXSTATUS
+int dhd_os_wlfc_block(dhd_pub_t *pub);
+int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+extern const uint8 prio2fifo[];
+#endif /* PROP_TXSTATUS */
+
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail);
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size);
+
+int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost);
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+#define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE)
+#define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size)
+#else
+#define DHD_OS_PREALLOC(dhdpub, section, size) MALLOC(dhdpub->osh, size)
+#define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size)
+#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
+
+
+#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid)  do {} while (0)
+#define dhd_del_flowid(pub, ifidx, flowid)               do {} while (0)
+
+extern unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub);
+extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+/** Miscellaneous DHD Spin Locks */
+
+/* Disable router 3GMAC bypass path perimeter lock */
+#define DHD_PERIM_LOCK(dhdp)              do {} while (0)
+#define DHD_PERIM_UNLOCK(dhdp)            do {} while (0)
+
+/* Enable DHD general spin lock/unlock */
+#define DHD_GENERAL_LOCK(dhdp, flags) \
+	(flags) = dhd_os_general_spin_lock(dhdp)
+#define DHD_GENERAL_UNLOCK(dhdp, flags) \
+	dhd_os_general_spin_unlock((dhdp), (flags))
+
+/* Enable DHD flowring queue spin lock/unlock */
+#define DHD_QUEUE_LOCK(lock, flags)       (flags) = dhd_os_spin_lock(lock)
+#define DHD_QUEUE_UNLOCK(lock, flags)     dhd_os_spin_unlock((lock), (flags))
+
+
+
+typedef struct wl_io_pport {
+	dhd_pub_t *dhd_pub;
+	uint ifidx;
+} wl_io_pport_t;
+
+extern void *dhd_pub_wlinfo(dhd_pub_t *dhd_pub);
+#ifdef EXYNOS5433_PCIE_WAR
+extern void exynos_pcie_set_l1_exit(void);
+extern void exynos_pcie_clear_l1_exit(void);
+extern int enum_wifi;
+#endif /* EXYNOS5433_PCIE_WAR */
+#endif /* _dhd_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.c b/drivers/net/wireless/bcmdhd/dhd_bta.c
new file mode 100644
index 0000000000000000000000000000000000000000..6a4ef7938a91da68d6bfa600823848c691a9aacd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.c
@@ -0,0 +1,321 @@
+/*
+ * BT-AMP support routines
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_bta.c 434434 2013-11-06 07:16:02Z $
+ */
+#ifndef WLBTAMP
+#error "WLBTAMP is not defined"
+#endif	/* WLBTAMP */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmcdc.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/802.11.h>
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhdioctl.h>
+#include <dhd_dbg.h>
+
+#include <dhd_bta.h>
+
+
+#ifdef SEND_HCI_CMD_VIA_IOCTL
+#define BTA_HCI_CMD_MAX_LEN HCI_CMD_PREAMBLE_SIZE + HCI_CMD_DATA_SIZE
+
+/* Send HCI cmd via wl iovar HCI_cmd to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+	amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+	uint8 buf[BTA_HCI_CMD_MAX_LEN + 16];
+	uint len = sizeof(buf);
+	wl_ioctl_t ioc;
+
+	if (cmd_len < HCI_CMD_PREAMBLE_SIZE)
+		return BCME_BADLEN;
+
+	if ((uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE > cmd_len)
+		return BCME_BADLEN;
+
+	len = bcm_mkiovar("HCI_cmd",
+		(char *)cmd, (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE, (char *)buf, len);
+
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	ioc.set = TRUE;
+
+	return dhd_wl_ioctl(pub, &ioc, ioc.buf, ioc.len);
+}
+#else /* !SEND_HCI_CMD_VIA_IOCTL */
+
+static void
+dhd_bta_flush_hcidata(dhd_pub_t *pub, uint16 llh)
+{
+	int prec;
+	struct pktq *q;
+	uint count = 0;
+
+	q = dhd_bus_txq(pub->bus);
+	if (q == NULL)
+		return;
+
+	DHD_BTA(("dhd: flushing HCI ACL data for logical link %u...\n", llh));
+
+	dhd_os_sdlock_txq(pub);
+
+	/* Walk through the txq and toss all HCI ACL data packets */
+	PKTQ_PREC_ITER(q, prec) {
+		void *head_pkt = NULL;
+
+		while (pktq_ppeek(q, prec) != head_pkt) {
+			void *pkt = pktq_pdeq(q, prec);
+			int ifidx;
+
+			dhd_prot_hdrpull(pub, &ifidx, pkt, NULL, NULL);
+
+			if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) {
+				struct ether_header *eh =
+				        (struct ether_header *)PKTDATA(pub->osh, pkt);
+
+				if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
+					struct dot11_llc_snap_header *lsh =
+					        (struct dot11_llc_snap_header *)&eh[1];
+
+					if (bcmp(lsh, BT_SIG_SNAP_MPROT,
+					         DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+					    ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+						amp_hci_ACL_data_t *ACL_data =
+						        (amp_hci_ACL_data_t *)&lsh[1];
+						uint16 handle = ltoh16(ACL_data->handle);
+
+						if (HCI_ACL_DATA_HANDLE(handle) == llh) {
+							PKTFREE(pub->osh, pkt, TRUE);
+							count ++;
+							continue;
+						}
+					}
+				}
+			}
+
+			dhd_prot_hdrpush(pub, ifidx, pkt);
+
+			if (head_pkt == NULL)
+				head_pkt = pkt;
+			pktq_penq(q, prec, pkt);
+		}
+	}
+
+	dhd_os_sdunlock_txq(pub);
+
+	DHD_BTA(("dhd: flushed %u packet(s) for logical link %u...\n", count, llh));
+}
+
+/* Handle HCI cmd locally.
+ * Return 0: continue to send the cmd across SDIO
+ *        < 0: stop, fail
+ *        > 0: stop, success
+ */
+static int
+_dhd_bta_docmd(dhd_pub_t *pub, amp_hci_cmd_t *cmd)
+{
+	int status = 0;
+
+	switch (ltoh16_ua((uint8 *)&cmd->opcode)) {
+	case HCI_Enhanced_Flush: {
+		eflush_cmd_parms_t *cmdparms = (eflush_cmd_parms_t *)cmd->parms;
+		dhd_bta_flush_hcidata(pub, ltoh16_ua(cmdparms->llh));
+		break;
+	}
+	default:
+		break;
+	}
+
+	return status;
+}
+
+/* Send HCI cmd encapsulated in BT-SIG frame via data channel to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+	amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+	struct ether_header *eh;
+	struct dot11_llc_snap_header *lsh;
+	osl_t *osh = pub->osh;
+	uint len;
+	void *p;
+	int status;
+
+	if (cmd_len < HCI_CMD_PREAMBLE_SIZE) {
+		DHD_ERROR(("dhd_bta_docmd: short command, cmd_len %u\n", cmd_len));
+		return BCME_BADLEN;
+	}
+
+	if ((len = (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE) > cmd_len) {
+		DHD_ERROR(("dhd_bta_docmd: malformed command, len %u cmd_len %u\n",
+		           len, cmd_len));
+		/* return BCME_BADLEN; */
+	}
+
+	p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+	if (p == NULL) {
+		DHD_ERROR(("dhd_bta_docmd: out of memory\n"));
+		return BCME_NOMEM;
+	}
+
+
+	/* intercept and handle the HCI cmd locally */
+	if ((status = _dhd_bta_docmd(pub, cmd)) != 0) {
+		/* cmd was consumed (or rejected) locally; free the unused packet */
+		PKTFREE(osh, p, TRUE);
+		return (status > 0) ? 0 : status;
+	}
+
+	/* copy in HCI cmd */
+	PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+	bcopy(cmd, PKTDATA(osh, p), len);
+
+	/* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+	PKTPUSH(osh, p, RFC1042_HDR_LEN);
+	eh = (struct ether_header *)PKTDATA(osh, p);
+	bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+	ETHER_SET_LOCALADDR(eh->ether_dhost);
+	bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+	eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+	lsh = (struct dot11_llc_snap_header *)&eh[1];
+	bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+	lsh->type = 0;
+
+	return dhd_sendpkt(pub, 0, p);
+}
+#endif /* !SEND_HCI_CMD_VIA_IOCTL */
+
+/* Send HCI ACL data to dongle via data channel */
+int
+dhd_bta_tx_hcidata(dhd_pub_t *pub, void *data_buf, uint data_len)
+{
+	amp_hci_ACL_data_t *data = (amp_hci_ACL_data_t *)data_buf;
+	struct ether_header *eh;
+	struct dot11_llc_snap_header *lsh;
+	osl_t *osh = pub->osh;
+	uint len;
+	void *p;
+
+	if (data_len < HCI_ACL_DATA_PREAMBLE_SIZE) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: short data_buf, data_len %u\n", data_len));
+		return BCME_BADLEN;
+	}
+
+	if ((len = (uint)ltoh16(data->dlen) + HCI_ACL_DATA_PREAMBLE_SIZE) > data_len) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: malformed hci data, len %u data_len %u\n",
+		           len, data_len));
+		/* return BCME_BADLEN; */
+	}
+
+	p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+	if (p == NULL) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: out of memory\n"));
+		return BCME_NOMEM;
+	}
+
+
+	/* copy in HCI ACL data header and HCI ACL data */
+	PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+	bcopy(data, PKTDATA(osh, p), len);
+
+	/* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+	PKTPUSH(osh, p, RFC1042_HDR_LEN);
+	eh = (struct ether_header *)PKTDATA(osh, p);
+	bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+	bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+	eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+	lsh = (struct dot11_llc_snap_header *)&eh[1];
+	bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+	lsh->type = HTON16(BTA_PROT_L2CAP);
+
+	return dhd_sendpkt(pub, 0, p);
+}
+
+/* txcomplete callback */
+void
+dhd_bta_tx_hcidata_complete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, txp);
+	amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)(pktdata + RFC1042_HDR_LEN);
+	uint16 handle = ltoh16(ACL_data->handle);
+	uint16 llh = HCI_ACL_DATA_HANDLE(handle);
+
+	wl_event_msg_t event;
+	uint8 data[HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t)];
+	amp_hci_event_t *evt;
+	num_completed_data_blocks_evt_parms_t *parms;
+
+	uint16 len = HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t);
+
+	/* update the event struct */
+	memset(&event, 0, sizeof(event));
+	event.version = hton16(BCM_EVENT_MSG_VERSION);
+	event.event_type = hton32(WLC_E_BTA_HCI_EVENT);
+	event.status = 0;
+	event.reason = 0;
+	event.auth_type = 0;
+	event.datalen = hton32(len);
+	event.flags = 0;
+
+	/* generate Number of Completed Blocks event */
+	evt = (amp_hci_event_t *)data;
+	evt->ecode = HCI_Number_of_Completed_Data_Blocks;
+	evt->plen = sizeof(num_completed_data_blocks_evt_parms_t);
+
+	parms = (num_completed_data_blocks_evt_parms_t *)evt->parms;
+	htol16_ua_store(dhdp->maxdatablks, (uint8 *)&parms->num_blocks);
+	parms->num_handles = 1;
+	htol16_ua_store(llh, (uint8 *)&parms->completed[0].handle);
+	parms->completed[0].pkts = 1;
+	parms->completed[0].blocks = 1;
+
+	dhd_sendup_event_common(dhdp, &event, data);
+}
+
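+/* Flow-control note: for every completed ACL transmit, the callback above
+ * synthesizes an HCI Number_of_Completed_Data_Blocks event (one packet, one
+ * block) and sends it up as a WLC_E_BTA_HCI_EVENT; the num_blocks value comes
+ * from dhdp->maxdatablks, which dhd_bta_doevt() below caches from the
+ * Read_Data_Block_Size command-complete event.
+ */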
+/* event callback */
+void
+dhd_bta_doevt(dhd_pub_t *dhdp, void *data_buf, uint data_len)
+{
+	amp_hci_event_t *evt = (amp_hci_event_t *)data_buf;
+
+	ASSERT(dhdp);
+	ASSERT(evt);
+
+	switch (evt->ecode) {
+	case HCI_Command_Complete: {
+		cmd_complete_parms_t *parms = (cmd_complete_parms_t *)evt->parms;
+		switch (ltoh16_ua((uint8 *)&parms->opcode)) {
+		case HCI_Read_Data_Block_Size: {
+			read_data_block_size_evt_parms_t *parms2 =
+			        (read_data_block_size_evt_parms_t *)parms->parms;
+			dhdp->maxdatablks = ltoh16_ua((uint8 *)&parms2->data_block_num);
+			break;
+		}
+		}
+		break;
+	}
+
+	case HCI_Flush_Occurred: {
+		flush_occurred_evt_parms_t *evt_parms = (flush_occurred_evt_parms_t *)evt->parms;
+		dhd_bta_flush_hcidata(dhdp, ltoh16_ua((uint8 *)&evt_parms->handle));
+		break;
+	}
+	default:
+		break;
+	}
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.h b/drivers/net/wireless/bcmdhd/dhd_bta.h
new file mode 100644
index 0000000000000000000000000000000000000000..4067fc39b2985a1b973913d75b46fbec9390cb24
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.h
@@ -0,0 +1,21 @@
+/*
+ * BT-AMP support routines
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_bta.h 291086 2011-10-21 01:17:24Z $
+ */
+#ifndef __dhd_bta_h__
+#define __dhd_bta_h__
+
+struct dhd_pub;
+
+extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len);
+
+extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len);
+
+extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len);
+extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success);
+
+
+#endif /* __dhd_bta_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bus.h b/drivers/net/wireless/bcmdhd/dhd_bus.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b44a01a56e08f5b77c0220adfb0f5c7cb78accb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bus.h
@@ -0,0 +1,172 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_bus.h 491657 2014-07-17 06:29:40Z $
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* Indicate (dis)interest in finding dongles. */
+extern int dhd_bus_register(void);
+extern void dhd_bus_unregister(void);
+
+/* Download firmware image and nvram image */
+extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, char *fw_path, char *nv_path);
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+
+/* Get the Bus Idle Time */
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime);
+
+/* Set the Bus Idle Time */
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+
+/* Send a data frame to the dongle.  Callee disposes of txp. */
+#ifdef BCMPCIE
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx);
+#else
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+#endif
+
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+
+/* Watchdog timer function */
+extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
+
+extern int dhd_bus_oob_intr_register(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable);
+extern void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub);
+extern void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub);
+extern bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub);
+
+#if defined(DHD_DEBUG)
+/* Device console input function */
+extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
+#endif /* defined(DHD_DEBUG) */
+
+/* Deferred processing for the bus; return TRUE to request a reschedule */
+extern bool dhd_bus_dpc(struct dhd_bus *bus);
+extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
+
+/* Check for and handle local bus-specific iovar commands */
+extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                            void *params, int plen, void *arg, int len, bool set);
+
+/* Add bus dump output to a buffer */
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Clear any bus counters */
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+
+/* return the dongle chipid */
+extern uint dhd_bus_chip(struct dhd_bus *bus);
+
+/* return the dongle chiprev */
+extern uint dhd_bus_chiprev(struct dhd_bus *bus);
+
+/* Set user-specified nvram parameters. */
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern void *dhd_bus_sih(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+#ifdef BCMSDIO
+extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val);
+#else
+#define dhd_bus_set_dotxinrx(a, b) do {} while (0)
+#endif
+
+#define DHD_SET_BUS_STATE_DOWN(_bus)  do { \
+	(_bus)->dhd->busstate = DHD_BUS_DOWN; \
+} while (0)
+
+/* Register a dummy SDIO client driver in order to be notified of a new SDIO device */
+extern int dhd_bus_reg_sdio_notify(void* semaphore);
+extern void dhd_bus_unreg_sdio_notify(void);
+extern void dhd_txglom_enable(dhd_pub_t *dhdp, bool enable);
+extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num,
+	uint32 *slot_num);
+
+#ifdef BCMPCIE
+enum {
+	DNGL_TO_HOST_BUF_IOCT,
+	DNGL_TO_HOST_DMA_SCRATCH_BUFFER,
+	DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN,
+	HOST_TO_DNGL_DMA_WRITEINDX_BUFFER,
+	HOST_TO_DNGL_DMA_READINDX_BUFFER,
+	DNGL_TO_HOST_DMA_WRITEINDX_BUFFER,
+	DNGL_TO_HOST_DMA_READINDX_BUFFER,
+	TOTAL_LFRAG_PACKET_CNT,
+	HTOD_MB_DATA,
+	DTOH_MB_DATA,
+	RING_BUF_ADDR,
+	H2D_DMA_WRITEINDX,
+	H2D_DMA_READINDX,
+	D2H_DMA_WRITEINDX,
+	D2H_DMA_READINDX,
+	RING_READ_PTR,
+	RING_WRITE_PTR,
+	RING_LEN_ITEMS,
+	RING_MAX_ITEM,
+	MAX_HOST_RXBUFS
+};
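+/* The identifiers above select which host/dongle shared structure the
+ * dhd_bus_cmn_writeshared()/dhd_bus_cmn_readshared() accessors below operate
+ * on (ring state, DMA index buffers, mailbox data and so on).
+ */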
+typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32);
+extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type,
+	uint16 ringid);
+extern void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value);
+extern void dhd_bus_cmn_readshared(struct dhd_bus *bus, void* data, uint8 type, uint16 ringid);
+extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus);
+extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count);
+extern void dhd_bus_start_queue(struct dhd_bus *bus);
+extern void dhd_bus_stop_queue(struct dhd_bus *bus);
+extern void dhd_bus_update_retlen(struct dhd_bus *bus, uint32 retlen, uint32 cmd_id, uint16 status,
+	uint32 resp_len);
+extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus);
+extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus,
+	void * data, uint16 flowid);
+extern void dhd_bus_read_flow_ring_states(struct dhd_bus *bus,
+	void * data, uint8 flowid);
+extern int dhd_bus_flow_ring_create_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_clean_flow_ring(struct dhd_bus *bus, uint16 flowid);
+extern void dhd_bus_flow_ring_create_response(struct dhd_bus *bus, uint16 flow_id, int32 status);
+extern int dhd_bus_flow_ring_delete_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern uint8 dhd_bus_is_txmode_push(struct dhd_bus *bus);
+extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush);
+extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs);
+extern int dhdpcie_bus_clock_start(struct dhd_bus *bus);
+extern int dhdpcie_bus_clock_stop(struct dhd_bus *bus);
+extern int dhdpcie_bus_enable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_disable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_alloc_resource(struct dhd_bus *bus);
+extern void dhdpcie_bus_free_resource(struct dhd_bus *bus);
+extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus);
+extern int dhd_bus_release_dongle(struct dhd_bus *bus);
+
+
+#endif /* BCMPCIE */
+#endif /* _dhd_bus_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/net/wireless/bcmdhd/dhd_cdc.c
new file mode 100644
index 0000000000000000000000000000000000000000..f67bbb378e65f2b031eb7b91cb95eb67b6b226d4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cdc.c
@@ -0,0 +1,800 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_cdc.c 472193 2014-04-23 06:27:38Z $
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload).
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+
+#define RETRIES 2		/* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN	(24+DHD_SDALIGN)	/* Must be at least SDPCM_RESERVE
+				 * defined in dhd_sdio.c (amount of header that might be added)
+				 * plus any space that might be needed for alignment padding.
+				 */
+#define ROUND_UP_MARGIN	2048	/* Biggest SDIO block size possible for
+				 * round off at the end of buffer
+				 */
+
+typedef struct dhd_prot {
+	uint16 reqid;
+	uint8 pending;
+	uint32 lastcmd;
+	uint8 bus_header[BUS_HEADER_LEN];
+	cdc_ioctl_t msg;
+	unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+} dhd_prot_t;
+
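+/* Layout note: a CDC ioctl response is read back with a single
+ * dhd_bus_rxctl() into &prot->msg, so "buf" must immediately follow the
+ * cdc_ioctl_t header with no padding in between; dhd_prot_attach() verifies
+ * exactly that before accepting the allocation.
+ */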
+
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+	int err = 0;
+	dhd_prot_t *prot = dhd->prot;
+	int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_OS_WAKE_LOCK(dhd);
+
+	/* NOTE : cdc->msg.len holds the desired length of the buffer to be
+	 *        returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+	 *	  is actually sent to the dongle
+	 */
+	if (len > CDC_MAX_MSG_SIZE)
+		len = CDC_MAX_MSG_SIZE;
+
+	/* Send request */
+	err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+
+	DHD_OS_WAKE_UNLOCK(dhd);
+	return err;
+}
+
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+	int ret;
+	int cdc_len = len + sizeof(cdc_ioctl_t);
+	dhd_prot_t *prot = dhd->prot;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+	do {
+		ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+		if (ret < 0)
+			break;
+	} while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+
+	return ret;
+}
+
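+/* Request/response matching sketch: each request carries prot->reqid in the
+ * CDCF_IOC_ID bits of msg.flags, and the ioctl paths below recover it from
+ * the completion the same way, e.g.
+ *
+ *	flags = ltoh32(msg->flags);
+ *	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+ *
+ * dhdcdc_cmplt() keeps re-reading until CDC_IOC_ID() matches the expected id,
+ * which drops any stale response left over from an earlier request.
+ */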
+static int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	int ret = 0, retries = 0;
+	uint32 id, flags = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+
+	/* Respond "bcmerror" and "bcmerrorstr" with local cache */
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!strcmp((char *)buf, "bcmerrorstr"))
+		{
+			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+			goto done;
+		}
+		else if (!strcmp((char *)buf, "bcmerror"))
+		{
+			*(int *)buf = dhd->dongle_error;
+			goto done;
+		}
+	}
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT);
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		if (!dhd->hang_was_sent)
+			DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+		goto done;
+	}
+
+retry:
+	/* wait for interrupt and get first fragment */
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if ((id < prot->reqid) && (++retries < RETRIES))
+		goto retry;
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Copy info buffer */
+	if (buf)
+	{
+		if (ret < (int)len)
+			len = ret;
+		memcpy(buf, (void*) prot->buf, len);
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+
+static int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	int ret = 0;
+	uint32 flags, id;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return -EIO;
+	}
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (dhd->hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET;
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+
+int
+dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = -1;
+	uint8 action;
+
+	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+	if (len > WLC_IOCTL_MAXLEN)
+		goto done;
+
+	if (prot->pending == TRUE) {
+		DHD_ERROR(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+			ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+			(unsigned long)prot->lastcmd));
+		if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+			DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+		}
+		goto done;
+	}
+
+	prot->pending = TRUE;
+	prot->lastcmd = ioc->cmd;
+	action = ioc->set;
+	if (action & WL_IOCTL_ACTION_SET)
+		ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+	else {
+		ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+		if (ret > 0)
+			ioc->used = ret - sizeof(cdc_ioctl_t);
+	}
+
+	/* Too many programs assume ioctl() returns 0 on success */
+	if (ret >= 0)
+		ret = 0;
+	else {
+		cdc_ioctl_t *msg = &prot->msg;
+		ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */
+	}
+
+	/* Intercept the wme_dp ioctl here */
+	if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+		int slen, val = 0;
+
+		slen = strlen("wme_dp") + 1;
+		if (len >= (int)(slen + sizeof(int)))
+			bcopy(((char *)buf + slen), &val, sizeof(int));
+		dhd->wme_dp = (uint8) ltoh32(val);
+	}
+
+	prot->pending = FALSE;
+
+done:
+
+	return ret;
+}
+
+int
+dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                  void *params, int plen, void *arg, int len, bool set)
+{
+	return BCME_UNSUPPORTED;
+}
+
+void
+dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_dump(dhdp, strbuf);
+#endif
+}
+
+/* The FreeBSD PKTPUSH could change the packet buf pointer,
+ * so we need to make it changeable.
+ */
+#define PKTBUF pktbuf
+void
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif /* BDC */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	/* Push BDC header, used to convey packet priority on buses that don't carry it natively */
+
+	PKTPUSH(dhd->osh, PKTBUF, BDC_HEADER_LEN);
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, PKTBUF);
+
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(PKTBUF))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(PKTBUF) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->dataOffset = 0;
+	BDC_SET_IF_IDX(h, ifidx);
+#endif /* BDC */
+}
+#undef PKTBUF	/* Only defined in the above routine */
+
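+/* BDC header sketch: the BDC_HEADER_LEN bytes pushed above carry the protocol
+ * version and checksum-offload flag in "flags", the packet priority in
+ * "priority", the interface index via BDC_SET_IF_IDX()/BDC_GET_IF_IDX(), and
+ * an extra data offset expressed in 4-byte words (hence the
+ * "data_offset << 2" pulls in dhd_prot_hdrpull() below).
+ */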
+uint
+dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
+{
+	uint hdrlen = 0;
+#ifdef BDC
+	/* Length of BDC(+WLFC) headers pushed */
+	hdrlen = BDC_HEADER_LEN + (((struct bdc_header *)PKTBUF)->dataOffset * 4);
+#endif
+	return hdrlen;
+}
+
+int
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info,
+	uint *reorder_info_len)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif
+	uint8 data_offset = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	if (reorder_info_len)
+		*reorder_info_len = 0;
+	/* Pop the BDC header used to convey packet priority on buses that don't carry it natively */
+
+	if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+	if (!ifidx) {
+		/* for tx packet, skip the analysis */
+		data_offset = h->dataOffset;
+		PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+		goto exit;
+	}
+
+	if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: rx data ifnum out of range (%d)\n",
+		           __FUNCTION__, *ifidx));
+		return BCME_ERROR;
+	}
+
+	if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
+		DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n",
+		           dhd_ifname(dhd, *ifidx), h->flags));
+		if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1)
+			h->dataOffset = 0;
+		else
+			return BCME_ERROR;
+	}
+
+	if (h->flags & BDC_FLAG_SUM_GOOD) {
+		DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n",
+		          dhd_ifname(dhd, *ifidx), h->flags));
+		PKTSETSUMGOOD(pktbuf, TRUE);
+	}
+
+	PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+	data_offset = h->dataOffset;
+	PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+#endif /* BDC */
+
+#if defined(NDISVER) && (NDISVER < 0x0630)
+	if (PKTLEN(dhd->osh, pktbuf) < (uint32) (data_offset << 2)) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(dhd->osh, pktbuf), (data_offset * 4)));
+		return BCME_ERROR;
+	}
+#endif /* (NDISVER < 0x0630) */
+
+#ifdef PROP_TXSTATUS
+	if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) {
+		/* parse txstatus only for packets that came from the firmware */
+		dhd_wlfc_parse_header_info(dhd, pktbuf, (data_offset << 2),
+			reorder_buf_info, reorder_info_len);
+
+	}
+#endif /* PROP_TXSTATUS */
+
+exit:
+	PKTPULL(dhd->osh, pktbuf, (data_offset << 2));
+	return 0;
+}
+
+
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+	dhd_prot_t *cdc;
+
+	if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(cdc, 0, sizeof(dhd_prot_t));
+
+	/* ensure that the msg buf directly follows the cdc msg struct */
+	if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) {
+		DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+		goto fail;
+	}
+
+	dhd->prot = cdc;
+#ifdef BDC
+	dhd->hdrlen += BDC_HEADER_LEN;
+#endif
+	dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+	return 0;
+
+fail:
+	if (cdc != NULL)
+		DHD_OS_PREFREE(dhd, cdc, sizeof(dhd_prot_t));
+	return BCME_NOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore?  Holding it? */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_deinit(dhd);
+#endif
+	DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
+	dhd->prot = NULL;
+}
+
+void
+dhd_prot_dstats(dhd_pub_t *dhd)
+{
+	/*  copy bus stats */
+
+	dhd->dstats.tx_packets = dhd->tx_packets;
+	dhd->dstats.tx_errors = dhd->tx_errors;
+	dhd->dstats.rx_packets = dhd->rx_packets;
+	dhd->dstats.rx_errors = dhd->rx_errors;
+	dhd->dstats.rx_dropped = dhd->rx_dropped;
+	dhd->dstats.multicast = dhd->rx_multicast;
+	return;
+}
+
+int
+dhd_sync_with_dongle(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	wlc_rev_info_t revinfo;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+	if (ret < 0)
+		goto done;
+
+
+	dhd_process_cid_mac(dhd, TRUE);
+
+	ret = dhd_preinit_ioctls(dhd);
+
+	if (!ret)
+		dhd_process_cid_mac(dhd, FALSE);
+
+	/* Always assumes wl for now */
+	dhd->iswl = TRUE;
+
+done:
+	return ret;
+}
+
+int dhd_prot_init(dhd_pub_t *dhd)
+{
+	return TRUE;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+/* Nothing to do for CDC */
+}
+
+
+static void
+dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt,
+	uint32 *pkt_count, void **pplast, uint8 start, uint8 end)
+{
+	void *plast = NULL, *p;
+	uint32 pkt_cnt = 0;
+
+	if (ptr->pend_pkts == 0) {
+		DHD_REORDER(("%s: no packets in reorder queue \n", __FUNCTION__));
+		*pplast = NULL;
+		*pkt_count = 0;
+		*pkt = NULL;
+		return;
+	}
+	do {
+		p = (void *)(ptr->p[start]);
+		ptr->p[start] = NULL;
+
+		if (p != NULL) {
+			if (plast == NULL)
+				*pkt = p;
+			else
+				PKTSETNEXT(osh, plast, p);
+
+			plast = p;
+			pkt_cnt++;
+		}
+		start++;
+		if (start > ptr->max_idx)
+			start = 0;
+	} while (start != end);
+	*pplast = plast;
+	*pkt_count = pkt_cnt;
+	ptr->pend_pkts -= (uint8)pkt_cnt;
+}
+
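+/* Reorder ring sketch: ptr->p[] is a circular array of (max_idx + 1) slots
+ * indexed by the dongle-supplied sequence index.  The walk above collects
+ * packets from "start" up to (but excluding) "end", wrapping past max_idx and
+ * chaining them with PKTSETNEXT(); e.g. with max_idx = 7, start = 6 and
+ * end = 2 it visits slots 6, 7, 0 and 1.
+ */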
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+	void **pkt, uint32 *pkt_count)
+{
+	uint8 flow_id, max_idx, cur_idx, exp_idx;
+	struct reorder_info *ptr;
+	uint8 flags;
+	void *cur_pkt, *plast = NULL;
+	uint32 cnt = 0;
+
+	if (pkt == NULL) {
+		if (pkt_count != NULL)
+			*pkt_count = 0;
+		return 0;
+	}
+
+	flow_id = reorder_info_buf[WLHOST_REORDERDATA_FLOWID_OFFSET];
+	flags = reorder_info_buf[WLHOST_REORDERDATA_FLAGS_OFFSET];
+
+	DHD_REORDER(("flow_id %d, flags 0x%02x, idx(%d, %d, %d)\n", flow_id, flags,
+		reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET],
+		reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET],
+		reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]));
+
+	/* validate flags and flow id */
+	if (flags == 0xFF) {
+		DHD_ERROR(("%s: invalid flags...so ignore this packet\n", __FUNCTION__));
+		*pkt_count = 1;
+		return 0;
+	}
+
+	cur_pkt = *pkt;
+	*pkt = NULL;
+
+	ptr = dhd->reorder_bufs[flow_id];
+	if (flags & WLHOST_REORDERDATA_DEL_FLOW) {
+		uint32 buf_size = sizeof(struct reorder_info);
+
+		DHD_REORDER(("%s: Flags indicating to delete a flow id %d\n",
+			__FUNCTION__, flow_id));
+
+		if (ptr == NULL) {
+			DHD_REORDER(("%s: received flags to cleanup, but no flow (%d) yet\n",
+				__FUNCTION__, flow_id));
+			*pkt_count = 1;
+			*pkt = cur_pkt;
+			return 0;
+		}
+
+		dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+			ptr->exp_idx, ptr->exp_idx);
+		/* set it to the last packet */
+		if (plast) {
+			PKTSETNEXT(dhd->osh, plast, cur_pkt);
+			cnt++;
+		}
+		else {
+			if (cnt != 0) {
+				DHD_ERROR(("%s: del flow: something fishy, pending packets %d\n",
+					__FUNCTION__, cnt));
+			}
+			*pkt = cur_pkt;
+			cnt = 1;
+		}
+		buf_size += ((ptr->max_idx + 1) * sizeof(void *));
+		MFREE(dhd->osh, ptr, buf_size);
+		dhd->reorder_bufs[flow_id] = NULL;
+		*pkt_count = cnt;
+		return 0;
+	}
+	/* all the other cases depend on the existence of the reorder struct for that flow id */
+	if (ptr == NULL) {
+		uint32 buf_size_alloc = sizeof(reorder_info_t);
+		max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+
+		buf_size_alloc += ((max_idx + 1) * sizeof(void*));
+		/* allocate space to hold the buffers, index etc */
+
+		DHD_REORDER(("%s: alloc buffer of size %d, reorder info id %d, maxidx %d\n",
+			__FUNCTION__, buf_size_alloc, flow_id, max_idx));
+		ptr = (struct reorder_info *)MALLOC(dhd->osh, buf_size_alloc);
+		if (ptr == NULL) {
+			DHD_ERROR(("%s: Malloc failed to alloc buffer\n", __FUNCTION__));
+			*pkt_count = 1;
+			return 0;
+		}
+		bzero(ptr, buf_size_alloc);
+		dhd->reorder_bufs[flow_id] = ptr;
+		ptr->p = (void *)(ptr+1);
+		ptr->max_idx = max_idx;
+	}
+	if (flags & WLHOST_REORDERDATA_NEW_HOLE)  {
+		DHD_REORDER(("%s: new hole, so cleanup pending buffers\n", __FUNCTION__));
+		if (ptr->pend_pkts) {
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				ptr->exp_idx, ptr->exp_idx);
+			ptr->pend_pkts = 0;
+		}
+		ptr->cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+		ptr->exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+		ptr->max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+		ptr->p[ptr->cur_idx] = cur_pkt;
+		ptr->pend_pkts++;
+		*pkt_count = cnt;
+	}
+	else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) {
+		cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+		exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+
+		if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) {
+			/* still in the current hole */
+			/* enqueue the current on the buffer chain */
+			if (ptr->p[cur_idx] != NULL) {
+				DHD_REORDER(("%s: HOLE: ERROR buffer pending..free it\n",
+					__FUNCTION__));
+				PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+				ptr->p[cur_idx] = NULL;
+			}
+			ptr->p[cur_idx] = cur_pkt;
+			ptr->pend_pkts++;
+			ptr->cur_idx = cur_idx;
+			DHD_REORDER(("%s: fill up a hole..pending packets is %d\n",
+				__FUNCTION__, ptr->pend_pkts));
+			*pkt_count = 0;
+			*pkt = NULL;
+		}
+		else if (ptr->exp_idx == cur_idx) {
+			/* got the right one ..flush from cur to exp and update exp */
+			DHD_REORDER(("%s: got the right one now, cur_idx is %d\n",
+				__FUNCTION__, cur_idx));
+			if (ptr->p[cur_idx] != NULL) {
+				DHD_REORDER(("%s: Error buffer pending..free it\n",
+					__FUNCTION__));
+				PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+				ptr->p[cur_idx] = NULL;
+			}
+			ptr->p[cur_idx] = cur_pkt;
+			ptr->pend_pkts++;
+
+			ptr->cur_idx = cur_idx;
+			ptr->exp_idx = exp_idx;
+
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				cur_idx, exp_idx);
+			*pkt_count = cnt;
+			DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n",
+				__FUNCTION__, cnt, ptr->pend_pkts));
+		}
+		else {
+			uint8 end_idx;
+			bool flush_current = FALSE;
+			/* both cur and exp are moved now .. */
+			DHD_REORDER(("%s:, flow %d, both moved, cur %d(%d), exp %d(%d)\n",
+				__FUNCTION__, flow_id, ptr->cur_idx, cur_idx,
+				ptr->exp_idx, exp_idx));
+			if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+				end_idx = ptr->exp_idx;
+			else
+				end_idx = exp_idx;
+
+			/* flush pkts first */
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				ptr->exp_idx, end_idx);
+
+			if (cur_idx == ptr->max_idx) {
+				if (exp_idx == 0)
+					flush_current = TRUE;
+			} else {
+				if (exp_idx == cur_idx + 1)
+					flush_current = TRUE;
+			}
+			if (flush_current) {
+				if (plast)
+					PKTSETNEXT(dhd->osh, plast, cur_pkt);
+				else
+					*pkt = cur_pkt;
+				cnt++;
+			}
+			else {
+				ptr->p[cur_idx] = cur_pkt;
+				ptr->pend_pkts++;
+			}
+			ptr->exp_idx = exp_idx;
+			ptr->cur_idx = cur_idx;
+			*pkt_count = cnt;
+		}
+	}
+	else {
+		uint8 end_idx;
+		/* no real packet but update to exp_seq...that means explicit window move */
+		exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+		DHD_REORDER(("%s: move the window, cur_idx is %d, exp is %d, new exp is %d\n",
+			__FUNCTION__, ptr->cur_idx, ptr->exp_idx, exp_idx));
+		if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+			end_idx =  ptr->exp_idx;
+		else
+			end_idx =  exp_idx;
+
+		dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx);
+		if (plast)
+			PKTSETNEXT(dhd->osh, plast, cur_pkt);
+		else
+			*pkt = cur_pkt;
+		cnt++;
+		*pkt_count = cnt;
+		/* set the new expected idx */
+		ptr->exp_idx = exp_idx;
+	}
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
new file mode 100644
index 0000000000000000000000000000000000000000..e5c28823305e2a1cb3492f34c5576d1abcae4ea9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
@@ -0,0 +1,201 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+#include <linux/vmalloc.h>
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+
+#ifdef PKT_FILTER_SUPPORT
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif
+
+extern struct bcm_cfg80211 *g_bcm_cfg;
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+static int dhd_dongle_up = FALSE;
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <brcm_nl80211.h>
+#include <dhd_cfg80211.h>
+
+static s32 wl_dongle_up(struct net_device *ndev);
+static s32 wl_dongle_down(struct net_device *ndev);
+
+/**
+ * Function implementations
+ */
+
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg)
+{
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg)
+{
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	if (!dhd_dongle_up) {
+		WL_ERR(("Dongle is already down\n"));
+		return err;
+	}
+
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+	wl_dongle_down(ndev);
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val)
+{
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	dhd->op_mode |= val;
+	WL_ERR(("Set : op_mode=0x%04x\n", dhd->op_mode));
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->arp_version == 1) {
+		/* IF P2P is enabled, disable arpoe */
+		dhd_arp_offload_set(dhd, 0);
+		dhd_arp_offload_enable(dhd, false);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	return 0;
+}
+
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg)
+{
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	dhd->op_mode &= ~(DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE);
+	WL_ERR(("Clean : op_mode=0x%04x\n", dhd->op_mode));
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->arp_version == 1) {
+		/* IF P2P is disabled, enable arpoe back for STA mode. */
+		dhd_arp_offload_set(dhd, dhd_arp_mode);
+		dhd_arp_offload_enable(dhd, true);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	return 0;
+}
+
+struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx)
+{
+	return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE);
+}
+
+int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev)
+{
+	return dhd_register_if(cfg->pub, ifidx, FALSE);
+}
+
+int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev)
+{
+	return dhd_remove_if(cfg->pub, ifidx, FALSE);
+}
+
+struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev)
+{
+	if (ndev) {
+		if (ndev->ieee80211_ptr) {
+			kfree(ndev->ieee80211_ptr);
+			ndev->ieee80211_ptr = NULL;
+		}
+		free_netdev(ndev);
+		return NULL;
+	}
+
+	return ndev;
+}
+
+void dhd_netdev_free(struct net_device *ndev)
+{
+#ifdef WL_CFG80211
+	ndev = dhd_cfg80211_netdev_free(ndev);
+#endif
+	if (ndev)
+		free_netdev(ndev);
+}
+
+static s32
+wl_dongle_up(struct net_device *ndev)
+{
+	s32 err = 0;
+	u32 up = 0;
+
+	err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_UP error (%d)\n", err));
+	}
+	return err;
+}
+
+static s32
+wl_dongle_down(struct net_device *ndev)
+{
+	s32 err = 0;
+	u32 down = 0;
+
+	err = wldev_ioctl(ndev, WLC_DOWN, &down, sizeof(down), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_DOWN error (%d)\n", err));
+	}
+	return err;
+}
+
+
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg)
+{
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+	struct net_device *ndev;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	if (dhd_dongle_up) {
+		WL_ERR(("Dongle is already up\n"));
+		return err;
+	}
+
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	err = wl_dongle_up(ndev);
+	if (unlikely(err)) {
+		WL_ERR(("wl_dongle_up failed\n"));
+		goto default_conf_out;
+	}
+	dhd_dongle_up = true;
+
+default_conf_out:
+
+	return err;
+
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.h b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e1999d98094617676c8a247c5249af19596d19c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
@@ -0,0 +1,30 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+
+#ifndef __DHD_CFG80211__
+#define __DHD_CFG80211__
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+
+#ifndef WL_ERR
+#define WL_ERR CFG80211_ERR
+#endif
+#ifndef WL_TRACE
+#define WL_TRACE CFG80211_TRACE
+#endif
+
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val);
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg);
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg);
+
+#endif /* __DHD_CFG80211__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c b/drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c
new file mode 100644
index 0000000000000000000000000000000000000000..7cba554f8a06f9b4e907d8eb77b2df40971d3f7a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c
@@ -0,0 +1,151 @@
+/*
+ * Linux cfg80211 vendor command/event handlers of DHD
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_cfg_vendor.c 487126 2014-06-24 23:06:12Z $
+ */
+
+#include <linuxver.h>
+#include <net/cfg80211.h>
+#include <net/netlink.h>
+
+#include <bcmutils.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgvendor.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <brcm_nl80211.h>
+
+#ifdef VENDOR_EXT_SUPPORT
+static int dhd_cfgvendor_priv_string_handler(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	const struct bcm_nlmsg_hdr *nlioc = data;
+	struct net_device *ndev = NULL;
+	struct bcm_cfg80211 *cfg;
+	struct sk_buff *reply;
+	void *buf = NULL, *cur;
+	dhd_pub_t *dhd;
+	dhd_ioctl_t ioc = { 0 };
+	int ret = 0, ret_len, payload, msglen;
+	int maxmsglen = PAGE_SIZE - 0x100;
+	int8 index;
+
+	WL_TRACE(("entry: cmd = %d\n", nlioc->cmd));
+	DHD_ERROR(("entry: cmd = %d\n", nlioc->cmd));
+
+	cfg = wiphy_priv(wiphy);
+	dhd = cfg->pub;
+
+	DHD_OS_WAKE_LOCK(dhd);
+
+	/* send to dongle only if we are not waiting for reload already */
+	if (dhd->hang_was_sent) {
+		WL_ERR(("HANG was sent up earlier\n"));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS);
+		DHD_OS_WAKE_UNLOCK(dhd);
+		return OSL_ERROR(BCME_DONGLE_DOWN);
+	}
+
+	len -= sizeof(struct bcm_nlmsg_hdr);
+	ret_len = nlioc->len;
+	if (ret_len > 0 || len > 0) {
+		if (len > DHD_IOCTL_MAXLEN) {
+			WL_ERR(("oversize input buffer %d\n", len));
+			len = DHD_IOCTL_MAXLEN;
+		}
+		if (ret_len > DHD_IOCTL_MAXLEN) {
+			WL_ERR(("oversize return buffer %d\n", ret_len));
+			ret_len = DHD_IOCTL_MAXLEN;
+		}
+		payload = max(ret_len, len) + 1;
+		buf = vzalloc(payload);
+		if (!buf) {
+			DHD_OS_WAKE_UNLOCK(dhd);
+			return -ENOMEM;
+		}
+		memcpy(buf, (void *)nlioc + nlioc->offset, len);
+		*(char *)(buf + len) = '\0';
+	}
+
+	ndev = wdev_to_wlc_ndev(wdev, cfg);
+	index = dhd_net2idx(dhd->info, ndev);
+	if (index == DHD_BAD_IF) {
+		WL_ERR(("Bad ifidx from wdev:%p\n", wdev));
+		ret = BCME_ERROR;
+		goto done;
+	}
+
+	ioc.cmd = nlioc->cmd;
+	ioc.len = nlioc->len;
+	ioc.set = nlioc->set;
+	ioc.driver = nlioc->magic;
+	ret = dhd_ioctl_process(dhd, index, &ioc, buf);
+	if (ret) {
+		WL_TRACE(("dhd_ioctl_process return err %d\n", ret));
+		ret = OSL_ERROR(ret);
+		goto done;
+	}
+
+	cur = buf;
+	while (ret_len > 0) {
+		msglen = ret_len > maxmsglen ? maxmsglen : ret_len;
+		ret_len -= msglen;
+		payload = msglen + sizeof(msglen);
+		reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
+		if (!reply) {
+			WL_ERR(("Failed to allocate reply msg\n"));
+			ret = -ENOMEM;
+			break;
+		}
+
+		if (nla_put(reply, BCM_NLATTR_DATA, msglen, cur) ||
+			nla_put_u16(reply, BCM_NLATTR_LEN, msglen)) {
+			kfree_skb(reply);
+			ret = -ENOBUFS;
+			break;
+		}
+
+		ret = cfg80211_vendor_cmd_reply(reply);
+		if (ret) {
+			WL_ERR(("testmode reply failed:%d\n", ret));
+			break;
+		}
+		cur += msglen;
+	}
+
+done:
+	vfree(buf);
+	DHD_OS_WAKE_UNLOCK(dhd);
+	return ret;
+}
+
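+/* Reply chunking note: the ioctl result is returned to user space as a series
+ * of vendor-command replies of at most maxmsglen bytes each, every chunk
+ * carrying its payload in BCM_NLATTR_DATA and its length in BCM_NLATTR_LEN,
+ * until ret_len is exhausted.
+ */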
+const struct wiphy_vendor_command dhd_cfgvendor_cmds [] = {
+	{
+		{
+			.vendor_id = OUI_BRCM,
+			.subcmd = BRCM_VENDOR_SCMD_PRIV_STR
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = dhd_cfgvendor_priv_string_handler
+	},
+};
+
+int cfgvendor_attach(struct wiphy *wiphy)
+{
+	wiphy->vendor_commands	= dhd_cfgvendor_cmds;
+	wiphy->n_vendor_commands = ARRAY_SIZE(dhd_cfgvendor_cmds);
+
+	return 0;
+}
+
+int cfgvendor_detach(struct wiphy *wiphy)
+{
+	wiphy->vendor_commands  = NULL;
+	wiphy->n_vendor_commands = 0;
+
+	return 0;
+}
+#endif /* VENDOR_EXT_SUPPORT */
diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c
new file mode 100644
index 0000000000000000000000000000000000000000..136ea975ffd5da73fd66d6115260ae56a05d0727
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_common.c
@@ -0,0 +1,2841 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_common.c 490628 2014-07-11 07:13:31Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+#include <dhd.h>
+#include <dhd_ip.h>
+#include <proto/bcmevent.h>
+
+#ifdef SHOW_LOGTRACE
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <msgtrace.h>
+
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef WLBTAMP
+#include <proto/bt_amp_hci.h>
+#include <dhd_bta.h>
+#endif
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#ifdef DHD_WMF
+#include <dhd_linux.h>
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
+
+
+#ifdef WLMEDIA_HTSF
+extern void htsf_update(struct dhd_info *dhd, void *data);
+#endif
+int dhd_msg_level = DHD_ERROR_VAL;
+
+
+#include <wl_iw.h>
+
+#ifdef SOFTAP
+char fw_path2[MOD_PARAM_PATHLEN];
+extern bool softap_enabled;
+#endif
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+#if defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE)
+static int check_event_log_sequence_number(uint32 seq_no);
+#endif /* defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE) */
+extern int dhd_iscan_request(void * dhdp, uint16 action);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_iscan_in_progress(void *h);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
+#if !defined(AP) && defined(WLP2P)
+extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
+#endif
+bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+
+/* Version string to report */
+#ifdef DHD_DEBUG
+#ifndef SRCBASE
+#define SRCBASE        "drivers/net/wireless/bcmdhd"
+#endif
+#define DHD_COMPILED "\nCompiled in " SRCBASE
+#endif /* DHD_DEBUG */
+
+#if defined(DHD_DEBUG)
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
+	DHD_COMPILED " on " __DATE__ " at " __TIME__;
+#else
+const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from ";
+#endif 
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+
+
+/* IOVar table */
+enum {
+	IOV_VERSION = 1,
+	IOV_MSGLEVEL,
+	IOV_BCMERRORSTR,
+	IOV_BCMERROR,
+	IOV_WDTICK,
+	IOV_DUMP,
+	IOV_CLEARCOUNTS,
+	IOV_LOGDUMP,
+	IOV_LOGCAL,
+	IOV_LOGSTAMP,
+	IOV_GPIOOB,
+	IOV_IOCTLTIMEOUT,
+#ifdef WLBTAMP
+	IOV_HCI_CMD,		/* HCI command */
+	IOV_HCI_ACL_DATA,	/* HCI data packet */
+#endif
+#if defined(DHD_DEBUG)
+	IOV_CONS,
+	IOV_DCONSOLE_POLL,
+#endif /* defined(DHD_DEBUG) */
+#ifdef PROP_TXSTATUS
+	IOV_PROPTXSTATUS_ENABLE,
+	IOV_PROPTXSTATUS_MODE,
+	IOV_PROPTXSTATUS_OPT,
+#ifdef QMONITOR
+	IOV_QMON_TIME_THRES,
+	IOV_QMON_TIME_PERCENT,
+#endif /* QMONITOR */
+	IOV_PROPTXSTATUS_MODULE_IGNORE,
+	IOV_PROPTXSTATUS_CREDIT_IGNORE,
+	IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
+	IOV_PROPTXSTATUS_RXPKT_CHK,
+#endif /* PROP_TXSTATUS */
+	IOV_BUS_TYPE,
+#ifdef WLMEDIA_HTSF
+	IOV_WLPKTDLYSTAT_SZ,
+#endif
+	IOV_CHANGEMTU,
+	IOV_HOSTREORDER_FLOWS,
+#ifdef DHDTCPACK_SUPPRESS
+	IOV_TCPACK_SUPPRESS,
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+	IOV_WMF_BSS_ENAB,
+	IOV_WMF_UCAST_IGMP,
+	IOV_WMF_MCAST_DATA_SENDUP,
+#ifdef WL_IGMP_UCQUERY
+	IOV_WMF_UCAST_IGMP_QUERY,
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+	IOV_WMF_UCAST_UPNP,
+#endif /* DHD_UCAST_UPNP */
+#endif /* DHD_WMF */
+	IOV_AP_ISOLATE,
+#ifdef DHD_UNICAST_DHCP
+	IOV_DHCP_UNICAST,
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+	IOV_BLOCK_PING,
+#endif
+	IOV_LAST
+};
+
+const bcm_iovar_t dhd_iovars[] = {
+	{"version",	IOV_VERSION,	0,	IOVT_BUFFER,	sizeof(dhd_version) },
+#ifdef DHD_DEBUG
+	{"msglevel",	IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+#endif /* DHD_DEBUG */
+	{"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER,	BCME_STRLEN },
+	{"bcmerror",	IOV_BCMERROR,	0,	IOVT_INT8,	0 },
+	{"wdtick",	IOV_WDTICK, 0,	IOVT_UINT32,	0 },
+	{"dump",	IOV_DUMP,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+#ifdef DHD_DEBUG
+	{"cons",	IOV_CONS,	0,	IOVT_BUFFER,	0 },
+	{"dconpoll",	IOV_DCONSOLE_POLL, 0,	IOVT_UINT32,	0 },
+#endif
+	{"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID,	0 },
+	{"gpioob",	IOV_GPIOOB,	0,	IOVT_UINT32,	0 },
+	{"ioctl_timeout",	IOV_IOCTLTIMEOUT,	0,	IOVT_UINT32,	0 },
+#ifdef WLBTAMP
+	{"HCI_cmd",	IOV_HCI_CMD,	0,	IOVT_BUFFER,	0},
+	{"HCI_ACL_data", IOV_HCI_ACL_DATA, 0,	IOVT_BUFFER,	0},
+#endif
+#ifdef PROP_TXSTATUS
+	{"proptx",	IOV_PROPTXSTATUS_ENABLE,	0,	IOVT_BOOL,	0 },
+	/*
+	set the proptxstatus operation mode:
+	0 - Do not do any proptxstatus flow control
+	1 - Use implied credit from a packet status
+	2 - Use explicit credit
+	*/
+	{"ptxmode",	IOV_PROPTXSTATUS_MODE,	0,	IOVT_UINT32,	0 },
+	{"proptx_opt", IOV_PROPTXSTATUS_OPT,	0,	IOVT_UINT32,	0 },
+#ifdef QMONITOR
+	{"qtime_thres",	IOV_QMON_TIME_THRES,	0,	IOVT_UINT32,	0 },
+	{"qtime_percent", IOV_QMON_TIME_PERCENT, 0,	IOVT_UINT32,	0 },
+#endif /* QMONITOR */
+	{"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, IOVT_BOOL, 0 },
+	{"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, IOVT_BOOL, 0 },
+	{"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, IOVT_BOOL, 0 },
+	{"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, IOVT_BOOL, 0 },
+#endif /* PROP_TXSTATUS */
+	{"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0},
+#ifdef WLMEDIA_HTSF
+	{"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 },
+#endif
+	{"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 },
+	{"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, IOVT_BUFFER,
+	(WLHOST_REORDERDATA_MAXFLOWS + 1) },
+#ifdef DHDTCPACK_SUPPRESS
+	{"tcpack_suppress",	IOV_TCPACK_SUPPRESS,	0,	IOVT_UINT8,	0 },
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+	{"wmf_bss_enable", IOV_WMF_BSS_ENAB,	0,	IOVT_BOOL,	0 },
+	{"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP,	0,	IOVT_BOOL,	0 },
+	{"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP,	0,	IOVT_BOOL,	0 },
+#ifdef WL_IGMP_UCQUERY
+	{"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), IOVT_BOOL, 0 },
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+	{"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), IOVT_BOOL, 0 },
+#endif /* DHD_UCAST_UPNP */
+#endif /* DHD_WMF */
+#ifdef DHD_UNICAST_DHCP
+	{"dhcp_unicast", IOV_DHCP_UNICAST, (0), IOVT_BOOL, 0 },
+#endif /* DHD_UNICAST_DHCP */
+	{"ap_isolate", IOV_AP_ISOLATE, (0), IOVT_BOOL, 0},
+#ifdef DHD_L2_FILTER
+	{"block_ping", IOV_BLOCK_PING, (0), IOVT_BOOL, 0},
+#endif
+	{NULL, 0, 0, 0, 0 }
+};
+
+#define DHD_IOVAR_BUF_SIZE	128
+
+/* To NDIS developers: the structure dhd_common is redundant;
+ * please do NOT merge it back from other branches!
+ */
+
+static int
+dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	struct bcmstrbuf b;
+	struct bcmstrbuf *strbuf = &b;
+
+	bcm_binit(strbuf, buf, buflen);
+
+	/* Base DHD info */
+	bcm_bprintf(strbuf, "%s\n", dhd_version);
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+	            dhdp->up, dhdp->txoff, dhdp->busstate);
+	bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
+	            dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
+	bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
+	            dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
+	bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
+
+	bcm_bprintf(strbuf, "dongle stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
+	            dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
+	            dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
+	bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
+	            dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
+	            dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
+	bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
+
+	bcm_bprintf(strbuf, "bus stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %lu  tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
+	            dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
+	bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
+	            dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
+	bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
+	            dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
+	bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
+	            dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
+	bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
+	            dhdp->rx_readahead_cnt, dhdp->tx_realloc);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any prot info */
+	dhd_prot_dump(dhdp, strbuf);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any bus info */
+	dhd_bus_dump(dhdp, strbuf);
+
+
+	return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
+}
+
+int
+dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
+{
+	wl_ioctl_t ioc;
+
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+	ioc.set = set;
+
+	return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
+}
+
+int
+dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
+{
+	int ret = BCME_ERROR;
+
+	if (dhd_os_proto_block(dhd_pub))
+	{
+#if defined(WL_WLC_SHIM)
+		wl_info_t *wl = dhd_pub_wlinfo(dhd_pub);
+
+		wl_io_pport_t io_pport;
+		io_pport.dhd_pub = dhd_pub;
+		io_pport.ifidx = ifidx;
+
+		ret = wl_shim_ioctl(wl->shim, ioc, &io_pport);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("%s: wl_shim_ioctl(%d) ERR %d\n", __FUNCTION__, ioc->cmd, ret));
+		}
+#else
+		ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
+#endif /* defined(WL_WLC_SHIM) */
+
+		if (ret && dhd_pub->up) {
+			/* Send hang event only if dhd_open() was success */
+			dhd_os_check_hang(dhd_pub, ifidx, ret);
+		}
+
+		if (ret == -ETIMEDOUT && !dhd_pub->up) {
+			DHD_ERROR(("%s: 'resumed on timeout' error occurred "
+				"before the interface was brought up\n",
+				__FUNCTION__));
+			dhd_pub->busstate = DHD_BUS_DOWN;
+		}
+
+		dhd_os_proto_unblock(dhd_pub);
+
+	}
+
+	return ret;
+}
+
+uint wl_get_port_num(wl_io_pport_t *io_pport)
+{
+	return 0;
+}
+
+/* Get bssidx from iovar params
+ * Input:   dhd_pub - pointer to dhd_pub_t
+ *          params  - IOVAR params
+ * Output:  idx     - BSS index
+ *          val     - pointer to the IOVAR arguments
+ */
+static int
+dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, char *params, int *idx, char **val)
+{
+	char *prefix = "bsscfg:";
+	uint32	bssidx;
+
+	if (!(strncmp(params, prefix, strlen(prefix)))) {
+		/* per bss setting should be prefixed with 'bsscfg:' */
+		char *p = (char *)params + strlen(prefix);
+
+		/* Skip Name */
+		while (*p != '\0')
+			p++;
+		/* consider null */
+		p = p + 1;
+		bcopy(p, &bssidx, sizeof(uint32));
+		/* Get corresponding dhd index */
+		bssidx = dhd_bssidx2idx(dhd_pub, bssidx);
+
+		if (bssidx >= DHD_MAX_IFS) {
+			DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+
+		/* skip bss idx */
+		p += sizeof(uint32);
+		*val = p;
+		*idx = bssidx;
+	} else {
+		DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
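+/* Per-BSS iovar layout sketch: the params buffer parsed above is expected to
+ * look like
+ *
+ *	"bsscfg:" <iovar name> '\0' <uint32 bsscfg index> <value...>
+ *
+ * on return *val points at <value...> and *idx holds the dhd interface index
+ * that dhd_bssidx2idx() maps the bsscfg index to.
+ */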
+static int
+dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+            void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_VERSION):
+		/* Need to have checked buffer length */
+		bcm_strncpy_s((char*)arg, len, dhd_version, len);
+		break;
+
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)dhd_msg_level;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+#ifdef WL_CFG80211
+		/* Enable DHD and WL logs in oneshot */
+		if (int_val & DHD_WL_VAL2)
+			wl_cfg80211_enable_trace(TRUE, int_val & (~DHD_WL_VAL2));
+		else if (int_val & DHD_WL_VAL)
+			wl_cfg80211_enable_trace(FALSE, WL_DBG_DBG);
+		if (!(int_val & DHD_WL_VAL2))
+#endif /* WL_CFG80211 */
+		dhd_msg_level = int_val;
+		break;
+	case IOV_GVAL(IOV_BCMERRORSTR):
+		bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
+		((char *)arg)[BCME_STRLEN - 1] = 0x00;
+		break;
+
+	case IOV_GVAL(IOV_BCMERROR):
+		int_val = (int32)dhd_pub->bcmerror;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_WDTICK):
+		int_val = (int32)dhd_watchdog_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WDTICK):
+		if (!dhd_pub->up) {
+			bcmerror = BCME_NOTUP;
+			break;
+		}
+		dhd_os_wd_timer(dhd_pub, (uint)int_val);
+		break;
+
+	case IOV_GVAL(IOV_DUMP):
+		bcmerror = dhd_dump(dhd_pub, arg, len);
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_DCONSOLE_POLL):
+		int_val = (int32)dhd_console_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DCONSOLE_POLL):
+		dhd_console_ms = (uint)int_val;
+		break;
+
+	case IOV_SVAL(IOV_CONS):
+		if (len > 0)
+			bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+		break;
+#endif /* DHD_DEBUG */
+
+	case IOV_SVAL(IOV_CLEARCOUNTS):
+		dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
+		dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
+		dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
+		dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+		dhd_pub->tx_dropped = 0;
+		dhd_pub->rx_dropped = 0;
+		dhd_pub->rx_readahead_cnt = 0;
+		dhd_pub->tx_realloc = 0;
+		dhd_pub->wd_dpc_sched = 0;
+		memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
+		dhd_bus_clearcounts(dhd_pub);
+#ifdef PROP_TXSTATUS
+		/* clear proptxstatus related counters */
+		dhd_wlfc_clear_counts(dhd_pub);
+#endif /* PROP_TXSTATUS */
+		break;
+
+
+	case IOV_GVAL(IOV_IOCTLTIMEOUT): {
+		int_val = (int32)dhd_os_get_ioctl_resp_timeout();
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_IOCTLTIMEOUT): {
+		if (int_val <= 0)
+			bcmerror = BCME_BADARG;
+		else
+			dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
+		break;
+	}
+
+#ifdef WLBTAMP
+	case IOV_SVAL(IOV_HCI_CMD): {
+		amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)arg;
+
+		/* sanity check: command preamble present */
+		if (len < HCI_CMD_PREAMBLE_SIZE)
+			return BCME_BUFTOOSHORT;
+
+		/* sanity check: command parameters are present */
+		if (len < (int)(HCI_CMD_PREAMBLE_SIZE + cmd->plen))
+			return BCME_BUFTOOSHORT;
+
+		dhd_bta_docmd(dhd_pub, cmd, len);
+		break;
+	}
+
+	case IOV_SVAL(IOV_HCI_ACL_DATA): {
+		amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)arg;
+
+		/* sanity check: HCI header present */
+		if (len < HCI_ACL_DATA_PREAMBLE_SIZE)
+			return BCME_BUFTOOSHORT;
+
+		/* sanity check: ACL data is present */
+		if (len < (int)(HCI_ACL_DATA_PREAMBLE_SIZE + ACL_data->dlen))
+			return BCME_BUFTOOSHORT;
+
+		dhd_bta_tx_hcidata(dhd_pub, ACL_data, len);
+		break;
+	}
+#endif /* WLBTAMP */
+
+#ifdef PROP_TXSTATUS
+	case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
+		bool wlfc_enab = FALSE;
+		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		int_val = wlfc_enab ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
+		bool wlfc_enab = FALSE;
+		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+		if (bcmerror != BCME_OK)
+			goto exit;
+
+		/* wlfc is already set as desired */
+		if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
+			goto exit;
+
+		if (int_val == TRUE)
+			bcmerror = dhd_wlfc_init(dhd_pub);
+		else
+			bcmerror = dhd_wlfc_deinit(dhd_pub);
+
+		break;
+	}
+	case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
+		bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
+		dhd_wlfc_set_mode(dhd_pub, int_val);
+		break;
+#ifdef QMONITOR
+	case IOV_GVAL(IOV_QMON_TIME_THRES): {
+		int_val = dhd_qmon_thres(dhd_pub, FALSE, 0);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+
+	case IOV_SVAL(IOV_QMON_TIME_THRES): {
+		dhd_qmon_thres(dhd_pub, TRUE, int_val);
+		break;
+	}
+
+	case IOV_GVAL(IOV_QMON_TIME_PERCENT): {
+		int_val = dhd_qmon_getpercent(dhd_pub);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+#endif /* QMONITOR */
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+		bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+		dhd_wlfc_set_module_ignore(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+		bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+		dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+		bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+		dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
+		bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
+		dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
+		break;
+
+#endif /* PROP_TXSTATUS */
+
+	case IOV_GVAL(IOV_BUS_TYPE):
+		/* The dhd application queries the driver to check whether the bus is USB, SDIO, or PCIe. */
+#ifdef BCMDHDUSB
+		int_val = BUS_TYPE_USB;
+#endif
+#ifdef BCMSDIO
+		int_val = BUS_TYPE_SDIO;
+#endif
+#ifdef PCIE_FULL_DONGLE
+		int_val = BUS_TYPE_PCIE;
+#endif
+		bcopy(&int_val, arg, val_size);
+		break;
+
+
+#ifdef WLMEDIA_HTSF
+	case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ):
+		int_val = dhd_pub->htsfdlystat_sz;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ):
+		dhd_pub->htsfdlystat_sz = int_val & 0xff;
+		printf("Setting htsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz);
+		break;
+#endif
+	case IOV_SVAL(IOV_CHANGEMTU):
+		int_val &= 0xffff;
+		bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
+	{
+		uint i = 0;
+		uint8 *ptr = (uint8 *)arg;
+		uint8 count = 0;
+
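+		/* First byte of the response buffer holds the flow count; the active flow ids follow it */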
+		ptr++;
+		for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
+			if (dhd_pub->reorder_bufs[i] != NULL) {
+				*ptr = dhd_pub->reorder_bufs[i]->flow_id;
+				ptr++;
+				count++;
+			}
+		}
+		ptr = (uint8 *)arg;
+		*ptr = count;
+		break;
+	}
+#ifdef DHDTCPACK_SUPPRESS
+	case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
+		int_val = (uint32)dhd_pub->tcpack_sup_mode;
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
+		bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
+		break;
+	}
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+	case IOV_GVAL(IOV_WMF_BSS_ENAB): {
+		uint32	bssidx;
+		dhd_wmf_t *wmf;
+		char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		wmf = dhd_wmf_conf(dhd_pub, bssidx);
+		int_val = wmf->wmf_enable ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_WMF_BSS_ENAB): {
+		/* Enable/Disable WMF */
+		uint32	bssidx;
+		dhd_wmf_t *wmf;
+		char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		ASSERT(val);
+		bcopy(val, &int_val, sizeof(uint32));
+		wmf = dhd_wmf_conf(dhd_pub, bssidx);
+		if (wmf->wmf_enable == int_val)
+			break;
+		if (int_val) {
+			/* Enable WMF */
+			if (dhd_wmf_instance_add(dhd_pub, bssidx) != BCME_OK) {
+				DHD_ERROR(("%s: Error in creating WMF instance\n",
+				__FUNCTION__));
+				break;
+			}
+			if (dhd_wmf_start(dhd_pub, bssidx) != BCME_OK) {
+				DHD_ERROR(("%s: Failed to start WMF\n", __FUNCTION__));
+				break;
+			}
+			wmf->wmf_enable = TRUE;
+		} else {
+			/* Disable WMF */
+			wmf->wmf_enable = FALSE;
+			dhd_wmf_stop(dhd_pub, bssidx);
+			dhd_wmf_instance_del(dhd_pub, bssidx);
+		}
+		break;
+	}
+	case IOV_GVAL(IOV_WMF_UCAST_IGMP):
+		int_val = dhd_pub->wmf_ucast_igmp ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_WMF_UCAST_IGMP):
+		if (dhd_pub->wmf_ucast_igmp == int_val)
+			break;
+
+		if (int_val >= OFF && int_val <= ON)
+			dhd_pub->wmf_ucast_igmp = int_val;
+		else
+			bcmerror = BCME_RANGE;
+		break;
+	case IOV_GVAL(IOV_WMF_MCAST_DATA_SENDUP):
+		int_val = dhd_wmf_mcast_data_sendup(dhd_pub, 0, FALSE, FALSE);
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_WMF_MCAST_DATA_SENDUP):
+		dhd_wmf_mcast_data_sendup(dhd_pub, 0, TRUE, int_val);
+		break;
+
+#ifdef WL_IGMP_UCQUERY
+	case IOV_GVAL(IOV_WMF_UCAST_IGMP_QUERY):
+		int_val = dhd_pub->wmf_ucast_igmp_query ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_WMF_UCAST_IGMP_QUERY):
+		if (dhd_pub->wmf_ucast_igmp_query == int_val)
+			break;
+
+		if (int_val >= OFF && int_val <= ON)
+			dhd_pub->wmf_ucast_igmp_query = int_val;
+		else
+			bcmerror = BCME_RANGE;
+		break;
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+	case IOV_GVAL(IOV_WMF_UCAST_UPNP):
+		int_val = dhd_pub->wmf_ucast_upnp ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_WMF_UCAST_UPNP):
+		if (dhd_pub->wmf_ucast_upnp == int_val)
+			break;
+
+		if (int_val >= OFF && int_val <= ON)
+			dhd_pub->wmf_ucast_upnp = int_val;
+		else
+			bcmerror = BCME_RANGE;
+		break;
+#endif /* DHD_UCAST_UPNP */
+#endif /* DHD_WMF */
+
+
+#ifdef DHD_UNICAST_DHCP
+	case IOV_GVAL(IOV_DHCP_UNICAST):
+		int_val = dhd_pub->dhcp_unicast;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_DHCP_UNICAST):
+		if (dhd_pub->dhcp_unicast == int_val)
+			break;
+
+		if (int_val >= OFF && int_val <= ON) {
+			dhd_pub->dhcp_unicast = int_val;
+		} else {
+			bcmerror = BCME_RANGE;
+		}
+		break;
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+	case IOV_GVAL(IOV_BLOCK_PING):
+		int_val = dhd_pub->block_ping;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_BLOCK_PING):
+		if (dhd_pub->block_ping == int_val)
+			break;
+		if (int_val >= OFF && int_val <= ON) {
+			dhd_pub->block_ping = int_val;
+		} else {
+			bcmerror = BCME_RANGE;
+		}
+		break;
+#endif
+
+	case IOV_GVAL(IOV_AP_ISOLATE): {
+		uint32	bssidx;
+		char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_AP_ISOLATE): {
+		uint32	bssidx;
+		char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		ASSERT(val);
+		bcopy(val, &int_val, sizeof(uint32));
+		dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
+	return bcmerror;
+}
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+	/* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+	 * because an encryption/rsn mismatch results in both events, and
+	 * the important information is in the WLC_E_PRUNE.
+	 */
+	if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+	      dhd_conn_event == WLC_E_PRUNE)) {
+		dhd_conn_event = event;
+		dhd_conn_status = status;
+		dhd_conn_reason = reason;
+	}
+}
+
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+	void *p;
+	int eprec = -1;		/* precedence to evict from */
+	bool discard_oldest;
+
+	/* Fast case, precedence queue is not full and we are also not
+	 * exceeding total queue length
+	 */
+	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+		pktq_penq(q, prec, pkt);
+		return TRUE;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktq_pfull(q, prec))
+		eprec = prec;
+	else if (pktq_full(q)) {
+		p = pktq_peek_tail(q, &eprec);
+		ASSERT(p);
+		if (eprec > prec || eprec < 0)
+			return FALSE;
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktq_pempty(q, eprec));
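+		/* wme_dp bitmap selects the discard policy for this precedence:
+		 * bit set evicts the oldest (head) packet, clear evicts the newest (tail)
+		 */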
+		discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+		if (eprec == prec && !discard_oldest)
+			return FALSE;		/* refuse newer (incoming) packet */
+		/* Evict packet according to discard policy */
+		p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
+		ASSERT(p);
+#ifdef DHDTCPACK_SUPPRESS
+		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+				__FUNCTION__, __LINE__));
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+		}
+#endif /* DHDTCPACK_SUPPRESS */
+		PKTFREE(dhdp->osh, p, TRUE);
+	}
+
+	/* Enqueue */
+	p = pktq_penq(q, prec, pkt);
+	ASSERT(p);
+
+	return TRUE;
+}
+
+/*
+ * Functions to drop proper pkts from queue:
+ *	If one pkt in queue is non-fragmented, drop first non-fragmented pkt only
+ *	If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts
+ *	If can't find pkts matching upper 2 cases, drop first pkt anyway
+ */
+bool
+dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
+{
+	struct pktq_prec *q = NULL;
+	void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
+	pkt_frag_t frag_info;
+
+	ASSERT(dhdp && pq);
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	if (p == NULL)
+		return FALSE;
+
+	while (p) {
+		frag_info = pkt_frag_info(dhdp->osh, p);
+		if (frag_info == DHD_PKT_FRAG_NONE) {
+			break;
+		} else if (frag_info == DHD_PKT_FRAG_FIRST) {
+			if (first) {
+				/* No last frag pkt, use prev as last */
+				last = prev;
+				break;
+			} else {
+				first = p;
+				prev_first = prev;
+			}
+		} else if (frag_info == DHD_PKT_FRAG_LAST) {
+			if (first) {
+				last = p;
+				break;
+			}
+		}
+
+		prev = p;
+		p = PKTLINK(p);
+	}
+
+	if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
+		/* Not found matching pkts, use oldest */
+		prev = NULL;
+		p = q->head;
+		frag_info = 0;
+	}
+
+	if (frag_info == DHD_PKT_FRAG_NONE) {
+		first = last = p;
+		prev_first = prev;
+	}
+
+	p = first;
+	while (p) {
+		next = PKTLINK(p);
+		q->len--;
+		pq->len--;
+
+		PKTSETLINK(p, NULL);
+
+		if (fn)
+			fn(dhdp, prec, p, TRUE);
+
+		if (p == last)
+			break;
+
+		p = next;
+	}
+
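+	/* Unlink the freed span [first..last] from the queue: splice the predecessor (or the queue head) to next */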
+	if (prev_first == NULL) {
+		if ((q->head = next) == NULL)
+			q->tail = NULL;
+	} else {
+		PKTSETLINK(prev_first, next);
+		if (!next)
+			q->tail = prev_first;
+	}
+
+	return TRUE;
+}
+
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+	int val_size;
+	const bcm_iovar_t *vi = NULL;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+		name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+
+	bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+int
+dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen)
+{
+	int bcmerror = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!buf) {
+		return BCME_BADARG;
+	}
+
+	switch (ioc->cmd) {
+	case DHD_GET_MAGIC:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_MAGIC;
+		break;
+
+	case DHD_GET_VERSION:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_VERSION;
+		break;
+
+	case DHD_GET_VAR:
+	case DHD_SET_VAR: {
+		char *arg;
+		uint arglen;
+
+		/* scan past the name to any arguments */
+		for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
+			;
+
+		if (*arg) {
+			bcmerror = BCME_BUFTOOSHORT;
+			break;
+		}
+
+		/* account for the NUL terminator */
+		arg++, arglen--;
+
+		/* call with the appropriate arguments */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+			buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* not in generic table, try protocol module */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+				arglen, buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+				NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* if still not found, try bus module */
+		if (ioc->cmd == DHD_GET_VAR) {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+				arg, arglen, buf, buflen, IOV_GET);
+		} else {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+				NULL, 0, arg, arglen, IOV_SET);
+		}
+
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
+
+#ifdef SHOW_EVENTS
+#ifdef SHOW_LOGTRACE
+
+#define AVOID_BYTE 64
+#define MAX_NO_OF_ARG 16
+
+static int
+check_event_log_sequence_number(uint32 seq_no)
+{
+	int32 diff;
+	int32 ret;
+	static uint32 logtrace_seqnum_prev = 0;
+
+	diff = ntoh32(seq_no)-logtrace_seqnum_prev;
+	switch (diff)
+	{
+		case 0:
+			ret = -1; /* duplicate packet, drop */
+			break;
+
+		case 1:
+			ret = 0; /* in order */
+			break;
+
+		default:
+			if ((ntoh32(seq_no) == 0) &&
+				(logtrace_seqnum_prev == 0xFFFFFFFF) ) { /* in-order - Roll over */
+					ret = 0;
+			} else {
+
+				if (diff > 0) {
+					DHD_EVENT(("WLC_E_TRACE:"
+						"Event lost (log) seqnum %d nblost %d\n",
+						ntoh32(seq_no), (diff-1)));
+				} else {
+					DHD_EVENT(("WLC_E_TRACE:"
+						"Event Packets coming out of order!!\n"));
+				}
+				ret = 0;
+			}
+	}
+
+	logtrace_seqnum_prev = ntoh32(seq_no);
+
+	return ret;
+}
+#endif /* SHOW_LOGTRACE */
+
+static void
+wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
+	void *raw_event_ptr, char *eventmask)
+{
+	uint i, status, reason;
+	bool group = FALSE, flush_txq = FALSE, link = FALSE;
+	const char *auth_str;
+	const char *event_name;
+	uchar *buf;
+	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+	uint event_type, flags, auth_type, datalen;
+
+	event_type = ntoh32(event->event_type);
+	flags = ntoh16(event->flags);
+	status = ntoh32(event->status);
+	reason = ntoh32(event->reason);
+	BCM_REFERENCE(reason);
+	auth_type = ntoh32(event->auth_type);
+	datalen = ntoh32(event->datalen);
+
+	/* debug dump of event messages */
+	snprintf(eabuf, sizeof(eabuf), "%02x:%02x:%02x:%02x:%02x:%02x",
+	        (uchar)event->addr.octet[0]&0xff,
+	        (uchar)event->addr.octet[1]&0xff,
+	        (uchar)event->addr.octet[2]&0xff,
+	        (uchar)event->addr.octet[3]&0xff,
+	        (uchar)event->addr.octet[4]&0xff,
+	        (uchar)event->addr.octet[5]&0xff);
+
+	event_name = bcmevent_get_name(event_type);
+	BCM_REFERENCE(event_name);
+
+	if (flags & WLC_EVENT_MSG_LINK)
+		link = TRUE;
+	if (flags & WLC_EVENT_MSG_GROUP)
+		group = TRUE;
+	if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+		flush_txq = TRUE;
+
+	switch (event_type) {
+	case WLC_E_START:
+	case WLC_E_DEAUTH:
+	case WLC_E_DISASSOC:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC:
+	case WLC_E_REASSOC:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+			       event_name, eabuf, (int)reason));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
+			       event_name, eabuf, (int)status));
+		}
+		break;
+
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+		break;
+
+	case WLC_E_AUTH:
+	case WLC_E_AUTH_IND:
+		if (auth_type == DOT11_OPEN_SYSTEM)
+			auth_str = "Open System";
+		else if (auth_type == DOT11_SHARED_KEY)
+			auth_str = "Shared Key";
+		else {
+			snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
+			auth_str = err_msg;
+		}
+		if (event_type == WLC_E_AUTH_IND) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+			       event_name, eabuf, auth_str, (int)reason));
+		}
+		BCM_REFERENCE(auth_str);
+
+		break;
+
+	case WLC_E_JOIN:
+	case WLC_E_ROAM:
+	case WLC_E_SET_SSID:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+		} else if (status == WLC_E_STATUS_NO_NETWORKS) {
+			DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+				event_name, (int)status));
+		}
+		break;
+
+	case WLC_E_BEACON_RX:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+		}
+		break;
+
+	case WLC_E_LINK:
+		DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
+		BCM_REFERENCE(link);
+		break;
+
+	case WLC_E_MIC_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+		       event_name, eabuf, group, flush_txq));
+		BCM_REFERENCE(group);
+		BCM_REFERENCE(flush_txq);
+		break;
+
+	case WLC_E_ICV_ERROR:
+	case WLC_E_UNICAST_DECODE_ERROR:
+	case WLC_E_MULTICAST_DECODE_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+		       event_name, eabuf));
+		break;
+
+	case WLC_E_TXFAIL:
+		DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_SCAN_COMPLETE:
+	case WLC_E_ASSOC_REQ_IE:
+	case WLC_E_ASSOC_RESP_IE:
+	case WLC_E_PMKID_CACHE:
+		DHD_EVENT(("MACEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_NET_LOST:
+	case WLC_E_PFN_SCAN_COMPLETE:
+	case WLC_E_PFN_SCAN_NONE:
+	case WLC_E_PFN_SCAN_ALLGONE:
+		DHD_EVENT(("PNOEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PSK_SUP:
+	case WLC_E_PRUNE:
+		DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+		           event_name, (int)status, (int)reason));
+		break;
+
+#ifdef WIFI_ACT_FRAME
+	case WLC_E_ACTION_FRAME:
+		DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
+		break;
+#endif /* WIFI_ACT_FRAME */
+
+#ifdef SHOW_LOGTRACE
+	case WLC_E_TRACE:
+	{
+		msgtrace_hdr_t hdr;
+		uint32 nblost;
+		uint8 count;
+		char *s, *p;
+		static uint32 seqnum_prev = 0;
+		uint32 *record = NULL;
+		uint32 *log_ptr =  NULL;
+		uint32 writeindex = 0;
+		event_log_hdr_t event_hdr;
+		int no_of_fmts = 0;
+		char *fmt = NULL;
+		dhd_event_log_t *raw_event = (dhd_event_log_t *) raw_event_ptr;
+
+		buf = (uchar *) event_data;
+		memcpy(&hdr, buf, MSGTRACE_HDRLEN);
+
+		if (hdr.version != MSGTRACE_VERSION) {
+			DHD_EVENT(("\nMACEVENT: %s [unsupported version --> "
+				"dhd version:%d dongle version:%d]\n",
+				event_name, MSGTRACE_VERSION, hdr.version));
+			/* Reset datalen to avoid display below */
+			datalen = 0;
+			break;
+		}
+
+		if (hdr.trace_type == MSGTRACE_HDR_TYPE_MSG) {
+			/* There are 2 bytes available at the end of data */
+			buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
+
+			if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
+				DHD_EVENT(("WLC_E_TRACE: [Discarded traces in dongle -->"
+					"discarded_bytes %d discarded_printf %d]\n",
+					ntoh32(hdr.discarded_bytes),
+					ntoh32(hdr.discarded_printf)));
+			}
+
+			nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
+			if (nblost > 0) {
+				DHD_EVENT(("WLC_E_TRACE:"
+					"[Event lost (msg) --> seqnum %d nblost %d\n",
+					ntoh32(hdr.seqnum), nblost));
+			}
+			seqnum_prev = ntoh32(hdr.seqnum);
+
+			/* Display the trace buffer. Advance from
+			 * newline to newline to avoid one huge
+			 * printf (an issue with the Linux printk).
+			 */
+			p = (char *)&buf[MSGTRACE_HDRLEN];
+			while (*p != '\0' && (s = strstr(p, "\n")) != NULL) {
+				*s = '\0';
+				DHD_EVENT(("%s\n", p));
+				p = s+1;
+			}
+			if (*p)
+				DHD_EVENT(("%s", p));
+
+			/* Reset datalen to avoid display below */
+			datalen = 0;
+
+		} else if (hdr.trace_type == MSGTRACE_HDR_TYPE_LOG) {
+			/* Let the standard event printing work for now */
+			uint32 timestamp, w, malloc_len;
+
+			if (check_event_log_sequence_number(hdr.seqnum)) {
+
+				DHD_EVENT(("%s: WLC_E_TRACE:"
+					"[Event duplicate (log) %d] dropping!!\n",
+					__FUNCTION__, hdr.seqnum));
+				return; /* drop duplicate events */
+			}
+
+			p = (char *)&buf[MSGTRACE_HDRLEN];
+			datalen -= MSGTRACE_HDRLEN;
+			w = ntoh32((uint32)*p);
+			p += 4;
+			datalen -= 4;
+			timestamp = ntoh32((uint32)*p);
+			BCM_REFERENCE(timestamp);
+			BCM_REFERENCE(w);
+
+			DHD_EVENT(("timestamp %x%x\n", timestamp, w));
+
+			if (raw_event->fmts) {
+				malloc_len = datalen + AVOID_BYTE;
+				record = (uint32 *)MALLOC(dhd_pub->osh, malloc_len);
+				if (record == NULL) {
+					DHD_EVENT(("MSGTRACE_HDR_TYPE_LOG:"
+						"malloc failed\n"));
+					return;
+				}
+				log_ptr = (uint32 *) (p + datalen);
+				writeindex = datalen/4;
+
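+				/* Walk the log payload backwards: each record ends with a
+				 * header word (tag, count, fmt_num) preceded by 'count'
+				 * argument words. Records are copied into 'record' so they
+				 * can be printed in order below.
+				 */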
+				if (record) {
+					while (datalen > 4) {
+						log_ptr--;
+						datalen -= 4;
+						event_hdr.t = *log_ptr;
+						/*
+						 * Check for partially overwritten entries
+						 */
+						if (log_ptr - (uint32 *) p < event_hdr.count) {
+								break;
+						}
+						/*
+						* Check for end of the Frame.
+						*/
+						if (event_hdr.tag ==  EVENT_LOG_TAG_NULL) {
+							continue;
+						}
+						/*
+						* Check For Special Time Stamp Packet
+						*/
+						if (event_hdr.tag == EVENT_LOG_TAG_TS) {
+							datalen -= 12;
+							log_ptr = log_ptr - 3;
+							continue;
+						}
+
+						log_ptr[0] = event_hdr.t;
+						if (event_hdr.count > MAX_NO_OF_ARG) {
+							break;
+						}
+						/* Now place the header at the front
+						* and copy back.
+						*/
+						log_ptr -= event_hdr.count;
+
+						writeindex = writeindex - event_hdr.count;
+						record[writeindex++] = event_hdr.t;
+						for (count = 0; count < (event_hdr.count-1);
+							count++) {
+							record[writeindex++] = log_ptr[count];
+						}
+						writeindex = writeindex - event_hdr.count;
+						datalen = datalen - (event_hdr.count * 4);
+						no_of_fmts++;
+					}
+				}
+
+				while (no_of_fmts--)
+				{
+					event_log_hdr_t event_hdr;
+					event_hdr.t = record[writeindex];
+
+					if ((event_hdr.fmt_num>>2) < raw_event->num_fmts) {
+						fmt = raw_event->fmts[event_hdr.fmt_num>>2];
+						DHD_EVENT((fmt,
+							record[writeindex + 1],
+							record[writeindex + 2],
+							record[writeindex + 3],
+							record[writeindex + 4],
+							record[writeindex + 5],
+							record[writeindex + 6],
+							record[writeindex + 7],
+							record[writeindex + 8],
+							record[writeindex + 9],
+							record[writeindex + 10],
+							record[writeindex + 11],
+							record[writeindex + 12],
+							record[writeindex + 13],
+							record[writeindex + 14],
+							record[writeindex + 15],
+							record[writeindex + 16]));
+
+						if (fmt[strlen(fmt) - 1] != '\n') {
+							/* Add newline if missing */
+							DHD_EVENT(("\n"));
+						}
+					}
+
+					writeindex = writeindex + event_hdr.count;
+				}
+
+				if (record) {
+					MFREE(dhd_pub->osh, record, malloc_len);
+					record = NULL;
+				}
+			} else {
+				while (datalen > 4) {
+					p += 4;
+					datalen -= 4;
+					/* Print each word.  DO NOT ntoh it.  */
+					DHD_EVENT((" %8.8x", *((uint32 *) p)));
+				}
+				DHD_EVENT(("\n"));
+			}
+			datalen = 0;
+		}
+		break;
+	}
+#endif /* SHOW_LOGTRACE */
+
+	case WLC_E_RSSI:
+		DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+		break;
+
+	case WLC_E_SERVICE_FOUND:
+	case WLC_E_P2PO_ADD_DEVICE:
+	case WLC_E_P2PO_DEL_DEVICE:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+#ifdef BT_WIFI_HANDOBER
+	case WLC_E_BT_WIFI_HANDOVER_REQ:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+#endif
+
+	default:
+		DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+		       event_name, event_type, eabuf, (int)status, (int)reason,
+		       (int)auth_type));
+		break;
+	}
+
+	/* show any appended data */
+	if (DHD_BYTES_ON() && DHD_EVENT_ON() && datalen) {
+		buf = (uchar *) event_data;
+		BCM_REFERENCE(buf);
+		DHD_EVENT((" data (%d) : ", datalen));
+		for (i = 0; i < datalen; i++)
+			DHD_EVENT((" 0x%02x ", *buf++));
+		DHD_EVENT(("\n"));
+	}
+}
+#endif /* SHOW_EVENTS */
+
+int
+wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
+	wl_event_msg_t *event, void **data_ptr, void *raw_event)
+{
+	/* check whether packet is a BRCM event pkt */
+	bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+	uint8 *event_data;
+	uint32 type, status, datalen;
+	uint16 flags;
+	int evlen;
+	int hostidx;
+
+	if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+		DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	/* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
+	if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) {
+		DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	*data_ptr = &pvt_data[1];
+	event_data = *data_ptr;
+
+
+	/* memcpy since BRCM event pkt may be unaligned. */
+	memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
+
+	type = ntoh32_ua((void *)&event->event_type);
+	flags = ntoh16_ua((void *)&event->flags);
+	status = ntoh32_ua((void *)&event->status);
+	datalen = ntoh32_ua((void *)&event->datalen);
+	evlen = datalen + sizeof(bcm_event_t);
+
+	/* find equivalent host index for event ifidx */
+	hostidx = dhd_ifidx2hostidx(dhd_pub->info, event->ifidx);
+
+	switch (type) {
+#ifdef PROP_TXSTATUS
+	case WLC_E_FIFO_CREDIT_MAP:
+		dhd_wlfc_enable(dhd_pub);
+		dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
+		WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
+			"(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
+			event_data[2],
+			event_data[3], event_data[4], event_data[5]));
+		break;
+
+	case WLC_E_BCMC_CREDIT_SUPPORT:
+		dhd_wlfc_BCMCCredit_support_event(dhd_pub);
+		break;
+#endif
+
+	case WLC_E_IF:
+		{
+		struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
+
+		/* Ignore the event if NOIF is set */
+		if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
+			DHD_ERROR(("WLC_E_IF: NO_IF set, event ignored\n"));
+			return (BCME_UNSUPPORTED);
+		}
+#ifdef PCIE_FULL_DONGLE
+		dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
+			ifevent->opcode, ifevent->role);
+#endif
+#ifdef PROP_TXSTATUS
+		{
+			uint8* ea = pvt_data->eth.ether_dhost;
+			WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
+			              "[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+			              ifevent->ifidx,
+			              ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
+			              ((ifevent->role == 0) ? "STA":"AP "),
+			              ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
+			(void)ea;
+
+			if (ifevent->opcode == WLC_E_IF_CHANGE)
+				dhd_wlfc_interface_event(dhd_pub,
+					eWLFC_MAC_ENTRY_ACTION_UPDATE,
+					ifevent->ifidx, ifevent->role, ea);
+			else
+				dhd_wlfc_interface_event(dhd_pub,
+					((ifevent->opcode == WLC_E_IF_ADD) ?
+					eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
+					ifevent->ifidx, ifevent->role, ea);
+
+			/* dhd already has created an interface by default, for 0 */
+			if (ifevent->ifidx == 0)
+				break;
+		}
+#endif /* PROP_TXSTATUS */
+
+		if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
+			if (ifevent->opcode == WLC_E_IF_ADD) {
+				if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
+					event->addr.octet)) {
+
+					DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d  %s\n",
+						__FUNCTION__, ifevent->ifidx, event->ifname));
+					return (BCME_ERROR);
+				}
+			} else if (ifevent->opcode == WLC_E_IF_DEL) {
+				dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
+					event->addr.octet);
+			} else if (ifevent->opcode == WLC_E_IF_CHANGE) {
+#ifdef WL_CFG80211
+				wl_cfg80211_notify_ifchange(ifevent->ifidx,
+					event->ifname, event->addr.octet, ifevent->bssidx);
+#endif /* WL_CFG80211 */
+			}
+		} else {
+#if !defined(PROP_TXSTATUS) || !defined(PCIE_FULL_DONGLE)
+			DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
+			           __FUNCTION__, ifevent->ifidx, event->ifname));
+#endif /* !PROP_TXSTATUS || !PCIE_FULL_DONGLE */
+		}
+		/* send up the if event: btamp user needs it */
+		*ifidx = hostidx;
+		/* push up to external supp/auth */
+		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		break;
+	}
+
+#ifdef WLMEDIA_HTSF
+	case WLC_E_HTSFSYNC:
+		htsf_update(dhd_pub->info, event_data);
+		break;
+#endif /* WLMEDIA_HTSF */
+#if defined(NDISVER) && (NDISVER >= 0x0630)
+	case WLC_E_NDIS_LINK:
+		break;
+#else
+	case WLC_E_NDIS_LINK: {
+		uint32 temp = hton32(WLC_E_LINK);
+
+		memcpy((void *)(&pvt_data->event.event_type), &temp,
+		       sizeof(pvt_data->event.event_type));
+		break;
+	}
+#endif /* NDISVER >= 0x0630 */
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_NET_LOST:
+		break;
+#if defined(PNO_SUPPORT)
+	case WLC_E_PFN_BSSID_NET_FOUND:
+	case WLC_E_PFN_BSSID_NET_LOST:
+	case WLC_E_PFN_BEST_BATCHING:
+		dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
+		break;
+#endif /* PNO_SUPPORT */
+		/* These are what external supplicant/authenticator wants */
+	case WLC_E_ASSOC_IND:
+	case WLC_E_AUTH_IND:
+	case WLC_E_REASSOC_IND:
+		dhd_findadd_sta(dhd_pub, hostidx, &event->addr.octet);
+		break;
+	case WLC_E_LINK:
+#ifdef PCIE_FULL_DONGLE
+		if (dhd_update_interface_link_status(dhd_pub, (uint8)hostidx,
+			(uint8)flags) != BCME_OK)
+			break;
+		if (!flags) {
+			dhd_flow_rings_delete(dhd_pub, hostidx);
+		}
+		/* fall through */
+#endif
+	case WLC_E_DEAUTH:
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC:
+	case WLC_E_DISASSOC_IND:
+		if (type != WLC_E_LINK) {
+			dhd_del_sta(dhd_pub, hostidx, &event->addr.octet);
+		}
+		DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
+		           __FUNCTION__, type, flags, status));
+#ifdef PCIE_FULL_DONGLE
+		if (type != WLC_E_LINK) {
+			uint8 ifindex = (uint8)hostidx;
+			uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
+			if (role == WLC_E_IF_ROLE_STA) {
+				dhd_flow_rings_delete(dhd_pub, ifindex);
+			} else {
+				dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
+					&event->addr.octet[0]);
+			}
+		}
+#endif
+		/* fall through */
+	default:
+		*ifidx = hostidx;
+		/* push up to external supp/auth */
+		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+		           __FUNCTION__, type, flags, status));
+		BCM_REFERENCE(flags);
+		BCM_REFERENCE(status);
+
+		break;
+	}
+
+#ifdef SHOW_EVENTS
+	wl_show_host_event(dhd_pub, event,
+		(void *)event_data, raw_event, dhd_pub->enable_log);
+#endif /* SHOW_EVENTS */
+
+	return (BCME_OK);
+}
+
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+	/* Event struct members passed from dongle to host are stored in network
+	 * byte order. Convert all members to host-order.
+	 */
+	evt->event_type = ntoh32(evt->event_type);
+	evt->flags = ntoh16(evt->flags);
+	evt->status = ntoh32(evt->status);
+	evt->reason = ntoh32(evt->reason);
+	evt->auth_type = ntoh32(evt->auth_type);
+	evt->datalen = ntoh32(evt->datalen);
+	evt->version = ntoh16(evt->version);
+}
+
+void
+dhd_print_buf(void *pbuf, int len, int bytes_per_line)
+{
+#ifdef DHD_DEBUG
+	int i, j = 0;
+	unsigned char *buf = pbuf;
+
+	if (bytes_per_line == 0) {
+		bytes_per_line = len;
+	}
+
+	for (i = 0; i < len; i++) {
+		printf("%2.2x", *buf++);
+		j++;
+		if (j == bytes_per_line) {
+			printf("\n");
+			j = 0;
+		} else {
+			printf(":");
+		}
+	}
+	printf("\n");
+#endif /* DHD_DEBUG */
+}
+#ifndef strtoul
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#endif
+
+#ifdef PKT_FILTER_SUPPORT
+/* Convert user's input in hex pattern to byte-size mask */
+static int
+wl_pattern_atoh(char *src, char *dst)
+{
+	int i;
+	if (strncmp(src, "0x", 2) != 0 &&
+	    strncmp(src, "0X", 2) != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+		return -1;
+	}
+	src = src + 2; /* Skip past 0x */
+	if (strlen(src) % 2 != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+		return -1;
+	}
+	for (i = 0; *src != '\0'; i++) {
+		char num[3];
+		bcm_strncpy_s(num, sizeof(num), src, 2);
+		num[2] = '\0';
+		dst[i] = (uint8)strtoul(num, NULL, 16);
+		src += 2;
+	}
+	return i;
+}
+
+void
+dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
+{
+	char				*argv[8];
+	int					i = 0;
+	const char			*str;
+	int					buf_len;
+	int					str_len;
+	char				*arg_save = 0, *arg_org = 0;
+	int					rc;
+	char				buf[32] = {0};
+	wl_pkt_filter_enable_t	enable_parm;
+	wl_pkt_filter_enable_t	* pkt_filterp;
+
+	if (!arg)
+		return;
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	arg_org = arg_save;
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (argv[i] == NULL) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
+	str = "pkt_filter_enable";
+	str_len = strlen(str);
+	bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
+	buf[ sizeof(buf) - 1 ] = '\0';
+	buf_len = str_len + 1;
+
+	pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
+
+	/* Parse enable/disable value. */
+	enable_parm.enable = htod32(enable);
+
+	buf_len += sizeof(enable_parm);
+	memcpy((char *)pkt_filterp,
+	       &enable_parm,
+	       sizeof(enable_parm));
+
+	/* Enable/disable the specified filter. */
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+	else
+		DHD_TRACE(("%s: successfully added pktfilter %s\n",
+		__FUNCTION__, arg));
+
+	/* Control the master mode */
+	bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+}
+
+void
+dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
+{
+	const char 			*str;
+	wl_pkt_filter_t		pkt_filter;
+	wl_pkt_filter_t		*pkt_filterp;
+	int					buf_len;
+	int					str_len;
+	int 				rc;
+	uint32				mask_size;
+	uint32				pattern_size;
+	char				*argv[8], * buf = 0;
+	int					i = 0;
+	char				*arg_save = 0, *arg_org = 0;
+#define BUF_SIZE		2048
+
+	if (!arg)
+		return;
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	arg_org = arg_save;
+
+	if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
+		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	if (strlen(arg) > BUF_SIZE) {
+		DHD_ERROR(("Not enough buffer: arg len %d > buf size %d\n", (int)strlen(arg), BUF_SIZE));
+		goto fail;
+	}
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+	while (argv[i++])
+		argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (argv[i] == NULL) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
+	str = "pkt_filter_add";
+	str_len = strlen(str);
+	bcm_strncpy_s(buf, BUF_SIZE, str, str_len);
+	buf[ str_len ] = '\0';
+	buf_len = str_len + 1;
+
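+	/* iovar buffer layout: the NUL-terminated "pkt_filter_add" name, then the
+	 * wl_pkt_filter_t fixed fields, then the mask and pattern bytes back to back
+	 */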
+	pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Polarity not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter polarity. */
+	pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Filter type not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter type. */
+	pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Offset not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter offset. */
+	pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Bitmask not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter mask. */
+	mask_size =
+		htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Pattern not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter pattern. */
+	pattern_size =
+		htod32(wl_pattern_atoh(argv[i],
+	         (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+	if (mask_size != pattern_size) {
+		DHD_ERROR(("Mask and pattern not the same size\n"));
+		goto fail;
+	}
+
+	pkt_filter.u.pattern.size_bytes = mask_size;
+	buf_len += WL_PKT_FILTER_FIXED_LEN;
+	buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+	/* Filter attributes are set in a local variable (pkt_filter), and
+	** then memcpy'ed into the buffer (pkt_filterp) since there is no
+	** guarantee that the buffer is properly aligned.
+	*/
+	memcpy((char *)pkt_filterp,
+	       &pkt_filter,
+	       WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+	else
+		DHD_TRACE(("%s: successfully added pktfilter %s\n",
+		__FUNCTION__, arg));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+
+	if (buf)
+		MFREE(dhd->osh, buf, BUF_SIZE);
+}
+
+void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
+{
+	char iovbuf[32];
+	int ret;
+
+	bcm_mkiovar("pkt_filter_delete", (char *)&id, 4, iovbuf, sizeof(iovbuf));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	if (ret < 0) {
+		DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
+			__FUNCTION__, id, ret));
+	}
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+/* ========================== */
+/* ==== ARP OFFLOAD SUPPORT = */
+/* ========================== */
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int iovar_len;
+	int retcode;
+
+	iovar_len = bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+	if (!iovar_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0);
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+			__FUNCTION__, arp_mode, retcode));
+	else
+		DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
+			__FUNCTION__, arp_mode));
+}
+
+void
+dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int iovar_len;
+	int retcode;
+
+	iovar_len = bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
+	if (!iovar_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0);
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_TRACE(("%s: failed to enable ARP offload to %d, retcode = %d\n",
+			__FUNCTION__, arp_enable, retcode));
+	else
+		DHD_TRACE(("%s: successfully enabled ARP offload to %d\n",
+			__FUNCTION__, arp_enable));
+	if (arp_enable) {
+		uint32 version;
+		bcm_mkiovar("arp_version", 0, 0, iovbuf, sizeof(iovbuf));
+		retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+		if (retcode) {
+			DHD_INFO(("%s: failed to get ARP version (assuming version 1), retcode = %d\n",
+				__FUNCTION__, retcode));
+			dhd->arp_version = 1;
+		}
+		else {
+			memcpy(&version, iovbuf, sizeof(version));
+			DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
+			dhd->arp_version = version;
+		}
+	}
+}
+
+void
+dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
+{
+	int ret = 0;
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+
+	if (dhd == NULL) return;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
+{
+	int ret = 0;
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+
+	if (dhd == NULL) return;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
+{
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int retcode;
+
+
+	if (dhd == NULL) return;
+	if (dhd->arp_version == 1)
+		idx = 0;
+	iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr,
+		sizeof(ipaddr), iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+	if (retcode)
+		DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: ARP host ipaddr entry added\n",
+		__FUNCTION__));
+}
+
+int
+dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
+{
+	int retcode, i;
+	int iov_len;
+	uint32 *ptr32 = buf;
+	bool clr_bottom = FALSE;
+
+	if (!buf)
+		return -1;
+	if (dhd == NULL) return -1;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
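+	/* Write the "arp_hostip" iovar name into the caller's buffer; the GET ioctl
+	 * below then returns the host IP table in the same buffer
+	 */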
+	iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen);
+	BCM_REFERENCE(iov_len);
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, FALSE, idx);
+
+	if (retcode) {
+		DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
+		__FUNCTION__, retcode));
+
+		return -1;
+	}
+
+	/* zero out any stale entries after the first empty (zero) entry */
+	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+		if (!clr_bottom) {
+			if (*ptr32 == 0)
+				clr_bottom = TRUE;
+		} else {
+			*ptr32 = 0;
+		}
+		ptr32++;
+	}
+
+	return 0;
+}
+#endif /* ARP_OFFLOAD_SUPPORT  */
+
+/*
+ * Neighbor Discovery Offload: enable NDO feature
+ * Called  by ipv6 event handler when interface comes up/goes down
+ */
+int
+dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int iov_len;
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	iov_len = bcm_mkiovar("ndoe", (char *)&ndo_enable, 4, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return -1;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
+	if (retcode)
+		DHD_ERROR(("%s: failed to enable ndo to %d, retcode = %d\n",
+			__FUNCTION__, ndo_enable, retcode));
+	else
+		DHD_TRACE(("%s: successfully enabled ndo offload to %d\n",
+			__FUNCTION__, ndo_enable));
+
+	return retcode;
+}
+
+/*
+ * Neighbor Discovery Offload: add a host IPv6 address
+ * Called by the ipv6 event handler when an interface comes up
+ */
+int
+dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
+{
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
+		IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return -1;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+	if (retcode)
+		DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: ndo ipaddr entry added \n",
+		__FUNCTION__));
+
+	return retcode;
+}
+/*
+ * Neighbor Discovery Offload: clear the host IPv6 addresses
+ * Called by the ipv6 event handler when an interface goes down
+ */
+int
+dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
+{
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
+		0, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return -1;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+	if (retcode)
+		DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: ndo ipaddr entry removed \n",
+		__FUNCTION__));
+
+	return retcode;
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+	switch (ntoh32(event->event_type)) {
+#ifdef WLBTAMP
+	case WLC_E_BTA_HCI_EVENT:
+		break;
+#endif /* WLBTAMP */
+	default:
+		break;
+	}
+
+	/* Call per-port handler. */
+	dhd_sendup_event(dhdp, event, data);
+}
+
+
+/*
+ * returns = TRUE if associated, FALSE if not associated
+ */
+bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval)
+{
+	char bssid[6], zbuf[6];
+	int ret = -1;
+
+	bzero(bssid, 6);
+	bzero(zbuf, 6);
+
+	ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, ETHER_ADDR_LEN, FALSE, 0);
+	DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
+
+	if (ret == BCME_NOTASSOCIATED) {
+		DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
+	}
+
+	if (retval)
+		*retval = ret;
+
+	if (ret < 0)
+		return FALSE;
+
+	if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) != 0)) {
+		/* STA is associated; BSSID is non-zero */
+
+		if (bss_buf) {
+			/* return bss if caller provided buf */
+			memcpy(bss_buf, bssid, ETHER_ADDR_LEN);
+		}
+		return TRUE;
+	} else {
+		DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
+		return FALSE;
+	}
+}
+
+/* Function to estimate possible DTIM_SKIP value */
+int
+dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
+{
+	int bcn_li_dtim = 1; /* default: no DTIM skip */
+	int ret = -1;
+	int dtim_period = 0;
+	int ap_beacon = 0;
+	int allowed_skip_dtim_cnt = 0;
+	/* Check if associated */
+	if (dhd_is_associated(dhd, NULL, NULL) == FALSE) {
+		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* read associated AP beacon interval */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
+		&ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* read associated ap's dtim setup */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+		&dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* if DTIM period is zero (not associated), just exit */
+	if (dtim_period == 0) {
+		goto exit;
+	}
+
+	/* attempt to use the platform-defined DTIM skip interval */
+	bcn_li_dtim = dhd->suspend_bcn_li_dtim;
+
+	/* check if sta listen interval fits into AP dtim */
+	if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
+		/* AP DTIM too big for our listen interval: no DTIM skipping */
+		bcn_li_dtim = NO_DTIM_SKIP;
+		DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+			__FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
+		goto exit;
+	}
+
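+	/* Cap the total wake interval (DTIM period * beacon period * skip count) at MAX_DTIM_ALLOWED_INTERVAL */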
+	if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
+		 allowed_skip_dtim_cnt = MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
+		 bcn_li_dtim = (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
+	}
+
+	if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
+		/* Round up dtim_skip to fit into STAs Listen Interval */
+		bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
+		DHD_TRACE(("%s adjust dtim_skip to %d\n", __FUNCTION__, bcn_li_dtim));
+	}
+
+	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
+		__FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));
+
+exit:
+	return bcn_li_dtim;
+}
+
+/* Check if the mode supports STA MODE */
+bool dhd_support_sta_mode(dhd_pub_t *dhd)
+{
+
+#ifdef  WL_CFG80211
+	if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
+		return FALSE;
+	else
+#endif /* WL_CFG80211 */
+		return TRUE;
+}
+
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd)
+{
+	char				buf[32] = {0};
+	const char			*str;
+	wl_mkeep_alive_pkt_t	mkeep_alive_pkt = {0};
+	wl_mkeep_alive_pkt_t	*mkeep_alive_pktp;
+	int					buf_len;
+	int					str_len;
+	int res					= -1;
+
+	if (!dhd_support_sta_mode(dhd))
+		return res;
+
+	DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+	str = "mkeep_alive";
+	str_len = strlen(str);
+	strncpy(buf, str, sizeof(buf) - 1);
+	buf[ sizeof(buf) - 1 ] = '\0';
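+	/* iovar buffer layout: the NUL-terminated "mkeep_alive" name followed by the
+	 * fixed-length wl_mkeep_alive_pkt_t
+	 */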
+	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
+	mkeep_alive_pkt.period_msec = CUSTOM_KEEP_ALIVE_SETTING;
+	buf_len = str_len + 1;
+	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+	/* Setup keep alive zero for null packet generation */
+	mkeep_alive_pkt.keep_alive_id = 0;
+	mkeep_alive_pkt.len_bytes = 0;
+	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+	bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
+	/* Keep-alive attributes are set in local	variable (mkeep_alive_pkt), and
+	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+	 * guarantee that the buffer is properly aligned.
+	 */
+	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+	res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+
+	return res;
+}
+#endif /* defined(KEEP_ALIVE) */
+/* Android ComboSCAN support */
+
+/*
+ *  data parsing from ComboScan tlv list
+*/
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+                     int input_size, int *bytes_left)
+{
+	char* str;
+	uint16 short_temp;
+	uint32 int_temp;
+
+	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+
+	/* Clean all dest bytes */
+	memset(dst, 0, dst_size);
+	while (*bytes_left > 0) {
+
+		if (str[0] != token) {
+			DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+				__FUNCTION__, token, str[0], *bytes_left));
+			return -1;
+		}
+
+		*bytes_left -= 1;
+		str += 1;
+
+		if (input_size == 1) {
+			memcpy(dst, str, input_size);
+		}
+		else if (input_size == 2) {
+			/* copy to a local to avoid unaligned access, then fix byte order */
+			memcpy(&short_temp, str, input_size);
+			short_temp = htod16(short_temp);
+			memcpy(dst, &short_temp, input_size);
+		}
+		else if (input_size == 4) {
+			memcpy(&int_temp, str, input_size);
+			int_temp = htod32(int_temp);
+			memcpy(dst, &int_temp, input_size);
+		}
+
+		*bytes_left -= input_size;
+		str += input_size;
+		*list_str = str;
+		return 1;
+	}
+	return 1;
+}
+
+/*
+ *  channel list parsing from cscan tlv list
+*/
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+                             int channel_num, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+			*list_str = str;
+			DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+		/* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* All channels */
+			channel_list[idx] = 0x0;
+		}
+		else {
+			channel_list[idx] = (uint16)str[0];
+			DHD_TRACE(("%s channel=%d \n", __FUNCTION__,  channel_list[idx]));
+		}
+		*bytes_left -= 1;
+		str += 1;
+
+		if (idx++ > 255) {
+			DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/*
+ *  SSIDs list parsing from cscan tlv list
+ */
+int
+wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+			*list_str = str;
+			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+
+		/* Get proper CSCAN_TLV_TYPE_SSID_IE */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* Broadcast SSID */
+			ssid[idx].SSID_len = 0;
+			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+			*bytes_left -= 1;
+			str += 1;
+
+			DHD_TRACE(("BROADCAST SCAN  left=%d\n", *bytes_left));
+		}
+		else if (str[0] <= DOT11_MAX_SSID_LEN) {
+			/* Get proper SSID size */
+			ssid[idx].SSID_len = str[0];
+			*bytes_left -= 1;
+			str += 1;
+
+			/* Get SSID */
+			if (ssid[idx].SSID_len > *bytes_left) {
+				DHD_ERROR(("%s SSID len=%d exceeds remaining bytes=%d\n",
+				__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+				return -1;
+			}
+
+			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+			*bytes_left -= ssid[idx].SSID_len;
+			str += ssid[idx].SSID_len;
+
+			DHD_TRACE(("%s :size=%d left=%d\n",
+				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+		}
+		else {
+			DHD_ERROR(("### SSID size %d exceeds %d\n", str[0], DOT11_MAX_SSID_LEN));
+			return -1;
+		}
+
+		if (idx++ >  max) {
+			DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/* Parse a comma-separated list from list_str into the ssid array, starting
+ * at index idx.  max specifies the size of the ssid array.  Parses the SSIDs
+ * and returns the updated idx; if the returned idx >= max, not all SSIDs fit
+ * and the excess were not copied.  Returns -1 on a NULL list or on an SSID
+ * that is too long.
+ */
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+	char* str, *ptr;
+
+	if ((list_str == NULL) || (*list_str == NULL))
+		return -1;
+
+	for (str = *list_str; str != NULL; str = ptr) {
+
+		/* check for next TAG */
+		if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+			*list_str	 = str + strlen(GET_CHANNEL);
+			return idx;
+		}
+
+		if ((ptr = strchr(str, ',')) != NULL) {
+			*ptr++ = '\0';
+		}
+
+		if (strlen(str) > DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+			return -1;
+		}
+
+		if (strlen(str) == 0)
+			ssid[idx].SSID_len = 0;
+
+		if (idx < max) {
+			bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
+			strncpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID) - 1);
+			ssid[idx].SSID_len = strlen(str);
+		}
+		idx++;
+	}
+	return idx;
+}
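+
+/* Illustrative example (not part of the driver): given the comma-separated
+ * fragment "home-ap,guest,<GET_CHANNEL tag>..." (SSID names are made up),
+ * a call with idx = 0 and max >= 2 fills ssid[0] = "home-ap",
+ * ssid[1] = "guest" and returns 2, with *list_str advanced past the
+ * GET_CHANNEL tag.  SSIDs beyond max are counted but not copied.
+ */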
+
+/*
+ * Parse channel list from iwpriv CSCAN
+ */
+int
+wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
+{
+	int num;
+	int val;
+	char* str;
+	char* endptr = NULL;
+
+	if ((list_str == NULL)||(*list_str == NULL))
+		return -1;
+
+	str = *list_str;
+	num = 0;
+	while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
+		val = (int)strtoul(str, &endptr, 0);
+		if (endptr == str) {
+			printf("could not parse channel number starting at"
+				" substring \"%s\" in list:\n%s\n",
+				str, *list_str);
+			return -1;
+		}
+		str = endptr + strspn(endptr, " ,");
+
+		if (num == channel_num) {
+			DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
+				channel_num, *list_str));
+			return -1;
+		}
+
+		channel_list[num++] = (uint16)val;
+	}
+	*list_str = str;
+	return num;
+}
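+
+/* Illustrative example (not part of the driver): for the text fragment
+ * "1,6,11<GET_NPROBE tag>..." this parser fills channel_list = {1, 6, 11},
+ * returns 3 and leaves *list_str at the GET_NPROBE tag.  If channel_num is
+ * smaller than the number of listed channels it returns -1 instead of
+ * overrunning the caller's array.
+ */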
diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
new file mode 100644
index 0000000000000000000000000000000000000000..633d3a7f71c869d143377cc57bed9e80484dcedc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
@@ -0,0 +1,286 @@
+/*
+* Customer code to add GPIO control during WLAN start/stop
+* $Copyright Open Broadcom Corporation$
+*
+* $Id: dhd_custom_gpio.c 447105 2014-01-08 05:27:09Z $
+*/
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+
+#include <wlioctl.h>
+#include <wl_iw.h>
+
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+
+#if defined(CUSTOMER_HW2)
+
+#if defined(PLATFORM_MPS)
+int __attribute__ ((weak)) wifi_get_fw_nv_path(char *fw, char *nv) { return 0;};
+#endif
+
+#endif /* CUSTOMER_HW2 */
+
+#if defined(OOB_INTR_ONLY)
+
+#if defined(BCMLXSDMMC)
+extern int sdioh_mmc_irq(int irq);
+#endif /* (BCMLXSDMMC)  */
+
+#if defined(CUSTOMER_HW3) || defined(PLATFORM_MPS)
+#include <mach/gpio.h>
+#endif
+
+/* Customer specific Host GPIO definition */
+static int dhd_oob_gpio_num = -1;
+
+module_param(dhd_oob_gpio_num, int, 0644);
+MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
+
+/* This function returns:
+ *  1) return value  : Host GPIO interrupt number for the customer platform
+ *  2) irq_flags_ptr : Type of Host interrupt (level- or edge-triggered)
+ *
+ *  NOTE:
+ *  Customers should check their platform definitions
+ *  and their Host interrupt spec
+ *  to figure out the proper setting for their platform.
+ *  Broadcom provides only reference settings, as an example.
+ */
+int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr)
+{
+	int  host_oob_irq = 0;
+
+#if defined(CUSTOMER_HW2) && !defined(PLATFORM_MPS)
+	host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr);
+
+#else
+#if defined(CUSTOM_OOB_GPIO_NUM)
+	if (dhd_oob_gpio_num < 0) {
+		dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+	}
+#endif /* CUSTOM_OOB_GPIO_NUM */
+
+	if (dhd_oob_gpio_num < 0) {
+		WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+		__FUNCTION__));
+		return (dhd_oob_gpio_num);
+	}
+
+	WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+	         __FUNCTION__, dhd_oob_gpio_num));
+
+#if defined(CUSTOMER_HW3) || defined(PLATFORM_MPS)
+	gpio_request(dhd_oob_gpio_num, "oob irq");
+	host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
+	gpio_direction_input(dhd_oob_gpio_num);
+#endif /* defined CUSTOMER_HW3 || defined(PLATFORM_MPS) */
+#endif 
+
+	return (host_oob_irq);
+}
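+
+/* Illustrative usage (not part of the driver): on a board that routes the
+ * WLAN host-wake line to a plain GPIO, the OOB interrupt can be chosen at
+ * module load time without a platform hook, e.g.
+ *	insmod bcmdhd.ko dhd_oob_gpio_num=42
+ * where 42 is a hypothetical GPIO number; the CUSTOMER_HW3/PLATFORM_MPS path
+ * above then requests that GPIO and resolves it to an interrupt with
+ * gpio_to_irq().
+ */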
+#endif /* OOB_INTR_ONLY */
+
+/* Customer function to control hw specific wlan gpios */
+int
+dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff)
+{
+	int err = 0;
+
+	return err;
+}
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+/* Function to get custom MAC address */
+int
+dhd_custom_get_mac_address(void *adapter, unsigned char *buf)
+{
+	int ret = 0;
+
+	WL_TRACE(("%s Enter\n", __FUNCTION__));
+	if (!buf)
+		return -EINVAL;
+
+	/* Customer access to MAC address stored outside of DHD driver */
+#if (defined(CUSTOMER_HW2) || defined(CUSTOMER_HW10)) && (LINUX_VERSION_CODE >= \
+	KERNEL_VERSION(2, 6, 35))
+	ret = wifi_platform_get_mac_addr(adapter, buf);
+#endif
+
+#ifdef EXAMPLE_GET_MAC
+	/* EXAMPLE code */
+	{
+		struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+		bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+	}
+#endif /* EXAMPLE_GET_MAC */
+
+	return ret;
+}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+/* Customized Locale table : OPTIONAL feature */
+const struct cntry_locales_custom translate_custom_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+	{"",   "XY", 4},  /* Universal if Country code is unknown or empty */
+	{"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
+	{"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
+	{"EU", "EU", 5},  /* European union countries to : EU regrev 05 */
+	{"AT", "EU", 5},
+	{"BE", "EU", 5},
+	{"BG", "EU", 5},
+	{"CY", "EU", 5},
+	{"CZ", "EU", 5},
+	{"DK", "EU", 5},
+	{"EE", "EU", 5},
+	{"FI", "EU", 5},
+	{"FR", "EU", 5},
+	{"DE", "EU", 5},
+	{"GR", "EU", 5},
+	{"HU", "EU", 5},
+	{"IE", "EU", 5},
+	{"IT", "EU", 5},
+	{"LV", "EU", 5},
+	{"LI", "EU", 5},
+	{"LT", "EU", 5},
+	{"LU", "EU", 5},
+	{"MT", "EU", 5},
+	{"NL", "EU", 5},
+	{"PL", "EU", 5},
+	{"PT", "EU", 5},
+	{"RO", "EU", 5},
+	{"SK", "EU", 5},
+	{"SI", "EU", 5},
+	{"ES", "EU", 5},
+	{"SE", "EU", 5},
+	{"GB", "EU", 5},
+	{"KR", "XY", 3},
+	{"AU", "XY", 3},
+	{"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */
+	{"TW", "XY", 3},
+	{"AR", "XY", 3},
+	{"MX", "XY", 3},
+	{"IL", "IL", 0},
+	{"CH", "CH", 0},
+	{"TR", "TR", 0},
+	{"NO", "NO", 0},
+#endif /* EXAMPLE_TABLE */
+#if defined(CUSTOMER_HW2)
+#if defined(BCM4335_CHIP)
+	{"",   "XZ", 11},  /* Universal if Country code is unknown or empty */
+#endif
+	{"AE", "AE", 1},
+	{"AR", "AR", 1},
+	{"AT", "AT", 1},
+	{"AU", "AU", 2},
+	{"BE", "BE", 1},
+	{"BG", "BG", 1},
+	{"BN", "BN", 1},
+	{"CA", "CA", 2},
+	{"CH", "CH", 1},
+	{"CY", "CY", 1},
+	{"CZ", "CZ", 1},
+	{"DE", "DE", 3},
+	{"DK", "DK", 1},
+	{"EE", "EE", 1},
+	{"ES", "ES", 1},
+	{"FI", "FI", 1},
+	{"FR", "FR", 1},
+	{"GB", "GB", 1},
+	{"GR", "GR", 1},
+	{"HR", "HR", 1},
+	{"HU", "HU", 1},
+	{"IE", "IE", 1},
+	{"IS", "IS", 1},
+	{"IT", "IT", 1},
+	{"ID", "ID", 1},
+	{"JP", "JP", 8},
+	{"KR", "KR", 24},
+	{"KW", "KW", 1},
+	{"LI", "LI", 1},
+	{"LT", "LT", 1},
+	{"LU", "LU", 1},
+	{"LV", "LV", 1},
+	{"MA", "MA", 1},
+	{"MT", "MT", 1},
+	{"MX", "MX", 1},
+	{"NL", "NL", 1},
+	{"NO", "NO", 1},
+	{"PL", "PL", 1},
+	{"PT", "PT", 1},
+	{"PY", "PY", 1},
+	{"RO", "RO", 1},
+	{"SE", "SE", 1},
+	{"SI", "SI", 1},
+	{"SK", "SK", 1},
+	{"TR", "TR", 7},
+	{"TW", "TW", 1},
+	{"IR", "XZ", 11},	/* Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */
+	{"SD", "XZ", 11},	/* Universal if Country code is SUDAN */
+	{"SY", "XZ", 11},	/* Universal if Country code is SYRIAN ARAB REPUBLIC */
+	{"GL", "XZ", 11},	/* Universal if Country code is GREENLAND */
+	{"PS", "XZ", 11},	/* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */
+	{"TL", "XZ", 11},	/* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */
+	{"MH", "XZ", 11},	/* Universal if Country code is MARSHALL ISLANDS */
+#ifdef BCM4330_CHIP
+	{"RU", "RU", 1},
+	{"US", "US", 5}
+#endif
+#endif /* CUSTOMER_HW2 */
+};
+
+
+/* Customized Locale convertor
+*  input : ISO 3166-1 country abbreviation
+*  output: customized cspec
+*/
+void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec)
+{
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+
+	struct cntry_locales_custom *cloc_ptr;
+
+	if (!cspec)
+		return;
+
+	cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code);
+	if (cloc_ptr) {
+		strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
+		cspec->rev = cloc_ptr->custom_locale_rev;
+	}
+	return;
+#else
+	int size, i;
+
+	size = ARRAYSIZE(translate_custom_table);
+
+	if (cspec == 0)
+		 return;
+
+	if (size == 0)
+		 return;
+
+	for (i = 0; i < size; i++) {
+		if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+			memcpy(cspec->ccode,
+				translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+			cspec->rev = translate_custom_table[i].custom_locale_rev;
+			return;
+		}
+	}
+#ifdef EXAMPLE_TABLE
+	/* if no country code matched return first universal code from translate_custom_table */
+	memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
+	cspec->rev = translate_custom_table[0].custom_locale_rev;
+#endif /* EXAMPLE_TABLE */
+	return;
+#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+}
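+
+/* Illustrative example (not part of the driver): with EXAMPLE_TABLE compiled
+ * in, a lookup for ISO code "CA" on the fallback path above yields
+ * cspec->ccode = "US", cspec->rev = 69, i.e. the locale programmed into the
+ * firmware does not have to match the ISO code reported by the framework.
+ */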
diff --git a/drivers/net/wireless/bcmdhd/dhd_dbg.h b/drivers/net/wireless/bcmdhd/dhd_dbg.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b0025740f7090a92482b5d42b991bc210333781
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_dbg.h
@@ -0,0 +1,107 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * $ Copyright Open Broadcom Corporation $
+ *
+ * $Id: dhd_dbg.h 491225 2014-07-15 11:58:29Z $
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
+#define USE_NET_RATELIMIT		1
+
+#if defined(DHD_DEBUG)
+
+#define DHD_ERROR(args)		do {if ((dhd_msg_level & DHD_ERROR_VAL) && USE_NET_RATELIMIT) \
+								printf args;} while (0)
+#define DHD_TRACE(args)		do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+#define DHD_INFO(args)		do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#define DHD_DATA(args)		do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args)		do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args)		do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args)		do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args)		do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args)		do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args)		do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#define DHD_EVENT(args)		do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#define DHD_BTA(args)		do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
+#define DHD_ISCAN(args)		do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+#define DHD_ARPOE(args)		do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
+#define DHD_REORDER(args)	do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0)
+#define DHD_PNO(args)		do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0)
+
+#define DHD_TRACE_HW4	DHD_TRACE
+#define DHD_INFO_HW4	DHD_INFO
+
+#define DHD_ERROR_ON()		(dhd_msg_level & DHD_ERROR_VAL)
+#define DHD_TRACE_ON()		(dhd_msg_level & DHD_TRACE_VAL)
+#define DHD_INFO_ON()		(dhd_msg_level & DHD_INFO_VAL)
+#define DHD_DATA_ON()		(dhd_msg_level & DHD_DATA_VAL)
+#define DHD_CTL_ON()		(dhd_msg_level & DHD_CTL_VAL)
+#define DHD_TIMER_ON()		(dhd_msg_level & DHD_TIMER_VAL)
+#define DHD_HDRS_ON()		(dhd_msg_level & DHD_HDRS_VAL)
+#define DHD_BYTES_ON()		(dhd_msg_level & DHD_BYTES_VAL)
+#define DHD_INTR_ON()		(dhd_msg_level & DHD_INTR_VAL)
+#define DHD_GLOM_ON()		(dhd_msg_level & DHD_GLOM_VAL)
+#define DHD_EVENT_ON()		(dhd_msg_level & DHD_EVENT_VAL)
+#define DHD_BTA_ON()		(dhd_msg_level & DHD_BTA_VAL)
+#define DHD_ISCAN_ON()		(dhd_msg_level & DHD_ISCAN_VAL)
+#define DHD_ARPOE_ON()		(dhd_msg_level & DHD_ARPOE_VAL)
+#define DHD_REORDER_ON()	(dhd_msg_level & DHD_REORDER_VAL)
+#define DHD_NOCHECKDIED_ON()	(dhd_msg_level & DHD_NOCHECKDIED_VAL)
+#define DHD_PNO_ON()		(dhd_msg_level & DHD_PNO_VAL)
+
+#else /* DHD_DEBUG */
+
+#define DHD_ERROR(args)		do {if (USE_NET_RATELIMIT) printf args;} while (0)
+#define DHD_TRACE(args)
+#define DHD_INFO(args)
+#define DHD_DATA(args)
+#define DHD_CTL(args)
+#define DHD_TIMER(args)
+#define DHD_HDRS(args)
+#define DHD_BYTES(args)
+#define DHD_INTR(args)
+#define DHD_GLOM(args)
+#define DHD_EVENT(args)
+#define DHD_BTA(args)
+#define DHD_ISCAN(args)
+#define DHD_ARPOE(args)
+#define DHD_REORDER(args)
+#define DHD_PNO(args)
+
+#define DHD_TRACE_HW4	DHD_TRACE
+#define DHD_INFO_HW4	DHD_INFO
+
+#define DHD_ERROR_ON()		0
+#define DHD_TRACE_ON()		0
+#define DHD_INFO_ON()		0
+#define DHD_DATA_ON()		0
+#define DHD_CTL_ON()		0
+#define DHD_TIMER_ON()		0
+#define DHD_HDRS_ON()		0
+#define DHD_BYTES_ON()		0
+#define DHD_INTR_ON()		0
+#define DHD_GLOM_ON()		0
+#define DHD_EVENT_ON()		0
+#define DHD_BTA_ON()		0
+#define DHD_ISCAN_ON()		0
+#define DHD_ARPOE_ON()		0
+#define DHD_REORDER_ON()	0
+#define DHD_NOCHECKDIED_ON()	0
+#define DHD_PNO_ON()		0
+
+#endif /* DHD_DEBUG */
+
+#define DHD_LOG(args)
+
+#define DHD_BLOG(cp, size)
+
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
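+/* Illustrative usage (not part of this header): the macros above take their
+ * argument list wrapped in an extra pair of parentheses so that a variable
+ * number of printf-style arguments can pass through one macro parameter:
+ *
+ *	DHD_ERROR(("%s: request failed, err %d\n", __FUNCTION__, err));
+ *
+ * The *_ON() macros let callers gate expensive diagnostics on dhd_msg_level
+ * (they compile to 0 when DHD_DEBUG is not set).
+ */
+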
+#endif /* _dhd_dbg_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.c b/drivers/net/wireless/bcmdhd/dhd_ip.c
new file mode 100644
index 0000000000000000000000000000000000000000..deef74455b625e2c20912aded8606184d7bb68dd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_ip.c
@@ -0,0 +1,942 @@
+/*
+ * IP Packet Parser Module.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_ip.c 468932 2014-04-09 06:58:15Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/802.3.h>
+#include <proto/bcmip.h>
+#include <bcmendian.h>
+
+#include <dhd_dbg.h>
+
+#include <dhd_ip.h>
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <proto/bcmtcp.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+/* special values */
+/* 802.3 llc/snap header */
+static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+
+pkt_frag_t pkt_frag_info(osl_t *osh, void *p)
+{
+	uint8 *frame;
+	int length;
+	uint8 *pt;			/* Pointer to type field */
+	uint16 ethertype;
+	struct ipv4_hdr *iph;		/* IP frame pointer */
+	int ipl;			/* IP frame length */
+	uint16 iph_frag;
+
+	ASSERT(osh && p);
+
+	frame = PKTDATA(osh, p);
+	length = PKTLEN(osh, p);
+
+	/* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+	if (length < ETHER_HDR_LEN) {
+		DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length));
+		return DHD_PKT_FRAG_NONE;
+	} else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) {
+		/* Frame is Ethernet II */
+		pt = frame + ETHER_TYPE_OFFSET;
+	} else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+	           !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+		pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+	} else {
+		DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	ethertype = ntoh16(*(uint16 *)pt);
+
+	/* Skip VLAN tag, if any */
+	if (ethertype == ETHER_TYPE_8021Q) {
+		pt += VLAN_TAG_LEN;
+
+		if (pt + ETHER_TYPE_LEN > frame + length) {
+			DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length));
+			return DHD_PKT_FRAG_NONE;
+		}
+
+		ethertype = ntoh16(*(uint16 *)pt);
+	}
+
+	if (ethertype != ETHER_TYPE_IP) {
+		DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n",
+			__FUNCTION__, ethertype, length));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN);
+	ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame));
+
+	/* We support IPv4 only */
+	if ((ipl < IPV4_OPTIONS_OFFSET) || (IP_VER(iph) != IP_VER_4)) {
+		DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	iph_frag = ntoh16(iph->frag);
+
+	if (iph_frag & IPV4_FRAG_DONT) {
+		return DHD_PKT_FRAG_NONE;
+	} else if ((iph_frag & IPV4_FRAG_MORE) == 0) {
+		return DHD_PKT_FRAG_LAST;
+	} else {
+		return (iph_frag & IPV4_FRAG_OFFSET_MASK)? DHD_PKT_FRAG_CONT : DHD_PKT_FRAG_FIRST;
+	}
+}
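+
+/* Summary of the classification above: frames that are not IPv4, are too
+ * short to parse, or carry the DF bit return DHD_PKT_FRAG_NONE; a fragment
+ * with MF set and offset 0 is FIRST, MF set with a nonzero offset is CONT,
+ * and MF clear (whatever the offset) is LAST.
+ */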
+
+#ifdef DHDTCPACK_SUPPRESS
+
+typedef struct {
+	void *pkt_in_q;			/* TCP ACK packet that is already in txq or DelayQ */
+	void *pkt_ether_hdr;	/* Ethernet header pointer of pkt_in_q */
+} tcpack_info_t;
+
+typedef struct _tdata_psh_info_t {
+	uint32 end_seq;			/* end seq# of a received TCP PSH DATA pkt */
+	struct _tdata_psh_info_t *next;	/* next pointer of the link chain */
+} tdata_psh_info_t;
+
+typedef struct {
+	uint8 src_ip_addr[IPV4_ADDR_LEN];	/* SRC ip addrs of this TCP stream */
+	uint8 dst_ip_addr[IPV4_ADDR_LEN];	/* DST ip addrs of this TCP stream */
+	uint8 src_tcp_port[TCP_PORT_LEN];	/* SRC tcp ports of this TCP stream */
+	uint8 dst_tcp_port[TCP_PORT_LEN];	/* DST tcp ports of this TCP stream */
+	tdata_psh_info_t *tdata_psh_info_head;	/* Head of received TCP PSH DATA chain */
+	tdata_psh_info_t *tdata_psh_info_tail;	/* Tail of received TCP PSH DATA chain */
+	uint32 last_used_time;	/* The last time this tcpdata_info was used(in ms) */
+} tcpdata_info_t;
+
+/* TCPACK SUPPRESS module */
+typedef struct {
+	int tcpack_info_cnt;
+	tcpack_info_t tcpack_info_tbl[TCPACK_INFO_MAXNUM];	/* Info of TCP ACK to send */
+	int tcpdata_info_cnt;
+	tcpdata_info_t tcpdata_info_tbl[TCPDATA_INFO_MAXNUM];	/* Info of received TCP DATA */
+	tdata_psh_info_t *tdata_psh_info_pool;	/* Pointer to tdata_psh_info elements pool */
+	tdata_psh_info_t *tdata_psh_info_free;	/* free tdata_psh_info elements chain in pool */
+#ifdef DHDTCPACK_SUP_DBG
+	int psh_info_enq_num;	/* Number of free TCP PSH DATA info elements in pool */
+#endif /* DHDTCPACK_SUP_DBG */
+} tcpack_sup_module_t;
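+
+/* How the structures above fit together (summary, not extra driver state):
+ * the module keeps one tcpack_info_t per pure ACK still queued for transmit,
+ * so a newer ACK for the same flow can overwrite it in place, and one
+ * tcpdata_info_t per active receive flow, each holding a singly linked chain
+ * of tdata_psh_info_t records (drawn from the shared free pool) that remember
+ * the end sequence numbers of received PSH segments not yet acknowledged.
+ */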
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+counter_tbl_t tack_tbl = {"tcpACK", 0, 1000, 10, {0, }, 1};
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+static void
+_tdata_psh_info_pool_enq(tcpack_sup_module_t *tcpack_sup_mod,
+	tdata_psh_info_t *tdata_psh_info)
+{
+	if ((tcpack_sup_mod == NULL) || (tdata_psh_info == NULL)) {
+		DHD_ERROR(("%s %d: ERROR %p %p\n", __FUNCTION__, __LINE__,
+			tcpack_sup_mod, tdata_psh_info));
+		return;
+	}
+
+	ASSERT(tdata_psh_info->next == NULL);
+	tdata_psh_info->next = tcpack_sup_mod->tdata_psh_info_free;
+	tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info;
+#ifdef DHDTCPACK_SUP_DBG
+	tcpack_sup_mod->psh_info_enq_num++;
+#endif
+}
+
+static tdata_psh_info_t*
+_tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod)
+{
+	tdata_psh_info_t *tdata_psh_info = NULL;
+
+	if (tcpack_sup_mod == NULL) {
+		DHD_ERROR(("%s %d: ERROR %p\n", __FUNCTION__, __LINE__,
+			tcpack_sup_mod));
+		return NULL;
+	}
+
+	tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free;
+	if (tdata_psh_info == NULL)
+		DHD_ERROR(("%s %d: Out of tdata_disc_grp\n", __FUNCTION__, __LINE__));
+	else {
+		tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+		tdata_psh_info->next = NULL;
+#ifdef DHDTCPACK_SUP_DBG
+		tcpack_sup_mod->psh_info_enq_num--;
+#endif /* DHDTCPACK_SUP_DBG */
+	}
+
+	return tdata_psh_info;
+}
+
+static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp,
+	tcpack_sup_module_t *tcpack_sup_mod)
+{
+	tdata_psh_info_t *tdata_psh_info_pool = NULL;
+	uint i;
+
+	DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+	if (tcpack_sup_mod == NULL)
+		return BCME_ERROR;
+
+	ASSERT(tcpack_sup_mod->tdata_psh_info_pool == NULL);
+	ASSERT(tcpack_sup_mod->tdata_psh_info_free == NULL);
+
+	tdata_psh_info_pool =
+		MALLOC(dhdp->osh, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+
+	if (tdata_psh_info_pool == NULL)
+		return BCME_NOMEM;
+	bzero(tdata_psh_info_pool, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+#ifdef DHDTCPACK_SUP_DBG
+	tcpack_sup_mod->psh_info_enq_num = 0;
+#endif /* DHDTCPACK_SUP_DBG */
+
+	/* Enqueue newly allocated tcpdata psh info elements to the pool */
+	for (i = 0; i < TCPDATA_PSH_INFO_MAXNUM; i++)
+		_tdata_psh_info_pool_enq(tcpack_sup_mod, &tdata_psh_info_pool[i]);
+
+	ASSERT(tcpack_sup_mod->tdata_psh_info_free != NULL);
+	tcpack_sup_mod->tdata_psh_info_pool = tdata_psh_info_pool;
+
+	return BCME_OK;
+}
+
+static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp,
+	tcpack_sup_module_t *tcpack_sup_mod)
+{
+	uint i;
+	tdata_psh_info_t *tdata_psh_info;
+
+	DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+	if (tcpack_sup_mod == NULL) {
+		DHD_ERROR(("%s %d: ERROR tcpack_sup_mod NULL!\n",
+			__FUNCTION__, __LINE__));
+		return;
+	}
+
+	for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+		tcpdata_info_t *tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		/* Return tdata_psh_info elements allocated to each tcpdata_info to the pool */
+		while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+			tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+			tdata_psh_info->next = NULL;
+			_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+		}
+		tcpdata_info->tdata_psh_info_tail = NULL;
+	}
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+	i = 0;
+	/* Be sure we recollected all tdata_psh_info elements */
+	while ((tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free)) {
+		tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+		tdata_psh_info->next = NULL;
+		i++;
+	}
+	ASSERT(i == TCPDATA_PSH_INFO_MAXNUM);
+	MFREE(dhdp->osh, tcpack_sup_mod->tdata_psh_info_pool,
+		sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+	tcpack_sup_mod->tdata_psh_info_pool = NULL;
+
+	return;
+}
+
+int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode)
+{
+	int ret = BCME_OK;
+
+	dhd_os_tcpacklock(dhdp);
+
+	if (dhdp->tcpack_sup_mode == mode) {
+		DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode));
+		goto exit;
+	}
+
+	if (mode >= TCPACK_SUP_LAST_MODE ||
+#ifndef BCMSDIO
+		mode == TCPACK_SUP_DELAYTX ||
+#endif
+		FALSE) {
+		DHD_ERROR(("%s %d: Invalid mode %d\n", __FUNCTION__, __LINE__, mode));
+		ret = BCME_BADARG;
+		goto exit;
+	}
+
+	DHD_TRACE(("%s: %d -> %d\n",
+		__FUNCTION__, dhdp->tcpack_sup_mode, mode));
+
+	/* Old tcpack_sup_mode is TCPACK_SUP_DELAYTX */
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX) {
+		tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+		/* We won't need the tdata_psh_info pool and tcpdata_info_tbl anymore */
+		_tdata_psh_info_pool_deinit(dhdp, tcpack_sup_mod);
+		tcpack_sup_mod->tcpdata_info_cnt = 0;
+		bzero(tcpack_sup_mod->tcpdata_info_tbl,
+			sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM);
+		/* For half duplex bus interface, tx precedes rx by default */
+		if (dhdp->bus)
+			dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+	}
+
+	dhdp->tcpack_sup_mode = mode;
+
+	if (mode == TCPACK_SUP_OFF) {
+		ASSERT(dhdp->tcpack_sup_module != NULL);
+		MFREE(dhdp->osh, dhdp->tcpack_sup_module, sizeof(tcpack_sup_module_t));
+		dhdp->tcpack_sup_module = NULL;
+		goto exit;
+	}
+
+	if (dhdp->tcpack_sup_module == NULL) {
+		tcpack_sup_module_t *tcpack_sup_mod =
+			MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t));
+		if (tcpack_sup_mod == NULL) {
+			DHD_ERROR(("%s %d: No MEM\n", __FUNCTION__, __LINE__));
+			dhdp->tcpack_sup_mode = TCPACK_SUP_OFF;
+			ret = BCME_NOMEM;
+			goto exit;
+		}
+		bzero(tcpack_sup_mod, sizeof(tcpack_sup_module_t));
+		dhdp->tcpack_sup_module = tcpack_sup_mod;
+	}
+
+	if (mode == TCPACK_SUP_DELAYTX) {
+		ret = _tdata_psh_info_pool_init(dhdp, dhdp->tcpack_sup_module);
+		if (ret != BCME_OK)
+			DHD_ERROR(("%s %d: pool init fail with %d\n", __FUNCTION__, __LINE__, ret));
+		else if (dhdp->bus)
+			dhd_bus_set_dotxinrx(dhdp->bus, FALSE);
+	}
+
+exit:
+	dhd_os_tcpackunlock(dhdp);
+	return ret;
+}
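+
+/* Summary of the transitions handled above: leaving TCPACK_SUP_DELAYTX
+ * releases the PSH-info pool and clears the per-flow table, switching to
+ * TCPACK_SUP_OFF frees the whole module state, any other mode allocates the
+ * module state on first use, and entering TCPACK_SUP_DELAYTX additionally
+ * initializes the PSH-info pool and disables dotxinrx on the bus.
+ */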
+
+void
+dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp)
+{
+	tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	dhd_os_tcpacklock(dhdp);
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
+			__FUNCTION__, __LINE__));
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+
+	tcpack_sup_mod->tcpack_info_cnt = 0;
+	bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM);
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	return;
+}
+
+inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 i;
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpack_info_t *tcpack_info_tbl;
+	int tbl_cnt;
+	int ret = BCME_OK;
+	void *pdata;
+	uint32 pktlen;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	pdata = PKTDATA(dhdp->osh, pkt);
+	pktlen = PKTLEN(dhdp->osh, pkt) - dhd_prot_hdrlen(dhdp, pdata);
+
+	if (pktlen < TCPACKSZMIN || pktlen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n",
+			__FUNCTION__, __LINE__, pktlen));
+		goto exit;
+	}
+
+	dhd_os_tcpacklock(dhdp);
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+	tbl_cnt = tcpack_sup_mod->tcpack_info_cnt;
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+	ASSERT(tbl_cnt <= TCPACK_INFO_MAXNUM);
+
+	for (i = 0; i < tbl_cnt; i++) {
+		if (tcpack_info_tbl[i].pkt_in_q == pkt) {
+			DHD_TRACE(("%s %d: pkt %p sent out. idx %d, tbl_cnt %d\n",
+				__FUNCTION__, __LINE__, pkt, i, tbl_cnt));
+			/* This pkt is being transmitted so remove the tcp_ack_info of it. */
+			if (i < tbl_cnt - 1) {
+				bcopy(&tcpack_info_tbl[tbl_cnt - 1],
+					&tcpack_info_tbl[i], sizeof(tcpack_info_t));
+			}
+			bzero(&tcpack_info_tbl[tbl_cnt - 1], sizeof(tcpack_info_t));
+			if (--tcpack_sup_mod->tcpack_info_cnt < 0) {
+				DHD_ERROR(("%s %d: ERROR!!! tcp_ack_info_cnt %d\n",
+					__FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
+				ret = BCME_ERROR;
+			}
+			break;
+		}
+	}
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	return ret;
+}
+
+static INLINE bool dhd_tcpdata_psh_acked(dhd_pub_t *dhdp, uint8 *ip_hdr,
+	uint8 *tcp_hdr, uint32 tcp_ack_num)
+{
+	tcpack_sup_module_t *tcpack_sup_mod;
+	int i;
+	tcpdata_info_t *tcpdata_info = NULL;
+	tdata_psh_info_t *tdata_psh_info = NULL;
+	bool ret = FALSE;
+
+	if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+		goto exit;
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+		" TCP port %d %d, ack %u\n", __FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+		tcp_ack_num));
+
+	for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+		tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->src_ip_addr)),
+			IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->dst_ip_addr)),
+			ntoh16_ua(tcpdata_info_tmp->src_tcp_port),
+			ntoh16_ua(tcpdata_info_tmp->dst_tcp_port)));
+
+		/* If either IP address or TCP port number does not match, skip. */
+		if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+			tcpdata_info_tmp->dst_ip_addr, IPV4_ADDR_LEN) == 0 &&
+			memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET],
+			tcpdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN) == 0 &&
+			memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+			tcpdata_info_tmp->dst_tcp_port, TCP_PORT_LEN) == 0 &&
+			memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET],
+			tcpdata_info_tmp->src_tcp_port, TCP_PORT_LEN) == 0) {
+			tcpdata_info = tcpdata_info_tmp;
+			break;
+		}
+	}
+
+	if (tcpdata_info == NULL) {
+		DHD_TRACE(("%s %d: no tcpdata_info!\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	if (tcpdata_info->tdata_psh_info_head == NULL) {
+		DHD_TRACE(("%s %d: No PSH DATA to be acked!\n", __FUNCTION__, __LINE__));
+	}
+
+	while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+		if (IS_TCPSEQ_GE(tcp_ack_num, tdata_psh_info->end_seq)) {
+			DHD_TRACE(("%s %d: PSH ACKED! %u >= %u\n",
+				__FUNCTION__, __LINE__, tcp_ack_num, tdata_psh_info->end_seq));
+			tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+			tdata_psh_info->next = NULL;
+			_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+			ret = TRUE;
+		} else
+			break;
+	}
+	if (tdata_psh_info == NULL)
+		tcpdata_info->tdata_psh_info_tail = NULL;
+
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+exit:
+	return ret;
+}
+
+bool
+dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 *new_ether_hdr;	/* Ethernet header of the new packet */
+	uint16 new_ether_type;	/* Ethernet type of the new packet */
+	uint8 *new_ip_hdr;		/* IP header of the new packet */
+	uint8 *new_tcp_hdr;		/* TCP header of the new packet */
+	uint32 new_ip_hdr_len;	/* IP header length of the new packet */
+	uint32 cur_framelen;
+	uint32 new_tcp_ack_num;		/* TCP acknowledge number of the new packet */
+	uint16 new_ip_total_len;	/* Total length of IP packet for the new packet */
+	uint32 new_tcp_hdr_len;		/* TCP header length of the new packet */
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpack_info_t *tcpack_info_tbl;
+	int i;
+	bool ret = FALSE;
+	bool set_dotxinrx = TRUE;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	new_ether_hdr = PKTDATA(dhdp->osh, pkt);
+	cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+	if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n",
+			__FUNCTION__, __LINE__, cur_framelen));
+		goto exit;
+	}
+
+	new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13];
+
+	if (new_ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+			__FUNCTION__, __LINE__, new_ether_type));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type));
+
+	new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN;
+	cur_framelen -= ETHER_HDR_LEN;
+
+	ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+	new_ip_hdr_len = IPV4_HLEN(new_ip_hdr);
+	if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr)));
+		goto exit;
+	}
+
+	new_tcp_hdr = new_ip_hdr + new_ip_hdr_len;
+	cur_framelen -= new_ip_hdr_len;
+
+	ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+	DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	/* Is it a pure ACK? Only frames with just the ACK flag set may be suppressed. */
+	if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) {
+		DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n",
+			__FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET]));
+		goto exit;
+	}
+
+	new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]);
+	new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet has TCP data, so just send */
+	if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) {
+		DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len);
+
+	new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+	DHD_TRACE(("%s %d: TCP ACK with zero DATA length"
+		" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+		__FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+	/* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
+	dhd_os_tcpacklock(dhdp);
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+	counter_printlog(&tack_tbl);
+	tack_tbl.cnt[0]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+	/* Only dereference the module pointer after the NULL check above */
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+	if (dhd_tcpdata_psh_acked(dhdp, new_ip_hdr, new_tcp_hdr, new_tcp_ack_num)) {
+		/* This TCPACK is ACK to TCPDATA PSH pkt, so keep set_dotxinrx TRUE */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+		tack_tbl.cnt[5]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+	} else
+		set_dotxinrx = FALSE;
+
+	for (i = 0; i < tcpack_sup_mod->tcpack_info_cnt; i++) {
+		void *oldpkt;	/* TCPACK packet that is already in txq or DelayQ */
+		uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr;
+		uint32 old_ip_hdr_len, old_tcp_hdr_len;
+		uint32 old_tcpack_num;	/* TCP ACK number of old TCPACK packet in Q */
+
+		if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) {
+			DHD_ERROR(("%s %d: Unexpected error!! cur idx %d, ttl cnt %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+			break;
+		}
+
+		if (PKTDATA(dhdp->osh, oldpkt) == NULL) {
+			DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d, ttl cnt %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+			break;
+		}
+
+		old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr;
+		old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN;
+		old_ip_hdr_len = IPV4_HLEN(old_ip_hdr);
+		old_tcp_hdr = old_ip_hdr + old_ip_hdr_len;
+		old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]);
+
+		DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])),
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])),
+			ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+			ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+		/* If either of IP address or TCP port number does not match, skip. */
+		if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
+			&old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
+			memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
+			&old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2))
+			continue;
+
+		old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+		if (IS_TCPSEQ_GT(new_tcp_ack_num, old_tcpack_num)) {
+			/* New packet has higher TCP ACK number, so it replaces the old packet */
+			if (new_ip_hdr_len == old_ip_hdr_len &&
+				new_tcp_hdr_len == old_tcp_hdr_len) {
+				ASSERT(memcmp(new_ether_hdr, old_ether_hdr, ETHER_HDR_LEN) == 0);
+				bcopy(new_ip_hdr, old_ip_hdr, new_ip_total_len);
+				PKTFREE(dhdp->osh, pkt, FALSE);
+				DHD_TRACE(("%s %d: TCP ACK replace %u -> %u\n",
+					__FUNCTION__, __LINE__, old_tcpack_num, new_tcp_ack_num));
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+				tack_tbl.cnt[2]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+				ret = TRUE;
+			} else {
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+				tack_tbl.cnt[6]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+				DHD_TRACE(("%s %d: length mismatch %d != %d || %d != %d"
+					" ACK %u -> %u\n", __FUNCTION__, __LINE__,
+					new_ip_hdr_len, old_ip_hdr_len,
+					new_tcp_hdr_len, old_tcp_hdr_len,
+					old_tcpack_num, new_tcp_ack_num));
+			}
+		} else if (new_tcp_ack_num == old_tcpack_num) {
+			set_dotxinrx = TRUE;
+			/* TCPACK retransmission */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+			tack_tbl.cnt[3]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+		} else {
+			DHD_TRACE(("%s %d: ACK number reverse old %u(0x%p) new %u(0x%p)\n",
+				__FUNCTION__, __LINE__, old_tcpack_num, oldpkt,
+				new_tcp_ack_num, pkt));
+		}
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+
+	if (i == tcpack_sup_mod->tcpack_info_cnt && i < TCPACK_INFO_MAXNUM) {
+		/* No TCPACK packet with the same IP addr and TCP port is found
+		 * in tcp_ack_info_tbl. So add this packet to the table.
+		 */
+		DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n",
+			__FUNCTION__, __LINE__, pkt, new_ether_hdr,
+			tcpack_sup_mod->tcpack_info_cnt));
+
+		tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_in_q = pkt;
+		tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_ether_hdr = new_ether_hdr;
+		tcpack_sup_mod->tcpack_info_cnt++;
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+		tack_tbl.cnt[1]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+	} else {
+		ASSERT(i == tcpack_sup_mod->tcpack_info_cnt);
+		DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
+			__FUNCTION__, __LINE__));
+	}
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	/* Unless TCPACK_SUP_DELAYTX, dotxinrx is always TRUE, so no need to set here */
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX && set_dotxinrx)
+		dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+
+	return ret;
+}
+
+bool
+dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 *ether_hdr;	/* Ethernet header of the new packet */
+	uint16 ether_type;	/* Ethernet type of the new packet */
+	uint8 *ip_hdr;		/* IP header of the new packet */
+	uint8 *tcp_hdr;		/* TCP header of the new packet */
+	uint32 ip_hdr_len;	/* IP header length of the new packet */
+	uint32 cur_framelen;
+	uint16 ip_total_len;	/* Total length of IP packet for the new packet */
+	uint32 tcp_hdr_len;		/* TCP header length of the new packet */
+	uint32 tcp_seq_num;		/* TCP sequence number of the new packet */
+	uint16 tcp_data_len;	/* TCP DATA length that excludes IP and TCP headers */
+	uint32 end_tcp_seq_num;	/* TCP seq number of the last byte in the new packet */
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpdata_info_t *tcpdata_info = NULL;
+	tdata_psh_info_t *tdata_psh_info;
+
+	int i;
+	bool ret = FALSE;
+
+	if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+		goto exit;
+
+	ether_hdr = PKTDATA(dhdp->osh, pkt);
+	cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+	ether_type = ether_hdr[12] << 8 | ether_hdr[13];
+
+	if (ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+			__FUNCTION__, __LINE__, ether_type));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, ether_type));
+
+	ip_hdr = ether_hdr + ETHER_HDR_LEN;
+	cur_framelen -= ETHER_HDR_LEN;
+
+	ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+	ip_hdr_len = IPV4_HLEN(ip_hdr);
+	if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
+		goto exit;
+	}
+
+	tcp_hdr = ip_hdr + ip_hdr_len;
+	cur_framelen -= ip_hdr_len;
+
+	ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+	DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	ip_total_len = ntoh16_ua(&ip_hdr[IPV4_PKTLEN_OFFSET]);
+	tcp_hdr_len = 4 * TCP_HDRLEN(tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet is mere TCP ACK, so do nothing */
+	if (ip_total_len == ip_hdr_len + tcp_hdr_len) {
+		DHD_TRACE(("%s %d: Do nothing for no data TCP ACK\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	ASSERT(ip_total_len > ip_hdr_len + tcp_hdr_len);
+
+	if ((tcp_hdr[TCP_FLAGS_OFFSET] & TCP_FLAG_PSH) == 0) {
+		DHD_TRACE(("%s %d: TCP DATA packet without PSH, not of interest\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: TCP DATA with nonzero DATA length"
+		" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d, flag 0x%x\n",
+		__FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+		tcp_hdr[TCP_FLAGS_OFFSET]));
+
+	dhd_os_tcpacklock(dhdp);
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+
+	/* Look for tcpdata_info that has the same ip src/dst addrs and tcp src/dst ports */
+	i = 0;
+	while (i < tcpack_sup_mod->tcpdata_info_cnt) {
+		tcpdata_info_t *tdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		uint32 now_in_ms = OSL_SYSUPTIME();
+		DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->src_ip_addr)),
+			IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->dst_ip_addr)),
+			ntoh16_ua(tdata_info_tmp->src_tcp_port),
+			ntoh16_ua(tdata_info_tmp->dst_tcp_port)));
+
+		/* If both IP address and TCP port number match, we found it so break. */
+		if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+			tdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN * 2) == 0 &&
+			memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+			tdata_info_tmp->src_tcp_port, TCP_PORT_LEN * 2) == 0) {
+			tcpdata_info = tdata_info_tmp;
+			tcpdata_info->last_used_time = now_in_ms;
+			break;
+		}
+
+		if (now_in_ms - tdata_info_tmp->last_used_time > TCPDATA_INFO_TIMEOUT) {
+			tdata_psh_info_t *tdata_psh_info_tmp;
+			tcpdata_info_t *last_tdata_info;
+
+			while ((tdata_psh_info_tmp = tdata_info_tmp->tdata_psh_info_head)) {
+				tdata_info_tmp->tdata_psh_info_head = tdata_psh_info_tmp->next;
+				tdata_psh_info_tmp->next = NULL;
+				DHD_TRACE(("%s %d: Clean tdata_psh_info(end_seq %u)!\n",
+					__FUNCTION__, __LINE__, tdata_psh_info_tmp->end_seq));
+				_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info_tmp);
+			}
+#ifdef DHDTCPACK_SUP_DBG
+			DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+				__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+			tcpack_sup_mod->tcpdata_info_cnt--;
+			ASSERT(tcpack_sup_mod->tcpdata_info_cnt >= 0);
+
+			last_tdata_info =
+				&tcpack_sup_mod->tcpdata_info_tbl[tcpack_sup_mod->tcpdata_info_cnt];
+			if (i < tcpack_sup_mod->tcpdata_info_cnt) {
+				ASSERT(last_tdata_info != tdata_info_tmp);
+				bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t));
+			}
+			bzero(last_tdata_info, sizeof(tcpdata_info_t));
+			DHD_ERROR(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt));
+			/* Don't increase "i" here, so that the prev last tcpdata_info is checked */
+		} else
+			 i++;
+	}
+
+	tcp_seq_num = ntoh32_ua(&tcp_hdr[TCP_SEQ_NUM_OFFSET]);
+	tcp_data_len = ip_total_len - ip_hdr_len - tcp_hdr_len;
+	end_tcp_seq_num = tcp_seq_num + tcp_data_len;
+
+	if (tcpdata_info == NULL) {
+		ASSERT(i == tcpack_sup_mod->tcpdata_info_cnt);
+		if (i >= TCPDATA_INFO_MAXNUM) {
+			DHD_TRACE(("%s %d: tcp_data_info_tbl FULL! %d %d"
+				" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt,
+				IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+				IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+				ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+				ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+			dhd_os_tcpackunlock(dhdp);
+			goto exit;
+		}
+		tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+
+		/* No TCP flow with the same IP addr and TCP port is found
+		 * in tcp_data_info_tbl. So add this flow to the table.
+		 */
+		DHD_ERROR(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n",
+			__FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt,
+			IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+			IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+			ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+			ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+		bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], tcpdata_info->src_ip_addr,
+			IPV4_ADDR_LEN * 2);
+		bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], tcpdata_info->src_tcp_port,
+			TCP_PORT_LEN * 2);
+
+		tcpdata_info->last_used_time = OSL_SYSUPTIME();
+		tcpack_sup_mod->tcpdata_info_cnt++;
+	}
+
+	ASSERT(tcpdata_info != NULL);
+
+	tdata_psh_info = _tdata_psh_info_pool_deq(tcpack_sup_mod);
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+	if (tdata_psh_info == NULL) {
+		DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+	tdata_psh_info->end_seq = end_tcp_seq_num;
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+	tack_tbl.cnt[4]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+	DHD_TRACE(("%s %d: TCP PSH DATA recvd! end seq %u\n",
+		__FUNCTION__, __LINE__, tdata_psh_info->end_seq));
+
+	ASSERT(tdata_psh_info->next == NULL);
+
+	if (tcpdata_info->tdata_psh_info_head == NULL)
+		tcpdata_info->tdata_psh_info_head = tdata_psh_info;
+	else {
+		ASSERT(tcpdata_info->tdata_psh_info_tail);
+		tcpdata_info->tdata_psh_info_tail->next = tdata_psh_info;
+	}
+	tcpdata_info->tdata_psh_info_tail = tdata_psh_info;
+
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	return ret;
+}
+
+#endif /* DHDTCPACK_SUPPRESS */
diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.h b/drivers/net/wireless/bcmdhd/dhd_ip.h
new file mode 100644
index 0000000000000000000000000000000000000000..414a94fbcb122cfe59fa649e4c5ee2be73c22bc8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_ip.h
@@ -0,0 +1,54 @@
+/*
+ * Header file describing the common ip parser function.
+ *
+ * Provides type definitions and function prototypes used to parse ip packet.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_ip.h 458522 2014-02-27 02:26:15Z $
+ */
+
+#ifndef _dhd_ip_h_
+#define _dhd_ip_h_
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dngl_stats.h>
+#include <bcmutils.h>
+#include <dhd.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+typedef enum pkt_frag
+{
+	DHD_PKT_FRAG_NONE = 0,
+	DHD_PKT_FRAG_FIRST,
+	DHD_PKT_FRAG_CONT,
+	DHD_PKT_FRAG_LAST
+} pkt_frag_t;
+
+extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
+
+#ifdef DHDTCPACK_SUPPRESS
+#define	TCPACKSZMIN	(ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
+/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */
+#define	TCPACKSZMAX	(TCPACKSZMIN + 100)
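+/* For reference (assuming the usual 14-byte Ethernet, 20-byte minimum IPv4
+ * and 20-byte minimum TCP headers) TCPACKSZMIN works out to 54 bytes and
+ * TCPACKSZMAX to 154 bytes; frames outside this window are never considered
+ * for ACK suppression.
+ */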
+
+/* Max number of TCP streams that have own src/dst IP addrs and TCP ports */
+#define TCPACK_INFO_MAXNUM 4
+#define TCPDATA_INFO_MAXNUM 4
+#define TCPDATA_PSH_INFO_MAXNUM (8 * TCPDATA_INFO_MAXNUM)
+
+#define TCPDATA_INFO_TIMEOUT 5000	/* Remove tcpdata_info if inactive for this time (in ms) */
+
+extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on);
+extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp);
+extern int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt);
+
+/* #define DHDTCPACK_SUP_DBG */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+extern counter_tbl_t tack_tbl;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+#endif /* DHDTCPACK_SUPPRESS */
+
+#endif /* _dhd_ip_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c
new file mode 100644
index 0000000000000000000000000000000000000000..1d1c276ba71c7ffc8274703c95cfca800381a58f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.c
@@ -0,0 +1,9226 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_linux.c 491481 2014-07-16 14:08:43Z $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#ifdef SHOW_LOGTRACE
+#include <linux/syscalls.h>
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <net/addrconf.h>
+#ifdef ENABLE_ADAPTIVE_SCHED
+#include <linux/cpufreq.h>
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <proto/ethernet.h>
+#include <proto/bcmevent.h>
+#include <proto/vlan.h>
+#include <proto/bcmudp.h>
+#include <proto/bcmdhcp.h>
+#ifdef DHD_L2_FILTER
+#include <proto/bcmicmp.h>
+#endif
+#include <proto/802.3.h>
+
+#include <dngl_stats.h>
+#include <dhd_linux_wq.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#ifdef PCIE_FULL_DONGLE
+#include <dhd_flowring.h>
+#endif
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#ifdef WLBTAMP
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dhd_bta.h>
+#endif
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
+
+#ifdef AMPDU_VO_ENABLE
+#include <proto/802.1d.h>
+#endif /* AMPDU_VO_ENABLE */
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+#include <linux/tcp.h>
+#include <net/tcp.h>
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+#ifdef WLMEDIA_HTSF
+#include <linux/time.h>
+#include <htsf.h>
+
+#define HTSF_MINLEN 200    /* min. packet length to timestamp */
+#define HTSF_BUS_DELAY 150 /* assume a fixed propagation delay in us */
+#define TSMAX  1000        /* max no. of timing records kept   */
+#define NUMBIN 34
+
+static uint32 tsidx = 0;
+static uint32 htsf_seqnum = 0;
+uint32 tsfsync;
+struct timeval tsync;
+static uint32 tsport = 5010;
+
+typedef struct histo_ {
+	uint32 bin[NUMBIN];
+} histo_t;
+
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
+#endif /* WLMEDIA_HTSF */
+
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+#define MIN_TCP_WIN_SIZE 18000
+#define WIN_SIZE_SCALE_FACTOR 2
+#define MAX_TARGET_PORTS 5
+
+static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0};
+static uint dhd_use_tcp_window_size_adjust = FALSE;
+static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb);
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+
+#if defined(SOFTAP)
+extern bool ap_cfg_running;
+extern bool ap_fw_loaded;
+#endif
+
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+#define DEFAULT_CPUFREQ_THRESH		1000000	/* threshold frequency : 1000000 = 1GHz */
+#ifndef CUSTOM_CPUFREQ_THRESH
+#define CUSTOM_CPUFREQ_THRESH	DEFAULT_CPUFREQ_THRESH
+#endif /* CUSTOM_CPUFREQ_THRESH */
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+/* enable HOSTIP cache update from the host side when an eth0:N is up */
+#define AOE_IP_ALIAS_SUPPORT 1
+
+#ifdef BCM_FD_AGGR
+#include <bcm_rpc.h>
+#include <bcm_rpc_tp.h>
+#endif
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#include <wl_android.h>
+
+/* Maximum STA per radio */
+#define DHD_MAX_STA     32
+
+
+const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
+const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+#define WME_PRIO2AC(prio)  wme_fifo2ac[prio2fifo[(prio)]]
+
+#ifdef ARP_OFFLOAD_SUPPORT
+void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+	unsigned long event, void *ptr);
+static struct notifier_block dhd_inetaddr_notifier = {
+	.notifier_call = dhd_inetaddr_notifier_call
+};
+/* Make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself)
+ */
+static bool dhd_inetaddr_notifier_registered = FALSE;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef CONFIG_IPV6
+static int dhd_inet6addr_notifier_call(struct notifier_block *this,
+	unsigned long event, void *ptr);
+static struct notifier_block dhd_inet6addr_notifier = {
+	.notifier_call = dhd_inet6addr_notifier_call
+};
+/* Make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself)
+ */
+static bool dhd_inet6addr_notifier_registered = FALSE;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+volatile bool dhd_mmc_suspend = FALSE;
+DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#if defined(OOB_INTR_ONLY)
+extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
+#endif /* OOB_INTR_ONLY */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
+static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
+#endif 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+MODULE_LICENSE("GPL v2");
+#endif /* LinuxVer */
+
+#include <dhd_bus.h>
+
+#ifdef BCM_FD_AGGR
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
+#else
+#ifndef PROP_TXSTATUS
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen)
+#else
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
+#endif
+#endif /* BCM_FD_AGGR */
+
+#ifdef PROP_TXSTATUS
+extern bool dhd_wlfc_skip_fc(void);
+extern void dhd_wlfc_plat_init(void *dhd);
+extern void dhd_wlfc_plat_deinit(void *dhd);
+#endif /* PROP_TXSTATUS */
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
+const char *
+print_tainted()
+{
+	return "";
+}
+#endif	/* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
+
+/* Linux wireless extension support */
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+extern wl_iw_extra_params_t  g_wl_iw_params;
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
+#endif
+
+
+#ifdef READ_MACADDR
+extern int dhd_read_macaddr(struct dhd_info *dhd);
+#else
+static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
+#endif
+#ifdef WRITE_MACADDR
+extern int dhd_write_macaddr(struct ether_addr *mac);
+#else
+static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
+#endif
+
+#if defined(SOFTAP_TPUT_ENHANCE)
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time);
+#endif /* SOFTAP_TPUT_ENHANCE */
+
+
+static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
+static struct notifier_block dhd_reboot_notifier = {
+		.notifier_call = dhd_reboot_callback,
+		.priority = 1,
+};
+
+
+typedef struct dhd_if_event {
+	struct list_head	list;
+	wl_event_data_if_t	event;
+	char			name[IFNAMSIZ+1];
+	uint8			mac[ETHER_ADDR_LEN];
+} dhd_if_event_t;
+
+/* Interface control information */
+typedef struct dhd_if {
+	struct dhd_info *info;			/* back pointer to dhd_info */
+	/* OS/stack specifics */
+	struct net_device *net;
+	int				idx;			/* iface idx in dongle */
+	uint			subunit;		/* subunit */
+	uint8			mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
+	bool			set_macaddress;
+	bool			set_multicast;
+	uint8			bssidx;			/* bsscfg index for the interface */
+	bool			attached;		/* Delayed attachment when unset */
+	bool			txflowcontrol;	/* Per interface flow control indicator */
+	char			name[IFNAMSIZ+1]; /* linux interface name */
+	struct net_device_stats stats;
+#ifdef DHD_WMF
+	dhd_wmf_t		wmf;		/* per bsscfg wmf setting */
+#endif /* DHD_WMF */
+#ifdef PCIE_FULL_DONGLE
+	struct list_head sta_list;		/* sll of associated stations */
+#if !defined(BCM_GMAC3)
+	spinlock_t	sta_list_lock;		/* lock for manipulating sll */
+#endif /* ! BCM_GMAC3 */
+#endif /* PCIE_FULL_DONGLE */
+	uint32  ap_isolate;			/* ap-isolation settings */
+} dhd_if_t;
+
+#ifdef WLMEDIA_HTSF
+typedef struct {
+	uint32 low;
+	uint32 high;
+} tsf_t;
+
+typedef struct {
+	uint32 last_cycle;
+	uint32 last_sec;
+	uint32 last_tsf;
+	uint32 coef;     /* scaling factor */
+	uint32 coefdec1; /* first decimal  */
+	uint32 coefdec2; /* second decimal */
+} htsf_t;
+
+typedef struct {
+	uint32 t1;
+	uint32 t2;
+	uint32 t3;
+	uint32 t4;
+} tstamp_t;
+
+static tstamp_t ts[TSMAX];
+static tstamp_t maxdelayts;
+static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
+
+#endif  /* WLMEDIA_HTSF */
+
+struct ipv6_work_info_t {
+	uint8			if_idx;
+	char			ipv6_addr[16];
+	unsigned long		event;
+};
+
+/* When Perimeter locks are deployed, any blocking calls must be preceded
+ * by a PERIM UNLOCK and followed by a PERIM LOCK.
+ * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
+ * wait_event_timeout().
+ */
+
+/* Local private structure (extension of pub) */
+typedef struct dhd_info {
+#if defined(WL_WIRELESS_EXT)
+	wl_iw_t		iw;		/* wireless extensions state (must be first) */
+#endif /* defined(WL_WIRELESS_EXT) */
+	dhd_pub_t pub;
+	dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
+
+	void *adapter;			/* adapter information, interrupt, fw path etc. */
+	char fw_path[PATH_MAX];		/* path to firmware image */
+	char nv_path[PATH_MAX];		/* path to nvram vars file */
+
+	struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+	spinlock_t	wlfc_spinlock;
+
+#endif /* PROP_TXSTATUS */
+#ifdef WLMEDIA_HTSF
+	htsf_t  htsf;
+#endif
+	wait_queue_head_t ioctl_resp_wait;
+	uint32	default_wd_interval;
+
+	struct timer_list timer;
+	bool wd_timer_valid;
+	struct tasklet_struct tasklet;
+	spinlock_t	sdlock;
+	spinlock_t	txqlock;
+	spinlock_t	dhd_lock;
+
+	struct semaphore sdsem;
+	tsk_ctl_t	thr_dpc_ctl;
+	tsk_ctl_t	thr_wdt_ctl;
+
+	tsk_ctl_t	thr_rxf_ctl;
+	spinlock_t	rxf_lock;
+	bool		rxthread_enabled;
+
+	/* Wakelocks */
+#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	struct wake_lock wl_wifi;   /* Wifi wakelock */
+	struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+	struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
+	struct wake_lock wl_wdwake; /* Wifi wd wakelock */
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	/* net_device interface lock, prevent race conditions among net_dev interface
+	 * calls and wifi_on or wifi_off
+	 */
+	struct mutex dhd_net_if_mutex;
+	struct mutex dhd_suspend_mutex;
+#endif
+	spinlock_t wakelock_spinlock;
+	uint32 wakelock_counter;
+	int wakelock_wd_counter;
+	int wakelock_rx_timeout_enable;
+	int wakelock_ctrl_timeout_enable;
+	bool waive_wakelock;
+	uint32 wakelock_before_waive;
+
+	/* Thread to issue ioctl for multicast */
+	wait_queue_head_t ctrl_wait;
+	atomic_t pend_8021x_cnt;
+	dhd_attach_states_t dhd_state;
+#ifdef SHOW_LOGTRACE
+	dhd_event_log_t event_data;
+#endif /* SHOW_LOGTRACE */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	u32 pend_ipaddr;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef BCM_FD_AGGR
+	void *rpc_th;
+	void *rpc_osh;
+	struct timer_list rpcth_timer;
+	bool rpcth_timer_active;
+	bool fdaggr;
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+	spinlock_t	tcpack_lock;
+#endif /* DHDTCPACK_SUPPRESS */
+	void			*dhd_deferred_wq;
+#ifdef DEBUG_CPU_FREQ
+	struct notifier_block freq_trans;
+	int __percpu *new_freq;
+#endif
+	unsigned int unit;
+	struct notifier_block pm_notifier;
+} dhd_info_t;
+
+#define DHDIF_FWDER(dhdif)      FALSE
+
+/* Flag to indicate if we should download firmware on driver load */
+uint dhd_download_fw_on_driverload = TRUE;
+
+/* Definitions to provide path to the firmware and nvram
+ * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
+ */
+char firmware_path[MOD_PARAM_PATHLEN];
+char nvram_path[MOD_PARAM_PATHLEN];
+
+/* backup buffer for firmware and nvram path */
+char fw_bak_path[MOD_PARAM_PATHLEN];
+char nv_bak_path[MOD_PARAM_PATHLEN];
+
+/* information string to keep firmware, chip, and chip revision info visible in the log */
+char info_string[MOD_PARAM_INFOLEN];
+module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
+int op_mode = 0;
+int disable_proptx = 0;
+module_param(op_mode, int, 0644);
+extern int wl_control_wl_start(struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
+struct semaphore dhd_registration_sem;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+/* deferred handlers */
+static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
+#ifdef CONFIG_IPV6
+static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
+#endif
+
+#ifdef WL_CFG80211
+extern void dhd_netdev_free(struct net_device *ndev);
+#endif /* WL_CFG80211 */
+
+/* Error bits */
+module_param(dhd_msg_level, int, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* ARP offload enable */
+uint dhd_arp_enable = TRUE;
+module_param(dhd_arp_enable, uint, 0);
+
+/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+
+uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
+
+module_param(dhd_arp_mode, uint, 0);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+/* Disable Prop tx */
+module_param(disable_proptx, int, 0644);
+/* load firmware and/or nvram values from the filesystem */
+module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
+
+/* Watchdog interval */
+
+/* extend watchdog expiration to 2 seconds when DPC is running */
+#define WATCHDOG_EXTEND_INTERVAL (2000)
+
+uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
+module_param(dhd_watchdog_ms, uint, 0);
+
+#if defined(DHD_DEBUG)
+/* Console poll interval */
+uint dhd_console_ms = 0;
+module_param(dhd_console_ms, uint, 0644);
+#endif /* defined(DHD_DEBUG) */
+
+
+uint dhd_slpauto = TRUE;
+module_param(dhd_slpauto, uint, 0);
+
+#ifdef PKT_FILTER_SUPPORT
+/* Global Pkt filter enable control */
+uint dhd_pkt_filter_enable = TRUE;
+module_param(dhd_pkt_filter_enable, uint, 0);
+#endif
+
+/* Pkt filter init setup */
+uint dhd_pkt_filter_init = 0;
+module_param(dhd_pkt_filter_init, uint, 0);
+
+/* Pkt filter mode control */
+uint dhd_master_mode = TRUE;
+module_param(dhd_master_mode, uint, 0);
+
+int dhd_watchdog_prio = 0;
+module_param(dhd_watchdog_prio, int, 0);
+
+/* DPC thread priority */
+int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
+module_param(dhd_dpc_prio, int, 0);
+
+/* RX frame thread priority */
+int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
+module_param(dhd_rxf_prio, int, 0);
+
+#if !defined(BCMDHDUSB)
+extern int dhd_dongle_ramsize;
+module_param(dhd_dongle_ramsize, int, 0);
+#endif /* BCMDHDUSB */
+
+/* Keep track of number of instances */
+static int dhd_found = 0;
+static int instance_base = 0; /* Starting instance number */
+module_param(instance_base, int, 0644);
+
+
+/* DHD Perimeter lock is only used in router builds with bypass forwarding. */
+#define DHD_PERIM_RADIO_INIT()              do { /* noop */ } while (0)
+#define DHD_PERIM_LOCK_TRY(unit, flag)      do { /* noop */ } while (0)
+#define DHD_PERIM_UNLOCK_TRY(unit, flag)    do { /* noop */ } while (0)
+#define DHD_PERIM_LOCK_ALL()                do { /* noop */ } while (0)
+#define DHD_PERIM_UNLOCK_ALL()              do { /* noop */ } while (0)
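+/* In this (non-router) configuration the perimeter lock macros expand to
+ * no-ops, so the DHD_PERIM_LOCK/UNLOCK calls sprinkled throughout the driver
+ * compile away to nothing.
+ */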
+
+#ifdef PCIE_FULL_DONGLE
+#if defined(BCM_GMAC3)
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp)      do { /* noop */ } while (0)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags)    ({ BCM_REFERENCE(flags); })
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags)  ({ BCM_REFERENCE(flags); })
+#else /* ! BCM_GMAC3 */
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
+	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
+	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
+#endif /* ! BCM_GMAC3 */
+#endif /* PCIE_FULL_DONGLE */
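+/* With PCIE_FULL_DONGLE, each interface keeps a linked list of associated
+ * stations (dhd_if_t::sta_list). The macros above guard it with a
+ * per-interface spinlock, except in BCM_GMAC3 builds where the lock is
+ * compiled out.
+ */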
+
+/* Control fw roaming */
+#ifdef BCMCCX
+uint dhd_roam_disable = 0;
+#else
+uint dhd_roam_disable = 0;
+#endif /* BCMCCX */
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network interface name */
+char iface_name[IFNAMSIZ] = {'\0'};
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+#ifdef BCMSDIO
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+#ifdef BCMDBGFS
+extern void dhd_dbg_init(dhd_pub_t *dhdp);
+extern void dhd_dbg_remove(void);
+#endif /* BCMDBGFS */
+
+#endif /* BCMSDIO */
+
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
+
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+/* Use in dongle supplicant for 4-way handshake */
+uint dhd_use_idsup = 0;
+module_param(dhd_use_idsup, uint, 0);
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+
+extern char dhd_version[];
+
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+static void dhd_suspend_lock(dhd_pub_t *dhdp);
+static void dhd_suspend_unlock(dhd_pub_t *dhdp);
+
+#ifdef WLMEDIA_HTSF
+void htsf_update(dhd_info_t *dhd, void *data);
+tsf_t prev_tsf, cur_tsf;
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
+static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
+static void dhd_dump_latency(void);
+static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_dump_htsfhisto(histo_t *his, char *s);
+#endif /* WLMEDIA_HTSF */
+
+/* Monitor interface */
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(WL_WIRELESS_EXT) */
+
+static void dhd_dpc(ulong data);
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+void dhd_os_wd_timer_extend(void *bus, bool extend);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+                             wl_event_msg_t *event_ptr, void **data_ptr);
+#ifdef DHD_UNICAST_DHCP
+static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
+	int *len_ptr, uint8 *prot_ptr);
+static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
+	int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
+
+static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx);
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx);
+#endif
+#if defined(CONFIG_PM_SLEEP)
+static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+	int ret = NOTIFY_DONE;
+	bool suspend = FALSE;
+	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
+
+	BCM_REFERENCE(dhdinfo);
+	switch (action) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		suspend = TRUE;
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		suspend = FALSE;
+		break;
+	}
+
+#if defined(SUPPORT_P2P_GO_PS)
+#ifdef PROP_TXSTATUS
+	if (suspend) {
+		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
+		dhd_wlfc_suspend(&dhdinfo->pub);
+		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
+	} else
+		dhd_wlfc_resume(&dhdinfo->pub);
+#endif
+#endif /* defined(SUPPORT_P2P_GO_PS) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(2, 6, 39))
+	dhd_mmc_suspend = suspend;
+	smp_mb();
+#endif
+
+	return ret;
+}
+
+static struct notifier_block dhd_pm_notifier = {
+	.notifier_call = dhd_pm_callback,
+	.priority = 10
+};
+/* Make sure we don't register the same notifier twice; otherwise a loop is
+ * likely to be created in the kernel notifier linked list (with 'next'
+ * pointing to itself).
+ */
+static bool dhd_pm_notifier_registered = FALSE;
+
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* CONFIG_PM_SLEEP */
+
+/* Request scheduling of the bus rx frame */
+static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
+static void dhd_os_rxflock(dhd_pub_t *pub);
+static void dhd_os_rxfunlock(dhd_pub_t *pub);
+
+/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
+typedef struct dhd_dev_priv {
+	dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
+	dhd_if_t   * ifp; /* cached pointer to dhd_if in netdevice priv */
+	int          ifidx; /* interface index */
+} dhd_dev_priv_t;
+
+#define DHD_DEV_PRIV_SIZE       (sizeof(dhd_dev_priv_t))
+#define DHD_DEV_PRIV(dev)       ((dhd_dev_priv_t *)DEV_PRIV(dev))
+#define DHD_DEV_INFO(dev)       (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
+#define DHD_DEV_IFP(dev)        (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
+#define DHD_DEV_IFIDX(dev)      (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
+
+/** Clear the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_clear(struct net_device * dev)
+{
+	dhd_dev_priv_t * dev_priv;
+	ASSERT(dev != (struct net_device *)NULL);
+	dev_priv = DHD_DEV_PRIV(dev);
+	dev_priv->dhd = (dhd_info_t *)NULL;
+	dev_priv->ifp = (dhd_if_t *)NULL;
+	dev_priv->ifidx = DHD_BAD_IF;
+}
+
+/** Setup the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
+                  int ifidx)
+{
+	dhd_dev_priv_t * dev_priv;
+	ASSERT(dev != (struct net_device *)NULL);
+	dev_priv = DHD_DEV_PRIV(dev);
+	dev_priv->dhd = dhd;
+	dev_priv->ifp = ifp;
+	dev_priv->ifidx = ifidx;
+}
+
+#ifdef PCIE_FULL_DONGLE
+
+/** Dummy objects are defined with state representing bad|down.
+ * They yield performance gains by reducing branch conditionals and load
+ * shadows and by making better use of instruction parallelism, dual issue,
+ * and larger pipelines.
+ * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an object pointer
+ * is accessed via a dhd_sta_t.
+ */
+
+/* Dummy dhd_info object */
+dhd_info_t dhd_info_null = {
+#if defined(BCM_GMAC3)
+	.fwdh = FWDER_NULL,
+#endif
+	.pub = {
+	         .info = &dhd_info_null,
+#ifdef DHDTCPACK_SUPPRESS
+	         .tcpack_sup_mode = TCPACK_SUP_REPLACE,
+#endif /* DHDTCPACK_SUPPRESS */
+	         .up = FALSE, .busstate = DHD_BUS_DOWN
+	}
+};
+#define DHD_INFO_NULL (&dhd_info_null)
+#define DHD_PUB_NULL  (&dhd_info_null.pub)
+
+/* Dummy netdevice object */
+struct net_device dhd_net_dev_null = {
+	.reg_state = NETREG_UNREGISTERED
+};
+#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
+
+/* Dummy dhd_if object */
+dhd_if_t dhd_if_null = {
+#if defined(BCM_GMAC3)
+	.fwdh = FWDER_NULL,
+#endif
+#ifdef WMF
+	.wmf = { .wmf_enable = TRUE },
+#endif
+	.info = DHD_INFO_NULL,
+	.net = DHD_NET_DEV_NULL,
+	.idx = DHD_BAD_IF
+};
+#define DHD_IF_NULL  (&dhd_if_null)
+
+#define DHD_STA_NULL ((dhd_sta_t *)NULL)
+
+/** Interface STA list management. */
+
+/** Fetch the dhd_if object, given the interface index in the dhd. */
+static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
+
+/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
+static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
+static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
+
+/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
+static void dhd_if_del_sta_list(dhd_if_t * ifp);
+static void	dhd_if_flush_sta(dhd_if_t * ifp);
+
+/* Construct/Destruct a sta pool. */
+static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
+static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
+
+
+/* Return interface pointer */
+static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
+{
+	ASSERT(ifidx < DHD_MAX_IFS);
+	return dhdp->info->iflist[ifidx];
+}
+
+/** Reset a dhd_sta object and free into the dhd pool. */
+static void
+dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
+{
+	int prio;
+
+	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
+
+	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+	id16_map_free(dhdp->staid_allocator, sta->idx);
+	for (prio = 0; prio < (int)NUMPRIO; prio++)
+		sta->flowid[prio] = FLOWID_INVALID;
+	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
+	sta->ifidx = DHD_BAD_IF;
+	bzero(sta->ea.octet, ETHER_ADDR_LEN);
+	INIT_LIST_HEAD(&sta->list);
+	sta->idx = ID16_INVALID; /* implying free */
+}
+
+/** Allocate a dhd_sta object from the dhd pool. */
+static dhd_sta_t *
+dhd_sta_alloc(dhd_pub_t * dhdp)
+{
+	uint16 idx;
+	dhd_sta_t * sta;
+	dhd_sta_pool_t * sta_pool;
+
+	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+
+	idx = id16_map_alloc(dhdp->staid_allocator);
+	if (idx == ID16_INVALID) {
+		DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
+		return DHD_STA_NULL;
+	}
+
+	sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
+	sta = &sta_pool[idx];
+
+	ASSERT((sta->idx == ID16_INVALID) &&
+	       (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
+	sta->idx = idx; /* implying allocated */
+
+	return sta;
+}
+
+/** Delete all STAs in an interface's STA list. */
+static void
+dhd_if_del_sta_list(dhd_if_t *ifp)
+{
+	dhd_sta_t *sta, *next;
+	unsigned long flags;
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+#if defined(BCM_GMAC3)
+		if (ifp->fwdh) {
+			/* Remove sta from WOFA forwarder. */
+			fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
+		}
+#endif /* BCM_GMAC3 */
+		list_del(&sta->list);
+		dhd_sta_free(&ifp->info->pub, sta);
+	}
+
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return;
+}
+
+/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
+static void
+dhd_if_flush_sta(dhd_if_t * ifp)
+{
+#if defined(BCM_GMAC3)
+
+	if (ifp && (ifp->fwdh != FWDER_NULL)) {
+		dhd_sta_t *sta, *next;
+		unsigned long flags;
+
+		DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+		list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+			/* Remove any sta entry from WOFA forwarder. */
+			fwder_flush(ifp->fwdh, (wofa_t)sta);
+		}
+
+		DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+	}
+#endif /* BCM_GMAC3 */
+}
+
+/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
+static int
+dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
+{
+	int idx, sta_pool_memsz;
+	dhd_sta_t * sta;
+	dhd_sta_pool_t * sta_pool;
+	void * staid_allocator;
+
+	ASSERT(dhdp != (dhd_pub_t *)NULL);
+	ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
+
+	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+	staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
+	if (staid_allocator == NULL) {
+		DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	/* Pre allocate a pool of dhd_sta objects (one extra). */
+	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
+	sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
+	if (sta_pool == NULL) {
+		DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
+		id16_map_fini(dhdp->osh, staid_allocator);
+		return BCME_ERROR;
+	}
+
+	dhdp->sta_pool = sta_pool;
+	dhdp->staid_allocator = staid_allocator;
+
+	/* Initialize all sta(s) for the pre-allocated free pool. */
+	bzero((uchar *)sta_pool, sta_pool_memsz);
+	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+		sta = &sta_pool[idx];
+		sta->idx = id16_map_alloc(staid_allocator);
+		ASSERT(sta->idx <= max_sta);
+	}
+	/* Now place them into the pre-allocated free pool. */
+	for (idx = 1; idx <= max_sta; idx++) {
+		sta = &sta_pool[idx];
+		dhd_sta_free(dhdp, sta);
+	}
+
+	return BCME_OK;
+}
+
+/** Destruct the pool of dhd_sta_t objects.
+ * Caller must ensure that no STA objects are currently associated with an if.
+ */
+static void
+dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
+{
+	dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+
+	if (sta_pool) {
+		int idx;
+		int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+		for (idx = 1; idx <= max_sta; idx++) {
+			ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
+			ASSERT(sta_pool[idx].idx == ID16_INVALID);
+		}
+		MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
+		dhdp->sta_pool = NULL;
+	}
+
+	id16_map_fini(dhdp->osh, dhdp->staid_allocator);
+	dhdp->staid_allocator = NULL;
+}
+
+/** Find STA with MAC address ea in an interface's STA list. */
+dhd_sta_t *
+dhd_find_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta;
+	dhd_if_t *ifp;
+	unsigned long flags;
+
+	ASSERT(ea != NULL);
+	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+	list_for_each_entry(sta, &ifp->sta_list, list) {
+		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+			DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+			return sta;
+		}
+	}
+
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return DHD_STA_NULL;
+}
+
+/** Add STA into the interface's STA list. */
+dhd_sta_t *
+dhd_add_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta;
+	dhd_if_t *ifp;
+	unsigned long flags;
+
+	ASSERT(ea != NULL);
+	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+
+	sta = dhd_sta_alloc((dhd_pub_t *)pub);
+	if (sta == DHD_STA_NULL) {
+		DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
+		return DHD_STA_NULL;
+	}
+
+	memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
+
+	/* link the sta and the dhd interface */
+	sta->ifp = ifp;
+	sta->ifidx = ifidx;
+	INIT_LIST_HEAD(&sta->list);
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+	list_add_tail(&sta->list, &ifp->sta_list);
+
+#if defined(BCM_GMAC3)
+	if (ifp->fwdh) {
+		ASSERT(ISALIGNED(ea, 2));
+		/* Add sta to WOFA forwarder. */
+		fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
+	}
+#endif /* BCM_GMAC3 */
+
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return sta;
+}
+
+/** Delete STA from the interface's STA list. */
+void
+dhd_del_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta, *next;
+	dhd_if_t *ifp;
+	unsigned long flags;
+
+	ASSERT(ea != NULL);
+	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+#if defined(BCM_GMAC3)
+			if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
+				ASSERT(ISALIGNED(ea, 2));
+				fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
+			}
+#endif /* BCM_GMAC3 */
+			list_del(&sta->list);
+			dhd_sta_free(&ifp->info->pub, sta);
+		}
+	}
+
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return;
+}
+
+/** Add STA if it doesn't exist. Not reentrant. */
+dhd_sta_t*
+dhd_findadd_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta;
+
+	sta = dhd_find_sta(pub, ifidx, ea);
+
+	if (!sta) {
+		/* Add entry */
+		sta = dhd_add_sta(pub, ifidx, ea);
+	}
+
+	return sta;
+}
+#else
+static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
+static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
+static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
+static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
+dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
+void dhd_del_sta(void *pub, int ifidx, void *ea) {}
+#endif /* PCIE_FULL_DONGLE */
+
+
+/* Returns the dhd iflist index corresponding to the bssidx provided by apps */
+int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
+{
+	dhd_if_t *ifp;
+	dhd_info_t *dhd = dhdp->info;
+	int i;
+
+	ASSERT(bssidx < DHD_MAX_IFS);
+	ASSERT(dhdp);
+
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		ifp = dhd->iflist[i];
+		if (ifp && (ifp->bssidx == bssidx)) {
+			DHD_TRACE(("Index manipulated for %s from %d to %d\n",
+				ifp->name, bssidx, i));
+			break;
+		}
+	}
+	return i;
+}
+
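+/* The bus layer queues received packets to the rx frame thread through a
+ * small power-of-two circular buffer (dhdp->skbbuf[MAXSKBPEND]):
+ * dhd_rxf_enqueue() stores at store_idx and dhd_rxf_dequeue() drains from
+ * sent_idx, with both indices advancing under the rxf lock and wrapping via
+ * the MAXSKBPEND mask.
+ */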
+static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
+{
+	uint32 store_idx;
+	uint32 sent_idx;
+
+	if (!skb) {
+		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
+		return BCME_ERROR;
+	}
+
+	dhd_os_rxflock(dhdp);
+	store_idx = dhdp->store_idx;
+	sent_idx = dhdp->sent_idx;
+	if (dhdp->skbbuf[store_idx] != NULL) {
+		/* Make sure the previous packets are processed */
+		dhd_os_rxfunlock(dhdp);
+#ifdef RXF_DEQUEUE_ON_BUSY
+		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+			skb, store_idx, sent_idx));
+		return BCME_BUSY;
+#else /* RXF_DEQUEUE_ON_BUSY */
+		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+			skb, store_idx, sent_idx));
+		/* removed msleep here, should use wait_event_timeout if we
+		 * want to give rx frame thread a chance to run
+		 */
+#if defined(WAIT_DEQUEUE)
+		OSL_SLEEP(1);
+#endif
+		return BCME_ERROR;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+	}
+	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
+		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
+	dhdp->skbbuf[store_idx] = skb;
+	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
+	dhd_os_rxfunlock(dhdp);
+
+	return BCME_OK;
+}
+
+static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
+{
+	uint32 store_idx;
+	uint32 sent_idx;
+	void *skb;
+
+	dhd_os_rxflock(dhdp);
+
+	store_idx = dhdp->store_idx;
+	sent_idx = dhdp->sent_idx;
+	skb = dhdp->skbbuf[sent_idx];
+
+	if (skb == NULL) {
+		dhd_os_rxfunlock(dhdp);
+		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
+			store_idx, sent_idx));
+		return NULL;
+	}
+
+	dhdp->skbbuf[sent_idx] = NULL;
+	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
+
+	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
+		skb, sent_idx));
+
+	dhd_os_rxfunlock(dhdp);
+
+	return skb;
+}
+
+int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
+{
+#ifndef CUSTOMER_HW10
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#endif /* !CUSTOMER_HW10 */
+
+	if (prepost) { /* pre process */
+		dhd_read_macaddr(dhd);
+	} else { /* post process */
+		dhd_write_macaddr(&dhd->pub.mac);
+	}
+
+	return 0;
+}
+
+#if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
+static bool
+_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
+{
+	bool _apply = FALSE;
+	/* In case of IBSS mode, apply arp pkt filter */
+	if (op_mode & DHD_FLAG_IBSS_MODE) {
+		_apply = TRUE;
+		goto exit;
+	}
+	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
+	if ((dhd->arp_version == 1) &&
+		(op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
+		_apply = TRUE;
+		goto exit;
+	}
+
+exit:
+	return _apply;
+}
+#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
+
+void dhd_set_packet_filter(dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+	int i;
+
+	DHD_TRACE(("%s: enter\n", __FUNCTION__));
+	if (dhd_pkt_filter_enable) {
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+		}
+	}
+#endif /* PKT_FILTER_SUPPORT */
+}
+
+void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+	int i;
+
+	DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));
+	/* 1 - Enable packet filter, only allow unicast packet to send up */
+	/* 0 - Disable packet filter */
+	if (dhd_pkt_filter_enable && (!value ||
+	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
+	    {
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
+			if (value && (i == DHD_ARP_FILTER_NUM) &&
+				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
+				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
+					"val %d, cnt %d, op_mode 0x%x\n",
+					value, i, dhd->op_mode));
+				continue;
+			}
+#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
+			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+				value, dhd_master_mode);
+		}
+	}
+#endif /* PKT_FILTER_SUPPORT */
+}
+
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+#ifndef SUPPORT_PM2_ONLY
+	int power_mode = PM_MAX;
+#endif /* SUPPORT_PM2_ONLY */
+	/* wl_pkt_filter_enable_t	enable_parm; */
+	char iovbuf[32];
+	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+	uint roamvar = 1;
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+	uint nd_ra_filter = 0;
+	int ret = 0;
+
+	if (!dhd)
+		return -ENODEV;
+
+	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
+		__FUNCTION__, value, dhd->in_suspend));
+
+	dhd_suspend_lock(dhd);
+
+#ifdef CUSTOM_SET_CPUCORE
+	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
+	/* set specific cpucore */
+	dhd_set_cpucore(dhd, TRUE);
+#endif /* CUSTOM_SET_CPUCORE */
+	if (dhd->up) {
+		if (value && dhd->in_suspend) {
+#ifdef PKT_FILTER_SUPPORT
+				dhd->early_suspended = 1;
+#endif
+				/* Kernel suspended */
+				DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
+
+#ifndef SUPPORT_PM2_ONLY
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+				                 sizeof(power_mode), TRUE, 0);
+#endif /* SUPPORT_PM2_ONLY */
+
+				/* Enable packet filter, only allow unicast packet to send up */
+				dhd_enable_packet_filter(1, dhd);
+
+#if 0
+				/* If DTIM skip is set up as default, force it to wake
+				 * each third DTIM for better power savings.  Note that
+				 * one side effect is a chance to miss BC/MC packet.
+				 */
+				bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
+				bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+					4, iovbuf, sizeof(iovbuf));
+				if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
+					TRUE, 0) < 0)
+					DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
+#endif
+
+#ifndef ENABLE_FW_ROAM_SUSPEND
+				/* Disable firmware roaming during suspend */
+				bcm_mkiovar("roam_off", (char *)&roamvar, 4,
+					iovbuf, sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#if 0
+				if (FW_SUPPORTED(dhd, ndoe)) {
+					/* enable IPv6 RA filter in  firmware during suspend */
+					nd_ra_filter = 1;
+					bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
+						iovbuf, sizeof(iovbuf));
+					if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+						sizeof(iovbuf), TRUE, 0)) < 0)
+						DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+							ret));
+				}
+#endif
+			} else {
+#ifdef PKT_FILTER_SUPPORT
+				dhd->early_suspended = 0;
+#endif
+				/* Kernel resumed  */
+				DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
+
+#ifndef SUPPORT_PM2_ONLY
+				power_mode = PM_FAST;
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+				                 sizeof(power_mode), TRUE, 0);
+#endif /* SUPPORT_PM2_ONLY */
+#ifdef PKT_FILTER_SUPPORT
+				/* disable pkt filter */
+				dhd_enable_packet_filter(0, dhd);
+#endif /* PKT_FILTER_SUPPORT */
+
+				/* restore pre-suspend setting for dtim_skip */
+				bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+					4, iovbuf, sizeof(iovbuf));
+
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#ifndef ENABLE_FW_ROAM_SUSPEND
+				roamvar = dhd_roam_disable;
+				bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
+					sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+				if (FW_SUPPORTED(dhd, ndoe)) {
+					/* disable the IPv6 RA filter in firmware on resume */
+					nd_ra_filter = 0;
+					bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
+						iovbuf, sizeof(iovbuf));
+					if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+						sizeof(iovbuf), TRUE, 0)) < 0)
+						DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+							ret));
+				}
+			}
+	}
+	dhd_suspend_unlock(dhd);
+
+	return 0;
+}
+
+static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
+{
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ret = 0;
+
+	DHD_OS_WAKE_LOCK(dhdp);
+	DHD_PERIM_LOCK(dhdp);
+
+	/* Set flag when early suspend was called */
+	dhdp->in_suspend = val;
+	if ((force || !dhdp->suspend_disable_flag) &&
+		dhd_support_sta_mode(dhdp))
+	{
+		ret = dhd_set_suspend(val, dhdp);
+	}
+
+	DHD_PERIM_UNLOCK(dhdp);
+	DHD_OS_WAKE_UNLOCK(dhdp);
+	return ret;
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+static void dhd_early_suspend(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 1, 0);
+}
+
+static void dhd_late_resume(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 0, 0);
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+/*
+ * Generalized timeout mechanism.  Uses spin sleep with exponential back-off until
+ * the sleep time reaches one jiffy, then switches over to task delay.  Usage:
+ *
+ *      dhd_timeout_start(&tmo, usec);
+ *      while (!dhd_timeout_expired(&tmo))
+ *              if (poll_something())
+ *                      break;
+ *      if (dhd_timeout_expired(&tmo))
+ *              fatal();
+ */
+
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+	tmo->limit = usec;
+	tmo->increment = 0;
+	tmo->elapsed = 0;
+	tmo->tick = jiffies_to_usecs(1);
+}
+
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+	/* Do nothing on the first call */
+	if (tmo->increment == 0) {
+		tmo->increment = 1;
+		return 0;
+	}
+
+	if (tmo->elapsed >= tmo->limit)
+		return 1;
+
+	/* Add the delay that's about to take place */
+	tmo->elapsed += tmo->increment;
+
+	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
+		OSL_DELAY(tmo->increment);
+		tmo->increment *= 2;
+		if (tmo->increment > tmo->tick)
+			tmo->increment = tmo->tick;
+	} else {
+		wait_queue_head_t delay_wait;
+		DECLARE_WAITQUEUE(wait, current);
+		init_waitqueue_head(&delay_wait);
+		add_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		(void)schedule_timeout(1);
+		remove_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_RUNNING);
+	}
+
+	return 0;
+}
+
+int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+	int i = 0;
+
+	ASSERT(dhd);
+	while (i < DHD_MAX_IFS) {
+		if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
+			return i;
+		i++;
+	}
+
+	return DHD_BAD_IF;
+}
+
+struct net_device * dhd_idx2net(void *pub, int ifidx)
+{
+	struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
+	struct dhd_info *dhd_info;
+
+	if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
+		return NULL;
+	dhd_info = dhd_pub->info;
+	if (dhd_info && dhd_info->iflist[ifidx])
+		return dhd_info->iflist[ifidx]->net;
+	return NULL;
+}
+
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+	int i = DHD_MAX_IFS;
+
+	ASSERT(dhd);
+
+	if (name == NULL || *name == '\0')
+		return 0;
+
+	while (--i > 0)
+		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
+				break;
+
+	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
+
+	return i;	/* default - the primary interface */
+}
+
+int
+dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
+{
+	int i = DHD_MAX_IFS;
+
+	ASSERT(dhd);
+
+	while (--i > 0)
+		if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
+				break;
+
+	DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));
+
+	return i;	/* default - the primary interface */
+}
+
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	ASSERT(dhd);
+
+	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+		return "<if_bad>";
+	}
+
+	if (dhd->iflist[ifidx] == NULL) {
+		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+		return "<if_null>";
+	}
+
+	if (dhd->iflist[ifidx]->net)
+		return dhd->iflist[ifidx]->net->name;
+
+	return "<if_none>";
+}
+
+uint8 *
+dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
+{
+	int i;
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	ASSERT(dhd);
+	for (i = 0; i < DHD_MAX_IFS; i++)
+	if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
+		return dhd->iflist[i]->mac_addr;
+
+	return NULL;
+}
+
+
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+	struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	struct netdev_hw_addr *ha;
+#else
+	struct dev_mc_list *mclist;
+#endif
+	uint32 allmulti, cnt;
+
+	wl_ioctl_t ioc;
+	char *buf, *bufp;
+	uint buflen;
+	int ret;
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	dev = dhd->iflist[ifidx]->net;
+	if (!dev)
+		return;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	cnt = netdev_mc_count(dev);
+#else
+	cnt = dev->mc_count;
+#endif /* LINUX_VERSION_CODE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_unlock_bh(dev);
+#endif
+
+	/* Determine initial value of allmulti flag */
+	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+
+	/* Send down the multicast list first. */
+
+
+	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+		           dhd_ifname(&dhd->pub, ifidx), cnt));
+		return;
+	}
+
+	strncpy(bufp, "mcast_list", buflen - 1);
+	bufp[buflen - 1] = '\0';
+	bufp += strlen("mcast_list") + 1;
+
+	cnt = htol32(cnt);
+	memcpy(bufp, &cnt, sizeof(cnt));
+	bufp += sizeof(cnt);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	netdev_for_each_mc_addr(ha, dev) {
+		if (!cnt)
+			break;
+		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+		cnt--;
+	}
+#else
+	for (mclist = dev->mc_list; (mclist && (cnt > 0));
+		cnt--, mclist = mclist->next) {
+		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+	}
+#endif /* LINUX_VERSION_CODE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_unlock_bh(dev);
+#endif
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+			dhd_ifname(&dhd->pub, ifidx), cnt));
+		allmulti = cnt ? TRUE : allmulti;
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Now send the allmulti setting.  This is based on the setting in the
+	 * net_device flags, but might be modified above to be turned on if we
+	 * were trying to set some addresses and dongle rejected it...
+	 */
+
+	buflen = sizeof("allmulti") + sizeof(allmulti);
+	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
+		return;
+	}
+	allmulti = htol32(allmulti);
+
+	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
+		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
+		           dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
+		MFREE(dhd->pub.osh, buf, buflen);
+		return;
+	}
+
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set allmulti %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
+
+	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+
+	allmulti = htol32(allmulti);
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_PROMISC;
+	ioc.buf = &allmulti;
+	ioc.len = sizeof(allmulti);
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set promisc %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+}
+
+int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
+{
+	char buf[32];
+	wl_ioctl_t ioc;
+	int ret;
+
+	if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
+		DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
+		return -1;
+	}
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = 32;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
+	} else {
+		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+		if (ifidx == 0)
+			memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
+	}
+
+	return ret;
+}
+
+#ifdef SOFTAP
+extern struct net_device *ap_net_dev;
+extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
+#endif
+
+static void
+dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_event_t *if_event = event_info;
+	struct net_device *ndev;
+	int ifidx, bssidx;
+	int ret;
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	struct wireless_dev *vwdev, *primary_wdev;
+	struct net_device *primary_ndev;
+#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
+
+	if (event != DHD_WQ_WORK_IF_ADD) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	if (!if_event) {
+		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	ifidx = if_event->event.ifidx;
+	bssidx = if_event->event.bssidx;
+	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
+
+	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
+		if_event->mac, bssidx, TRUE);
+	if (!ndev) {
+		DHD_ERROR(("%s: net device alloc failed  \n", __FUNCTION__));
+		goto done;
+	}
+
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+	if (unlikely(!vwdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		goto done;
+	}
+	primary_ndev = dhd->pub.info->iflist[0]->net;
+	primary_wdev = ndev_to_wdev(primary_ndev);
+	vwdev->wiphy = primary_wdev->wiphy;
+	vwdev->iftype = if_event->event.role;
+	vwdev->netdev = ndev;
+	ndev->ieee80211_ptr = vwdev;
+	SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
+	DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
+#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
+	DHD_PERIM_LOCK(&dhd->pub);
+	if (ret != BCME_OK) {
+		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
+			dhd_remove_if(&dhd->pub, ifidx, TRUE);
+	}
+#ifdef PCIE_FULL_DONGLE
+	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
+	if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
+		char iovbuf[WLC_IOCTL_SMLEN];
+		uint32 var_int =  1;
+
+		memset(iovbuf, 0, sizeof(iovbuf));
+		bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
+		dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
+	}
+#endif /* PCIE_FULL_DONGLE */
+done:
+	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	int ifidx;
+	dhd_if_event_t *if_event = event_info;
+
+
+	if (event != DHD_WQ_WORK_IF_DEL) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	if (!if_event) {
+		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	ifidx = if_event->event.ifidx;
+	DHD_TRACE(("Removing interface with idx %d\n", ifidx));
+
+	dhd_remove_if(&dhd->pub, ifidx, TRUE);
+
+	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_t *ifp = event_info;
+
+	if (event != DHD_WQ_WORK_SET_MAC) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+#ifdef SOFTAP
+	{
+		unsigned long flags;
+		bool in_ap = FALSE;
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		in_ap = (ap_net_dev != NULL);
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+		if (in_ap)  {
+			DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
+			           ifp->net->name));
+			goto done;
+		}
+	}
+#endif /* SOFTAP */
+
+	if (ifp == NULL || !dhd->pub.up) {
+		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
+	ifp->set_macaddress = FALSE;
+	if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
+		DHD_INFO(("%s: MACID is overwritten\n",	__FUNCTION__));
+	else
+		DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
+
+done:
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_t *ifp = event_info;
+	int ifidx;
+
+	if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+#ifdef SOFTAP
+	{
+		bool in_ap = FALSE;
+		unsigned long flags;
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		in_ap = (ap_net_dev != NULL);
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+		if (in_ap)  {
+			DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
+			           ifp->net->name));
+			ifp->set_multicast = FALSE;
+			goto done;
+		}
+	}
+#endif /* SOFTAP */
+
+	if (ifp == NULL || !dhd->pub.up) {
+		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+		goto done;
+	}
+
+	ifidx = ifp->idx;
+
+
+	_dhd_set_multicast_list(dhd, ifidx);
+	DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
+
+done:
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
+{
+	int ret = 0;
+
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int ifidx;
+	dhd_if_t *dhdif;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return -1;
+
+	dhdif = dhd->iflist[ifidx];
+
+	dhd_net_if_lock_local(dhd);
+	memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
+	dhdif->set_macaddress = TRUE;
+	dhd_net_if_unlock_local(dhd);
+	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
+		dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
+	return ret;
+}
+
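+/* .ndo_set_rx_mode is invoked with the device address lock held (BH disabled),
+ * so the driver only flags the request here and defers the actual firmware
+ * ioctl work to dhd_set_mcast_list_handler() on the deferred work queue,
+ * where it may sleep.
+ */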
+static void
+dhd_set_multicast_list(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ifidx;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return;
+
+	dhd->iflist[ifidx]->set_multicast = TRUE;
+	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
+		DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
+}
+
+#ifdef PROP_TXSTATUS
+int
+dhd_os_wlfc_block(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+	ASSERT(di != NULL);
+	spin_lock_bh(&di->wlfc_spinlock);
+	return 1;
+}
+
+int
+dhd_os_wlfc_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+
+	ASSERT(di != NULL);
+	spin_unlock_bh(&di->wlfc_spinlock);
+	return 1;
+}
+
+#endif /* PROP_TXSTATUS */
+
+int BCMFASTPATH
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+	int ret = BCME_OK;
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh = NULL;
+
+	/* Reject if down */
+	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+		/* free the packet here since the caller won't */
+		PKTFREE(dhdp->osh, pktbuf, TRUE);
+		return -ENODEV;
+	}
+
+#ifdef PCIE_FULL_DONGLE
+	if (dhdp->busstate == DHD_BUS_SUSPEND) {
+		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
+		PKTFREE(dhdp->osh, pktbuf, TRUE);
+		return -EBUSY;
+	}
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_UNICAST_DHCP
+	/* If dhcp_unicast is enabled, convert broadcast DHCP ACK/REPLY
+	 * packets to unicast.
+	 */
+	if (dhdp->dhcp_unicast) {
+	    dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx);
+	}
+#endif /* DHD_UNICAST_DHCP */
+	/* Update multicast statistic */
+	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+		eh = (struct ether_header *)pktdata;
+
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			dhdp->tx_multicast++;
+		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
+			atomic_inc(&dhd->pend_8021x_cnt);
+	} else {
+			PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+			return BCME_ERROR;
+	}
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* If this packet has replaced another packet and got freed, just return */
+	if (dhd_tcpack_suppress(dhdp, pktbuf))
+		return ret;
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Look into the packet and update the packet priority.
+	 * Unless PKTPRIO_OVERRIDE is defined, only packets still carrying the
+	 * default priority (0) are reclassified by pktsetprio().
+	 */
+#ifndef PKTPRIO_OVERRIDE
+	if (PKTPRIO(pktbuf) == 0)
+#endif /* !PKTPRIO_OVERRIDE */
+		pktsetprio(pktbuf, FALSE);
+
+
+#ifdef PCIE_FULL_DONGLE
+	/*
+	 * Look up the per-interface hash table for a matching flowring. If one is
+	 * not available, allocate a unique flowid and add a flowring entry.
+	 * The found or newly created flowid is placed into the pktbuf's tag.
+	 */
+	ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
+	if (ret != BCME_OK) {
+		PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
+		return ret;
+	}
+#endif
+
+#ifdef PROP_TXSTATUS
+	if (dhd_wlfc_is_supported(dhdp)) {
+		/* store the interface ID */
+		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
+
+		/* store destination MAC in the tag as well */
+		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
+
+		/* decide which FIFO this packet belongs to */
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
+		else
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
+	} else
+#endif /* PROP_TXSTATUS */
+	/* If the protocol uses a data header, apply it */
+	dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+
+	/* Use bus module to send data frame */
+#ifdef WLMEDIA_HTSF
+	dhd_htsf_addtxts(dhdp, pktbuf);
+#endif
+#ifdef PROP_TXSTATUS
+	{
+		if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
+			dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
+			/* non-proptxstatus way */
+#ifdef BCMPCIE
+			ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+			ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
+		}
+	}
+#else
+#ifdef BCMPCIE
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
+#endif /* PROP_TXSTATUS */
+
+	return ret;
+}
+
+int BCMFASTPATH
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	int ret;
+	uint datalen;
+	void *pktbuf;
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	dhd_if_t *ifp = NULL;
+	int ifidx;
+#ifdef WLMEDIA_HTSF
+	uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
+#else
+	uint8 htsfdlystat_sz = 0;
+#endif
+#ifdef DHD_WMF
+	struct ether_header *eh;
+	uint8 *iph;
+#endif /* DHD_WMF */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+
+	/* Reject if down */
+	if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
+		DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
+			__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+		netif_stop_queue(net);
+		/* Send Event when bus down detected during data session */
+		if (dhd->pub.up) {
+			DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
+			net_os_send_hang_message(net);
+		}
+		DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+
+	ifp = DHD_DEV_IFP(net);
+	ifidx = DHD_DEV_IFIDX(net);
+
+	ASSERT(ifidx == dhd_net2idx(dhd, net));
+	ASSERT((ifp != NULL) && (ifp == dhd->iflist[ifidx]));
+
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+		netif_stop_queue(net);
+		DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+
+	/* re-align socket buffer if "skb->data" is odd address */
+	if (((unsigned long)(skb->data)) & 0x1) {
+		unsigned char *data = skb->data;
+		uint32 length = skb->len;
+		PKTPUSH(dhd->pub.osh, skb, 1);
+		memmove(skb->data, data, length);
+		PKTSETLEN(dhd->pub.osh, skb, length);
+	}
+
+	datalen  = PKTLEN(dhd->pub.osh, skb);
+
+	/* Make sure there's enough room for any header */
+
+	if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
+		struct sk_buff *skb2;
+
+		DHD_INFO(("%s: insufficient headroom\n",
+		          dhd_ifname(&dhd->pub, ifidx)));
+		dhd->pub.tx_realloc++;
+
+		skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
+
+		dev_kfree_skb(skb);
+		if ((skb = skb2) == NULL) {
+			DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+			           dhd_ifname(&dhd->pub, ifidx)));
+			ret = -ENOMEM;
+			goto done;
+		}
+	}
+
+	/* Convert to packet */
+	if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+		DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+		           dhd_ifname(&dhd->pub, ifidx)));
+		dev_kfree_skb_any(skb);
+		ret = -ENOMEM;
+		goto done;
+	}
+#ifdef WLMEDIA_HTSF
+	if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
+		struct ether_header *eh = (struct ether_header *)pktdata;
+
+		if (!ETHER_ISMULTI(eh->ether_dhost) &&
+			(ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
+			eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
+		}
+	}
+#endif
+#ifdef DHD_WMF
+	eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
+	iph = (uint8 *)eh + ETHER_HDR_LEN;
+
+	/* WMF processing for multicast packets
+	 * Only IPv4 packets are handled
+	 */
+	if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
+		(IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
+		((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+		void *sdu_clone;
+		bool ucast_convert = FALSE;
+#ifdef DHD_UCAST_UPNP
+		uint32 dest_ip;
+
+		dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
+		ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
+#endif /* DHD_UCAST_UPNP */
+#ifdef DHD_IGMP_UCQUERY
+		ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
+			(IPV4_PROT(iph) == IP_PROT_IGMP) &&
+			(*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
+#endif /* DHD_IGMP_UCQUERY */
+		if (ucast_convert) {
+			dhd_sta_t *sta;
+			unsigned long flags;
+
+			DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+			/* Convert upnp/igmp query to unicast for each assoc STA */
+			list_for_each_entry(sta, &ifp->sta_list, list) {
+				if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
+					DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+					DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+					DHD_OS_WAKE_UNLOCK(&dhd->pub);
+					return (WMF_NOP);
+				}
+				dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
+			}
+
+			DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+			DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+			PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+			return NETDEV_TX_OK;
+		} else
+#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
+		{
+			/* There will be no STA info if the packet is coming from LAN host
+			 * Pass as NULL
+			 */
+			ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
+			switch (ret) {
+			case WMF_TAKEN:
+			case WMF_DROP:
+				/* Either taken by WMF or we should drop it.
+				 * Exiting send path
+				 */
+				DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+				return NETDEV_TX_OK;
+			default:
+				/* Continue the transmit path */
+				break;
+			}
+		}
+	}
+#endif /* DHD_WMF */
+
+	ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+
+done:
+	if (ret) {
+		ifp->stats.tx_dropped++;
+		dhd->pub.tx_dropped++;
+	}
+	else {
+		dhd->pub.tx_packets++;
+		ifp->stats.tx_packets++;
+		ifp->stats.tx_bytes += datalen;
+	}
+
+	DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	/* Return ok: we always eat the packet */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+	return 0;
+#else
+	return NETDEV_TX_OK;
+#endif
+}
+
+
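+/* Flow-control callback from the bus layer: stop or wake the Linux transmit
+ * queue of a single interface, or of all interfaces when ifidx is
+ * ALL_INTERFACES.
+ */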
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+	struct net_device *net;
+	dhd_info_t *dhd = dhdp->info;
+	int i;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(dhd);
+
+	if (ifidx == ALL_INTERFACES) {
+		/* Flow control on all active interfaces */
+		dhdp->txoff = state;
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i]) {
+				net = dhd->iflist[i]->net;
+				if (state == ON)
+					netif_stop_queue(net);
+				else
+					netif_wake_queue(net);
+			}
+		}
+	}
+	else {
+		if (dhd->iflist[ifidx]) {
+			net = dhd->iflist[ifidx]->net;
+			if (state == ON)
+				netif_stop_queue(net);
+			else
+				netif_wake_queue(net);
+		}
+	}
+}
+
+#ifdef DHD_RX_DUMP
+typedef struct {
+	uint16 type;
+	const char *str;
+} PKTTYPE_INFO;
+
+static const PKTTYPE_INFO packet_type_info[] =
+{
+	{ ETHER_TYPE_IP, "IP" },
+	{ ETHER_TYPE_ARP, "ARP" },
+	{ ETHER_TYPE_BRCM, "BRCM" },
+	{ ETHER_TYPE_802_1X, "802.1X" },
+	{ ETHER_TYPE_WAI, "WAPI" },
+	{ 0, ""}
+};
+
+static const char *_get_packet_type_str(uint16 type)
+{
+	int i;
+	int n = sizeof(packet_type_info)/sizeof(packet_type_info[0]) - 1;
+
+	for (i = 0; i < n; i++) {
+		if (packet_type_info[i].type == type)
+			return packet_type_info[i].str;
+	}
+
+	return packet_type_info[n].str;
+}
+#endif /* DHD_RX_DUMP */
+
+
+#ifdef DHD_WMF
+bool
+dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+
+	return dhd->rxthread_enabled;
+}
+#endif /* DHD_WMF */
+
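+/* Receive path: walk the chain of 'numpkt' packets, apply WMF/L2 filtering,
+ * drop proptxstatus header-only packets, process firmware event packets
+ * (ETHER_TYPE_BRCM) and deliver the remaining frames to the network stack
+ * either directly or via the rxf thread.
+ */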
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	uchar *eth;
+	uint len;
+	void *data, *pnext = NULL;
+	int i;
+	dhd_if_t *ifp;
+	wl_event_msg_t event;
+	int tout_rx = 0;
+	int tout_ctrl = 0;
+	void *skbhead = NULL;
+	void *skbprev = NULL;
+#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
+	char *dump_data;
+	uint16 protocol;
+#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+		struct ether_header *eh;
+#ifdef WLBTAMP
+		struct dot11_llc_snap_header *lsh;
+#endif
+
+		pnext = PKTNEXT(dhdp->osh, pktbuf);
+		PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL) {
+			DHD_ERROR(("%s: ifp is NULL. drop packet\n",
+				__FUNCTION__));
+			PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+
+		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+
+		/* Dropping only data packets before registering net device to avoid kernel panic */
+#ifndef PROP_TXSTATUS_VSDB
+		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
+			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
+#else
+		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
+			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
+#endif /* PROP_TXSTATUS_VSDB */
+			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
+			__FUNCTION__));
+			PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+
+#ifdef WLBTAMP
+		lsh = (struct dot11_llc_snap_header *)&eh[1];
+
+		if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
+		    (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
+		    bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+		    lsh->type == HTON16(BTA_PROT_L2CAP)) {
+			amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
+			        ((uint8 *)eh + RFC1042_HDR_LEN);
+			ACL_data = NULL;
+		}
+#endif /* WLBTAMP */
+
+#ifdef PROP_TXSTATUS
+		if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
+			/* WLFC may send header only packet when
+			there is an urgent message but no packet to
+			piggy-back on
+			*/
+			PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+#endif
+#ifdef DHD_L2_FILTER
+		/* If block_ping is enabled drop the ping packet */
+		if (dhdp->block_ping) {
+			if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) {
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+				continue;
+			}
+		}
+#endif
+#ifdef DHD_WMF
+		/* WMF processing for multicast packets */
+		if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
+			dhd_sta_t *sta;
+			int ret;
+
+			sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
+			ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
+			switch (ret) {
+				case WMF_TAKEN:
+					/* The packet is taken by WMF. Continue to next iteration */
+					continue;
+				case WMF_DROP:
+					/* Packet DROP decision by WMF. Toss it */
+					DHD_ERROR(("%s: WMF decides to drop packet\n",
+						__FUNCTION__));
+					PKTCFREE(dhdp->osh, pktbuf, FALSE);
+					continue;
+				default:
+					/* Continue the transmit path */
+					break;
+			}
+		}
+#endif /* DHD_WMF */
+#ifdef DHDTCPACK_SUPPRESS
+		dhd_tcpdata_info_get(dhdp, pktbuf);
+#endif
+		skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL)
+			ifp = dhd->iflist[0];
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+
+#ifdef PCIE_FULL_DONGLE
+		if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
+			(!ifp->ap_isolate)) {
+			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+			if (ETHER_ISUCAST(eh->ether_dhost)) {
+				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
+					dhd_sendpkt(dhdp, ifidx, pktbuf);
+					continue;
+				}
+			} else {
+				void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
+				dhd_sendpkt(dhdp, ifidx, npktbuf);
+			}
+		}
+#endif /* PCIE_FULL_DONGLE */
+
+		/* Get the protocol, maintain skb around eth_type_trans()
+		 * The main reason for this hack is the limitation of Linux 2.4,
+		 * where 'eth_type_trans' uses 'net->hard_header_len' (rather than
+		 * ETH_HLEN) for its internal skb_pull. To avoid copying the packet
+		 * coming from the network stack when adding the BDC, hardware
+		 * header etc., 'net->hard_header_len' is set at interface
+		 * registration to ETH_HLEN plus the extra space required for
+		 * those headers, not just ETH_HLEN.
+		 */
+		eth = skb->data;
+		len = skb->len;
+
+#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
+		dump_data = skb->data;
+		protocol = (dump_data[12] << 8) | dump_data[13];
+
+		if (protocol == ETHER_TYPE_802_1X) {
+			DHD_ERROR(("ETHER_TYPE_802_1X: "
+				"ver %d, type %d, replay %d\n",
+				dump_data[14], dump_data[15],
+				dump_data[30]));
+		}
+#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
+#if defined(DHD_RX_DUMP)
+		DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
+		if (protocol != ETHER_TYPE_BRCM) {
+			if (dump_data[0] == 0xFF) {
+				DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
+
+				if ((dump_data[12] == 8) &&
+					(dump_data[13] == 6)) {
+					DHD_ERROR(("%s: ARP %d\n",
+						__FUNCTION__, dump_data[0x15]));
+				}
+			} else if (dump_data[0] & 1) {
+				DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
+					__FUNCTION__, MAC2STRDBG(dump_data)));
+			}
+#ifdef DHD_RX_FULL_DUMP
+			{
+				int k;
+				for (k = 0; k < skb->len; k++) {
+					DHD_ERROR(("%02X ", dump_data[k]));
+					if ((k & 15) == 15)
+						DHD_ERROR(("\n"));
+				}
+				DHD_ERROR(("\n"));
+			}
+#endif /* DHD_RX_FULL_DUMP */
+		}
+#endif /* DHD_RX_DUMP */
+
+		skb->protocol = eth_type_trans(skb, skb->dev);
+
+		if (skb->pkt_type == PACKET_MULTICAST) {
+			dhd->pub.rx_multicast++;
+			ifp->stats.multicast++;
+		}
+
+		skb->data = eth;
+		skb->len = len;
+
+#ifdef WLMEDIA_HTSF
+		dhd_htsf_addrxts(dhdp, pktbuf);
+#endif
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		/* Process special event packets and then discard them */
+		memset(&event, 0, sizeof(event));
+		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+			dhd_wl_host_event(dhd, &ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+			skb_mac_header(skb),
+#else
+			skb->mac.raw,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
+			&event,
+			&data);
+
+			wl_event_to_host_order(&event);
+			if (!tout_ctrl)
+				tout_ctrl = DHD_PACKET_TIMEOUT_MS;
+#ifdef WLBTAMP
+			if (event.event_type == WLC_E_BTA_HCI_EVENT) {
+				dhd_bta_doevt(dhdp, data, event.datalen);
+			}
+#endif /* WLBTAMP */
+
+#if defined(PNO_SUPPORT)
+			if (event.event_type == WLC_E_PFN_NET_FOUND) {
+				/* enforce custom wake lock to guarantee that the kernel is not suspended */
+				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
+			}
+#endif /* PNO_SUPPORT */
+
+#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
+			PKTFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
+		} else {
+			tout_rx = DHD_PACKET_TIMEOUT_MS;
+
+#ifdef PROP_TXSTATUS
+			dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
+#endif /* PROP_TXSTATUS */
+		}
+
+		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+		ifp = dhd->iflist[ifidx];
+
+		if (ifp->net)
+			ifp->net->last_rx = jiffies;
+
+		if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+			dhdp->dstats.rx_bytes += skb->len;
+			dhdp->rx_packets++; /* Local count */
+			ifp->stats.rx_bytes += skb->len;
+			ifp->stats.rx_packets++;
+		}
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+		if (dhd_use_tcp_window_size_adjust) {
+			if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) {
+				dhd_adjust_tcp_winsize(dhdp->op_mode, skb);
+			}
+		}
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+		if (in_interrupt()) {
+			netif_rx(skb);
+		} else {
+			if (dhd->rxthread_enabled) {
+				if (!skbhead)
+					skbhead = skb;
+				else
+					PKTSETNEXT(dhdp->osh, skbprev, skb);
+				skbprev = skb;
+			} else {
+
+				/* If the receive is not processed inside an ISR,
+				 * the softirqd must be woken explicitly to service
+				 * the NET_RX_SOFTIRQ.	In 2.6 kernels, this is handled
+				 * by netif_rx_ni(), but in earlier kernels, we need
+				 * to do it manually.
+				 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+				netif_rx_ni(skb);
+#else
+				ulong flags;
+				netif_rx(skb);
+				local_irq_save(flags);
+				RAISE_RX_SOFTIRQ();
+				local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+			}
+		}
+	}
+
+	if (dhd->rxthread_enabled && skbhead)
+		dhd_sched_rxf(dhdp, skbhead);
+
+	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
+	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
+}
+
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+	/* Linux version has nothing to do */
+	return;
+}
+
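+/* Transmit-completion callback from the bus layer: strips the protocol
+ * header, decrements the pending-802.1X counter for EAPOL frames and
+ * generates a local BT-AMP HCI completion event when applicable.
+ */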
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh;
+	uint16 type;
+#ifdef WLBTAMP
+	uint len;
+#endif
+
+	dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
+
+	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+	type  = ntoh16(eh->ether_type);
+
+	if (type == ETHER_TYPE_802_1X)
+		atomic_dec(&dhd->pend_8021x_cnt);
+
+#ifdef WLBTAMP
+	/* Crack open the packet and check to see if it is BT HCI ACL data packet.
+	 * If yes generate packet completion event.
+	 */
+	len = PKTLEN(dhdp->osh, txp);
+
+	/* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
+	if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) {
+		struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1];
+
+		if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+		    ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+
+			dhd_bta_tx_hcidata_complete(dhdp, txp, success);
+		}
+	}
+#endif /* WLBTAMP */
+}
+
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	dhd_if_t *ifp;
+	int ifidx;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
+
+		memset(&net->stats, 0, sizeof(net->stats));
+		return &net->stats;
+	}
+
+	ifp = dhd->iflist[ifidx];
+	ASSERT(dhd && ifp);
+
+	if (dhd->pub.up) {
+		/* Use the protocol to get dongle stats */
+		dhd_prot_dstats(&dhd->pub);
+	}
+	return &ifp->stats;
+}
+
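+/* Watchdog thread: each time the semaphore is posted it runs
+ * dhd_bus_watchdog() and reschedules the watchdog timer, subtracting the time
+ * already spent so the period stays close to dhd_watchdog_ms.
+ */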
+static int
+dhd_watchdog_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_watchdog_prio > 0) {
+		struct sched_param param;
+		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+			dhd_watchdog_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	while (1)
+		if (down_interruptible (&tsk->sema) == 0) {
+			unsigned long flags;
+			unsigned long jiffies_at_start = jiffies;
+			unsigned long time_lapse;
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+
+				/* Call the bus module watchdog */
+				dhd_bus_watchdog(&dhd->pub);
+
+
+				DHD_GENERAL_LOCK(&dhd->pub, flags);
+				/* Count the tick for reference */
+				dhd->pub.tickcnt++;
+				time_lapse = jiffies - jiffies_at_start;
+
+				/* Reschedule the watchdog */
+				if (dhd->wd_timer_valid)
+					mod_timer(&dhd->timer,
+					    jiffies +
+					    msecs_to_jiffies(dhd_watchdog_ms) -
+					    min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
+				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+			}
+		} else {
+			break;
+		}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
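+/* Watchdog timer callback: wake the watchdog thread when one is running,
+ * otherwise service the bus watchdog directly and rearm the timer from timer
+ * context.
+ */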
+static void dhd_watchdog(ulong data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+	unsigned long flags;
+
+	if (dhd->pub.dongle_reset) {
+		return;
+	}
+
+	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+		up(&dhd->thr_wdt_ctl.sema);
+		return;
+	}
+
+	/* Call the bus module watchdog */
+	dhd_bus_watchdog(&dhd->pub);
+
+	DHD_GENERAL_LOCK(&dhd->pub, flags);
+	/* Count the tick for reference */
+	dhd->pub.tickcnt++;
+
+	/* Reschedule the watchdog */
+	if (dhd->wd_timer_valid)
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+}
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+static void
+dhd_sched_policy(int prio)
+{
+	struct sched_param param;
+	if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
+		param.sched_priority = 0;
+		setScheduler(current, SCHED_NORMAL, &param);
+	} else {
+		if (get_scheduler_policy(current) != SCHED_FIFO) {
+			param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
+			setScheduler(current, SCHED_FIFO, &param);
+		}
+	}
+}
+#endif /* ENABLE_ADAPTIVE_SCHED */
+#ifdef DEBUG_CPU_FREQ
+static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+	dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
+	struct cpufreq_freqs *freq = data;
+	if (dhd) {
+		if (!dhd->new_freq)
+			goto exit;
+		if (val == CPUFREQ_POSTCHANGE) {
+			DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
+				freq->new, freq->cpu));
+			*per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
+		}
+	}
+exit:
+	return 0;
+}
+#endif /* DEBUG_CPU_FREQ */
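+/* DPC thread: on each wakeup drain dhd_bus_dpc() while work remains,
+ * extending the watchdog timer during the burst; if the bus has gone down,
+ * stop it cleanly instead.
+ */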
+static int
+dhd_dpc_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_dpc_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+#ifdef CUSTOM_DPC_CPUCORE
+	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_dpc = current;
+#endif /* CUSTOM_SET_CPUCORE */
+
+	/* Run until signal received */
+	while (1) {
+		if (!binary_sema_down(tsk)) {
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_dpc_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			/* Call bus dpc unless it indicated down (then clean stop) */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
+				while (dhd_bus_dpc(dhd->pub.bus)) {
+					/* process all data */
+				}
+				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+			} else {
+				if (dhd->pub.up)
+					dhd_bus_stop(dhd->pub.bus, TRUE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			}
+		}
+		else
+			break;
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
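+/* RX frame thread: dequeue skb chains queued by dhd_sched_rxf() and push them
+ * to the network stack with netif_rx_ni() (netif_rx() plus an explicit
+ * softirq kick on pre-2.6 kernels).
+ */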
+static int
+dhd_rxf_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+#if defined(WAIT_DEQUEUE)
+#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) /  */
+	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
+#endif
+	dhd_pub_t *pub = &dhd->pub;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_rxf_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	DAEMONIZE("dhd_rxf");
+	/* DHD_OS_WAKE_LOCK is taken in dhd_sched_rxf() [dhd_linux.c] when packets are queued for this thread */
+
+	/*  signal: thread has started */
+	complete(&tsk->completed);
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_rxf = current;
+#endif /* CUSTOM_SET_CPUCORE */
+
+	/* Run until signal received */
+	while (1) {
+		if (down_interruptible(&tsk->sema) == 0) {
+			void *skb;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+			ulong flags;
+#endif
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_rxf_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+			SMP_RD_BARRIER_DEPENDS();
+
+			if (tsk->terminated) {
+				break;
+			}
+			skb = dhd_rxf_dequeue(pub);
+
+			if (skb == NULL) {
+				continue;
+			}
+			while (skb) {
+				void *skbnext = PKTNEXT(pub->osh, skb);
+				PKTSETNEXT(pub->osh, skb, NULL);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+				netif_rx_ni(skb);
+#else
+				netif_rx(skb);
+				local_irq_save(flags);
+				RAISE_RX_SOFTIRQ();
+				local_irq_restore(flags);
+
+#endif
+				skb = skbnext;
+			}
+#if defined(WAIT_DEQUEUE)
+			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
+				OSL_SLEEP(1);
+				watchdogTime = OSL_SYSUPTIME();
+			}
+#endif
+
+			DHD_OS_WAKE_UNLOCK(pub);
+		}
+		else
+			break;
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+#ifdef BCMPCIE
+void dhd_dpc_kill(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	if (!dhdp)
+		return;
+
+	dhd = dhdp->info;
+
+	if (!dhd)
+		return;
+
+	tasklet_kill(&dhd->tasklet);
+	DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
+}
+#endif /* BCMPCIE */
+
+static void
+dhd_dpc(ulong data)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)data;
+
+	/* This tasklet is scheduled from dhd_sched_dpc() below, where the
+	 * wake lock is taken; the tasklet itself is initialized in dhd_attach().
+	 */
+	/* Call bus dpc unless it indicated down (then clean stop) */
+	if (dhd->pub.busstate != DHD_BUS_DOWN) {
+		if (dhd_bus_dpc(dhd->pub.bus))
+			tasklet_schedule(&dhd->tasklet);
+		else
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	} else {
+		dhd_bus_stop(dhd->pub.bus, TRUE);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	}
+}
+
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	DHD_OS_WAKE_LOCK(dhdp);
+	if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+		/* If the semaphore does not get up,
+		* wake unlock should be done here
+		*/
+		if (!binary_sema_up(&dhd->thr_dpc_ctl))
+			DHD_OS_WAKE_UNLOCK(dhdp);
+		return;
+	} else {
+		tasklet_schedule(&dhd->tasklet);
+	}
+}
+
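+/* Queue an skb (or chain) for the rxf thread and wake it; with
+ * RXF_DEQUEUE_ON_BUSY defined, fall back to direct netif_rx_ni() delivery if
+ * the queue stays busy.
+ */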
+static void
+dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#ifdef RXF_DEQUEUE_ON_BUSY
+	int ret = BCME_OK;
+	int retry = 2;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+
+	DHD_OS_WAKE_LOCK(dhdp);
+
+	DHD_TRACE(("dhd_sched_rxf: Enter\n"));
+#ifdef RXF_DEQUEUE_ON_BUSY
+	do {
+		ret = dhd_rxf_enqueue(dhdp, skb);
+		if (ret == BCME_OK || ret == BCME_ERROR)
+			break;
+		else
+			OSL_SLEEP(50); /* waiting for dequeueing */
+	} while (retry-- > 0);
+
+	if (retry <= 0 && ret == BCME_BUSY) {
+		void *skbp = skb;
+
+		while (skbp) {
+			void *skbnext = PKTNEXT(dhdp->osh, skbp);
+			PKTSETNEXT(dhdp->osh, skbp, NULL);
+			netif_rx_ni(skbp);
+			skbp = skbnext;
+		}
+		DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
+	}
+	else {
+		if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+			up(&dhd->thr_rxf_ctl.sema);
+		}
+	}
+#else /* RXF_DEQUEUE_ON_BUSY */
+	do {
+		if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
+			break;
+	} while (1);
+	if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+		up(&dhd->thr_rxf_ctl.sema);
+	}
+	return;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+}
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = FALSE;
+
+	strncpy(buf, "toe_ol", sizeof(buf) - 1);
+	buf[sizeof(buf) - 1] = '\0';
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		/* Check for older dongle image that doesn't support toe_ol */
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: toe not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+
+		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	memcpy(toe_ol, buf, sizeof(uint32));
+	return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int toe, ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = TRUE;
+
+	/* Set toe_ol as requested */
+
+	strncpy(buf, "toe_ol", sizeof(buf) - 1);
+	buf[sizeof(buf) - 1] = '\0';
+	memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
+
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+			dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	/* Enable toe globally only if any components are enabled. */
+
+	toe = (toe_ol != 0);
+
+	strcpy(buf, "toe");
+	memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
+
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	return 0;
+}
+#endif /* TOE */
+
+#if defined(WL_CFG80211)
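+/* Set the firmware's station probe count ("scb_probe" iovar) to
+ * NUM_SCB_MAX_PROBE; skipped when the driver runs in hostap mode.
+ */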
+void dhd_set_scb_probe(dhd_pub_t *dhd)
+{
+#define NUM_SCB_MAX_PROBE 3
+	int ret = 0;
+	wl_scb_probe_t scb_probe;
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+	memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
+
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
+		return;
+
+	bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
+
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
+
+	memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
+
+	scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
+
+	bcm_mkiovar("scb_probe", (char *)&scb_probe,
+		sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
+#undef NUM_SCB_MAX_PROBE
+	return;
+}
+#endif /* WL_CFG80211 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+
+	snprintf(info->driver, sizeof(info->driver), "wl");
+	snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
+}
+
+struct ethtool_ops dhd_ethtool_ops = {
+	.get_drvinfo = dhd_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+	struct ethtool_drvinfo info;
+	char drvname[sizeof(info.driver)];
+	uint32 cmd;
+#ifdef TOE
+	struct ethtool_value edata;
+	uint32 toe_cmpnt, csum_dir;
+	int ret;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* all ethtool calls start with a cmd word */
+	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+		return -EFAULT;
+
+	switch (cmd) {
+	case ETHTOOL_GDRVINFO:
+		/* Copy out any request driver name */
+		if (copy_from_user(&info, uaddr, sizeof(info)))
+			return -EFAULT;
+		strncpy(drvname, info.driver, sizeof(info.driver));
+		drvname[sizeof(info.driver)-1] = '\0';
+
+		/* clear struct for return */
+		memset(&info, 0, sizeof(info));
+		info.cmd = cmd;
+
+		/* if dhd requested, identify ourselves */
+		if (strcmp(drvname, "?dhd") == 0) {
+			snprintf(info.driver, sizeof(info.driver), "dhd");
+			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
+			info.version[sizeof(info.version) - 1] = '\0';
+		}
+
+		/* otherwise, require dongle to be up */
+		else if (!dhd->pub.up) {
+			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+			return -ENODEV;
+		}
+
+		/* finally, report dongle driver type */
+		else if (dhd->pub.iswl)
+			snprintf(info.driver, sizeof(info.driver), "wl");
+		else
+			snprintf(info.driver, sizeof(info.driver), "xx");
+
+		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
+		if (copy_to_user(uaddr, &info, sizeof(info)))
+			return -EFAULT;
+		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+		         (int)sizeof(drvname), drvname, info.driver));
+		break;
+
+#ifdef TOE
+	/* Get toe offload components from dongle */
+	case ETHTOOL_GRXCSUM:
+	case ETHTOOL_GTXCSUM:
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		edata.cmd = cmd;
+		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+		if (copy_to_user(uaddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		break;
+
+	/* Set toe offload components in dongle */
+	case ETHTOOL_SRXCSUM:
+	case ETHTOOL_STXCSUM:
+		if (copy_from_user(&edata, uaddr, sizeof(edata)))
+			return -EFAULT;
+
+		/* Read the current settings, update and write back */
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		if (edata.data != 0)
+			toe_cmpnt |= csum_dir;
+		else
+			toe_cmpnt &= ~csum_dir;
+
+		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+			return ret;
+
+		/* If setting TX checksum mode, tell Linux the new mode */
+		if (cmd == ETHTOOL_STXCSUM) {
+			if (edata.data)
+				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+			else
+				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+		}
+
+		break;
+#endif /* TOE */
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
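+/* Decide whether an ioctl/bus error indicates a dead dongle (timeout, remote
+ * I/O error, or bus down without a pending reset) and, if so, send a HANG
+ * event up so the framework can restart Wi-Fi.
+ */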
+static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
+{
+	dhd_info_t *dhd;
+
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	if (!dhdp->up)
+		return FALSE;
+
+	dhd = (dhd_info_t *)dhdp->info;
+#if !defined(BCMPCIE)
+	if (dhd->thr_dpc_ctl.thr_pid < 0) {
+		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
+		return FALSE;
+	}
+#endif 
+
+	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
+		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
+		DHD_ERROR(("%s: Event HANG sent up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
+			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
+		net_os_send_hang_message(net);
+		return TRUE;
+	}
+	return FALSE;
+}
+
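+/* Common ioctl dispatcher: DHD-private ioctls are handled locally; wl ioctls
+ * require the bus to be up, are serialized against pending 802.1X frames for
+ * key/disassoc commands, and are then forwarded to the dongle.
+ */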
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
+{
+	int bcmerror = BCME_OK;
+	int buflen = 0;
+	struct net_device *net;
+
+	net = dhd_idx2net(pub, ifidx);
+	if (!net) {
+		bcmerror = BCME_BADARG;
+		goto done;
+	}
+
+	if (data_buf)
+		buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
+
+	/* check for local dhd ioctl and handle it */
+	if (ioc->driver == DHD_IOCTL_MAGIC) {
+		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
+		if (bcmerror)
+			pub->bcmerror = bcmerror;
+		goto done;
+	}
+
+	/* send to dongle (must be up, and wl). */
+	if (pub->busstate != DHD_BUS_DATA) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	if (!pub->iswl) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	/*
+	 * Flush the TX queue if required for proper message serialization:
+	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
+	 * prevent M4 encryption and
+	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
+	 * prevent disassoc frame being sent before WPS-DONE frame.
+	 */
+	if (ioc->cmd == WLC_SET_KEY ||
+	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+	     strncmp("wsec_key", data_buf, 9) == 0) ||
+	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
+	    ioc->cmd == WLC_DISASSOC)
+		dhd_wait_pend8021x(net);
+
+#ifdef WLMEDIA_HTSF
+	if (data_buf) {
+		/*  short cut wl ioctl calls here  */
+		if (strcmp("htsf", data_buf) == 0) {
+			dhd_ioctl_htsf_get(dhd, 0);
+			return BCME_OK;
+		}
+
+		if (strcmp("htsflate", data_buf) == 0) {
+			if (ioc->set) {
+				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
+				memset(&maxdelayts, 0, sizeof(tstamp_t));
+				maxdelay = 0;
+				tspktcnt = 0;
+				maxdelaypktno = 0;
+				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			} else {
+				dhd_dump_latency();
+			}
+			return BCME_OK;
+		}
+		if (strcmp("htsfclear", data_buf) == 0) {
+			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			htsf_seqnum = 0;
+			return BCME_OK;
+		}
+		if (strcmp("htsfhis", data_buf) == 0) {
+			dhd_dump_htsfhisto(&vi_d1, "H to D");
+			dhd_dump_htsfhisto(&vi_d2, "D to D");
+			dhd_dump_htsfhisto(&vi_d3, "D to H");
+			dhd_dump_htsfhisto(&vi_d4, "H to H");
+			return BCME_OK;
+		}
+		if (strcmp("tsport", data_buf) == 0) {
+			if (ioc->set) {
+				memcpy(&tsport, data_buf + 7, 4);
+			} else {
+				DHD_ERROR(("current timestamp port: %d \n", tsport));
+			}
+			return BCME_OK;
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+
+	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
+		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
+#ifdef BCM_FD_AGGR
+		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
+#else
+		bcmerror = BCME_UNSUPPORTED;
+#endif
+		goto done;
+	}
+	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
+
+done:
+	dhd_check_hang(net, pub, bcmerror);
+
+	return bcmerror;
+}
+
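+/* ndo_do_ioctl handler: routes wireless-extensions, ethtool and Android
+ * private commands, copies the wl/dhd ioctl structure from user space (with
+ * compat handling) and dispatches it via dhd_ioctl_process().
+ */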
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	dhd_ioctl_t ioc;
+	int bcmerror = 0;
+	int ifidx;
+	int ret;
+	void *local_buf = NULL;
+	u16 buflen = 0;
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	/* Interface up check for built-in type */
+	if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) {
+		DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return BCME_NOTUP;
+	}
+
+	/* send to dongle only if we are not waiting for reload already */
+	if (dhd->pub.hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return OSL_ERROR(BCME_DONGLE_DOWN);
+	}
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -1;
+	}
+
+#if defined(WL_WIRELESS_EXT)
+	/* linux wireless extensions */
+	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+		/* may recurse, do NOT lock */
+		ret = wl_iw_ioctl(net, ifr, cmd);
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+	if (cmd == SIOCETHTOOL) {
+		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+	if (cmd == SIOCDEVPRIVATE+1) {
+		ret = wl_android_priv_cmd(net, ifr, cmd);
+		dhd_check_hang(net, &dhd->pub, ret);
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+
+	if (cmd != SIOCDEVPRIVATE) {
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -EOPNOTSUPP;
+	}
+
+	memset(&ioc, 0, sizeof(ioc));
+
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		compat_wl_ioctl_t compat_ioc;
+		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+		ioc.cmd = compat_ioc.cmd;
+		ioc.buf = compat_ptr(compat_ioc.buf);
+		ioc.len = compat_ioc.len;
+		ioc.set = compat_ioc.set;
+		ioc.used = compat_ioc.used;
+		ioc.needed = compat_ioc.needed;
+		/* To differentiate between wl and dhd read 4 more bytes */
+		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
+			sizeof(uint)) != 0)) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+	} else
+#endif /* CONFIG_COMPAT */
+	{
+		/* Copy the ioc control structure part of ioctl request */
+		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+
+		/* To differentiate between wl and dhd read 4 more bytes */
+		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+			sizeof(uint)) != 0)) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+	}
+
+	if (!capable(CAP_NET_ADMIN)) {
+		bcmerror = BCME_EPERM;
+		goto done;
+	}
+
+	if (ioc.len > 0) {
+		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		if (copy_from_user(local_buf, ioc.buf, buflen)) {
+			DHD_PERIM_LOCK(&dhd->pub);
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+		DHD_PERIM_LOCK(&dhd->pub);
+
+		*(char *)(local_buf + buflen) = '\0';
+	}
+
+	bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
+
+	if (!bcmerror && buflen && local_buf && ioc.buf) {
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		if (copy_to_user(ioc.buf, local_buf, buflen))
+			bcmerror = -EFAULT;
+		DHD_PERIM_LOCK(&dhd->pub);
+	}
+
+done:
+	if (local_buf)
+		MFREE(dhd->pub.osh, local_buf, buflen+1);
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	return OSL_ERROR(bcmerror);
+}
+
+
+
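+/* ndo_stop handler: stop the TX queue, flush per-interface STA state, bring
+ * down cfg80211 (removing leftover virtual interfaces for built-in drivers),
+ * clean up proptxstatus and stop the protocol layer.
+ */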
+static int
+dhd_stop(struct net_device *net)
+{
+	int ifidx = 0;
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+	DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
+	if (dhd->pub.up == 0) {
+		goto exit;
+	}
+
+	dhd_if_flush_sta(DHD_DEV_IFP(net));
+
+
+	ifidx = dhd_net2idx(dhd, net);
+	BCM_REFERENCE(ifidx);
+
+	/* Set state and stop OS transmissions */
+	netif_stop_queue(net);
+	dhd->pub.up = 0;
+
+#ifdef WL_CFG80211
+	if (ifidx == 0) {
+		wl_cfg80211_down(NULL);
+
+		/*
+		 * For CFG80211: Clean up all the left over virtual interfaces
+		 * when the primary Interface is brought down. [ifconfig wlan0 down]
+		 */
+		if (!dhd_download_fw_on_driverload) {
+			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
+				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+				int i;
+
+				dhd_net_if_lock_local(dhd);
+				for (i = 1; i < DHD_MAX_IFS; i++)
+					dhd_remove_if(&dhd->pub, i, FALSE);
+				dhd_net_if_unlock_local(dhd);
+			}
+		}
+	}
+#endif /* WL_CFG80211 */
+
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
+#endif
+	/* Stop the protocol module */
+	dhd_prot_stop(&dhd->pub);
+
+	OLD_MOD_DEC_USE_COUNT;
+exit:
+#if defined(WL_CFG80211)
+	if (ifidx == 0 && !dhd_download_fw_on_driverload)
+		wl_android_wifi_off(net);
+#endif 
+	dhd->pub.rxcnt_timeout = 0;
+	dhd->pub.txcnt_timeout = 0;
+
+	dhd->pub.hang_was_sent = 0;
+
+	/* Clear country spec for built-in type driver */
+	if (!dhd_download_fw_on_driverload) {
+		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
+		dhd->pub.dhd_cspec.rev = 0;
+		dhd->pub.dhd_cspec.ccode[0] = 0x00;
+	}
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	return 0;
+}
+
+#if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
+extern bool g_first_broadcast_scan;
+#endif 
+
+#ifdef WL11U
+static int dhd_interworking_enable(dhd_pub_t *dhd)
+{
+	char iovbuf[WLC_IOCTL_SMLEN];
+	uint32 enable = true;
+	int ret = BCME_OK;
+
+	bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
+	}
+
+	if (ret == BCME_OK) {
+		/* basic capabilities for HS20 REL2 */
+		uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
+		bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+			iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
+		}
+	}
+
+	return ret;
+}
+#endif /* WL11U */
+
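+/* ndo_open handler: power the chip on and load firmware when not downloaded
+ * at driver load, bring up the bus, publish the dongle MAC address and (if
+ * enabled) TOE capabilities on the net_device, then enable the TX queue.
+ */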
+static int
+dhd_open(struct net_device *net)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+#ifdef TOE
+	uint32 toe_ol;
+#endif
+	int ifidx;
+	int32 ret = 0;
+
+
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
+	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
+		DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
+	}
+	mutex_lock(&_dhd_sdio_mutex_lock_);
+#endif
+#endif /* MULTIPLE_SUPPLICANT */
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+	dhd->pub.dongle_trap_occured = 0;
+	dhd->pub.hang_was_sent = 0;
+
+#if !defined(WL_CFG80211)
+	/*
+	 * Force start if ifconfig_up gets called before START command
+	 *  We keep WEXT's wl_control_wl_start to provide backward compatibility
+	 *  This should be removed in the future
+	 */
+	ret = wl_control_wl_start(net);
+	if (ret != 0) {
+		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+		ret = -1;
+		goto exit;
+	}
+
+#endif 
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	if (ifidx < 0) {
+		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
+		ret = -1;
+		goto exit;
+	}
+
+	if (!dhd->iflist[ifidx]) {
+		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+		ret = -1;
+		goto exit;
+	}
+
+	if (ifidx == 0) {
+		atomic_set(&dhd->pend_8021x_cnt, 0);
+#if defined(WL_CFG80211)
+		if (!dhd_download_fw_on_driverload) {
+			DHD_ERROR(("\n%s\n", dhd_version));
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+			g_first_broadcast_scan = TRUE;
+#endif 
+			ret = wl_android_wifi_on(net);
+			if (ret != 0) {
+				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
+					__FUNCTION__, ret));
+				ret = -1;
+				goto exit;
+			}
+		}
+#endif 
+
+		if (dhd->pub.busstate != DHD_BUS_DATA) {
+
+			/* try to bring up bus */
+			DHD_PERIM_UNLOCK(&dhd->pub);
+			ret = dhd_bus_start(&dhd->pub);
+			DHD_PERIM_LOCK(&dhd->pub);
+			if (ret) {
+				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+				ret = -1;
+				goto exit;
+			}
+
+		}
+
+		/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
+		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+
+#ifdef TOE
+		/* Get current TOE mode from dongle */
+		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
+			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+		else
+			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+#endif /* TOE */
+
+#if defined(WL_CFG80211)
+		if (unlikely(wl_cfg80211_up(NULL))) {
+			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
+			ret = -1;
+			goto exit;
+		}
+		dhd_set_scb_probe(&dhd->pub);
+#endif /* WL_CFG80211 */
+	}
+
+	/* Allow transmit calls */
+	netif_start_queue(net);
+	dhd->pub.up = 1;
+
+#ifdef BCMDBGFS
+	dhd_dbg_init(&dhd->pub);
+#endif
+
+	OLD_MOD_INC_USE_COUNT;
+exit:
+	if (ret)
+		dhd_stop(net);
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
+	mutex_unlock(&_dhd_sdio_mutex_lock_);
+#endif
+#endif /* MULTIPLE_SUPPLICANT */
+
+	return ret;
+}
+
+int dhd_do_driver_init(struct net_device *net)
+{
+	dhd_info_t *dhd = NULL;
+
+	if (!net) {
+		DHD_ERROR(("Primary Interface not initialized \n"));
+		return -EINVAL;
+	}
+
+#ifdef MULTIPLE_SUPPLICANT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
+	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
+		DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
+		return 0;
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif /* MULTIPLE_SUPPLICANT */
+
+	/*  && defined(OEM_ANDROID) && defined(BCMSDIO) */
+	dhd = DHD_DEV_INFO(net);
+
+	/* If driver is already initialized, do nothing
+	 */
+	if (dhd->pub.busstate == DHD_BUS_DATA) {
+		DHD_TRACE(("Driver already initialized. Nothing to do"));
+		return 0;
+	}
+
+	if (dhd_open(net) < 0) {
+		DHD_ERROR(("Driver Init Failed \n"));
+		return -1;
+	}
+
+	return 0;
+}
+
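+/* Handle a firmware "interface add" event: give cfg80211 a chance to claim it
+ * first, otherwise defer net_device creation to the deferred work queue so
+ * the DPC context is not blocked waiting for iovar responses.
+ */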
+int
+dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+
+#ifdef WL_CFG80211
+	if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+		return BCME_OK;
+#endif
+
+	/* handle IF event caused by wl commands, SoftAP, WEXT and
+	 * anything else. This has to be done asynchronously otherwise
+	 * DPC will be blocked (and iovars will timeout as DPC has no chance
+	 * to read the response back)
+	 */
+	if (ifevent->ifidx > 0) {
+		dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+
+		if (if_event == NULL) {
+			DHD_ERROR(("%s: OOM - dhd_if_event_t\n", __FUNCTION__));
+			return BCME_NOMEM;
+		}
+		memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+		memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+		strncpy(if_event->name, name, IFNAMSIZ);
+		if_event->name[IFNAMSIZ - 1] = '\0';
+		dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
+			DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
+	}
+
+	return BCME_OK;
+}
+
+int
+dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+	dhd_if_event_t *if_event;
+
+#ifdef WL_CFG80211
+	if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+		return BCME_OK;
+#endif /* WL_CFG80211 */
+
+	/* handle IF event caused by wl commands, SoftAP, WEXT and
+	 * anything else
+	 */
+	if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+	if (if_event == NULL) {
+		DHD_ERROR(("%s: OOM - dhd_if_event_t\n", __FUNCTION__));
+		return BCME_NOMEM;
+	}
+	memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+	memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+	strncpy(if_event->name, name, IFNAMSIZ);
+	if_event->name[IFNAMSIZ - 1] = '\0';
+	dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
+		dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
+
+	return BCME_OK;
+}
+
+/* unregister and free the existing net_device interface (if any) in iflist and
+ * allocate a new one. the slot is reused. this function does NOT register the
+ * new interface to linux kernel. dhd_register_if does the job
+ */
+struct net_device*
+dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+	dhd_if_t *ifp;
+
+	ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
+	ifp = dhdinfo->iflist[ifidx];
+
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
+
+			dhd_dev_priv_clear(ifp->net); /* clear net_device private */
+
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+				free_netdev(ifp->net);
+			} else {
+				netif_stop_queue(ifp->net);
+				if (need_rtnl_lock)
+					unregister_netdev(ifp->net);
+				else
+					unregister_netdevice(ifp->net);
+			}
+			ifp->net = NULL;
+		}
+	} else {
+		ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
+		if (ifp == NULL) {
+			DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
+			return NULL;
+		}
+	}
+
+	memset(ifp, 0, sizeof(dhd_if_t));
+	ifp->info = dhdinfo;
+	ifp->idx = ifidx;
+	ifp->bssidx = bssidx;
+	if (mac != NULL)
+		memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
+
+	/* Allocate etherdev, including space for private structure */
+	ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
+	if (ifp->net == NULL) {
+		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
+		goto fail;
+	}
+
+	/* Setup the dhd interface's netdevice private structure. */
+	dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
+
+	if (name && name[0]) {
+		strncpy(ifp->net->name, name, IFNAMSIZ);
+		ifp->net->name[IFNAMSIZ - 1] = '\0';
+	}
+#ifdef WL_CFG80211
+	if (ifidx == 0)
+		ifp->net->destructor = free_netdev;
+	else
+		ifp->net->destructor = dhd_netdev_free;
+#else
+	ifp->net->destructor = free_netdev;
+#endif /* WL_CFG80211 */
+	strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
+	ifp->name[IFNAMSIZ - 1] = '\0';
+	dhdinfo->iflist[ifidx] = ifp;
+
+#ifdef PCIE_FULL_DONGLE
+	/* Initialize STA info list */
+	INIT_LIST_HEAD(&ifp->sta_list);
+	DHD_IF_STA_LIST_LOCK_INIT(ifp);
+#endif /* PCIE_FULL_DONGLE */
+
+	return ifp->net;
+
+fail:
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			dhd_dev_priv_clear(ifp->net);
+			free_netdev(ifp->net);
+			ifp->net = NULL;
+		}
+		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+		ifp = NULL;
+	}
+	dhdinfo->iflist[ifidx] = NULL;
+	return NULL;
+}
+
+/* unregister and free the net_device interface associated with the indexed
+ * slot, also free the slot memory and set the slot pointer to NULL
+ */
+int
+dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+	dhd_if_t *ifp;
+
+	ifp = dhdinfo->iflist[ifidx];
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
+
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+				free_netdev(ifp->net);
+			} else {
+				netif_stop_queue(ifp->net);
+
+
+
+				if (need_rtnl_lock)
+					unregister_netdev(ifp->net);
+				else
+					unregister_netdevice(ifp->net);
+			}
+			ifp->net = NULL;
+		}
+#ifdef DHD_WMF
+		dhd_wmf_cleanup(dhdpub, ifidx);
+#endif /* DHD_WMF */
+
+		dhd_if_del_sta_list(ifp);
+
+		dhdinfo->iflist[ifidx] = NULL;
+		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+
+	}
+
+	return BCME_OK;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+static struct net_device_ops dhd_ops_pri = {
+	.ndo_open = dhd_open,
+	.ndo_stop = dhd_stop,
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+
+static struct net_device_ops dhd_ops_virt = {
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
+
+#ifdef DEBUGGER
+extern void debugger_init(void *bus_handle);
+#endif
+
+
+#ifdef SHOW_LOGTRACE
+static char *logstrs_path = "/root/logstrs.bin";
+module_param(logstrs_path, charp, S_IRUGO);
+
+int
+dhd_init_logstrs_array(dhd_event_log_t *temp)
+{
+	struct file *filep = NULL;
+	struct kstat stat;
+	mm_segment_t fs;
+	char *raw_fmts =  NULL;
+	int logstrs_size = 0;
+
+	logstr_header_t *hdr = NULL;
+	uint32 *lognums = NULL;
+	char *logstrs = NULL;
+	int ram_index = 0;
+	char **fmts;
+	int num_fmts = 0;
+	uint32 i = 0;
+	int error = 0;
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	filep = filp_open(logstrs_path, O_RDONLY, 0);
+	if (IS_ERR(filep)) {
+		DHD_ERROR(("Failed to open the file logstrs.bin in %s",  __FUNCTION__));
+		goto fail;
+	}
+	error = vfs_stat(logstrs_path, &stat);
+	if (error) {
+		DHD_ERROR(("Failed in %s to find file stat", __FUNCTION__));
+		goto fail;
+	}
+	logstrs_size = (int) stat.size;
+
+	raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
+	if (raw_fmts == NULL) {
+		DHD_ERROR(("Failed to allocate raw_fmts memory"));
+		goto fail;
+	}
+	if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) !=	logstrs_size) {
+		DHD_ERROR(("Error: Log strings file read failed"));
+		goto fail;
+	}
+
+	/* Remember header from the logstrs.bin file */
+	hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
+		sizeof(logstr_header_t));
+
+	if (hdr->log_magic == LOGSTRS_MAGIC) {
+		/*
+		* logstrs.bin start with header.
+		*/
+		num_fmts =	hdr->rom_logstrs_offset / sizeof(uint32);
+		ram_index = (hdr->ram_lognums_offset -
+			hdr->rom_lognums_offset) / sizeof(uint32);
+		lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
+		logstrs = (char *)	 &raw_fmts[hdr->rom_logstrs_offset];
+	} else {
+		/*
+		 * Legacy logstrs.bin format without header.
+		 */
+		num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
+		if (num_fmts == 0) {
+			/* Legacy ROM/RAM logstrs.bin format:
+			  *  - ROM 'lognums' section
+			  *   - RAM 'lognums' section
+			  *   - ROM 'logstrs' section.
+			  *   - RAM 'logstrs' section.
+			  *
+			  * 'lognums' is an array of indexes for the strings in the
+			  * 'logstrs' section. The first uint32 is 0 (index of first
+			  * string in ROM 'logstrs' section).
+			  *
+			  * The 4324b5 is the only ROM that uses this legacy format. Use the
+			  * fixed number of ROM fmtnums to find the start of the RAM
+			  * 'lognums' section. Use the fixed first ROM string ("Con\n") to
+			  * find the ROM 'logstrs' section.
+			  */
+			#define NUM_4324B5_ROM_FMTS	186
+			#define FIRST_4324B5_ROM_LOGSTR "Con\n"
+			ram_index = NUM_4324B5_ROM_FMTS;
+			lognums = (uint32 *) raw_fmts;
+			num_fmts =	ram_index;
+			logstrs = (char *) &raw_fmts[num_fmts << 2];
+			while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
+				num_fmts++;
+				logstrs = (char *) &raw_fmts[num_fmts << 2];
+			}
+		} else {
+				/* Legacy RAM-only logstrs.bin format:
+				 *	  - RAM 'lognums' section
+				 *	  - RAM 'logstrs' section.
+				 *
+				 * 'lognums' is an array of indexes for the strings in the
+				 * 'logstrs' section. The first uint32 is an index to the
+				 * start of 'logstrs'. Therefore, if this index is divided
+				 * by 'sizeof(uint32)' it provides the number of logstr
+				 *	entries.
+				 */
+				ram_index = 0;
+				lognums = (uint32 *) raw_fmts;
+				logstrs = (char *)	&raw_fmts[num_fmts << 2];
+			}
+	}
+	fmts = kmalloc(num_fmts  * sizeof(char *), GFP_KERNEL);
+	if (fmts == NULL) {
+		DHD_ERROR(("Failed to allocate fmts memory"));
+		goto fail;
+	}
+
+	for (i = 0; i < num_fmts; i++) {
+		/* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
+		* (they are 0-indexed relative to 'rom_logstrs_offset').
+		*
+		* RAM lognums are already indexed to point to the correct RAM logstrs (they
+		* are 0-indexed relative to the start of the logstrs.bin file).
+		*/
+		if (i == ram_index) {
+			logstrs = raw_fmts;
+		}
+		fmts[i] = &logstrs[lognums[i]];
+	}
+	temp->fmts = fmts;
+	temp->raw_fmts = raw_fmts;
+	temp->num_fmts = num_fmts;
+	filp_close(filep, NULL);
+	set_fs(fs);
+	return 0;
+fail:
+	if (raw_fmts) {
+		kfree(raw_fmts);
+		raw_fmts = NULL;
+	}
+	if (!IS_ERR(filep))
+		filp_close(filep, NULL);
+	set_fs(fs);
+	temp->fmts = NULL;
+	return -1;
+}
+#endif /* SHOW_LOGTRACE */
+
+
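+/* dhd_attach: allocate and initialize the per-adapter dhd_info structure,
+ * the primary net_device, semaphores, spinlocks, wakelocks, the protocol
+ * layer and (optionally) the cfg80211/wireless-extensions glue.
+ */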
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
+{
+	dhd_info_t *dhd = NULL;
+	struct net_device *net = NULL;
+	char if_name[IFNAMSIZ] = {'\0'};
+	uint32 bus_type = -1;
+	uint32 bus_num = -1;
+	uint32 slot_num = -1;
+	wifi_adapter_info_t *adapter = NULL;
+
+	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* will implement get_ids for DBUS later */
+#if defined(BCMSDIO)
+	dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
+#endif 
+	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+
+	/* Allocate primary dhd_info */
+	dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
+	if (dhd == NULL) {
+		dhd = MALLOC(osh, sizeof(dhd_info_t));
+		if (dhd == NULL) {
+			DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+	memset(dhd, 0, sizeof(dhd_info_t));
+	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
+
+	dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
+
+	dhd->pub.osh = osh;
+	dhd->adapter = adapter;
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
+#endif /* GET_CUSTOM_MAC_ENABLE */
+	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
+	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
+
+	/* Initialize thread based operation and lock */
+	sema_init(&dhd->sdsem, 1);
+
+	/* Some DHD modules (e.g. cfg80211) configure the operation mode based on the firmware
+	 * name. This is a hack, but we have to make it work properly until we have a better
+	 * solution.
+	 */
+	dhd_update_fw_nv_path(dhd);
+
+	/* Link to info module */
+	dhd->pub.info = dhd;
+
+
+	/* Link to bus module */
+	dhd->pub.bus = bus;
+	dhd->pub.hdrlen = bus_hdrlen;
+
+	/* Set network interface name if it was provided as module parameter */
+	if (iface_name[0]) {
+		int len;
+		char ch;
+		strncpy(if_name, iface_name, IFNAMSIZ);
+		if_name[IFNAMSIZ - 1] = 0;
+		len = strlen(if_name);
+		ch = if_name[len - 1];
+		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+			strcat(if_name, "%d");
+	}
+	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
+	if (net == NULL)
+		goto fail;
+	dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+
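+	/* Clear the net device callbacks for now */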
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+
+	sema_init(&dhd->proto_sem, 1);
+
+#ifdef PROP_TXSTATUS
+	spin_lock_init(&dhd->wlfc_spinlock);
+
+	dhd->pub.skip_fc = dhd_wlfc_skip_fc;
+	dhd->pub.plat_init = dhd_wlfc_plat_init;
+	dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
+#endif /* PROP_TXSTATUS */
+
+	/* Initialize other structure content */
+	init_waitqueue_head(&dhd->ioctl_resp_wait);
+	init_waitqueue_head(&dhd->ctrl_wait);
+
+	/* Initialize the spinlocks */
+	spin_lock_init(&dhd->sdlock);
+	spin_lock_init(&dhd->txqlock);
+	spin_lock_init(&dhd->dhd_lock);
+	spin_lock_init(&dhd->rxf_lock);
+#if defined(RXFRAME_THREAD)
+	dhd->rxthread_enabled = TRUE;
+#endif /* defined(RXFRAME_THREAD) */
+
+#ifdef DHDTCPACK_SUPPRESS
+	spin_lock_init(&dhd->tcpack_lock);
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Initialize Wakelock stuff */
+	spin_lock_init(&dhd->wakelock_spinlock);
+	dhd->wakelock_counter = 0;
+	dhd->wakelock_wd_counter = 0;
+	dhd->wakelock_rx_timeout_enable = 0;
+	dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
+	wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
+#endif /* CONFIG_HAS_WAKELOCK */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	mutex_init(&dhd->dhd_net_if_mutex);
+	mutex_init(&dhd->dhd_suspend_mutex);
+#endif
+	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+
+	/* Attach and link in the protocol */
+	if (dhd_prot_attach(&dhd->pub) != 0) {
+		DHD_ERROR(("dhd_prot_attach failed\n"));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
+
+#ifdef WL_CFG80211
+	/* Attach and link in the cfg80211 */
+	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
+		DHD_ERROR(("wl_cfg80211_attach failed\n"));
+		goto fail;
+	}
+
+	dhd_monitor_init(&dhd->pub);
+	dhd_state |= DHD_ATTACH_STATE_CFG80211;
+#endif
+#if defined(WL_WIRELESS_EXT)
+	/* Attach and link in the iw */
+	if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+		if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
+			DHD_ERROR(("wl_iw_attach failed\n"));
+			goto fail;
+		}
+		dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#ifdef SHOW_LOGTRACE
+	dhd_init_logstrs_array(&dhd->event_data);
+#endif /* SHOW_LOGTRACE */
+
+	if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
+		DHD_ERROR(("%s: Initializing %u sta failed\n", __FUNCTION__, DHD_MAX_STA));
+		goto fail;
+	}
+
+
+	/* Set up the watchdog timer */
+	init_timer(&dhd->timer);
+	dhd->timer.data = (ulong)dhd;
+	dhd->timer.function = dhd_watchdog;
+	dhd->default_wd_interval = dhd_watchdog_ms;
+
+	if (dhd_watchdog_prio >= 0) {
+		/* Initialize watchdog thread */
+		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
+
+	} else {
+		dhd->thr_wdt_ctl.thr_pid = -1;
+	}
+
+#ifdef DEBUGGER
+	debugger_init((void *) bus);
+#endif
+
+	/* Set up the bottom half handler */
+	if (dhd_dpc_prio >= 0) {
+		/* Initialize DPC thread */
+		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
+	} else {
+		/*  use tasklet for dpc */
+		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+		dhd->thr_dpc_ctl.thr_pid = -1;
+	}
+
+	if (dhd->rxthread_enabled) {
+		bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
+		/* Initialize RXF thread */
+		PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
+	}
+
+	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
+
+#if defined(CONFIG_PM_SLEEP)
+	if (!dhd_pm_notifier_registered) {
+		dhd_pm_notifier_registered = TRUE;
+		register_pm_notifier(&dhd_pm_notifier);
+	}
+#endif /* CONFIG_PM_SLEEP */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+	dhd->early_suspend.suspend = dhd_early_suspend;
+	dhd->early_suspend.resume = dhd_late_resume;
+	register_early_suspend(&dhd->early_suspend);
+	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	dhd->pend_ipaddr = 0;
+	if (!dhd_inetaddr_notifier_registered) {
+		dhd_inetaddr_notifier_registered = TRUE;
+		register_inetaddr_notifier(&dhd_inetaddr_notifier);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef CONFIG_IPV6
+	if (!dhd_inet6addr_notifier_registered) {
+		dhd_inet6addr_notifier_registered = TRUE;
+		register_inet6addr_notifier(&dhd_inet6addr_notifier);
+	}
+#endif
+	dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
+#ifdef DEBUG_CPU_FREQ
+	dhd->new_freq = alloc_percpu(int);
+	dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
+	cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#ifdef BCMSDIO
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
+#elif defined(BCMPCIE)
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_REPLACE);
+#else
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* BCMSDIO */
+#endif /* DHDTCPACK_SUPPRESS */
+
+	dhd_state |= DHD_ATTACH_STATE_DONE;
+	dhd->dhd_state = dhd_state;
+
+	dhd_found++;
+	return &dhd->pub;
+
+fail:
+	if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
+		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
+			__FUNCTION__, dhd_state, &dhd->pub));
+		dhd->dhd_state = dhd_state;
+		dhd_detach(&dhd->pub);
+		dhd_free(&dhd->pub);
+	}
+
+	return NULL;
+}
+
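+/* Infer the driver operating mode from the firmware file name suffix */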
+int dhd_get_fw_mode(dhd_info_t *dhdinfo)
+{
+	if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
+		return DHD_FLAG_HOSTAP_MODE;
+	if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
+		return DHD_FLAG_P2P_MODE;
+	if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
+		return DHD_FLAG_IBSS_MODE;
+	if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
+		return DHD_FLAG_MFG_MODE;
+
+	return DHD_FLAG_STA_MODE;
+}
+
+bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
+{
+	int fw_len;
+	int nv_len;
+	const char *fw = NULL;
+	const char *nv = NULL;
+	wifi_adapter_info_t *adapter = dhdinfo->adapter;
+
+
+	/* Update the firmware and nvram paths. A path may come from the adapter info or from a
+	 * module parameter. The path from the adapter info is used for initialization only
+	 * (as it won't change).
+	 *
+	 * The firmware_path/nvram_path module parameters may be changed by the system at run
+	 * time. When they change we need to copy them to dhdinfo->fw_path. An Android private
+	 * command may also change dhdinfo->fw_path. As such we need to clear the path info in
+	 * the module parameters after they are copied. We won't update the paths until the
+	 * module parameters are changed again (first character is not '\0').
+	 */
+
+	/* set default firmware and nvram path for built-in type driver */
+	if (!dhd_download_fw_on_driverload) {
+#ifdef CONFIG_BCMDHD_FW_PATH
+		fw = CONFIG_BCMDHD_FW_PATH;
+#endif /* CONFIG_BCMDHD_FW_PATH */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+		nv = CONFIG_BCMDHD_NVRAM_PATH;
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+	}
+
+	/* check if we need to initialize the path */
+	if (dhdinfo->fw_path[0] == '\0') {
+		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
+			fw = adapter->fw_path;
+
+	}
+	if (dhdinfo->nv_path[0] == '\0') {
+		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
+			nv = adapter->nv_path;
+	}
+
+	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
+	 *
+	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
+	 */
+	if (firmware_path[0] != '\0')
+		fw = firmware_path;
+	if (nvram_path[0] != '\0')
+		nv = nvram_path;
+
+	if (fw && fw[0] != '\0') {
+		fw_len = strlen(fw);
+		if (fw_len >= sizeof(dhdinfo->fw_path)) {
+			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
+			return FALSE;
+		}
+		strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
+		if (dhdinfo->fw_path[fw_len-1] == '\n')
+		       dhdinfo->fw_path[fw_len-1] = '\0';
+	}
+	if (nv && nv[0] != '\0') {
+		nv_len = strlen(nv);
+		if (nv_len >= sizeof(dhdinfo->nv_path)) {
+			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
+			return FALSE;
+		}
+		strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
+		if (dhdinfo->nv_path[nv_len-1] == '\n')
+		       dhdinfo->nv_path[nv_len-1] = '\0';
+	}
+
+	/* clear the path in module parameter */
+	firmware_path[0] = '\0';
+	nvram_path[0] = '\0';
+
+#ifndef BCMEMBEDIMAGE
+	/* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
+	if (dhdinfo->fw_path[0] == '\0') {
+		DHD_ERROR(("firmware path not found\n"));
+		return FALSE;
+	}
+	if (dhdinfo->nv_path[0] == '\0') {
+		DHD_ERROR(("nvram path not found\n"));
+		return FALSE;
+	}
+#endif /* BCMEMBEDIMAGE */
+
+	return TRUE;
+}
+
+
+#ifdef EXYNOS5433_PCIE_WAR
+extern int enum_wifi;
+#endif /* EXYNOS5433_PCIE_WAR */
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+	int ret = -1;
+	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+	unsigned long flags;
+
+	ASSERT(dhd);
+
+	DHD_TRACE(("Enter %s:\n", __FUNCTION__));
+
+	DHD_PERIM_LOCK(dhdp);
+
+	/* try to download image and nvram to the dongle */
+	if  (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
+		DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
+		ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+		                                dhd->fw_path, dhd->nv_path);
+		if (ret < 0) {
+#ifdef EXYNOS5433_PCIE_WAR
+			enum_wifi = 0;
+#endif /* EXYNOS5433_PCIE_WAR */
+			DHD_ERROR(("%s: failed to download firmware %s\n",
+			          __FUNCTION__, dhd->fw_path));
+			DHD_PERIM_UNLOCK(dhdp);
+			return ret;
+		}
+#ifdef EXYNOS5433_PCIE_WAR
+		enum_wifi = 1;
+#endif /* EXYNOS5433_PCIE_WAR */
+	}
+	if (dhd->pub.busstate != DHD_BUS_LOAD) {
+		DHD_PERIM_UNLOCK(dhdp);
+		return -ENETDOWN;
+	}
+
+	dhd_os_sdlock(dhdp);
+
+	/* Start the watchdog timer */
+	dhd->pub.tickcnt = 0;
+	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+	/* Bring up the bus */
+	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+
+		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+		dhd_os_sdunlock(dhdp);
+		DHD_PERIM_UNLOCK(dhdp);
+		return ret;
+	}
+#if defined(OOB_INTR_ONLY)
+	/* Host registration for OOB interrupt */
+	if (dhd_bus_oob_intr_register(dhdp)) {
+		/* deactivate timer and wait for the handler to finish */
+
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		dhd->wd_timer_valid = FALSE;
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+
+		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
+		dhd_os_sdunlock(dhdp);
+		DHD_PERIM_UNLOCK(dhdp);
+		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+		return -ENODEV;
+	}
+
+	/* Enable oob at firmware */
+	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* OOB_INTR_ONLY */
+#ifdef PCIE_FULL_DONGLE
+	{
+		uint8 txpush = 0;
+		uint32 num_flowrings; /* includes H2D common rings */
+		num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush);
+		DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__,
+			num_flowrings));
+		if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) {
+			dhd_os_sdunlock(dhdp);
+			DHD_PERIM_UNLOCK(dhdp);
+			return ret;
+		}
+	}
+#endif /* PCIE_FULL_DONGLE */
+
+	/* Do protocol initialization necessary for IOCTL/IOVAR */
+	dhd_prot_init(&dhd->pub);
+
+	/* If bus is not ready, can't come up */
+	if (dhd->pub.busstate != DHD_BUS_DATA) {
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		dhd->wd_timer_valid = FALSE;
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+		dhd_os_sdunlock(dhdp);
+		DHD_PERIM_UNLOCK(dhdp);
+		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+		return -ENODEV;
+	}
+
+	dhd_os_sdunlock(dhdp);
+
+	/* Bus is ready, query any dongle information */
+	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
+		DHD_PERIM_UNLOCK(dhdp);
+		return ret;
+	}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->pend_ipaddr) {
+#ifdef AOE_IP_ALIAS_SUPPORT
+		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+		dhd->pend_ipaddr = 0;
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	DHD_PERIM_UNLOCK(dhdp);
+	return 0;
+}
+#ifdef WLTDLS
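+/* Enable or disable TDLS in the dongle and, when auto mode is requested,
+ * configure the TDLS idle-time and RSSI thresholds.
+ */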
+int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+	char iovbuf[WLC_IOCTL_SMLEN];
+	uint32 tdls = tdls_on;
+	int ret = 0;
+	uint32 tdls_auto_op = 0;
+	uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
+	int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
+	int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
+	BCM_REFERENCE(mac);
+	if (!FW_SUPPORTED(dhd, tdls))
+		return BCME_ERROR;
+
+	if (dhd->tdls_enable == tdls_on)
+		goto auto_mode;
+	bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
+		goto exit;
+	}
+	dhd->tdls_enable = tdls_on;
+auto_mode:
+
+	tdls_auto_op = auto_on;
+	bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
+		iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	if (tdls_auto_op) {
+		bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
+			sizeof(tdls_idle_time),	iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+		bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+		bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+	}
+
+exit:
+	return ret;
+}
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+	if (dhd)
+		ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
+	else
+		ret = BCME_ERROR;
+	return ret;
+}
+#ifdef PCIE_FULL_DONGLE
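+/* Add or remove a TDLS peer entry in the driver peer table; on disconnect,
+ * also delete the flow rings associated with that peer.
+ */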
+void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	dhd_pub_t *dhdp =  (dhd_pub_t *)&dhd->pub;
+	tdls_peer_node_t *cur = dhdp->peer_tbl.node;
+	tdls_peer_node_t *new = NULL, *prev = NULL;
+	dhd_if_t *dhdif;
+	uint8 sa[ETHER_ADDR_LEN];
+	int ifidx = dhd_net2idx(dhd, dev);
+
+	if (ifidx == DHD_BAD_IF)
+		return;
+
+	dhdif = dhd->iflist[ifidx];
+	memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
+
+	if (connect) {
+		while (cur != NULL) {
+			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+				DHD_ERROR(("%s: TDLS Peer already exists %d\n",
+					__FUNCTION__, __LINE__));
+				return;
+			}
+			cur = cur->next;
+		}
+
+		new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
+		if (new == NULL) {
+			DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
+			return;
+		}
+		memcpy(new->addr, da, ETHER_ADDR_LEN);
+		new->next = dhdp->peer_tbl.node;
+		dhdp->peer_tbl.node = new;
+		dhdp->peer_tbl.tdls_peer_count++;
+
+	} else {
+		while (cur != NULL) {
+			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+				dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
+				if (prev)
+					prev->next = cur->next;
+				else
+					dhdp->peer_tbl.node = cur->next;
+				MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
+				dhdp->peer_tbl.tdls_peer_count--;
+				return;
+			}
+			prev = cur;
+			cur = cur->next;
+		}
+		DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
+	}
+}
+#endif /* PCIE_FULL_DONGLE */
+#endif /* WLTDLS */
+
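+/* Return TRUE if the current op_mode includes single- or multi-channel concurrency */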
+bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
+{
+	if (!dhd)
+		return FALSE;
+
+	if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
+		return TRUE;
+	else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
+		DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
+		return TRUE;
+	else
+		return FALSE;
+}
+#if !defined(AP) && defined(WLP2P)
+/* Since the Android Jelly Bean release, concurrent mode is enabled by default and the firmware
+ * is named fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
+ * firmware and, if so, enable concurrent mode (apply the P2P settings). The SoftAP firmware
+ * is still named fw_bcmdhd_apsta.
+ */
+uint32
+dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
+{
+	int32 ret = 0;
+	char buf[WLC_IOCTL_SMLEN];
+	bool mchan_supported = FALSE;
+	/* if dhd->op_mode is already set for HOSTAP or Manufacturing
+	 * test mode, we will simply use that mode as it is
+	 */
+	if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
+		return 0;
+	if (FW_SUPPORTED(dhd, vsdb)) {
+		mchan_supported = TRUE;
+	}
+	if (!FW_SUPPORTED(dhd, p2p)) {
+		DHD_TRACE(("Chip does not support p2p\n"));
+		return 0;
+	}
+	else {
+		/* Chip supports p2p; check whether p2p is really implemented in the firmware */
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+			FALSE, 0)) < 0) {
+			DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
+			return 0;
+		}
+		else {
+			if (buf[0] == 1) {
+				/* By default, the chip supports single channel concurrency;
+				 * now let's check for mchan.
+				 */
+				ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
+				if (mchan_supported)
+					ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+				/* For customer_hw4, concurrent mode is still
+				 * supported even on ICS.
+				 */
+				return ret;
+#else
+				return 0;
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+			}
+		}
+	}
+	return 0;
+}
+#endif /* !AP && WLP2P */
+#if defined(READ_CONFIG_FROM_FILE)
+#include <linux/fs.h>
+#include <linux/ctype.h>
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+bool PM_control = TRUE;
+
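+/* Apply a single name=value entry from the config file as the corresponding ioctl/iovar */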
+static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char *value)
+{
+	int var_int;
+	wl_country_t cspec = {{0}, -1, {0}};
+	char *revstr;
+	char *endptr = NULL;
+	int iolen;
+	char smbuf[WLC_IOCTL_SMLEN*2];
+
+	if (!strcmp(name, "country")) {
+		revstr = strchr(value, '/');
+		if (revstr) {
+			cspec.rev = strtoul(revstr + 1, &endptr, 10);
+			memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
+			cspec.country_abbrev[2] = '\0';
+			memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ);
+		} else {
+			cspec.rev = -1;
+			memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
+			memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
+			get_customized_country_code(dhd->info->adapter,
+				(char *)&cspec.country_abbrev, &cspec);
+		}
+		memset(smbuf, 0, sizeof(smbuf));
+		DHD_ERROR(("config country code is country : %s, rev : %d !!\n",
+			cspec.country_abbrev, cspec.rev));
+		iolen = bcm_mkiovar("country", (char*)&cspec, sizeof(cspec),
+			smbuf, sizeof(smbuf));
+		return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+			smbuf, iolen, TRUE, 0);
+	} else if (!strcmp(name, "roam_scan_period")) {
+		var_int = (int)simple_strtol(value, NULL, 0);
+		return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD,
+			&var_int, sizeof(var_int), TRUE, 0);
+	} else if (!strcmp(name, "roam_delta")) {
+		struct {
+			int val;
+			int band;
+		} x;
+		x.val = (int)simple_strtol(value, NULL, 0);
+		/* x.band = WLC_BAND_AUTO; */
+		x.band = WLC_BAND_ALL;
+		return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0);
+	} else if (!strcmp(name, "roam_trigger")) {
+		int ret = 0;
+
+		roam_trigger[0] = (int)simple_strtol(value, NULL, 0);
+		roam_trigger[1] = WLC_BAND_ALL;
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger,
+			sizeof(roam_trigger), TRUE, 0);
+
+		return ret;
+	} else if (!strcmp(name, "PM")) {
+		int ret = 0;
+		var_int = (int)simple_strtol(value, NULL, 0);
+
+		ret =  dhd_wl_ioctl_cmd(dhd, WLC_SET_PM,
+			&var_int, sizeof(var_int), TRUE, 0);
+
+#if defined(CONFIG_PM_LOCK)
+		if (var_int == 0) {
+			g_pm_control = TRUE;
+			printf("%s var_int=%d don't control PM\n", __func__, var_int);
+		} else {
+			g_pm_control = FALSE;
+			printf("%s var_int=%d do control PM\n", __func__, var_int);
+		}
+#endif
+
+		return ret;
+	}
+#ifdef WLBTAMP
+	else if (!strcmp(name, "btamp_chan")) {
+		int btamp_chan;
+		int iov_len = 0;
+		char iovbuf[128];
+		int ret;
+
+		btamp_chan = (int)simple_strtol(value, NULL, 0);
+		iov_len = bcm_mkiovar("btamp_chan", (char *)&btamp_chan, 4, iovbuf, sizeof(iovbuf));
+		if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0))
+			DHD_ERROR(("%s btamp_chan=%d set failed code %d\n",
+				__FUNCTION__, btamp_chan, ret));
+		else
+			DHD_ERROR(("%s btamp_chan %d set success\n",
+				__FUNCTION__, btamp_chan));
+	}
+#endif /* WLBTAMP */
+	else if (!strcmp(name, "band")) {
+		int ret;
+		if (!strcmp(value, "auto"))
+			var_int = WLC_BAND_AUTO;
+		else if (!strcmp(value, "a"))
+			var_int = WLC_BAND_5G;
+		else if (!strcmp(value, "b"))
+			var_int = WLC_BAND_2G;
+		else if (!strcmp(value, "all"))
+			var_int = WLC_BAND_ALL;
+		else {
+			printf(" set band value should be one of auto, a, b or all\n");
+			var_int = WLC_BAND_AUTO;
+		}
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int,
+			sizeof(var_int), TRUE, 0)) < 0)
+			printf(" set band err=%d\n", ret);
+		return ret;
+	} else if (!strcmp(name, "cur_etheraddr")) {
+		struct ether_addr ea;
+		char buf[32];
+		uint iovlen;
+		int ret;
+
+		bcm_ether_atoe(value, &ea);
+
+		ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN);
+		if (ret == 0) {
+			DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__));
+			return 0;
+		}
+
+		DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__,
+			ea.octet[0], ea.octet[1], ea.octet[2],
+			ea.octet[3], ea.octet[4], ea.octet[5]));
+
+		iovlen = bcm_mkiovar("cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, buf, 32);
+
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0);
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		else {
+			memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN);
+			return ret;
+		}
+	} else {
+		uint iovlen;
+		char iovbuf[WLC_IOCTL_SMLEN];
+
+		/* wlu_iovar_setint */
+		var_int = (int)simple_strtol(value, NULL, 0);
+
+		/* Setup timeout bcn_timeout from dhd driver 4.217.48 */
+		if (!strcmp(name, "roam_off")) {
+			/* Setup timeout if Beacons are lost to report link down */
+			if (var_int) {
+				uint bcn_timeout = 2;
+				bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4,
+					iovbuf, sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+			}
+		}
+		/* Setup timeout bcn_timeout from dhd driver 4.217.48 */
+
+		DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int));
+
+		iovlen = bcm_mkiovar(name, (char *)&var_int, sizeof(var_int),
+			iovbuf, sizeof(iovbuf));
+		return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+			iovbuf, iovlen, TRUE, 0);
+	}
+
+	return 0;
+}
+
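+/* Read the config file at CONFIG_BCMDHD_CONFIG_PATH and apply each name=value
+ * entry via dhd_preinit_proc().
+ */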
+static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx)
+{
+	mm_segment_t old_fs;
+	struct kstat stat;
+	struct file *fp = NULL;
+	int len;
+	char *buf = NULL, *p, *name, *value;
+	int ret = 0;
+	char *config_path;
+
+	config_path = CONFIG_BCMDHD_CONFIG_PATH;
+
+	if (!config_path)
+	{
+		printf(KERN_ERR "config_path can't be read.\n");
+		return 0;
+	}
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	if ((ret = vfs_stat(config_path, &stat))) {
+		set_fs(old_fs);
+		printf(KERN_ERR "%s: Failed to get information (%d)\n",
+			config_path, ret);
+		return ret;
+	}
+	set_fs(old_fs);
+
+	if (!(buf = MALLOC(dhd->osh, stat.size + 1))) {
+		printf(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size);
+		return -ENOMEM;
+	}
+
+	printf("dhd_preinit_config : config path : %s \n", config_path);
+
+	if (!(fp = dhd_os_open_image(config_path)) ||
+		(len = dhd_os_get_image_block(buf, stat.size, fp)) < 0)
+		goto err;
+
+	buf[stat.size] = '\0';
+	for (p = buf; *p; p++) {
+		if (isspace(*p))
+			continue;
+		for (name = p++; *p && !isspace(*p); p++) {
+			if (*p == '=') {
+				*p = '\0';
+				p++;
+				for (value = p; *p && !isspace(*p); p++);
+				*p = '\0';
+				if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) {
+					printf(KERN_ERR "%s: %s=%s\n",
+						bcmerrorstr(ret), name, value);
+				}
+				break;
+			}
+		}
+	}
+	ret = 0;
+
+out:
+	if (fp)
+		dhd_os_close_image(fp);
+	if (buf)
+		MFREE(dhd->osh, buf, stat.size+1);
+	return ret;
+
+err:
+	ret = -1;
+	goto out;
+}
+#endif /* READ_CONFIG_FROM_FILE */
+
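+/* Post-download dongle initialization: MAC and country setup, operating mode detection,
+ * roaming parameters, event masks, offloads and packet filters.
+ */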
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	char eventmask[WL_EVENTING_MASK_LEN];
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/*  Room for "event_msgs" + '\0' + bitvec  */
+	uint32 buf_key_b4_m4 = 1;
+	uint8 msglen;
+	eventmsgs_ext_t *eventmask_msg;
+	char iov_buf[WLC_IOCTL_SMLEN];
+	int ret2 = 0;
+#ifdef WLAIBSS
+	aibss_bcn_force_config_t bcn_config;
+	uint32 aibss;
+#ifdef WLAIBSS_PS
+	uint32 aibss_ps;
+#endif /* WLAIBSS_PS */
+#endif /* WLAIBSS */
+#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
+	uint32 sup_wpa = 0;
+#endif
+#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
+	defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
+	uint32 ampdu_ba_wsize = 0;
+#endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
+#if defined(CUSTOM_AMPDU_MPDU)
+	int32 ampdu_mpdu = 0;
+#endif
+#if defined(CUSTOM_AMPDU_RELEASE)
+	int32 ampdu_release = 0;
+#endif
+
+#if defined(BCMSDIO)
+#ifdef PROP_TXSTATUS
+	int wlfc_enable = TRUE;
+#ifndef DISABLE_11N
+	uint32 hostreorder = 1;
+#endif /* DISABLE_11N */
+#endif /* PROP_TXSTATUS */
+#endif /* BCMSDIO */
+#ifdef PCIE_FULL_DONGLE
+	uint32 wl_ap_isolate;
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_ENABLE_LPC
+	uint32 lpc = 1;
+#endif /* DHD_ENABLE_LPC */
+	uint power_mode = PM_FAST;
+	uint32 dongle_align = DHD_SDALIGN;
+#if defined(BCMSDIO)
+	uint32 glom = CUSTOM_GLOM_SETTING;
+#endif /* defined(BCMSDIO) */
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+	uint32 credall = 1;
+#endif
+#if defined(VSDB) || defined(ROAM_ENABLE)
+	uint bcn_timeout = 10;
+#else
+	uint bcn_timeout = 4;
+#endif /* VSDB || ROAM_ENABLE */
+	uint retry_max = 3;
+#if defined(ARP_OFFLOAD_SUPPORT)
+	int arpoe = 1;
+#endif
+	int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
+	int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
+	int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
+	char buf[WLC_IOCTL_SMLEN];
+	char *ptr;
+	uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+#ifdef ROAM_ENABLE
+	uint roamvar = 0;
+	int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
+	int roam_scan_period[2] = {10, WLC_BAND_ALL};
+	int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
+#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
+	int roam_fullscan_period = 60;
+#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+	int roam_fullscan_period = 120;
+#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+#else
+#ifdef DISABLE_BUILTIN_ROAM
+	uint roamvar = 1;
+#endif /* DISABLE_BUILTIN_ROAM */
+#endif /* ROAM_ENABLE */
+
+#if defined(SOFTAP)
+	uint dtim = 1;
+#endif
+#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
+	uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
+	struct ether_addr p2p_ea;
+#endif
+#ifdef BCMCCX
+	uint32 ccx = 1;
+#endif
+
+#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
+	uint32 apsta = 1; /* Enable APSTA mode */
+#elif defined(SOFTAP_AND_GC)
+	uint32 apsta = 0;
+	int ap_mode = 1;
+#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
+#ifdef GET_CUSTOM_MAC_ENABLE
+	struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef DISABLE_11N
+	uint32 nmode = 0;
+#endif /* DISABLE_11N */
+
+#if defined(DISABLE_11AC)
+	uint32 vhtmode = 0;
+#endif /* DISABLE_11AC */
+#ifdef USE_WL_TXBF
+	uint32 txbf = 1;
+#endif /* USE_WL_TXBF */
+#ifdef AMPDU_VO_ENABLE
+	struct ampdu_tid_control tid;
+#endif
+#ifdef USE_WL_FRAMEBURST
+	uint32 frameburst = 1;
+#endif /* USE_WL_FRAMEBURST */
+#ifdef DHD_SET_FW_HIGHSPEED
+	uint32 ack_ratio = 250;
+	uint32 ack_ratio_depth = 64;
+#endif /* DHD_SET_FW_HIGHSPEED */
+#ifdef SUPPORT_2G_VHT
+	uint32 vht_features = 0x3; /* 2G enable | rates all */
+#endif /* SUPPORT_2G_VHT */
+#ifdef CUSTOM_PSPRETEND_THR
+	uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+	dhd_pkt_filter_enable = TRUE;
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef WLTDLS
+	dhd->tdls_enable = FALSE;
+#endif /* WLTDLS */
+	dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
+	DHD_TRACE(("Enter %s\n", __FUNCTION__));
+	dhd->op_mode = 0;
+	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+		(op_mode == DHD_FLAG_MFG_MODE)) {
+		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
+		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
+		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
+			__FUNCTION__));
+	}
+	else {
+		dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+		DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
+	}
+#ifdef GET_CUSTOM_MAC_ENABLE
+	ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
+	if (!ret) {
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+			return BCME_NOTUP;
+		}
+		memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
+	} else {
+#endif /* GET_CUSTOM_MAC_ENABLE */
+		/* Get the default device MAC address directly from firmware */
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+			FALSE, 0)) < 0) {
+			DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
+			return BCME_NOTUP;
+		}
+		/* Update public MAC address after reading from Firmware */
+		memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+	/* get capabilities from the firmware */
+	memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
+	bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
+		sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+			__FUNCTION__, ret));
+		return 0;
+	}
+	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
+		(op_mode == DHD_FLAG_HOSTAP_MODE)) {
+#ifdef SET_RANDOM_MAC_SOFTAP
+		uint rand_mac;
+#endif
+		dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
+#if defined(ARP_OFFLOAD_SUPPORT)
+		arpoe = 0;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+		dhd_pkt_filter_enable = FALSE;
+#endif
+#ifdef SET_RANDOM_MAC_SOFTAP
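+		/* Compose a locally administered MAC address: fixed upper bytes plus random lower bytes */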
+		SRANDOM32((uint)jiffies);
+		rand_mac = RANDOM32();
+		iovbuf[0] = 0x02;			   /* locally administered bit */
+		iovbuf[1] = 0x1A;
+		iovbuf[2] = 0x11;
+		iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+		iovbuf[4] = (unsigned char)(rand_mac >> 8);
+		iovbuf[5] = (unsigned char)(rand_mac >> 16);
+
+		bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+		} else
+			memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+#endif /* SET_RANDOM_MAC_SOFTAP */
+#if !defined(AP) && defined(WL_CFG80211)
+		/* Turn off MPC in AP mode */
+		bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s mpc for HostAPD failed  %d\n", __FUNCTION__, ret));
+		}
+#endif
+	} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+		(op_mode == DHD_FLAG_MFG_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+		arpoe = 0;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PKT_FILTER_SUPPORT
+		dhd_pkt_filter_enable = FALSE;
+#endif /* PKT_FILTER_SUPPORT */
+		dhd->op_mode = DHD_FLAG_MFG_MODE;
+	} else {
+		uint32 concurrent_mode = 0;
+		if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
+			(op_mode == DHD_FLAG_P2P_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+			arpoe = 0;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+			dhd_pkt_filter_enable = FALSE;
+#endif
+			dhd->op_mode = DHD_FLAG_P2P_MODE;
+		} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
+			(op_mode == DHD_FLAG_IBSS_MODE)) {
+			dhd->op_mode = DHD_FLAG_IBSS_MODE;
+		} else
+			dhd->op_mode = DHD_FLAG_STA_MODE;
+#if !defined(AP) && defined(WLP2P)
+		if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
+			(concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+			arpoe = 1;
+#endif
+			dhd->op_mode |= concurrent_mode;
+		}
+
+		/* Check if we are enabling p2p */
+		if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+			bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+				iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+				DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
+			}
+
+#if defined(SOFTAP_AND_GC)
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
+			(char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
+				DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
+		}
+#endif
+			memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
+			ETHER_SET_LOCALADDR(&p2p_ea);
+			bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
+				ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+				iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+				DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
+			} else {
+				DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
+			}
+		}
+#else
+	(void)concurrent_mode;
+#endif /* !AP && WLP2P */
+	}
+
+	DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
+		dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
+	/* Set Country code  */
+	if (dhd->dhd_cspec.ccode[0] != 0) {
+		bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
+			sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+			DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+	}
+
+#if defined(DISABLE_11AC)
+	bcm_mkiovar("vhtmode", (char *)&vhtmode, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret));
+#endif /* DISABLE_11AC */
+
+	/* Set Listen Interval */
+	bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+	/* Disable built-in roaming to allow the ext supplicant to take care of roaming */
+	bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#if defined(ROAM_ENABLE)
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
+		sizeof(roam_trigger), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
+		sizeof(roam_scan_period), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
+	if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
+		sizeof(roam_delta), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
+	bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
+#endif /* ROAM_ENABLE */
+
+#ifdef BCMCCX
+	bcm_mkiovar("ccx_enable", (char *)&ccx, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* BCMCCX */
+#ifdef WLTDLS
+	/* by default TDLS on and auto mode off */
+	_dhd_tdls_enable(dhd, true, false, NULL);
+#endif /* WLTDLS */
+
+#ifdef DHD_ENABLE_LPC
+	/* Set lpc 1 */
+	bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set lpc failed  %d\n", __FUNCTION__, ret));
+	}
+#endif /* DHD_ENABLE_LPC */
+
+	/* Set PowerSave mode */
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
+
+	/* Match Host and Dongle rx alignment */
+	bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+	/* enable credall to reduce the chance of no bus credit happened. */
+	bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif
+
+#if defined(BCMSDIO)
+	if (glom != DEFAULT_GLOM_VALUE) {
+		DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
+		bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+		dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	}
+#endif /* defined(BCMSDIO) */
+
+	/* Setup timeout if Beacons are lost and roam is off to report link down */
+	bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	/* Setup assoc_retry_max count to reconnect target AP in dongle */
+	bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#if defined(AP) && !defined(WLP2P)
+	/* Turn off MPC in AP mode */
+	bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* defined(AP) && !defined(WLP2P) */
+
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded == TRUE) {
+		dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+	}
+#endif /* SOFTAP */
+
+#if defined(KEEP_ALIVE)
+	{
+	/* Set Keep Alive : be sure to use FW with -keepalive */
+	int res;
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded == FALSE)
+#endif /* SOFTAP */
+		if (!(dhd->op_mode &
+			(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+			if ((res = dhd_keep_alive_onoff(dhd)) < 0)
+				DHD_ERROR(("%s set keepalive failed %d\n",
+				__FUNCTION__, res));
+		}
+	}
+#endif /* defined(KEEP_ALIVE) */
+
+#ifdef USE_WL_TXBF
+	bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set txbf failed  %d\n", __FUNCTION__, ret));
+	}
+#endif /* USE_WL_TXBF */
+#ifdef USE_WL_FRAMEBURST
+	/* Set frameburst to value */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
+		sizeof(frameburst), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set frameburst failed  %d\n", __FUNCTION__, ret));
+	}
+#endif /* USE_WL_FRAMEBURST */
+#ifdef DHD_SET_FW_HIGHSPEED
+	/* Set ack_ratio */
+	bcm_mkiovar("ack_ratio", (char *)&ack_ratio, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set ack_ratio failed  %d\n", __FUNCTION__, ret));
+	}
+
+	/* Set ack_ratio_depth */
+	bcm_mkiovar("ack_ratio_depth", (char *)&ack_ratio_depth, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set ack_ratio_depth failed  %d\n", __FUNCTION__, ret));
+	}
+#endif /* DHD_SET_FW_HIGHSPEED */
+#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
+	defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
+	/* Set ampdu ba wsize to 64 or 16 */
+#ifdef CUSTOM_AMPDU_BA_WSIZE
+	ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
+#endif
+#if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
+	if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
+		ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
+#endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
+	if (ampdu_ba_wsize != 0) {
+		bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed  %d\n",
+				__FUNCTION__, ampdu_ba_wsize, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
+
+#ifdef WLAIBSS
+	/* Configure custom IBSS beacon transmission */
+	if (dhd->op_mode & DHD_FLAG_IBSS_MODE)
+	{
+		aibss = 1;
+		bcm_mkiovar("aibss", (char *)&aibss, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s Set aibss to %d failed  %d\n",
+				__FUNCTION__, aibss, ret));
+		}
+#ifdef WLAIBSS_PS
+		aibss_ps = 1;
+		bcm_mkiovar("aibss_ps", (char *)&aibss_ps, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s Set aibss PS to %d failed  %d\n",
+				__FUNCTION__, aibss_ps, ret));
+		}
+#endif /* WLAIBSS_PS */
+	}
+	memset(&bcn_config, 0, sizeof(bcn_config));
+	bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
+	bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
+	bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
+	bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
+	bcn_config.len = sizeof(bcn_config);
+
+	bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config,
+		sizeof(aibss_bcn_force_config_t), iov_buf, sizeof(iov_buf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf,
+		sizeof(iov_buf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
+			__FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
+			AIBSS_BCN_FLOOD_DUR, ret));
+	}
+#endif /* WLAIBSS */
+
+#if defined(CUSTOM_AMPDU_MPDU)
+	ampdu_mpdu = CUSTOM_AMPDU_MPDU;
+	if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
+		bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s Set ampdu_mpdu to %d failed  %d\n",
+				__FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_MPDU */
+
+#if defined(CUSTOM_AMPDU_RELEASE)
+	ampdu_release = CUSTOM_AMPDU_RELEASE;
+	if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
+		bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s Set ampdu_release to %d failed  %d\n",
+				__FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_RELEASE */
+
+#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
+	/* Read 4-way handshake requirements */
+	if (dhd_use_idsup == 1) {
+		bcm_mkiovar("sup_wpa", (char *)&sup_wpa, 4, iovbuf, sizeof(iovbuf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+		/* sup_wpa iovar returns NOTREADY status on some platforms using modularized
+		 * in-dongle supplicant.
+		 */
+		if (ret >= 0 || ret == BCME_NOTREADY)
+			dhd->fw_4way_handshake = TRUE;
+		DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
+	}
+#endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
+#ifdef SUPPORT_2G_VHT
+	bcm_mkiovar("vht_features", (char *)&vht_features, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
+	}
+#endif /* SUPPORT_2G_VHT */
+#ifdef CUSTOM_PSPRETEND_THR
+	/* Set the ps-pretend threshold */
+	bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
+		iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s pspretend_threshold for HostAPD failed  %d\n",
+			__FUNCTION__, ret));
+	}
+#endif
+
+	bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
+	}
+
+	/* Read event_msgs mask */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+
+	/* Setup event_msgs */
+	setbit(eventmask, WLC_E_SET_SSID);
+	setbit(eventmask, WLC_E_PRUNE);
+	setbit(eventmask, WLC_E_AUTH);
+	setbit(eventmask, WLC_E_AUTH_IND);
+	setbit(eventmask, WLC_E_ASSOC);
+	setbit(eventmask, WLC_E_REASSOC);
+	setbit(eventmask, WLC_E_REASSOC_IND);
+	setbit(eventmask, WLC_E_DEAUTH);
+	setbit(eventmask, WLC_E_DEAUTH_IND);
+	setbit(eventmask, WLC_E_DISASSOC_IND);
+	setbit(eventmask, WLC_E_DISASSOC);
+	setbit(eventmask, WLC_E_JOIN);
+	setbit(eventmask, WLC_E_START);
+	setbit(eventmask, WLC_E_ASSOC_IND);
+	setbit(eventmask, WLC_E_PSK_SUP);
+	setbit(eventmask, WLC_E_LINK);
+	setbit(eventmask, WLC_E_NDIS_LINK);
+	setbit(eventmask, WLC_E_MIC_ERROR);
+	setbit(eventmask, WLC_E_ASSOC_REQ_IE);
+	setbit(eventmask, WLC_E_ASSOC_RESP_IE);
+#ifndef WL_CFG80211
+	setbit(eventmask, WLC_E_PMKID_CACHE);
+	setbit(eventmask, WLC_E_TXFAIL);
+#endif
+	setbit(eventmask, WLC_E_JOIN_START);
+	setbit(eventmask, WLC_E_SCAN_COMPLETE);
+#ifdef WLMEDIA_HTSF
+	setbit(eventmask, WLC_E_HTSFSYNC);
+#endif /* WLMEDIA_HTSF */
+#ifdef PNO_SUPPORT
+	setbit(eventmask, WLC_E_PFN_NET_FOUND);
+	setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
+	setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
+	setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
+#endif /* PNO_SUPPORT */
+	/* enable dongle roaming event */
+	setbit(eventmask, WLC_E_ROAM);
+	setbit(eventmask, WLC_E_BSSID);
+#ifdef BCMCCX
+	setbit(eventmask, WLC_E_ADDTS_IND);
+	setbit(eventmask, WLC_E_DELTS_IND);
+#endif /* BCMCCX */
+#ifdef WLTDLS
+	setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
+#endif /* WLTDLS */
+#ifdef WL_CFG80211
+	setbit(eventmask, WLC_E_ESCAN_RESULT);
+	if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+		setbit(eventmask, WLC_E_ACTION_FRAME_RX);
+		setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
+	}
+#endif /* WL_CFG80211 */
+#ifdef WLAIBSS
+	setbit(eventmask, WLC_E_AIBSS_TXFAIL);
+#endif /* WLAIBSS */
+	setbit(eventmask, WLC_E_TRACE);
+
+	/* Write updated Event mask */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+
+	/* make up event mask ext message iovar for event larger than 128 */
+	msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
+	eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
+	if (eventmask_msg == NULL) {
+		DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
+		return BCME_NOMEM;
+	}
+	bzero(eventmask_msg, msglen);
+	eventmask_msg->ver = EVENTMSGS_VER;
+	eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
+
+	/* Read event_msgs_ext mask */
+	bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, sizeof(iov_buf));
+	ret2  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, sizeof(iov_buf), FALSE, 0);
+	if (ret2 != BCME_UNSUPPORTED)
+		ret = ret2;
+	if (ret2 == 0) { /* event_msgs_ext must be supported */
+		bcopy(iov_buf, eventmask_msg, msglen);
+
+#ifdef BT_WIFI_HANDOVER
+		setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
+#endif /* BT_WIFI_HANDOVER */
+
+		/* Write updated Event mask */
+		eventmask_msg->ver = EVENTMSGS_VER;
+		eventmask_msg->command = EVENTMSGS_SET_MASK;
+		eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
+		bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
+			msglen, iov_buf, sizeof(iov_buf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+			iov_buf, sizeof(iov_buf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
+			kfree(eventmask_msg);
+			goto done;
+		}
+	} else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) {
+		DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
+		kfree(eventmask_msg);
+		goto done;
+	} /* unsupported is ok */
+	kfree(eventmask_msg);
+
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+		sizeof(scan_assoc_time), TRUE, 0);
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+		sizeof(scan_unassoc_time), TRUE, 0);
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
+		sizeof(scan_passive_time), TRUE, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	/* Set and enable ARP offload feature for STA only  */
+#if defined(SOFTAP)
+	if (arpoe && !ap_fw_loaded) {
+#else
+	if (arpoe) {
+#endif /* SOFTAP */
+		dhd_arp_offload_enable(dhd, TRUE);
+		dhd_arp_offload_set(dhd, dhd_arp_mode);
+	} else {
+		dhd_arp_offload_enable(dhd, FALSE);
+		dhd_arp_offload_set(dhd, 0);
+	}
+	dhd_arp_enable = arpoe;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+	/* Setup default definitions for pktfilter, enabled in suspend */
+	dhd->pktfilter_count = 6;
+	/* Setup filter to allow only unicast */
+	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
+	dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
+	dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
+	dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+	/* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
+	dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
+	/* apply APP pktfilter */
+	dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
+
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded) {
+		dhd_enable_packet_filter(0, dhd);
+	}
+#endif /* defined(SOFTAP) */
+	dhd_set_packet_filter(dhd);
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef DISABLE_11N
+	bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
+#endif /* DISABLE_11N */
+
+#ifdef AMPDU_VO_ENABLE
+	tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
+	tid.enable = TRUE;
+	bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+	tid.tid = PRIO_8021D_NC; /* Enable TID(7) for network control */
+	tid.enable = TRUE;
+	bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif
+#if defined(SOFTAP_TPUT_ENHANCE)
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		dhd_bus_setidletime(dhd, (int)100);
+#ifdef DHDTCPACK_SUPPRESS
+		dhd->tcpack_sup_enabled = FALSE;
+#endif
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+		dhd_use_tcp_window_size_adjust = TRUE;
+#endif
+
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("bus:txglom_auto_control", 0, 0, buf, sizeof(buf));
+		if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) {
+			glom = 0;
+			bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+		}
+		else {
+			if (buf[0] == 0) {
+				glom = 1;
+				bcm_mkiovar("bus:txglom_auto_control", (char *)&glom, 4, iovbuf,
+				sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+			}
+		}
+	}
+#endif /* SOFTAP_TPUT_ENHANCE */
+	/* query for 'ver' to get version info from firmware */
+	memset(buf, 0, sizeof(buf));
+	ptr = buf;
+	bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
+		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+	else {
+		bcmstrtok(&ptr, "\n", 0);
+		/* Print fw version info */
+		DHD_ERROR(("Firmware version = %s\n", buf));
+#if defined(BCMSDIO)
+		dhd_set_version_info(dhd, buf);
+#endif /* defined(BCMSDIO) */
+	}
+
+#if defined(BCMSDIO)
+	dhd_txglom_enable(dhd, TRUE);
+#endif /* defined(BCMSDIO) */
+
+#if defined(BCMSDIO)
+#ifdef PROP_TXSTATUS
+	if (disable_proptx ||
+#ifdef PROP_TXSTATUS_VSDB
+		/* enable WLFC only if the firmware is VSDB when it is in STA mode */
+		(dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+		 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
+#endif /* PROP_TXSTATUS_VSDB */
+		FALSE) {
+		wlfc_enable = FALSE;
+	}
+
+#ifndef DISABLE_11N
+	bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
+	if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
+		if (ret2 != BCME_UNSUPPORTED)
+			ret = ret2;
+		if (ret2 != BCME_OK)
+			hostreorder = 0;
+	}
+#endif /* DISABLE_11N */
+
+#ifdef READ_CONFIG_FROM_FILE
+	dhd_preinit_config(dhd, 0);
+#endif /* READ_CONFIG_FROM_FILE */
+
+	if (wlfc_enable)
+		dhd_wlfc_init(dhd);
+#ifndef DISABLE_11N
+	else if (hostreorder)
+		dhd_wlfc_hostreorder_init(dhd);
+#endif /* DISABLE_11N */
+
+#endif /* PROP_TXSTATUS */
+#endif /* BCMSDIO */
+#ifdef PCIE_FULL_DONGLE
+	/* For FD we need all the packets at DHD to handle intra-BSS forwarding */
+	if (FW_SUPPORTED(dhd, ap)) {
+		wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
+		bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+			DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+	}
+#endif /* PCIE_FULL_DONGLE */
+#ifdef PNO_SUPPORT
+	if (!dhd->pno_state) {
+		dhd_pno_init(dhd);
+	}
+#endif
+#ifdef WL11U
+	dhd_interworking_enable(dhd);
+#endif /* WL11U */
+
+done:
+	return ret;
+}
+
+
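+/* Pack an iovar into a WLC_SET_VAR/WLC_GET_VAR ioctl buffer and issue it on the given interface */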
+int
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
+{
+	char buf[strlen(name) + 1 + cmd_len];
+	int len = sizeof(buf);
+	wl_ioctl_t ioc;
+	int ret;
+
+	len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	ioc.set = set;
+
+	ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (!set && ret >= 0)
+		memcpy(cmd_buf, buf, cmd_len);
+
+	return ret;
+}
+
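+/* Validate and apply a new MTU for the given interface; the interface must be down */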
+int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
+{
+	struct dhd_info *dhd = dhdp->info;
+	struct net_device *dev = NULL;
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	dev = dhd->iflist[ifidx]->net;
+	ASSERT(dev);
+
+	if (netif_running(dev)) {
+		DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
+		return BCME_NOTDOWN;
+	}
+
+#define DHD_MIN_MTU 1500
+#define DHD_MAX_MTU 1752
+
+	if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
+		DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
+		return BCME_BADARG;
+	}
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* add or remove AOE host ip(s) (up to 8 IPs on the interface)  */
+void
+aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
+{
+	u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
+	int i;
+	int ret;
+
+	bzero(ipv4_buf, sizeof(ipv4_buf));
+
+	/* display what we've got */
+	ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+	DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
+#ifdef AOE_DBG
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+	/* now that we have saved the host_ip table, clear it in the dongle AOE */
+	dhd_aoe_hostip_clr(dhd_pub, idx);
+
+	if (ret) {
+		DHD_ERROR(("%s failed\n", __FUNCTION__));
+		return;
+	}
+
+	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+		if (add && (ipv4_buf[i] == 0)) {
+				ipv4_buf[i] = ipa;
+				add = FALSE; /* added ipa to local table  */
+				DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
+				__FUNCTION__, i));
+		} else if (ipv4_buf[i] == ipa) {
+			ipv4_buf[i]	= 0;
+			DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
+				__FUNCTION__, ipa, i));
+		}
+
+		if (ipv4_buf[i] != 0) {
+			/* add back host_ip entries from our local cache */
+			dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
+			DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
+				__FUNCTION__, ipv4_buf[i], i));
+		}
+	}
+#ifdef AOE_DBG
+	/* see the resulting hostip table */
+	dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+	DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+}
+
+/*
+ * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
+ * whenever there is an event related to an IP address.
+ * ptr : kernel provided pointer to IP address that has changed
+ */
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+	unsigned long event,
+	void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+	dhd_info_t *dhd;
+	dhd_pub_t *dhd_pub;
+	int idx;
+
+	if (!dhd_arp_enable)
+		return NOTIFY_DONE;
+	if (!ifa || !(ifa->ifa_dev->dev))
+		return NOTIFY_DONE;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	/* Filter notifications meant for non Broadcom devices */
+	if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
+	    (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
+#if defined(WL_ENABLE_P2P_IF)
+		if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
+#endif /* WL_ENABLE_P2P_IF */
+			return NOTIFY_DONE;
+	}
+#endif /* LINUX_VERSION_CODE */
+
+	dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
+	if (!dhd)
+		return NOTIFY_DONE;
+
+	dhd_pub = &dhd->pub;
+
+	if (dhd_pub->arp_version == 1) {
+		idx = 0;
+	}
+	else {
+		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+			if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
+				break;
+		}
+		if (idx < DHD_MAX_IFS)
+			DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
+				dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
+		else {
+			DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
+			idx = 0;
+		}
+	}
+
+	switch (event) {
+		case NETDEV_UP:
+			DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+			if (dhd->pub.busstate != DHD_BUS_DATA) {
+				DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
+				if (dhd->pend_ipaddr) {
+					DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
+						__FUNCTION__, dhd->pend_ipaddr));
+				}
+				dhd->pend_ipaddr = ifa->ifa_address;
+				break;
+			}
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+			DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
+				__FUNCTION__));
+			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+			break;
+
+		case NETDEV_DOWN:
+			DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+			dhd->pend_ipaddr = 0;
+#ifdef AOE_IP_ALIAS_SUPPORT
+			DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
+				__FUNCTION__));
+			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
+#else
+			dhd_aoe_hostip_clr(&dhd->pub, idx);
+			dhd_aoe_arp_clr(&dhd->pub, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+			break;
+
+		default:
+			DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n",
+				__func__, ifa->ifa_label, event));
+			break;
+	}
+	return NOTIFY_DONE;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef CONFIG_IPV6
+/* Neighbor Discovery Offload: deferred handler */
+static void
+dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
+{
+	struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
+	dhd_pub_t	*pub = &((dhd_info_t *)dhd_info)->pub;
+	int		ret;
+
+	if (event != DHD_WQ_WORK_IPV6_NDO) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!ndo_work) {
+		DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
+		return;
+	}
+
+	if (!pub) {
+		DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
+		kfree(ndo_work);
+		return;
+	}
+
+	if (ndo_work->if_idx) {
+		DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
+		kfree(ndo_work);
+		return;
+	}
+
+	switch (ndo_work->event) {
+		case NETDEV_UP:
+			DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
+			ret = dhd_ndo_enable(pub, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
+			}
+
+			ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
+			if (ret < 0) {
+				DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
+					__FUNCTION__, ret));
+			}
+			break;
+		case NETDEV_DOWN:
+			DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
+			ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
+			if (ret < 0) {
+				DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
+					__FUNCTION__, ret));
+				goto done;
+			}
+
+			ret = dhd_ndo_enable(pub, FALSE);
+			if (ret < 0) {
+				DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
+				goto done;
+			}
+			break;
+		default:
+			DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
+			break;
+	}
+done:
+	/* free ndo_work; it was allocated when the work was scheduled */
+	kfree(ndo_work);
+
+	return;
+}
+
+/*
+ * Neighbor Discovery Offload: Called when an interface
+ * is assigned with ipv6 address.
+ * Handles only primary interface
+ */
+static int dhd_inet6addr_notifier_call(struct notifier_block *this,
+	unsigned long event,
+	void *ptr)
+{
+	dhd_info_t *dhd;
+	dhd_pub_t *dhd_pub;
+	struct inet6_ifaddr *inet6_ifa = ptr;
+	struct in6_addr *ipv6_addr = &inet6_ifa->addr;
+	struct ipv6_work_info_t *ndo_info;
+	int idx = 0; /* REVISIT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	/* Filter notifications meant for non Broadcom devices */
+	if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
+			return NOTIFY_DONE;
+	}
+#endif /* LINUX_VERSION_CODE */
+
+	dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
+	if (!dhd)
+		return NOTIFY_DONE;
+
+	if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
+		return NOTIFY_DONE;
+	dhd_pub = &dhd->pub;
+	if (!FW_SUPPORTED(dhd_pub, ndoe))
+		return NOTIFY_DONE;
+
+	ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
+	if (!ndo_info) {
+		DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
+		return NOTIFY_DONE;
+	}
+
+	ndo_info->event = event;
+	ndo_info->if_idx = idx;
+	memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
+
+	/* defer the work to a thread as it may block the kernel */
+	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
+		dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
+	return NOTIFY_DONE;
+}
+#endif /* #ifdef CONFIG_IPV6 */
+
+int
+dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	dhd_if_t *ifp;
+	struct net_device *net = NULL;
+	int err = 0;
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
+
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	ifp = dhd->iflist[ifidx];
+	net = ifp->net;
+	ASSERT(net && (ifp->idx == ifidx));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->get_stats = dhd_get_stats;
+	net->do_ioctl = dhd_ioctl_entry;
+	net->hard_start_xmit = dhd_start_xmit;
+	net->set_mac_address = dhd_set_mac_address;
+	net->set_multicast_list = dhd_set_multicast_list;
+	net->open = net->stop = NULL;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &dhd_ops_virt;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+	/* Ok, link into the network layer... */
+	if (ifidx == 0) {
+		/*
+		 * device functions for the primary interface only
+		 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+		net->open = dhd_open;
+		net->stop = dhd_stop;
+#else
+		net->netdev_ops = &dhd_ops_pri;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
+			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+	} else {
+		/*
+		 * We have to use the primary MAC for virtual interfaces
+		 */
+		memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
+		/*
+		 * Android sets the locally administered bit to indicate that this is a
+		 * portable hotspot.  This will not work in simultaneous AP/STA mode,
+		 * nor with P2P.  Need to set the Dongle's MAC address, and then use that.
+		 */
+		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
+			ETHER_ADDR_LEN)) {
+			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
+			__func__, net->name));
+			temp_addr[0] |= 0x02;
+		}
+	}
+
+	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &dhd_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(WL_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+	net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(WL_WIRELESS_EXT) */
+
+	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+	if (ifidx == 0)
+		printf("%s\n", dhd_version);
+
+	if (need_rtnl_lock)
+		err = register_netdev(net);
+	else
+		err = register_netdevice(net);
+
+	if (err != 0) {
+		DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
+		goto fail;
+	}
+
+
+
+	printf("Register interface [%s]  MAC: "MACDBG"\n\n", net->name,
+		MAC2STRDBG(net->dev_addr));
+
+#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
+		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#endif
+
+#if 1 && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
+	KERNEL_VERSION(2, 6, 27))))
+	if (ifidx == 0) {
+#ifdef BCMLXSDMMC
+		up(&dhd_registration_sem);
+#endif
+		if (!dhd_download_fw_on_driverload) {
+			dhd_net_bus_devreset(net, TRUE);
+#ifdef BCMLXSDMMC
+			dhd_net_bus_suspend(net);
+#endif /* BCMLXSDMMC */
+			wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
+		}
+	}
+#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
+	return 0;
+
+fail:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+	return err;
+}
+
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		dhd = (dhd_info_t *)dhdp->info;
+		if (dhd) {
+
+			/*
+			 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
+			 *  calling stop again will cause SD read/write errors.
+			 */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				/* Stop the protocol module */
+				dhd_prot_stop(&dhd->pub);
+
+				/* Stop the bus module */
+				dhd_bus_stop(dhd->pub.bus, TRUE);
+			}
+
+#if defined(OOB_INTR_ONLY)
+			dhd_bus_oob_intr_unregister(dhdp);
+#endif 
+		}
+	}
+}
+
+
+void dhd_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	unsigned long flags;
+	int timer_valid = FALSE;
+
+	if (!dhdp)
+		return;
+
+	dhd = (dhd_info_t *)dhdp->info;
+	if (!dhd)
+		return;
+
+	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+
+	dhd->pub.up = 0;
+	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+		/* Give sufficient time for threads to start running in case
+		 * dhd_attach() has failed
+		 */
+		OSL_SLEEP(100);
+	}
+
+	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+#ifdef PCIE_FULL_DONGLE
+		dhd_flow_rings_deinit(dhdp);
+#endif
+		dhd_bus_detach(dhdp);
+
+		if (dhdp->prot)
+			dhd_prot_detach(dhdp);
+	}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd_inetaddr_notifier_registered) {
+		dhd_inetaddr_notifier_registered = FALSE;
+		unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef CONFIG_IPV6
+	if (dhd_inet6addr_notifier_registered) {
+		dhd_inet6addr_notifier_registered = FALSE;
+		unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+	}
+#endif
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+		if (dhd->early_suspend.suspend)
+			unregister_early_suspend(&dhd->early_suspend);
+	}
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#if defined(WL_WIRELESS_EXT)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
+		/* Detach and unlink in the iw */
+		wl_iw_detach();
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+	/* delete all interfaces, start with virtual  */
+	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
+		int i = 1;
+		dhd_if_t *ifp;
+
+		/* Cleanup virtual interfaces */
+		dhd_net_if_lock_local(dhd);
+		for (i = 1; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i])
+				dhd_remove_if(&dhd->pub, i, TRUE);
+		}
+		dhd_net_if_unlock_local(dhd);
+
+		/*  delete primary interface 0 */
+		ifp = dhd->iflist[0];
+		ASSERT(ifp);
+		ASSERT(ifp->net);
+		if (ifp && ifp->net) {
+
+
+
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED)
+				free_netdev(ifp->net);
+			else
+				unregister_netdev(ifp->net);
+			ifp->net = NULL;
+#ifdef DHD_WMF
+			dhd_wmf_cleanup(dhdp, 0);
+#endif /* DHD_WMF */
+
+			dhd_if_del_sta_list(ifp);
+
+			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+			dhd->iflist[0] = NULL;
+		}
+	}
+
+	/* Clear the watchdog timer */
+	DHD_GENERAL_LOCK(&dhd->pub, flags);
+	timer_valid = dhd->wd_timer_valid;
+	dhd->wd_timer_valid = FALSE;
+	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+	if (timer_valid)
+		del_timer_sync(&dhd->timer);
+
+	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_wdt_ctl);
+		}
+
+		if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_rxf_ctl);
+		}
+
+		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_dpc_ctl);
+		} else
+			tasklet_kill(&dhd->tasklet);
+	}
+#ifdef WL_CFG80211
+	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+		wl_cfg80211_detach(NULL);
+		dhd_monitor_uninit();
+	}
+#endif
+	/* free deferred work queue */
+	dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
+	dhd->dhd_deferred_wq = NULL;
+
+#ifdef SHOW_LOGTRACE
+	if (dhd->event_data.fmts)
+		kfree(dhd->event_data.fmts);
+	if (dhd->event_data.raw_fmts)
+		kfree(dhd->event_data.raw_fmts);
+#endif /* SHOW_LOGTRACE */
+
+#ifdef PNO_SUPPORT
+	if (dhdp->pno_state)
+		dhd_pno_deinit(dhdp);
+#endif
+#if defined(CONFIG_PM_SLEEP)
+	if (dhd_pm_notifier_registered) {
+		unregister_pm_notifier(&dhd_pm_notifier);
+		dhd_pm_notifier_registered = FALSE;
+	}
+#endif /* CONFIG_PM_SLEEP */
+#ifdef DEBUG_CPU_FREQ
+		if (dhd->new_freq)
+			free_percpu(dhd->new_freq);
+		dhd->new_freq = NULL;
+		cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
+		DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
+#ifdef CONFIG_HAS_WAKELOCK
+		dhd->wakelock_counter = 0;
+		dhd->wakelock_wd_counter = 0;
+		dhd->wakelock_rx_timeout_enable = 0;
+		dhd->wakelock_ctrl_timeout_enable = 0;
+		wake_lock_destroy(&dhd->wl_wifi);
+		wake_lock_destroy(&dhd->wl_rxwake);
+		wake_lock_destroy(&dhd->wl_ctrlwake);
+		wake_lock_destroy(&dhd->wl_wdwake);
+#endif /* CONFIG_HAS_WAKELOCK */
+	}
+
+
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* This will free all MEM allocated for TCPACK SUPPRESS */
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+}
+
+
+void
+dhd_free(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		int i;
+		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+			if (dhdp->reorder_bufs[i]) {
+				reorder_info_t *ptr;
+				uint32 buf_size = sizeof(struct reorder_info);
+
+				ptr = dhdp->reorder_bufs[i];
+
+				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+					i, ptr->max_idx, buf_size));
+
+				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+				dhdp->reorder_bufs[i] = NULL;
+			}
+		}
+
+		dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
+
+		dhd = (dhd_info_t *)dhdp->info;
+		/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
+		if (dhd &&
+			dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
+			MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+		dhd = NULL;
+	}
+}
+
+static void
+dhd_module_cleanup(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_bus_unregister();
+
+	wl_android_exit();
+
+	dhd_wifi_platform_unregister_drv();
+}
+
+static void __exit
+dhd_module_exit(void)
+{
+	dhd_module_cleanup();
+	unregister_reboot_notifier(&dhd_reboot_notifier);
+}
+
+static int __init
+dhd_module_init(void)
+{
+	int err;
+	int retry = POWERUP_MAX_RETRY;
+
+	DHD_ERROR(("%s in\n", __FUNCTION__));
+
+	DHD_PERIM_RADIO_INIT();
+
+	if (firmware_path[0] != '\0') {
+		strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
+		fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
+	}
+
+	if (nvram_path[0] != '\0') {
+		strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
+		nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
+	}
+
+	do {
+		err = dhd_wifi_platform_register_drv();
+		if (!err) {
+			register_reboot_notifier(&dhd_reboot_notifier);
+			break;
+		}
+		else {
+			DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
+				__FUNCTION__, retry));
+			strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
+			firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
+			strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
+			nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
+		}
+	} while (retry--);
+
+	if (err)
+		DHD_ERROR(("%s: Failed to load driver; max retry reached\n", __FUNCTION__));
+
+	return err;
+}
+
+static int
+dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
+{
+	DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
+	if (code == SYS_RESTART) {
+	}
+
+	return NOTIFY_DONE;
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if defined(CONFIG_DEFERRED_INITCALLS)
+deferred_module_init(dhd_module_init);
+#elif defined(USE_LATE_INITCALL_SYNC)
+late_initcall_sync(dhd_module_init);
+#else
+late_initcall(dhd_module_init);
+#endif /* USE_LATE_INITCALL_SYNC */
+#else
+module_init(dhd_module_init);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+
+module_exit(dhd_module_exit);
+
+/*
+ * OS specific functions required to implement DHD driver in OS independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		DHD_PERIM_UNLOCK(pub);
+
+		down(&dhd->proto_sem);
+
+		DHD_PERIM_LOCK(pub);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		up(&dhd->proto_sem);
+		return 1;
+	}
+
+	return 0;
+}
+
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+	return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+	dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+	int timeout;
+
+	/* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+#else
+	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
+#endif
+
+	DHD_PERIM_UNLOCK(pub);
+
+	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
+
+	DHD_PERIM_LOCK(pub);
+
+	return timeout;
+}
+
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	wake_up(&dhd->ioctl_resp_wait);
+	return 0;
+}
+
+void
+dhd_os_wd_timer_extend(void *bus, bool extend)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+
+	if (extend)
+		dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
+	else
+		dhd_os_wd_timer(bus, dhd->default_wd_interval);
+}
+
+
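+/* start, re-arm or stop the DHD watchdog timer; a wdtick of 0 stops it and drops the wd wakelock */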
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_GENERAL_LOCK(pub, flags);
+
+	/* don't start the wd until fw is loaded */
+	if (pub->busstate == DHD_BUS_DOWN) {
+		DHD_GENERAL_UNLOCK(pub, flags);
+		if (!wdtick)
+			DHD_OS_WD_WAKE_UNLOCK(pub);
+		return;
+	}
+
+	/* Totally stop the timer */
+	if (!wdtick && dhd->wd_timer_valid == TRUE) {
+		dhd->wd_timer_valid = FALSE;
+		DHD_GENERAL_UNLOCK(pub, flags);
+		del_timer_sync(&dhd->timer);
+		DHD_OS_WD_WAKE_UNLOCK(pub);
+		return;
+	}
+
+	if (wdtick) {
+		DHD_OS_WD_WAKE_LOCK(pub);
+		dhd_watchdog_ms = (uint)wdtick;
+		/* Re-arm the timer with the latest watchdog period */
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+		dhd->wd_timer_valid = TRUE;
+	}
+	DHD_GENERAL_UNLOCK(pub, flags);
+}
+
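+/* firmware/nvram image helpers: open the file via the VFS, read it in blocks and close it */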
+void *
+dhd_os_open_image(char *filename)
+{
+	struct file *fp;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	/*
+	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
+	 * Alternative:
+	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+	 * ???
+	 */
+	 if (IS_ERR(fp))
+		 fp = NULL;
+
+	 return fp;
+}
+
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+
+	if (!image)
+		return 0;
+
+	rdlen = kernel_read(fp, fp->f_pos, buf, len);
+	if (rdlen > 0)
+		fp->f_pos += rdlen;
+
+	return rdlen;
+}
+
+void
+dhd_os_close_image(void *image)
+{
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
+
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd_dpc_prio >= 0)
+		down(&dhd->sdsem);
+	else
+		spin_lock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd_dpc_prio >= 0)
+		up(&dhd->sdsem);
+	else
+		spin_unlock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+}
+
+static void
+dhd_os_rxflock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->rxf_lock);
+
+}
+
+static void
+dhd_os_rxfunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->rxf_lock);
+}
+
+#ifdef DHDTCPACK_SUPPRESS
+void
+dhd_os_tcpacklock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->tcpack_lock);
+
+}
+
+void
+dhd_os_tcpackunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->tcpack_lock);
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
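+/* return a buffer from the platform preallocation pool for this section,
+ * optionally falling back to kmalloc when the pool has no entry
+ */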
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
+{
+	uint8* buf;
+	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+
+	buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
+	if (buf == NULL && kmalloc_if_fail)
+		buf = kmalloc(size, flags);
+
+	return buf;
+}
+
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
+{
+}
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+	int res = 0;
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (!dhd->pub.up) {
+		return NULL;
+	}
+
+	res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+	if (res == 0)
+		return &dhd->iw.wstats;
+	else
+		return NULL;
+}
+#endif /* defined(WL_WIRELESS_EXT) */
+
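+/* decode a dongle event message and dispatch it to WEXT and/or cfg80211 for the interface */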
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+	wl_event_msg_t *event, void **data)
+{
+	int bcmerror = 0;
+	ASSERT(dhd != NULL);
+
+#ifdef SHOW_LOGTRACE
+		bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
+#else
+		bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
+#endif /* SHOW_LOGTRACE */
+
+	if (bcmerror != BCME_OK)
+		return (bcmerror);
+
+#if defined(WL_WIRELESS_EXT)
+	if (event->bsscfgidx == 0) {
+		/*
+		 * Wireless ext is on primary interface only
+		 */
+
+		ASSERT(dhd->iflist[*ifidx] != NULL);
+		ASSERT(dhd->iflist[*ifidx]->net != NULL);
+
+		if (dhd->iflist[*ifidx]->net) {
+			wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
+		}
+	}
+#endif /* defined(WL_WIRELESS_EXT)  */
+
+#ifdef WL_CFG80211
+	ASSERT(dhd->iflist[*ifidx] != NULL);
+	ASSERT(dhd->iflist[*ifidx]->net != NULL);
+	if (dhd->iflist[*ifidx]->net)
+		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
+#endif /* defined(WL_CFG80211) */
+
+	return (bcmerror);
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+	switch (ntoh32(event->event_type)) {
+#ifdef WLBTAMP
+	/* Send up locally generated AMP HCI Events */
+	case WLC_E_BTA_HCI_EVENT: {
+		struct sk_buff *p, *skb;
+		bcm_event_t *msg;
+		wl_event_msg_t *p_bcm_event;
+		char *ptr;
+		uint32 len;
+		uint32 pktlen;
+		dhd_if_t *ifp;
+		dhd_info_t *dhd;
+		uchar *eth;
+		int ifidx;
+
+		len = ntoh32(event->datalen);
+		pktlen = sizeof(bcm_event_t) + len + 2;
+		dhd = dhdp->info;
+		ifidx = dhd_ifname2idx(dhd, event->ifname);
+
+		if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+			ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+			msg = (bcm_event_t *) PKTDATA(dhdp->osh, p);
+
+			bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN);
+			bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN);
+			ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost);
+
+			msg->eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+			/* BCM Vendor specific header... */
+			msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG);
+			msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION;
+			bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN);
+
+			/* vendor spec header length + pvt data length (private indication
+			 *  hdr + actual message itself)
+			 */
+			msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH +
+				BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len);
+			msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT);
+
+			PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
+
+			/* copy  wl_event_msg_t into sk_buf */
+
+			/* pointer to wl_event_msg_t in sk_buf */
+			p_bcm_event = &msg->event;
+			bcopy(event, p_bcm_event, sizeof(wl_event_msg_t));
+
+			/* copy hci event into sk_buf */
+			bcopy(data, (p_bcm_event + 1), len);
+
+			msg->bcm_hdr.length  = hton16(sizeof(wl_event_msg_t) +
+				ntoh16(msg->bcm_hdr.length));
+			PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
+
+			ptr = (char *)(msg + 1);
+			/* Last 2 bytes of the message are 0x00 0x00 to signal that there
+			 * are no ethertypes which are following this
+			 */
+			ptr[len+0] = 0x00;
+			ptr[len+1] = 0x00;
+
+			skb = PKTTONATIVE(dhdp->osh, p);
+			eth = skb->data;
+			len = skb->len;
+
+			ifp = dhd->iflist[ifidx];
+			if (ifp == NULL)
+				ifp = dhd->iflist[0];
+
+			ASSERT(ifp);
+			skb->dev = ifp->net;
+			skb->protocol = eth_type_trans(skb, skb->dev);
+
+			skb->data = eth;
+			skb->len = len;
+
+			/* Strip header, count, deliver upward */
+			skb_pull(skb, ETH_HLEN);
+
+			/* Send the packet */
+			if (in_interrupt()) {
+				netif_rx(skb);
+			} else {
+				netif_rx_ni(skb);
+			}
+		}
+		else {
+			/* Could not allocate a sk_buf */
+			DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
+		}
+		break;
+	} /* case WLC_E_BTA_HCI_EVENT */
+#endif /* WLBTAMP */
+
+	default:
+		break;
+	}
+}
+
+#ifdef LOG_INTO_TCPDUMP
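+/* wrap log data in a BRCM ethertype frame and send it up the primary interface (for tcpdump) */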
+void
+dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
+{
+	struct sk_buff *p, *skb;
+	uint32 pktlen;
+	int len;
+	dhd_if_t *ifp;
+	dhd_info_t *dhd;
+	uchar *skb_data;
+	int ifidx = 0;
+	struct ether_header eth;
+
+	pktlen = sizeof(eth) + data_len;
+	dhd = dhdp->info;
+
+	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
+		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
+		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
+		eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
+		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
+		skb = PKTTONATIVE(dhdp->osh, p);
+		skb_data = skb->data;
+		len = skb->len;
+
+		ifidx = dhd_ifname2idx(dhd, "wlan0");
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL)
+			 ifp = dhd->iflist[0];
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		skb->data = skb_data;
+		skb->len = len;
+
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		/* Send the packet */
+		if (in_interrupt()) {
+			netif_rx(skb);
+		} else {
+			netif_rx_ni(skb);
+		}
+	}
+	else {
+		/* Could not allocate a sk_buf */
+		DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
+	}
+}
+#endif /* LOG_INTO_TCPDUMP */
+
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
+#else
+	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+	dhd_os_sdunlock(dhd);
+	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
+	dhd_os_sdlock(dhd);
+#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+	return;
+}
+
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+	if (waitqueue_active(&dhdinfo->ctrl_wait))
+		wake_up(&dhdinfo->ctrl_wait);
+#endif
+	return;
+}
+
+#if defined(BCMSDIO) || defined(BCMPCIE)
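+/* put the dongle in or out of reset: on reset issue "wl down" and tear down WLFC/PNO state first,
+ * on release refresh the firmware/nvram paths before resetting the bus
+ */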
+int
+dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
+{
+	int ret = 0;
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (flag == TRUE) {
+		/* Issue wl down command before resetting the chip */
+		if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+			DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
+		}
+#ifdef PROP_TXSTATUS
+		if (dhd->pub.wlfc_enabled)
+			dhd_wlfc_deinit(&dhd->pub);
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+	if (dhd->pub.pno_state)
+		dhd_pno_deinit(&dhd->pub);
+#endif
+	}
+
+#ifdef BCMSDIO
+	if (!flag) {
+		dhd_update_fw_nv_path(dhd);
+		/* update firmware and nvram path to sdio bus */
+		dhd_bus_update_fw_nv_path(dhd->pub.bus,
+			dhd->fw_path, dhd->nv_path);
+	}
+#endif /* BCMSDIO */
+
+	ret = dhd_bus_devreset(&dhd->pub, flag);
+	if (ret) {
+		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+		return ret;
+	}
+
+	return ret;
+}
+
+#ifdef BCMSDIO
+int
+dhd_net_bus_suspend(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return dhd_bus_suspend(&dhd->pub);
+}
+
+int
+dhd_net_bus_resume(struct net_device *dev, uint8 stage)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return dhd_bus_resume(&dhd->pub, stage);
+}
+
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMPCIE */
+
+int net_os_set_suspend_disable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd) {
+		ret = dhd->pub.suspend_disable_flag;
+		dhd->pub.suspend_disable_flag = val;
+	}
+	return ret;
+}
+
+int net_os_set_suspend(struct net_device *dev, int val, int force)
+{
+	int ret = 0;
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (dhd) {
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+		ret = dhd_set_suspend(val, &dhd->pub);
+#else
+		ret = dhd_suspend_resume_helper(dhd, val, force);
+#endif
+#ifdef WL_CFG80211
+		wl_cfg80211_update_power_mode(dev);
+#endif
+	}
+	return ret;
+}
+
+int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (dhd)
+		dhd->pub.suspend_bcn_li_dtim = val;
+
+	return 0;
+}
+
+#ifdef PKT_FILTER_SUPPORT
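+/* install or remove one of the predefined broadcast/multicast packet filters in the dongle */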
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	char *filterp = NULL;
+	int filter_id = 0;
+	int ret = 0;
+
+	if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
+		(num == DHD_MDNS_FILTER_NUM))
+		return ret;
+	if (num >= dhd->pub.pktfilter_count)
+		return -EINVAL;
+	switch (num) {
+		case DHD_BROADCAST_FILTER_NUM:
+			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+			filter_id = 101;
+			break;
+		case DHD_MULTICAST4_FILTER_NUM:
+			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
+			filter_id = 102;
+			break;
+		case DHD_MULTICAST6_FILTER_NUM:
+			filterp = "103 0 0 0 0xFFFF 0x3333";
+			filter_id = 103;
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	/* Add filter */
+	if (add_remove) {
+		dhd->pub.pktfilter[num] = filterp;
+		dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
+	} else { /* Delete filter */
+		if (dhd->pub.pktfilter[num] != NULL) {
+			dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
+			dhd->pub.pktfilter[num] = NULL;
+		}
+	}
+	return ret;
+}
+
+int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
+
+{
+	int ret = 0;
+
+	/* Packet filtering is set only if we are still in early-suspend and
+	 * we need either to turn it ON or turn it OFF.
+	 * We can always turn it OFF in case of early-suspend, but we turn it
+	 * back ON only if suspend_disable_flag was not set.
+	 */
+	if (dhdp && dhdp->up) {
+		if (dhdp->in_suspend) {
+			if (!val || (val && !dhdp->suspend_disable_flag))
+				dhd_enable_packet_filter(val, dhdp);
+		}
+	}
+	return ret;
+}
+
+/* function to enable/disable packet filtering for a network device */
+int net_os_enable_packet_filter(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	return dhd_os_enable_packet_filter(&dhd->pub, val);
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+int
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret;
+
+	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
+		goto done;
+
+done:
+	return ret;
+}
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_stop_for_ssid */
+int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	return (dhd_pno_stop_for_ssid(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_set_for_ssid */
+int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
+		pno_repeat, pno_freq_expo_max, channel_list, nchan));
+}
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev, int enable)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	return (dhd_pno_enable(&dhd->pub, enable));
+}
+
+/* Linux wrapper to call common dhd_pno_set_for_hotlist */
+int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
+}
+/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
+int
+dhd_dev_pno_stop_for_batch(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_stop_for_batch(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
+int
+dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
+}
+/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
+int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
+}
+#endif /* PNO_SUPPORT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
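+/* deferred work handler for a firmware hang: close the primary interface and
+ * propagate a HANG indication through WEXT/cfg80211
+ */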
+static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
+{
+	dhd_info_t *dhd;
+	struct net_device *dev;
+
+	dhd = (dhd_info_t *)dhd_info;
+	dev = dhd->iflist[0]->net;
+
+	if (dev) {
+		rtnl_lock();
+		dev_close(dev);
+		rtnl_unlock();
+#if defined(WL_WIRELESS_EXT)
+		wl_iw_send_priv_event(dev, "HANG");
+#endif
+#if defined(WL_CFG80211)
+		wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
+	}
+}
+
+int dhd_os_send_hang_message(dhd_pub_t *dhdp)
+{
+	int ret = 0;
+	if (dhdp) {
+		if (!dhdp->hang_was_sent) {
+			dhdp->hang_was_sent = 1;
+			dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
+				DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
+		}
+	}
+	return ret;
+}
+
+int net_os_send_hang_message(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd) {
+		/* Report FW problem when enabled */
+		if (dhd->pub.hang_report) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+			ret = dhd_os_send_hang_message(&dhd->pub);
+#else
+			ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
+		} else {
+			DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
+				__FUNCTION__));
+			/* Enforce bus down to stop any future traffic */
+			dhd->pub.busstate = DHD_BUS_DOWN;
+		}
+	}
+	return ret;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
+
+
+int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return wifi_platform_set_power(dhd->adapter, on, delay_msec);
+}
+
+void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+	wl_country_t *cspec)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	get_customized_country_code(dhd->adapter, country_iso_code, cspec);
+}
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	if (dhd && dhd->pub.up) {
+		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+#ifdef WL_CFG80211
+		wl_update_wiphybands(NULL, notify);
+#endif
+	}
+}
+
+void dhd_bus_band_set(struct net_device *dev, uint band)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	if (dhd && dhd->pub.up) {
+#ifdef WL_CFG80211
+		wl_update_wiphybands(NULL, true);
+#endif
+	}
+}
+
+int dhd_net_set_fw_path(struct net_device *dev, char *fw)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (!fw || fw[0] == '\0')
+		return -EINVAL;
+
+	strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
+	dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
+
+#if defined(SOFTAP)
+	if (strstr(fw, "apsta") != NULL) {
+		DHD_INFO(("GOT APSTA FIRMWARE\n"));
+		ap_fw_loaded = TRUE;
+	} else {
+		DHD_INFO(("GOT STA FIRMWARE\n"));
+		ap_fw_loaded = FALSE;
+	}
+#endif 
+	return 0;
+}
+
+void dhd_net_if_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	dhd_net_if_lock_local(dhd);
+}
+
+void dhd_net_if_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void dhd_net_if_lock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	if (dhd)
+		mutex_lock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	if (dhd)
+		mutex_unlock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_suspend_lock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	if (dhd)
+		mutex_lock(&dhd->dhd_suspend_mutex);
+#endif
+}
+
+static void dhd_suspend_unlock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	if (dhd)
+		mutex_unlock(&dhd->dhd_suspend_mutex);
+#endif
+}
+
+unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags = 0;
+
+	if (dhd)
+		spin_lock_irqsave(&dhd->dhd_lock, flags);
+
+	return flags;
+}
+
+void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd)
+		spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+}
+
+/* Linux specific multipurpose spinlock API */
+void *
+dhd_os_spin_lock_init(osl_t *osh)
+{
+	/* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
+	/* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
+	/* and this results in kernel asserts in internal builds */
+	spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
+	if (lock)
+		spin_lock_init(lock);
+	return ((void *)lock);
+}
+void
+dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
+{
+	MFREE(osh, lock, sizeof(spinlock_t) + 4);
+}
+unsigned long
+dhd_os_spin_lock(void *lock)
+{
+	unsigned long flags = 0;
+
+	if (lock)
+		spin_lock_irqsave((spinlock_t *)lock, flags);
+
+	return flags;
+}
+void
+dhd_os_spin_unlock(void *lock, unsigned long flags)
+{
+	if (lock)
+		spin_unlock_irqrestore((spinlock_t *)lock, flags);
+}
+
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+	return (atomic_read(&dhd->pend_8021x_cnt));
+}
+
+#define MAX_WAIT_FOR_8021X_TX	100
+
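+/* poll until all pending 802.1X frames are transmitted, up to MAX_WAIT_FOR_8021X_TX * 10 ms */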
+int
+dhd_wait_pend8021x(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int timeout = msecs_to_jiffies(10);
+	int ntimes = MAX_WAIT_FOR_8021X_TX;
+	int pend = dhd_get_pend_8021x_cnt(dhd);
+
+	while (ntimes && pend) {
+		if (pend) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			DHD_PERIM_UNLOCK(&dhd->pub);
+			schedule_timeout(timeout);
+			DHD_PERIM_LOCK(&dhd->pub);
+			set_current_state(TASK_RUNNING);
+			ntimes--;
+		}
+		pend = dhd_get_pend_8021x_cnt(dhd);
+	}
+	if (ntimes == 0)
+	{
+		atomic_set(&dhd->pend_8021x_cnt, 0);
+		DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
+	}
+	return pend;
+}
+
+#ifdef DHD_DEBUG
+int
+write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
+{
+	int ret = 0;
+	struct file *fp;
+	mm_segment_t old_fs;
+	loff_t pos = 0;
+
+	/* change to KERNEL_DS address limit */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	/* open file to write */
+	fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
+	if (IS_ERR(fp)) {
+		printf("%s: open file error\n", __FUNCTION__);
+		ret = -1;
+		fp = NULL;
+		goto exit;
+	}
+
+	/* Write buf to file */
+	fp->f_op->write(fp, buf, size, &pos);
+
+exit:
+	/* free buf before return */
+	MFREE(dhd->osh, buf, size);
+	/* close file before return */
+	if (fp)
+		filp_close(fp, current->files);
+	/* restore previous address limit */
+	set_fs(old_fs);
+
+	return ret;
+}
+#endif /* DHD_DEBUG */
+
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
+			dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
+#ifdef CONFIG_HAS_WAKELOCK
+		if (dhd->wakelock_rx_timeout_enable)
+			wake_lock_timeout(&dhd->wl_rxwake,
+				msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
+		if (dhd->wakelock_ctrl_timeout_enable)
+			wake_lock_timeout(&dhd->wl_ctrlwake,
+				msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
+#endif
+		dhd->wakelock_rx_timeout_enable = 0;
+		dhd->wakelock_ctrl_timeout_enable = 0;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_timeout(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (val > dhd->wakelock_rx_timeout_enable)
+			dhd->wakelock_rx_timeout_enable = val;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (val > dhd->wakelock_ctrl_timeout_enable)
+			dhd->wakelock_ctrl_timeout_enable = val;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+		if (wake_lock_active(&dhd->wl_ctrlwake))
+			wake_unlock(&dhd->wl_ctrlwake);
+#endif
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
+	return ret;
+}
+
+int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
+	return ret;
+}
+
+int dhd_os_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+
+		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+			wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+			dhd_bus_dev_pm_stay_awake(pub);
+#endif
+		}
+		dhd->wakelock_counter++;
+		ret = dhd->wakelock_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int net_os_wake_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	dhd_os_wake_lock_timeout(pub);
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_counter > 0) {
+			dhd->wakelock_counter--;
+			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+				wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+				dhd_bus_dev_pm_relax(pub);
+#endif
+			}
+			ret = dhd->wakelock_counter;
+		}
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
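+/* report whether any DHD wakelock is held so the host controller/PM core can veto suspend */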
+int dhd_os_check_wakelock(dhd_pub_t *pub)
+{
+#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
+	KERNEL_VERSION(2, 6, 36)))
+	dhd_info_t *dhd;
+
+	if (!pub)
+		return 0;
+	dhd = (dhd_info_t *)(pub->info);
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+
+#ifdef CONFIG_HAS_WAKELOCK
+	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
+	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
+		(wake_lock_active(&dhd->wl_wdwake))))
+		return 1;
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
+		return 1;
+#endif
+	return 0;
+}
+int net_os_wake_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_unlock(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wd_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#ifdef CONFIG_HAS_WAKELOCK
+		/* if wakelock_wd_counter was never used : lock it at once */
+		if (!dhd->wakelock_wd_counter)
+			wake_lock(&dhd->wl_wdwake);
+#endif
+		dhd->wakelock_wd_counter++;
+		ret = dhd->wakelock_wd_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_wd_counter) {
+			dhd->wakelock_wd_counter = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+			wake_unlock(&dhd->wl_wdwake);
+#endif
+		}
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+/* Waive wakelocks for operations such as IOVARs in the suspend path; must be closed
+ * by a paired call to dhd_os_wake_lock_restore(). Returns the current wakelock counter.
+ */
+int dhd_os_wake_lock_waive(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+		if (dhd->waive_wakelock == FALSE) {
+			/* record current lock status */
+			dhd->wakelock_before_waive = dhd->wakelock_counter;
+			dhd->waive_wakelock = TRUE;
+		}
+		ret = dhd->wakelock_wd_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int dhd_os_wake_lock_restore(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!dhd)
+		return 0;
+
+	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+	if (!dhd->waive_wakelock)
+		goto exit;
+
+	dhd->waive_wakelock = FALSE;
+	/* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
+	 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
+	 * the lock in between, do the same by calling wake_unlock or pm_relax
+	 */
+	if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+		dhd_bus_dev_pm_stay_awake(&dhd->pub);
+#endif
+	} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+		dhd_bus_dev_pm_relax(&dhd->pub);
+#endif
+	}
+	dhd->wakelock_before_waive = 0;
+exit:
+	ret = dhd->wakelock_wd_counter;
+	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	return ret;
+}
+
+bool dhd_os_check_if_up(dhd_pub_t *pub)
+{
+	if (!pub)
+		return FALSE;
+	return pub->up;
+}
+
+#if defined(BCMSDIO)
+/* function to collect firmware, chip id and chip version info */
+void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
+{
+	int i;
+
+	i = snprintf(info_string, sizeof(info_string),
+		"  Driver: %s\n  Firmware: %s ", EPI_VERSION_STR, fw);
+
+	if (!dhdp)
+		return;
+
+	i = snprintf(&info_string[i], sizeof(info_string) - i,
+		"\n  Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
+		dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
+}
+#endif /* defined(BCMSDIO) */
+int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
+{
+	int ifidx;
+	int ret = 0;
+	dhd_info_t *dhd = NULL;
+
+	if (!net || !DEV_PRIV(net)) {
+		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd = DHD_DEV_INFO(net);
+	if (!dhd)
+		return -EINVAL;
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
+	dhd_check_hang(net, &dhd->pub, ret);
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	return ret;
+}
+
+bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
+{
+	struct net_device *net;
+
+	net = dhd_idx2net(dhdp, ifidx);
+	if (!net) {
+		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
+		return -EINVAL;
+	}
+
+	return dhd_check_hang(net, dhdp, ret);
+}
+
+/* Return instance */
+int dhd_get_instance(dhd_pub_t *dhdp)
+{
+	return dhdp->info->unit;
+}
+
+
+#ifdef PROP_TXSTATUS
+
+void dhd_wlfc_plat_init(void *dhd)
+{
+	return;
+}
+
+void dhd_wlfc_plat_deinit(void *dhd)
+{
+	return;
+}
+
+bool dhd_wlfc_skip_fc(void)
+{
+	return FALSE;
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef BCMDBGFS
+
+#include <linux/debugfs.h>
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
+typedef struct dhd_dbgfs {
+	struct dentry	*debugfs_dir;
+	struct dentry	*debugfs_mem;
+	dhd_pub_t 	*dhdp;
+	uint32 		size;
+} dhd_dbgfs_t;
+
+dhd_dbgfs_t g_dbgfs;
+
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+                       size_t count, loff_t *ppos)
+{
+	ssize_t rval;
+	uint32 tmp;
+	loff_t pos = *ppos;
+	size_t ret;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+
+	ret = copy_to_user(ubuf, &tmp, 4);
+	if (ret == count)
+		return -EFAULT;
+
+	count -= ret;
+	*ppos = pos + count;
+	rval = count;
+
+	return rval;
+}
+
+
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	size_t ret;
+	uint32 buf;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+	if (ret == count)
+		return -EFAULT;
+
+	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+	return count;
+}
+
+
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+	loff_t pos = -1;
+
+	switch (whence) {
+		case 0:
+			pos = off;
+			break;
+		case 1:
+			pos = file->f_pos + off;
+			break;
+		case 2:
+			pos = g_dbgfs.size - off;
+	}
+	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
+
+static const struct file_operations dhd_dbg_state_ops = {
+	.read   = dhd_dbg_state_read,
+	.write	= dhd_debugfs_write,
+	.open   = dhd_dbg_state_open,
+	.llseek	= dhd_debugfs_lseek
+};
+
+static void dhd_dbg_create(void)
+{
+	if (g_dbgfs.debugfs_dir) {
+		g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+			NULL, &dhd_dbg_state_ops);
+	}
+}
+
+void dhd_dbg_init(dhd_pub_t *dhdp)
+{
+	int err;
+
+	g_dbgfs.dhdp = dhdp;
+	g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
+
+	g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
+	if (IS_ERR(g_dbgfs.debugfs_dir)) {
+		err = PTR_ERR(g_dbgfs.debugfs_dir);
+		g_dbgfs.debugfs_dir = NULL;
+		return;
+	}
+
+	dhd_dbg_create();
+
+	return;
+}
+
+void dhd_dbg_remove(void)
+{
+	debugfs_remove(g_dbgfs.debugfs_mem);
+	debugfs_remove(g_dbgfs.debugfs_dir);
+
+	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+
+}
+#endif /* ifdef BCMDBGFS */
+
+#ifdef WLMEDIA_HTSF
+
+static
+void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct sk_buff *skb;
+	uint32 htsf = 0;
+	uint16 dport = 0, oldmagic = 0xACAC;
+	char *p1;
+	htsfts_t ts;
+
+	/*  timestamp packet  */
+
+	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+/*		memcpy(&proto, p1+26, 4);  	*/
+		memcpy(&dport, p1+40, 2);
+/* 	proto = ((ntoh32(proto))>> 16) & 0xFF;  */
+		dport = ntoh16(dport);
+	}
+
+	/* timestamp only if icmp or udp iperf with port 5555 */
+/*	if (proto == 17 && dport == tsport) { */
+	if (dport >= tsport && dport <= tsport + 20) {
+
+		skb = (struct sk_buff *) pktbuf;
+
+		htsf = dhd_get_htsf(dhd, 0);
+		memset(skb->data + 44, 0, 2); /* clear checksum */
+		memcpy(skb->data+82, &oldmagic, 2);
+		memcpy(skb->data+84, &htsf, 4);
+
+		memset(&ts, 0, sizeof(htsfts_t));
+		ts.magic  = HTSFMAGIC;
+		ts.prio   = PKTPRIO(pktbuf);
+		ts.seqnum = htsf_seqnum++;
+		ts.c10    = get_cycles();
+		ts.t10    = htsf;
+		ts.endmagic = HTSFENDMAGIC;
+
+		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
+	}
+}
+
+static void dhd_dump_htsfhisto(histo_t *his, char *s)
+{
+	int pktcnt = 0, curval = 0, i;
+	for (i = 0; i < (NUMBIN-2); i++) {
+		curval += 500;
+		printf("%d ",  his->bin[i]);
+		pktcnt += his->bin[i];
+	}
+	printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
+		his->bin[NUMBIN-1], s);
+}
+
+static
+void sorttobin(int value, histo_t *histo)
+{
+	int i, binval = 0;
+
+	if (value < 0) {
+		histo->bin[NUMBIN-1]++;
+		return;
+	}
+	if (value > histo->bin[NUMBIN-2])  /* store the max value  */
+		histo->bin[NUMBIN-2] = value;
+
+	for (i = 0; i < (NUMBIN-2); i++) {
+		binval += 500; /* 500 ms bins */
+		if (value <= binval) {
+			histo->bin[i]++;
+			return;
+		}
+	}
+	histo->bin[NUMBIN-3]++;
+}
+
+static
+void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	char *p1;
+	uint16 old_magic;
+	int d1, d2, d3, end2end;
+	htsfts_t *htsf_ts;
+	uint32 htsf;
+
+	skb = PKTTONATIVE(dhdp->osh, pktbuf);
+	p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+		memcpy(&old_magic, p1+78, 2);
+		htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
+	}
+	else
+		return;
+
+	if (htsf_ts->magic == HTSFMAGIC) {
+		htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
+		htsf_ts->cE0 = get_cycles();
+	}
+
+	if (old_magic == 0xACAC) {
+
+		tspktcnt++;
+		htsf = dhd_get_htsf(dhd, 0);
+		memcpy(skb->data+92, &htsf, sizeof(uint32));
+
+		memcpy(&ts[tsidx].t1, skb->data+80, 16);
+
+		d1 = ts[tsidx].t2 - ts[tsidx].t1;
+		d2 = ts[tsidx].t3 - ts[tsidx].t2;
+		d3 = ts[tsidx].t4 - ts[tsidx].t3;
+		end2end = ts[tsidx].t4 - ts[tsidx].t1;
+
+		sorttobin(d1, &vi_d1);
+		sorttobin(d2, &vi_d2);
+		sorttobin(d3, &vi_d3);
+		sorttobin(end2end, &vi_d4);
+
+		if (end2end > 0 && end2end >  maxdelay) {
+			maxdelay = end2end;
+			maxdelaypktno = tspktcnt;
+			memcpy(&maxdelayts, &ts[tsidx], 16);
+		}
+		if (++tsidx >= TSMAX)
+			tsidx = 0;
+	}
+}
+
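+/*
+ * Extrapolate the current TSF value from the last TSF/cycle-counter snapshot.
+ * The elapsed cycle count (pre-scaled by >> 4) is divided by the
+ * cycles-per-tick coefficient maintained in dhd->htsf, effectively
+ * coef + coefdec1/10 + coefdec2/100 (the two fractional digits are applied by
+ * interpolating between 1/factor and 1/(factor + 1)), and the result is added
+ * to the last TSF plus the fixed HTSF_BUS_DELAY offset.
+ */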
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
+{
+	uint32 htsf = 0, cur_cycle, delta, delta_us;
+	uint32    factor, baseval, baseval2;
+	cycles_t t;
+
+	t = get_cycles();
+	cur_cycle = t;
+
+	if (cur_cycle >  dhd->htsf.last_cycle)
+		delta = cur_cycle -  dhd->htsf.last_cycle;
+	else {
+		delta = cur_cycle + (0xFFFFFFFF -  dhd->htsf.last_cycle);
+	}
+
+	delta = delta >> 4;
+
+	if (dhd->htsf.coef) {
+		/* times ten to get the first digit */
+	        factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
+		baseval  = (delta*10)/factor;
+		baseval2 = (delta*10)/(factor+1);
+		delta_us  = (baseval -  (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
+		htsf = (delta_us << 4) +  dhd->htsf.last_tsf + HTSF_BUS_DELAY;
+	}
+	else {
+		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
+	}
+
+	return htsf;
+}
+
+static void dhd_dump_latency(void)
+{
+	int i, max = 0;
+	int d1, d2, d3, d4, d5;
+
+	printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
+	for (i = 0; i < TSMAX; i++) {
+		d1 = ts[i].t2 - ts[i].t1;
+		d2 = ts[i].t3 - ts[i].t2;
+		d3 = ts[i].t4 - ts[i].t3;
+		d4 = ts[i].t4 - ts[i].t1;
+		d5 = ts[max].t4-ts[max].t1;
+		if (d4 > d5 && d4 > 0)  {
+			max = i;
+		}
+		printf("%08X %08X %08X %08X \t%d %d %d   %d i=%d\n",
+			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
+			d1, d2, d3, d4, i);
+	}
+
+	printf("current idx = %d \n", tsidx);
+
+	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
+	printf("%08X %08X %08X %08X \t%d %d %d   %d\n",
+	maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
+	maxdelayts.t2 - maxdelayts.t1,
+	maxdelayts.t3 - maxdelayts.t2,
+	maxdelayts.t4 - maxdelayts.t3,
+	maxdelayts.t4 - maxdelayts.t1);
+}
+
+
+static int
+dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int ret;
+	uint32 s1, s2;
+
+	struct tsf {
+		uint32 low;
+		uint32 high;
+	} tsf_buf;
+
+	memset(&ioc, 0, sizeof(ioc));
+	memset(&tsf_buf, 0, sizeof(tsf_buf));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = FALSE;
+
+	strncpy(buf, "tsf", sizeof(buf) - 1);
+	buf[sizeof(buf) - 1] = '\0';
+	s1 = dhd_get_htsf(dhd, 0);
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: tsf is not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+		return ret;
+	}
+	s2 = dhd_get_htsf(dhd, 0);
+
+	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
+	printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
+		tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
+		dhd->htsf.coefdec2, s2-tsf_buf.low);
+	printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
+	return 0;
+}
+
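+/*
+ * Recompute the cycles-per-TSF-tick coefficient from a fresh TSF sample
+ * delivered by the dongle.  The integer ratio hfactor = cyc_delta / tsf_delta
+ * is extended with two decimal digits (dec1, dec2) and rounded using a third
+ * digit (dec3), then stored in dhd->htsf together with the reference cycle
+ * counter and TSF values used by dhd_get_htsf().  Zero TSF samples and
+ * backwards-moving TSF values (other than a 32-bit wrap) are ignored.
+ */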
+void htsf_update(dhd_info_t *dhd, void *data)
+{
+	static ulong  cur_cycle = 0, prev_cycle = 0;
+	uint32 htsf, tsf_delta = 0;
+	uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
+	ulong b, a;
+	cycles_t t;
+
+	/* cycles_t in include/mips/timex.h */
+
+	t = get_cycles();
+
+	prev_cycle = cur_cycle;
+	cur_cycle = t;
+
+	if (cur_cycle > prev_cycle)
+		cyc_delta = cur_cycle - prev_cycle;
+	else {
+		b = cur_cycle;
+		a = prev_cycle;
+		cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
+	}
+
+	if (data == NULL)
+		printf(" tsf update ata point er is null \n");
+
+	memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
+	memcpy(&cur_tsf, data, sizeof(tsf_t));
+
+	if (cur_tsf.low == 0) {
+		DHD_INFO((" ---- 0 TSF, do not update, return\n"));
+		return;
+	}
+
+	if (cur_tsf.low > prev_tsf.low)
+		tsf_delta = (cur_tsf.low - prev_tsf.low);
+	else {
+		DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
+		 cur_tsf.low, prev_tsf.low));
+		if (cur_tsf.high > prev_tsf.high) {
+			tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
+			DHD_INFO((" ---- Wrap around tsf coutner  adjusted TSF=%08X\n", tsf_delta));
+		}
+		else
+			return; /* do not update */
+	}
+
+	if (tsf_delta)  {
+		hfactor = cyc_delta / tsf_delta;
+		tmp  = 	(cyc_delta - (hfactor * tsf_delta))*10;
+		dec1 =  tmp/tsf_delta;
+		dec2 =  ((tmp - dec1*tsf_delta)*10) / tsf_delta;
+		tmp  = 	(tmp   - (dec1*tsf_delta))*10;
+		dec3 =  ((tmp - dec2*tsf_delta)*10) / tsf_delta;
+
+		if (dec3 > 4) {
+			if (dec2 == 9) {
+				dec2 = 0;
+				if (dec1 == 9) {
+					dec1 = 0;
+					hfactor++;
+				}
+				else {
+					dec1++;
+				}
+			}
+			else
+				dec2++;
+		}
+	}
+
+	if (hfactor) {
+		htsf = ((cyc_delta * 10)  / (hfactor*10+dec1)) + prev_tsf.low;
+		dhd->htsf.coef = hfactor;
+		dhd->htsf.last_cycle = cur_cycle;
+		dhd->htsf.last_tsf = cur_tsf.low;
+		dhd->htsf.coefdec1 = dec1;
+		dhd->htsf.coefdec2 = dec2;
+	}
+	else {
+		htsf = prev_tsf.low;
+	}
+}
+
+#endif /* WLMEDIA_HTSF */
+
+#ifdef CUSTOM_SET_CPUCORE
+void dhd_set_cpucore(dhd_pub_t *dhd, int set)
+{
+	int e_dpc = 0, e_rxf = 0, retry_set = 0;
+
+	if (!(dhd->chan_isvht80)) {
+		DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
+		return;
+	}
+
+	if (DPC_CPUCORE) {
+		do {
+			if (set == TRUE) {
+				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+					cpumask_of(DPC_CPUCORE));
+			} else {
+				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+					cpumask_of(PRIMARY_CPUCORE));
+			}
+			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+				DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
+				return;
+			}
+			if (e_dpc < 0)
+				OSL_SLEEP(1);
+		} while (e_dpc < 0);
+	}
+	if (RXF_CPUCORE) {
+		do {
+			if (set == TRUE) {
+				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+					cpumask_of(RXF_CPUCORE));
+			} else {
+				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+					cpumask_of(PRIMARY_CPUCORE));
+			}
+			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+				DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
+				return;
+			}
+			if (e_rxf < 0)
+				OSL_SLEEP(1);
+		} while (e_rxf < 0);
+	}
+#ifdef DHD_OF_SUPPORT
+	interrupt_set_cpucore(set);
+#endif /* DHD_OF_SUPPORT */
+	DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
+
+	return;
+}
+#endif /* CUSTOM_SET_CPUCORE */
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+static int dhd_port_list_match(int port)
+{
+	int i;
+	for (i = 0; i < MAX_TARGET_PORTS; i++) {
+		if (target_ports[i] == port)
+			return 1;
+	}
+	return 0;
+}
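+
+/*
+ * In hostap mode, bump the advertised TCP receive window of packets destined
+ * to one of the target_ports when it is below MIN_TCP_WIN_SIZE: the window is
+ * multiplied by WIN_SIZE_SCALE_FACTOR and the TCP checksum is patched
+ * incrementally with the difference, avoiding a full recomputation over the
+ * payload.
+ */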
+static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb)
+{
+	struct iphdr *ipheader;
+	struct tcphdr *tcpheader;
+	uint16 win_size;
+	int32 incremental_checksum;
+
+	if (!(op_mode & DHD_FLAG_HOSTAP_MODE))
+		return;
+	if (skb == NULL || skb->data == NULL)
+		return;
+
+	ipheader = (struct iphdr*)(skb->data);
+
+	if (ipheader->protocol == IPPROTO_TCP) {
+		tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2);
+		if (tcpheader) {
+			win_size = ntoh16(tcpheader->window);
+			if (win_size < MIN_TCP_WIN_SIZE &&
+				dhd_port_list_match(ntoh16(tcpheader->dest))) {
+				incremental_checksum = ntoh16(tcpheader->check);
+				incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR;
+				if (incremental_checksum < 0)
+					--incremental_checksum;
+				tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR);
+				tcpheader->check = hton16((unsigned short)incremental_checksum);
+			}
+		}
+		skb_push(skb, (ipheader->ihl)<<2);
+	}
+}
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+/* Get interface specific ap_isolate configuration */
+int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	return ifp->ap_isolate;
+}
+
+/* Set interface specific ap_isolate configuration */
+int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	ifp->ap_isolate = val;
+
+	return 0;
+}
+
+#ifdef DHD_WMF
+/* Returns interface specific WMF configuration */
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+	return &ifp->wmf;
+}
+#endif /* DHD_WMF */
+
+
+#ifdef DHD_UNICAST_DHCP
+static int
+dhd_get_pkt_ether_type(dhd_pub_t *pub, void *pktbuf,
+	uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
+{
+	uint8 *frame = PKTDATA(pub->osh, pktbuf);
+	int length = PKTLEN(pub->osh, pktbuf);
+	uint8 *pt;			/* Pointer to type field */
+	uint16 ethertype;
+	bool snap = FALSE;
+	/* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+	if (length < ETHER_HDR_LEN) {
+		DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
+		           __FUNCTION__, length));
+		return BCME_ERROR;
+	} else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
+		/* Frame is Ethernet II */
+		pt = frame + ETHER_TYPE_OFFSET;
+	} else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+	           !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+		pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+		snap = TRUE;
+	} else {
+		DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
+		           __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	ethertype = ntoh16_ua(pt);
+
+	/* Skip VLAN tag, if any */
+	if (ethertype == ETHER_TYPE_8021Q) {
+		pt += VLAN_TAG_LEN;
+
+		if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
+			DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
+			          __FUNCTION__, length));
+			return BCME_ERROR;
+		}
+
+		ethertype = ntoh16_ua(pt);
+	}
+
+	*data_ptr = pt + ETHER_TYPE_LEN;
+	*len_ptr = length - (pt + ETHER_TYPE_LEN - frame);
+	*et_ptr = ethertype;
+	*snap_ptr = snap;
+	return BCME_OK;
+}
+
+static int
+dhd_get_pkt_ip_type(dhd_pub_t *pub, void *pktbuf,
+	uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
+{
+	struct ipv4_hdr *iph;		/* IP frame pointer */
+	int iplen;			/* IP frame length */
+	uint16 ethertype, iphdrlen, ippktlen;
+	uint16 iph_frag;
+	uint8 prot;
+	bool snap;
+
+	if (dhd_get_pkt_ether_type(pub, pktbuf, (uint8 **)&iph,
+	    &iplen, &ethertype, &snap) != 0)
+		return BCME_ERROR;
+
+	if (ethertype != ETHER_TYPE_IP) {
+		return BCME_ERROR;
+	}
+
+	/* We support IPv4 only */
+	if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
+		return BCME_ERROR;
+	}
+
+	/* Header length sanity */
+	iphdrlen = IPV4_HLEN(iph);
+
+	/*
+	 * Packet length sanity; sometimes we receive eth-frame size bigger
+	 * than the IP content, which results in a bad tcp chksum
+	 */
+	ippktlen = ntoh16(iph->tot_len);
+	if (ippktlen < iplen) {
+
+		DHD_INFO(("%s: extra frame length ignored\n",
+		          __FUNCTION__));
+		iplen = ippktlen;
+	} else if (ippktlen > iplen) {
+		DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
+		           __FUNCTION__, ippktlen - iplen));
+		return BCME_ERROR;
+	}
+
+	if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
+		DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
+		           __FUNCTION__, iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
+		return BCME_ERROR;
+	}
+
+	/*
+	 * We don't handle fragmented IP packets.  A first frag is indicated by the MF
+	 * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
+	 */
+	iph_frag = ntoh16(iph->frag);
+
+	if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
+		DHD_INFO(("DHD:%s: IP fragment not handled\n",
+		           __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	prot = IPV4_PROT(iph);
+
+	*data_ptr = (((uint8 *)iph) + iphdrlen);
+	*len_ptr = iplen - iphdrlen;
+	*prot_ptr = prot;
+	return BCME_OK;
+}
+
+/** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet	*/
+static
+int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx)
+{
+	dhd_sta_t* stainfo;
+	uint8 *eh = PKTDATA(pub->osh, pktbuf);
+	uint8 *udph;
+	uint8 *dhcp;
+	uint8 *chaddr;
+	int udpl;
+	int dhcpl;
+	uint16 port;
+	uint8 prot;
+
+	if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
+	    return BCME_ERROR;
+	if (dhd_get_pkt_ip_type(pub, pktbuf, &udph, &udpl, &prot) != 0)
+		return BCME_ERROR;
+	if (prot != IP_PROT_UDP)
+		return BCME_ERROR;
+	/* check frame length, at least UDP_HDR_LEN */
+	if (udpl < UDP_HDR_LEN) {
+		DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
+		    __FUNCTION__));
+		return BCME_ERROR;
+	}
+	port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
+	/* only process DHCP packets from server to client */
+	if (port != DHCP_PORT_CLIENT)
+		return BCME_ERROR;
+
+	dhcp = udph + UDP_HDR_LEN;
+	dhcpl = udpl - UDP_HDR_LEN;
+
+	if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
+		DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
+		    __FUNCTION__));
+		return BCME_ERROR;
+	}
+	/* only process DHCP reply(offer/ack) packets */
+	if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
+		return BCME_ERROR;
+	chaddr = dhcp + DHCP_CHADDR_OFFSET;
+	stainfo = dhd_find_sta(pub, ifidx, chaddr);
+	if (stainfo) {
+		bcopy(chaddr, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+		return BCME_OK;
+	}
+	return BCME_ERROR;
+}
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+/* Return BCME_OK if the packet is an ICMP echo request (i.e. a ping the
+ * L2 filter should block), BCME_ERROR otherwise
+ */
+static
+int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx)
+{
+	struct bcmicmp_hdr *icmph;
+	int udpl;
+	uint8 prot;
+
+	if (dhd_get_pkt_ip_type(pub, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
+		return BCME_ERROR;
+	if (prot == IP_PROT_ICMP) {
+		if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
+			return BCME_OK;
+	}
+	return BCME_ERROR;
+}
+#endif /* DHD_L2_FILTER */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.h b/drivers/net/wireless/bcmdhd/dhd_linux.h
new file mode 100644
index 0000000000000000000000000000000000000000..7bcc461e7272686c5d9f660c026f8b3e08fc6c5d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.h
@@ -0,0 +1,81 @@
+/*
+ * DHD Linux header file (dhd_linux exports for cfg80211 and other components)
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_linux.h 399301 2013-04-29 21:41:52Z $
+ */
+
+/* wifi platform functions for power, interrupt and pre-alloc, either
+ * from Android-like platform device data, or Broadcom wifi platform
+ * device data.
+ *
+ */
+#ifndef __DHD_LINUX_H__
+#define __DHD_LINUX_H__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif
+/* Linux wireless extension support */
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+#endif /* defined(WL_WIRELESS_EXT) */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+
+#define DHD_REGISTRATION_TIMEOUT  12000  /* msec : allowed time to finish dhd registration */
+
+typedef struct wifi_adapter_info {
+	const char	*name;
+	uint		irq_num;
+	uint		intr_flags;
+	const char	*fw_path;
+	const char	*nv_path;
+	void		*wifi_plat_data;	/* wifi ctrl func, for backward compatibility */
+	uint		bus_type;
+	uint		bus_num;
+	uint		slot_num;
+} wifi_adapter_info_t;
+
+typedef struct bcmdhd_wifi_platdata {
+	uint				num_adapters;
+	wifi_adapter_info_t	*adapters;
+} bcmdhd_wifi_platdata_t;
+
+/** Per STA params. A list of dhd_sta objects are managed in dhd_if */
+typedef struct dhd_sta {
+	uint16 flowid[NUMPRIO]; /* allocated flow ring ids (by priority) */
+	void * ifp;             /* associated dhd_if */
+	struct ether_addr ea;   /* station's ethernet mac address */
+	struct list_head list;  /* link into dhd_if::sta_list */
+	int idx;                /* index of self in dhd_pub::sta_pool[] */
+	int ifidx;              /* index of interface in dhd */
+} dhd_sta_t;
+typedef dhd_sta_t dhd_sta_pool_t;
+
+int dhd_wifi_platform_register_drv(void);
+void dhd_wifi_platform_unregister_drv(void);
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num,
+	uint32 slot_num);
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec);
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present);
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr);
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf);
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode);
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size);
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter);
+
+int dhd_get_fw_mode(struct dhd_info *dhdinfo);
+bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo);
+
+#ifdef DHD_WMF
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx);
+#endif /* DHD_WMF */
+#endif /* __DHD_LINUX_H__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c b/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c
new file mode 100644
index 0000000000000000000000000000000000000000..3b809903e2dc4b3accc14a1ba3f2e0d284f9b2c0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c
@@ -0,0 +1,797 @@
+/*
+ * Linux platform device for DHD WLAN adapter
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_linux_platdev.c 401742 2013-05-13 15:03:21Z $
+ */
+#include <typedefs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_linux.h>
+#include <wl_android.h>
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+#include <linux/wlan_plat.h>
+#endif
+#ifdef CONFIG_DTS
+#include<linux/regulator/consumer.h>
+#include<linux/of_gpio.h>
+#endif /* CONFIG_DTS */
+
+#if !defined(CONFIG_WIFI_CONTROL_FUNC)
+struct wifi_platform_data {
+	int (*set_power)(int val);
+	int (*set_reset)(int val);
+	int (*set_carddetect)(int val);
+	void *(*mem_prealloc)(int section, unsigned long size);
+	int (*get_mac_addr)(unsigned char *buf);
+	void *(*get_country_code)(char *ccode);
+};
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+
+#define WIFI_PLAT_NAME		"bcmdhd_wlan"
+#define WIFI_PLAT_NAME2		"bcm4329_wlan"
+#define WIFI_PLAT_EXT		"bcmdhd_wifi_platform"
+
+#ifdef CONFIG_DTS
+struct regulator *wifi_regulator = NULL;
+#endif /* CONFIG_DTS */
+
+bool cfg_multichip = FALSE;
+bcmdhd_wifi_platdata_t *dhd_wifi_platdata = NULL;
+static int wifi_plat_dev_probe_ret = 0;
+static bool is_power_on = FALSE;
+#if !defined(CONFIG_DTS)
+#if defined(DHD_OF_SUPPORT)
+static bool dts_enabled = TRUE;
+extern struct resource dhd_wlan_resources;
+extern struct wifi_platform_data dhd_wlan_control;
+#else
+static bool dts_enabled = FALSE;
+struct resource dhd_wlan_resources = {0};
+struct wifi_platform_data dhd_wlan_control = {0};
+#endif /* defined(DHD_OF_SUPPORT) */
+#endif /* !defined(CONFIG_DTS) */
+
+static int dhd_wifi_platform_load(void);
+
+extern void* wl_cfg80211_get_dhdp(void);
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24;	/* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
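+/*
+ * Look up the adapter that matches the given bus type/number/slot.  Adapter
+ * fields initialized to -1 act as wildcards (the single generic adapter
+ * created by wifi_ctrlfunc_register_drv() matches any bus), so the first
+ * compatible entry is returned.
+ */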
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, uint32 slot_num)
+{
+	int i;
+
+	if (dhd_wifi_platdata == NULL)
+		return NULL;
+
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i];
+		if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) &&
+			(adapter->bus_num == -1 || adapter->bus_num == bus_num) &&
+			(adapter->slot_num == -1 || adapter->slot_num == slot_num)) {
+			DHD_TRACE(("found adapter info '%s'\n", adapter->name));
+			return adapter;
+		}
+	}
+	return NULL;
+}
+
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size)
+{
+	void *alloc_ptr = NULL;
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+	if (plat_data->mem_prealloc) {
+		alloc_ptr = plat_data->mem_prealloc(section, size);
+		if (alloc_ptr) {
+			DHD_INFO(("success alloc section %d\n", section));
+			if (size != 0L)
+				bzero(alloc_ptr, size);
+			return alloc_ptr;
+		}
+	}
+
+	DHD_ERROR(("%s: failed to alloc static mem section %d\n", __FUNCTION__, section));
+	return NULL;
+}
+
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter)
+{
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+	return plat_data->mem_prealloc;
+}
+
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr)
+{
+	if (adapter == NULL)
+		return -1;
+	if (irq_flags_ptr)
+		*irq_flags_ptr = adapter->intr_flags;
+	return adapter->irq_num;
+}
+
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec)
+{
+	int err = 0;
+#ifdef CONFIG_DTS
+	if (on) {
+		err = regulator_enable(wifi_regulator);
+		is_power_on = TRUE;
+	}
+	else {
+		err = regulator_disable(wifi_regulator);
+		is_power_on = FALSE;
+	}
+	if (err < 0)
+		DHD_ERROR(("%s: regulator enable/disable failed", __FUNCTION__));
+#else
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return -EINVAL;
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+	if (plat_data->set_power) {
+#ifdef ENABLE_4335BT_WAR
+		if (on) {
+			printf("WiFi: trying to acquire BT lock\n");
+			if (bcm_bt_lock(lock_cookie_wifi) != 0)
+				printf("** WiFi: timeout in acquiring bt lock**\n");
+			printf("%s: btlock acquired\n", __FUNCTION__);
+		}
+		else {
+			/* For an exceptional case, release btlock */
+			bcm_bt_unlock(lock_cookie_wifi);
+		}
+#endif /* ENABLE_4335BT_WAR */
+
+		err = plat_data->set_power(on);
+	}
+
+	if (msec && !err)
+		OSL_SLEEP(msec);
+
+	if (on && !err)
+		is_power_on = TRUE;
+	else
+		is_power_on = FALSE;
+
+#endif /* CONFIG_DTS */
+
+	return err;
+}
+
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present)
+{
+	int err = 0;
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return -EINVAL;
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_ERROR(("%s device present %d\n", __FUNCTION__, device_present));
+	if (plat_data->set_carddetect) {
+		err = plat_data->set_carddetect(device_present);
+	}
+	return err;
+
+}
+
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf)
+{
+	struct wifi_platform_data *plat_data;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+	if (!buf || !adapter || !adapter->wifi_plat_data)
+		return -EINVAL;
+	plat_data = adapter->wifi_plat_data;
+	if (plat_data->get_mac_addr) {
+		return plat_data->get_mac_addr(buf);
+	}
+	return -EOPNOTSUPP;
+}
+
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode)
+{
+	/* get_country_code was added after 2.6.39 */
+#if	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+	struct wifi_platform_data *plat_data;
+
+	if (!ccode || !adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_TRACE(("%s\n", __FUNCTION__));
+	if (plat_data->get_country_code) {
+		return plat_data->get_country_code(ccode);
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+
+	return NULL;
+}
+
+static int wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+	struct resource *resource;
+	wifi_adapter_info_t *adapter;
+#ifdef CONFIG_DTS
+	int irq, gpio;
+#endif /* CONFIG_DTS */
+
+	/* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+	 * is kept for backward compatibility and supports only 1 adapter
+	 */
+	ASSERT(dhd_wifi_platdata != NULL);
+	ASSERT(dhd_wifi_platdata->num_adapters == 1);
+	adapter = &dhd_wifi_platdata->adapters[0];
+	adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data);
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq");
+	if (resource == NULL)
+		resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq");
+	if (resource) {
+		adapter->irq_num = resource->start;
+		adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+	}
+
+#ifdef CONFIG_DTS
+	wifi_regulator = regulator_get(&pdev->dev, "wlreg_on");
+	if (wifi_regulator == NULL) {
+		DHD_ERROR(("%s regulator is null\n", __FUNCTION__));
+		return -1;
+	}
+
+	/* This is to get the irq for the OOB */
+	gpio = of_get_gpio(pdev->dev.of_node, 0);
+
+	if (gpio < 0) {
+		DHD_ERROR(("%s gpio information is incorrect\n", __FUNCTION__));
+		return -1;
+	}
+	irq = gpio_to_irq(gpio);
+	if (irq < 0) {
+		DHD_ERROR(("%s irq information is incorrect\n", __FUNCTION__));
+		return -1;
+	}
+	adapter->irq_num = irq;
+
+	/* need to change the flags according to our requirement */
+	adapter->intr_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL |
+		IORESOURCE_IRQ_SHAREABLE;
+#endif /* CONFIG_DTS */
+
+	wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+	return wifi_plat_dev_probe_ret;
+}
+
+static int wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+	wifi_adapter_info_t *adapter;
+
+	/* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+	 * is kept for backward compatibility and supports only 1 adapter
+	 */
+	ASSERT(dhd_wifi_platdata != NULL);
+	ASSERT(dhd_wifi_platdata->num_adapters == 1);
+	adapter = &dhd_wifi_platdata->adapters[0];
+	if (is_power_on) {
+#ifdef BCMPCIE
+		wifi_platform_bus_enumerate(adapter, FALSE);
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+#else
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+#endif /* BCMPCIE */
+	}
+
+#ifdef CONFIG_DTS
+	regulator_put(wifi_regulator);
+#endif /* CONFIG_DTS */
+	return 0;
+}
+
+static int wifi_plat_dev_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \
+	defined(BCMSDIO)
+	bcmsdh_oob_intr_set(0);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+static int wifi_plat_dev_drv_resume(struct platform_device *pdev)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \
+	defined(BCMSDIO)
+	if (dhd_os_check_if_up(wl_cfg80211_get_dhdp()))
+		bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+#ifdef CONFIG_DTS
+static const struct of_device_id wifi_device_dt_match[] = {
+	{ .compatible = "android,bcmdhd_wlan", },
+	{},
+};
+#endif /* CONFIG_DTS */
+static struct platform_driver wifi_platform_dev_driver = {
+	.probe          = wifi_plat_dev_drv_probe,
+	.remove         = wifi_plat_dev_drv_remove,
+	.suspend        = wifi_plat_dev_drv_suspend,
+	.resume         = wifi_plat_dev_drv_resume,
+	.driver         = {
+	.name   = WIFI_PLAT_NAME,
+#ifdef CONFIG_DTS
+	.of_match_table = wifi_device_dt_match,
+#endif /* CONFIG_DTS */
+	}
+};
+
+static struct platform_driver wifi_platform_dev_driver_legacy = {
+	.probe          = wifi_plat_dev_drv_probe,
+	.remove         = wifi_plat_dev_drv_remove,
+	.suspend        = wifi_plat_dev_drv_suspend,
+	.resume         = wifi_plat_dev_drv_resume,
+	.driver         = {
+	.name	= WIFI_PLAT_NAME2,
+	}
+};
+
+static int wifi_platdev_match(struct device *dev, void *data)
+{
+	char *name = (char*)data;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (strcmp(pdev->name, name) == 0) {
+		DHD_ERROR(("found wifi platform device %s\n", name));
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
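+/*
+ * Register against the Android style wifi control platform device(s): one
+ * generic adapter record is allocated, then a platform driver is registered
+ * for whichever of the "bcmdhd_wlan"/"bcm4329_wlan" devices is present
+ * (its probe eventually calls dhd_wifi_platform_load()).  When no such device
+ * exists but dts_enabled is set, the statically defined dhd_wlan_control and
+ * dhd_wlan_resources are used instead.
+ */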
+static int wifi_ctrlfunc_register_drv(void)
+{
+	int err = 0;
+	struct device *dev1, *dev2;
+	wifi_adapter_info_t *adapter;
+
+	dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+	dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+
+#if !defined(CONFIG_DTS)
+	if (!dts_enabled) {
+		if (dev1 == NULL && dev2 == NULL) {
+			DHD_ERROR(("no wifi platform data, skip\n"));
+			return -ENXIO;
+		}
+	}
+#endif /* !defined(CONFIG_DTS) */
+
+	/* multi-chip support not enabled, build one adapter information for
+	 * DHD (either SDIO, USB or PCIe)
+	 */
+	adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL);
+	adapter->name = "DHD generic adapter";
+	adapter->bus_type = -1;
+	adapter->bus_num = -1;
+	adapter->slot_num = -1;
+	adapter->irq_num = -1;
+	is_power_on = FALSE;
+	wifi_plat_dev_probe_ret = 0;
+	dhd_wifi_platdata = kzalloc(sizeof(bcmdhd_wifi_platdata_t), GFP_KERNEL);
+	dhd_wifi_platdata->num_adapters = 1;
+	dhd_wifi_platdata->adapters = adapter;
+
+	if (dev1) {
+		err = platform_driver_register(&wifi_platform_dev_driver);
+		if (err) {
+			DHD_ERROR(("%s: failed to register wifi ctrl func driver\n",
+				__FUNCTION__));
+			return err;
+		}
+	}
+	if (dev2) {
+		err = platform_driver_register(&wifi_platform_dev_driver_legacy);
+		if (err) {
+			DHD_ERROR(("%s: failed to register wifi ctrl func legacy driver\n",
+				__FUNCTION__));
+			return err;
+		}
+	}
+
+#if !defined(CONFIG_DTS)
+	if (dts_enabled) {
+		struct resource *resource;
+		adapter->wifi_plat_data = (void *)&dhd_wlan_control;
+		resource = &dhd_wlan_resources;
+		adapter->irq_num = resource->start;
+		adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+		wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+	}
+#endif /* !defined(CONFIG_DTS) */
+
+
+#ifdef CONFIG_DTS
+	wifi_plat_dev_probe_ret = platform_driver_register(&wifi_platform_dev_driver);
+#endif /* CONFIG_DTS */
+
+	/* return probe function's return value if registration succeeded */
+	return wifi_plat_dev_probe_ret;
+}
+
+void wifi_ctrlfunc_unregister_drv(void)
+{
+	struct device *dev1, *dev2;
+
+#ifdef CONFIG_DTS
+	DHD_ERROR(("unregister wifi platform drivers\n"));
+	platform_driver_unregister(&wifi_platform_dev_driver);
+#else
+	dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+	dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+	if (!dts_enabled)
+		if (dev1 == NULL && dev2 == NULL)
+			return;
+
+	DHD_ERROR(("unregister wifi platform drivers\n"));
+	if (dev1)
+		platform_driver_unregister(&wifi_platform_dev_driver);
+	if (dev2)
+		platform_driver_unregister(&wifi_platform_dev_driver_legacy);
+	if (dts_enabled) {
+		wifi_adapter_info_t *adapter;
+		adapter = &dhd_wifi_platdata->adapters[0];
+		if (is_power_on) {
+			wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+			wifi_platform_bus_enumerate(adapter, FALSE);
+		}
+	}
+#endif /* !defined(CONFIG_DTS) */
+
+	kfree(dhd_wifi_platdata->adapters);
+	dhd_wifi_platdata->adapters = NULL;
+	dhd_wifi_platdata->num_adapters = 0;
+	kfree(dhd_wifi_platdata);
+	dhd_wifi_platdata = NULL;
+}
+
+static int bcmdhd_wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+	dhd_wifi_platdata = (bcmdhd_wifi_platdata_t *)(pdev->dev.platform_data);
+
+	return dhd_wifi_platform_load();
+}
+
+static int bcmdhd_wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+	int i;
+	wifi_adapter_info_t *adapter;
+	ASSERT(dhd_wifi_platdata != NULL);
+
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+	return 0;
+}
+
+static struct platform_driver dhd_wifi_platform_dev_driver = {
+	.probe          = bcmdhd_wifi_plat_dev_drv_probe,
+	.remove         = bcmdhd_wifi_plat_dev_drv_remove,
+	.driver         = {
+	.name   = WIFI_PLAT_EXT,
+	}
+};
+
+int dhd_wifi_platform_register_drv(void)
+{
+	int err = 0;
+	struct device *dev;
+
+	/* register Broadcom wifi platform data driver if multi-chip is enabled,
+	 * otherwise use Android style wifi platform data (aka wifi control function)
+	 * if it exists
+	 *
+	 * to support multi-chip DHD, Broadcom wifi platform data device must
+	 * be added in kernel early boot (e.g. board config file).
+	 */
+	if (cfg_multichip) {
+		dev = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_EXT, wifi_platdev_match);
+		if (dev == NULL) {
+			DHD_ERROR(("bcmdhd wifi platform data device not found!!\n"));
+			return -ENXIO;
+		}
+		err = platform_driver_register(&dhd_wifi_platform_dev_driver);
+	} else {
+		err = wifi_ctrlfunc_register_drv();
+
+		/* no wifi ctrl func either, load bus directly and ignore this error */
+		if (err) {
+			if (err == -ENXIO) {
+				/* wifi ctrl function does not exist */
+				err = dhd_wifi_platform_load();
+			} else {
+				/* unregister driver due to initialization failure */
+				wifi_ctrlfunc_unregister_drv();
+			}
+		}
+	}
+
+	return err;
+}
+
+#ifdef BCMPCIE
+static int dhd_wifi_platform_load_pcie(void)
+{
+	int err = 0;
+	int i;
+	wifi_adapter_info_t *adapter;
+
+	BCM_REFERENCE(i);
+	BCM_REFERENCE(adapter);
+
+	if (dhd_wifi_platdata == NULL) {
+		err = dhd_bus_register();
+	} else {
+		if (dhd_download_fw_on_driverload) {
+			/* power up all adapters */
+			for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+				int retry = POWERUP_MAX_RETRY;
+				adapter = &dhd_wifi_platdata->adapters[i];
+
+				DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+				DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+					adapter->irq_num, adapter->intr_flags, adapter->fw_path,
+					adapter->nv_path));
+				DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+					adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+				do {
+					err = wifi_platform_set_power(adapter,
+						TRUE, WIFI_TURNON_DELAY);
+					if (err) {
+						DHD_ERROR(("failed to power up %s,"
+							" %d retry left\n",
+							adapter->name, retry));
+						/* WL_REG_ON state unknown, power off forcibly */
+						wifi_platform_set_power(adapter,
+							FALSE, WIFI_TURNOFF_DELAY);
+						continue;
+					} else {
+						err = wifi_platform_bus_enumerate(adapter, TRUE);
+						if (err) {
+							DHD_ERROR(("failed to enumerate bus %s, "
+								"%d retry left\n",
+								adapter->name, retry));
+							wifi_platform_set_power(adapter, FALSE,
+								WIFI_TURNOFF_DELAY);
+						} else {
+							break;
+						}
+					}
+				} while (retry--);
+
+				if (!retry) {
+					DHD_ERROR(("failed to power up %s, max retry reached**\n",
+						adapter->name));
+					return -ENODEV;
+				}
+			}
+		}
+
+		err = dhd_bus_register();
+
+		if (err) {
+			DHD_ERROR(("%s: pcie_register_driver failed\n", __FUNCTION__));
+			if (dhd_download_fw_on_driverload) {
+				/* power down all adapters */
+				for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+					adapter = &dhd_wifi_platdata->adapters[i];
+					wifi_platform_bus_enumerate(adapter, FALSE);
+					wifi_platform_set_power(adapter,
+						FALSE, WIFI_TURNOFF_DELAY);
+				}
+			}
+		}
+	}
+
+	return err;
+}
+#else
+static int dhd_wifi_platform_load_pcie(void)
+{
+	return 0;
+}
+#endif /* BCMPCIE  */
+
+
+void dhd_wifi_platform_unregister_drv(void)
+{
+	if (cfg_multichip)
+		platform_driver_unregister(&dhd_wifi_platform_dev_driver);
+	else
+		wifi_ctrlfunc_unregister_drv();
+}
+
+extern int dhd_watchdog_prio;
+extern int dhd_dpc_prio;
+extern uint dhd_deferred_tx;
+#if defined(BCMLXSDMMC)
+extern struct semaphore dhd_registration_sem;
+#endif 
+
+#ifdef BCMSDIO
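+/*
+ * SDIO bring-up: for each adapter, power it on, trigger card detection and
+ * wait on dhd_chipup_sem for the SDIO probe notification registered via
+ * dhd_bus_reg_sdio_notify(), retrying up to POWERUP_MAX_RETRY times.  Once
+ * the chip is up, dhd_bus_register() is called and the function blocks on
+ * dhd_registration_sem until the MMC stack has completed the driver attach
+ * (or DHD_REGISTRATION_TIMEOUT expires).
+ */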
+static int dhd_wifi_platform_load_sdio(void)
+{
+	int i;
+	int err = 0;
+	wifi_adapter_info_t *adapter;
+
+	BCM_REFERENCE(i);
+	BCM_REFERENCE(adapter);
+	/* Sanity check on the module parameters
+	 * - Both watchdog and DPC as tasklets are ok
+	 * - If both watchdog and DPC are threads, TX must be deferred
+	 */
+	if (!(dhd_watchdog_prio < 0 && dhd_dpc_prio < 0) &&
+		!(dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx))
+		return -EINVAL;
+
+#if defined(BCMLXSDMMC)
+	if (dhd_wifi_platdata == NULL) {
+		DHD_ERROR(("DHD wifi platform data is required for Android build\n"));
+		return -EINVAL;
+	}
+
+	sema_init(&dhd_registration_sem, 0);
+	/* power up all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		bool chip_up = FALSE;
+		int retry = POWERUP_MAX_RETRY;
+		struct semaphore dhd_chipup_sem;
+
+		adapter = &dhd_wifi_platdata->adapters[i];
+
+		DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+		DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+			adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path));
+		DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+			adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+		do {
+			sema_init(&dhd_chipup_sem, 0);
+			err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
+			if (err) {
+				DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n",
+					__FUNCTION__, err));
+				return err;
+			}
+			err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
+			if (err) {
+				/* WL_REG_ON state unknown, power off forcibly */
+				wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+				continue;
+			} else {
+				wifi_platform_bus_enumerate(adapter, TRUE);
+				err = 0;
+			}
+
+			if (down_timeout(&dhd_chipup_sem, msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) {
+				dhd_bus_unreg_sdio_notify();
+				chip_up = TRUE;
+				break;
+			}
+
+			DHD_ERROR(("failed to power up %s, %d retry left\n", adapter->name, retry));
+			dhd_bus_unreg_sdio_notify();
+			wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+			wifi_platform_bus_enumerate(adapter, FALSE);
+		} while (retry--);
+
+		if (!chip_up) {
+			DHD_ERROR(("failed to power up %s, max retry reached**\n", adapter->name));
+			return -ENODEV;
+		}
+
+	}
+
+	err = dhd_bus_register();
+
+	if (err) {
+		DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+
+	/*
+	 * Wait until the MMC sdio_register_driver callback has run and the
+	 * driver attach has completed.  This keeps dhd insmod exit in sync
+	 * with the kernel MMC sdio device callback registration.
+	 */
+	err = down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT));
+	if (err) {
+		DHD_ERROR(("%s: sdio_register_driver timeout or error \n", __FUNCTION__));
+		dhd_bus_unregister();
+		goto fail;
+	}
+
+	return err;
+
+fail:
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+#else
+
+	/* x86 bring-up PC needs no power-up operations */
+	err = dhd_bus_register();
+
+#endif 
+
+	return err;
+}
+#else /* BCMSDIO */
+static int dhd_wifi_platform_load_sdio(void)
+{
+	return 0;
+}
+#endif /* BCMSDIO */
+
+static int dhd_wifi_platform_load_usb(void)
+{
+	return 0;
+}
+
+static int dhd_wifi_platform_load(void)
+{
+	int err = 0;
+
+	wl_android_init();
+
+	if ((err = dhd_wifi_platform_load_usb()))
+		goto end;
+	else if ((err = dhd_wifi_platform_load_sdio()))
+		goto end;
+	else
+		err = dhd_wifi_platform_load_pcie();
+
+end:
+	if (err)
+		wl_android_exit();
+	else
+		wl_android_post_init();
+
+	return err;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
new file mode 100644
index 0000000000000000000000000000000000000000..ed635b889586d466939f8cb3341a4648f8a851c3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
@@ -0,0 +1,30 @@
+/*
+ * Expose some of the kernel scheduler routines
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_linux_sched.c 457570 2014-02-23 13:54:46Z $
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <typedefs.h>
+#include <linuxver.h>
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
+{
+	int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	rc = sched_setscheduler(p, policy, param);
+#endif /* LinuxVer */
+	return rc;
+}
+
+int get_scheduler_policy(struct task_struct *p)
+{
+	int rc = SCHED_NORMAL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	rc = p->policy;
+#endif /* LinuxVer */
+	return rc;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.c b/drivers/net/wireless/bcmdhd/dhd_linux_wq.c
new file mode 100644
index 0000000000000000000000000000000000000000..1df1c92db1d97f35bf33f92d449b2a86446770dd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_wq.c
@@ -0,0 +1,299 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_linux_wq.c 449578 2014-01-17 13:53:20Z $
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/kfifo.h>
+
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_linux_wq.h>
+
+struct dhd_deferred_event_t {
+	u8	event; /* holds the event */
+	void	*event_data; /* Holds event specific data */
+	event_handler_t event_handler;
+};
+#define DEFRD_EVT_SIZE	sizeof(struct dhd_deferred_event_t)
+
+struct dhd_deferred_wq {
+	struct work_struct	deferred_work; /* should be the first member */
+
+	/*
+	 * work events may occur simultaneously.
+	 * Can hold up to 64 low priority events and 4 high priority events
+	 */
+#define DHD_PRIO_WORK_FIFO_SIZE	(4 * sizeof(struct dhd_deferred_event_t))
+#define DHD_WORK_FIFO_SIZE	(64 * sizeof(struct dhd_deferred_event_t))
+	struct kfifo			*prio_fifo;
+	struct kfifo			*work_fifo;
+	u8				*prio_fifo_buf;
+	u8				*work_fifo_buf;
+	spinlock_t			work_lock;
+	void				*dhd_info; /* opaque dhd_info handle passed back to event handlers */
+};
+
+static inline struct kfifo*
+dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
+{
+	struct kfifo *fifo;
+	gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+	fifo = kfifo_init(buf, size, flags, lock);
+#else
+	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
+	if (!fifo) {
+		return NULL;
+	}
+	kfifo_init(fifo, buf, size);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+	return fifo;
+}
+
+static inline void
+dhd_kfifo_free(struct kfifo *fifo)
+{
+	kfifo_free(fifo);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+	/* on newer kernels the struct kfifo was allocated in dhd_kfifo_init(), so
+	 * free it here; older (FC11) kernels release it inside kfifo_free()
+	 */
+	kfree(fifo);
+#endif
+}
+
+/* deferred work functions */
+static void dhd_deferred_work_handler(struct work_struct *data);
+
+void*
+dhd_deferred_work_init(void *dhd_info)
+{
+	struct dhd_deferred_wq	*work = NULL;
+	u8*	buf;
+	unsigned long	fifo_size = 0;
+	gfp_t	flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
+
+	if (!dhd_info) {
+		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
+		goto return_null;
+	}
+
+	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
+		flags);
+
+	if (!work) {
+		DHD_ERROR(("%s: work queue creation failed \n", __FUNCTION__));
+		goto return_null;
+	}
+
+	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);
+
+	/* initialize event fifo */
+	spin_lock_init(&work->work_lock);
+
+	/* allocate buffer to hold prio events */
+	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
+	fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size);
+	buf = (u8*)kzalloc(fifo_size, flags);
+	if (!buf) {
+		DHD_ERROR(("%s: prio work fifo allocation failed \n", __FUNCTION__));
+		goto return_null;
+	}
+
+	/* Initialize prio event fifo */
+	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+	if (!work->prio_fifo) {
+		kfree(buf);
+		goto return_null;
+	}
+
+	/* allocate buffer to hold work events */
+	fifo_size = DHD_WORK_FIFO_SIZE;
+	fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size);
+	buf = (u8*)kzalloc(fifo_size, flags);
+	if (!buf) {
+		DHD_ERROR(("%s: work fifo allocation failed \n", __FUNCTION__));
+		goto return_null;
+	}
+
+	/* Initialize event fifo */
+	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+	if (!work->work_fifo) {
+		kfree(buf);
+		goto return_null;
+	}
+
+	work->dhd_info = dhd_info;
+	DHD_ERROR(("%s: work queue initialized \n", __FUNCTION__));
+	return work;
+
+return_null:
+
+	if (work)
+		dhd_deferred_work_deinit(work);
+
+	return NULL;
+}
+
+void
+dhd_deferred_work_deinit(void *work)
+{
+	struct dhd_deferred_wq *deferred_work = work;
+
+
+	if (!deferred_work) {
+		DHD_ERROR(("%s: deferred work has been freed alread \n", __FUNCTION__));
+		return;
+	}
+
+	/* cancel the deferred work handling */
+	cancel_work_sync((struct work_struct *)deferred_work);
+
+	/*
+	 * free work event fifo.
+	 * kfifo_free frees locally allocated fifo buffer
+	 */
+	if (deferred_work->prio_fifo)
+		dhd_kfifo_free(deferred_work->prio_fifo);
+
+	if (deferred_work->work_fifo)
+		dhd_kfifo_free(deferred_work->work_fifo);
+
+	kfree(deferred_work);
+}
+
+/*
+ *	Prepares event to be queued
+ *	Schedules the event
+ */
+int
+dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+	event_handler_t event_handler, u8 priority)
+{
+	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *) workq;
+	struct	dhd_deferred_event_t	deferred_event;
+	int	status;
+
+	if (!deferred_wq) {
+		DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
+		ASSERT(0);
+		return DHD_WQ_STS_UNINITIALIZED;
+	}
+
+	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
+		DHD_ERROR(("%s: Unknown event \n", __FUNCTION__));
+		return DHD_WQ_STS_UNKNOWN_EVENT;
+	}
+
+	/*
+	 * default element size is 1 byte, which can be changed
+	 * using kfifo_esize(). Older kernels (FC11) don't support
+	 * changing the element size, so for compatibility it is
+	 * left at the default
+	 */
+	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	deferred_event.event = event;
+	deferred_event.event_data = event_data;
+	deferred_event.event_handler = event_handler;
+
+	if (priority == DHD_WORK_PRIORITY_HIGH) {
+		status = kfifo_in_spinlocked(deferred_wq->prio_fifo, &deferred_event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	} else {
+		status = kfifo_in_spinlocked(deferred_wq->work_fifo, &deferred_event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	if (!status) {
+		return DHD_WQ_STS_SCHED_FAILED;
+	}
+	schedule_work((struct work_struct *)deferred_wq);
+	return DHD_WQ_STS_OK;
+}
+
+static int
+dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, struct dhd_deferred_event_t *event)
+{
+	int	status = 0;
+
+	if (!deferred_wq) {
+		DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
+		return DHD_WQ_STS_UNINITIALIZED;
+	}
+
+	/*
+	 * default element size is 1 byte, which can be changed
+	 * using kfifo_esize(). Older kernels (FC11) don't support
+	 * changing the element size, so for compatibility it is
+	 * left at the default
+	 */
+	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	/* first read the priority event fifo */
+	status = kfifo_out_spinlocked(deferred_wq->prio_fifo, event,
+		DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+
+	if (!status) {
+		/* priority fifo is empty. Now read low prio work fifo */
+		status = kfifo_out_spinlocked(deferred_wq->work_fifo, event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	return status;
+}
+
+/*
+ *	Called when work is scheduled
+ */
+static void
+dhd_deferred_work_handler(struct work_struct *work)
+{
+	struct dhd_deferred_wq		*deferred_work = (struct dhd_deferred_wq *)work;
+	struct dhd_deferred_event_t	work_event;
+	int				status;
+
+	if (!deferred_work) {
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+		return;
+	}
+
+	do {
+		status = dhd_get_scheduled_work(deferred_work, &work_event);
+		DHD_TRACE(("%s: event to handle %d \n", __FUNCTION__, status));
+		if (!status) {
+			DHD_TRACE(("%s: No event to handle %d \n", __FUNCTION__, status));
+			break;
+		}
+
+		if (work_event.event > DHD_MAX_WQ_EVENTS) {
+			DHD_TRACE(("%s: Unknown event %d \n", __FUNCTION__, work_event.event));
+			break;
+		}
+
+		if (work_event.event_handler) {
+			work_event.event_handler(deferred_work->dhd_info,
+				work_event.event_data, work_event.event);
+		} else {
+			DHD_ERROR(("%s: event not defined %d\n", __FUNCTION__, work_event.event));
+		}
+	} while (1);
+	return;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.h b/drivers/net/wireless/bcmdhd/dhd_linux_wq.h
new file mode 100644
index 0000000000000000000000000000000000000000..35982ef5ca8882eab711c6a7ee2e408c7632619b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_wq.h
@@ -0,0 +1,46 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_linux_wq.h 449578 2014-01-17 13:53:20Z $
+ */
+#ifndef _dhd_linux_wq_h_
+#define _dhd_linux_wq_h_
+/*
+ *	Work event definitions
+ */
+enum _wq_event {
+	DHD_WQ_WORK_IF_ADD = 1,
+	DHD_WQ_WORK_IF_DEL,
+	DHD_WQ_WORK_SET_MAC,
+	DHD_WQ_WORK_SET_MCAST_LIST,
+	DHD_WQ_WORK_IPV6_NDO,
+	DHD_WQ_WORK_HANG_MSG,
+
+	DHD_MAX_WQ_EVENTS
+};
+
+/*
+ *	Work event priority
+ */
+#define DHD_WORK_PRIORITY_LOW	0
+#define DHD_WORK_PRIORITY_HIGH	1
+
+/*
+ *	Error definitions
+ */
+#define DHD_WQ_STS_OK			 0
+#define DHD_WQ_STS_FAILED		-1	/* General failure */
+#define DHD_WQ_STS_UNINITIALIZED	-2
+#define DHD_WQ_STS_SCHED_FAILED		-3
+#define DHD_WQ_STS_UNKNOWN_EVENT	-4
+
+typedef void (*event_handler_t)(void *handle, void *event_data, u8 event);
+
+void *dhd_deferred_work_init(void *dhd);
+void dhd_deferred_work_deinit(void *workq);
+int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+	event_handler_t evt_handler, u8 priority);
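+
+/*
+ * Illustrative usage only (the handler name and the work queue member used
+ * here are hypothetical, not part of this header):
+ *
+ *	static void my_if_add_handler(void *handle, void *event_data, u8 event);
+ *	...
+ *	dhd_deferred_schedule_work(dhd->deferred_wq, ifp, DHD_WQ_WORK_IF_ADD,
+ *		my_if_add_handler, DHD_WORK_PRIORITY_LOW);
+ */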
+#endif /* _dhd_linux_wq_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_log.c b/drivers/net/wireless/bcmdhd/dhd_log.c
new file mode 100644
index 0000000000000000000000000000000000000000..a498197d65aae09d5967e7cbf3ca832e84f65de8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_log.c
@@ -0,0 +1,58 @@
+/*
+ * DHD logging module for internal debug
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_sdio.c 281456 2011-09-02 01:49:45Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#include <linux/inet.h>
+
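+/*
+ * Internal debug helper: ships the given buffer as a single UDP datagram to a
+ * hard-coded lab host (10.19.74.43:7651).  A datagram socket is created and
+ * released on every call, so this is strictly a bring-up aid, not a general
+ * logging path.
+ */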
+void dhd_blog(char *cp, int size)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
+	static struct socket * _udpSocket = NULL;
+	struct sockaddr_in _saAddr;
+	struct iovec iov;
+	struct msghdr msg;
+	if (sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &_udpSocket) >= 0)
+	{
+
+		{
+			memset(&_saAddr, 0, sizeof(_saAddr));
+			_saAddr.sin_family      = AF_INET;
+			_saAddr.sin_port        = htons(7651);
+			_saAddr.sin_addr.s_addr = in_aton("10.19.74.43");
+
+			iov.iov_base = cp;
+			iov.iov_len = size;
+
+			msg.msg_name = &_saAddr;
+			msg.msg_namelen = sizeof(struct sockaddr_in);
+			msg.msg_iov = &iov;
+			msg.msg_iovlen = 1;
+			msg.msg_control = NULL;
+			msg.msg_controllen = 0;
+			msg.msg_flags = 0;
+
+			{
+				mm_segment_t fs = get_fs();
+				set_fs(get_ds());
+
+				sock_sendmsg(_udpSocket, &msg, size);
+
+				set_fs(fs);
+			}
+		}
+
+		sock_release(_udpSocket);
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)) */
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_msgbuf.c b/drivers/net/wireless/bcmdhd/dhd_msgbuf.c
new file mode 100644
index 0000000000000000000000000000000000000000..66f6c51280860c29e2e056329988a319cc03d6ab
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_msgbuf.c
@@ -0,0 +1,3752 @@
+/*
+ * DHD MSGBUF protocol layer: message-buffer based host/dongle interface
+ * over PCIe, implemented on shared submission and completion rings.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_msgbuf.c 490973 2014-07-14 12:32:56Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmmsgbuf.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+#include <siutils.h>
+
+
+#include <dhd_flowring.h>
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#include <pcie_core.h>
+#include <bcmpcie.h>
+
+#define RETRIES 2		/* # of retries to retrieve matching ioctl response */
+#define IOCTL_HDR_LEN	12
+
+#define DEFAULT_RX_BUFFERS_TO_POST	256
+#define RXBUFPOST_THRESHOLD			32
+#define RX_BUF_BURST				16
+
+#define DHD_STOP_QUEUE_THRESHOLD	200
+#define DHD_START_QUEUE_THRESHOLD	100
+
+#define MODX(x, n)	((x) & ((n) -1))
+#define align(x, n)	(MODX(x, n) ? ((x) - MODX(x, n) + (n)) : ((x) - MODX(x, n)))
+#define RX_DMA_OFFSET		8
+#define IOCT_RETBUF_SIZE	(RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
+
+#define DMA_D2H_SCRATCH_BUF_LEN	8
+#define DMA_ALIGN_LEN		4
+#define DMA_XFER_LEN_LIMIT	0x400000
+
+#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ		8192
+
+#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D		1
+#define DHD_FLOWRING_MAX_EVENTBUF_POST			8
+#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST		8
+
+#define DHD_PROT_FUNCS	22
+
+typedef struct dhd_mem_map {
+	void *va;
+	dmaaddr_t pa;
+	void *dmah;
+} dhd_mem_map_t;
+
+typedef struct dhd_dmaxfer {
+	dhd_mem_map_t	srcmem;
+	dhd_mem_map_t	destmem;
+	uint32		len;
+	uint32		srcdelay;
+	uint32		destdelay;
+} dhd_dmaxfer_t;
+
+#define TXP_FLUSH_NITEMS
+#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT	48
+
+typedef struct msgbuf_ring {
+	bool		inited;
+	uint16		idx;
+	uchar		name[24];
+	dhd_mem_map_t	ring_base;
+#ifdef TXP_FLUSH_NITEMS
+	void*		start_addr;
+	uint16		pend_items_count;
+#endif /* TXP_FLUSH_NITEMS */
+	ring_mem_t	*ringmem;
+	ring_state_t	*ringstate;
+} msgbuf_ring_t;
+
+
+typedef struct dhd_prot {
+	osl_t *osh;		/* OSL handle */
+	uint32 reqid;
+	uint32 lastcmd;
+	uint32 pending;
+	uint16 rxbufpost;
+	uint16 max_rxbufpost;
+	uint16 max_eventbufpost;
+	uint16 max_ioctlrespbufpost;
+	uint16 cur_event_bufs_posted;
+	uint16 cur_ioctlresp_bufs_posted;
+	uint16 active_tx_count;
+	uint16 max_tx_count;
+	uint16 txp_threshold;
+	/* Ring info */
+	msgbuf_ring_t	*h2dring_txp_subn;
+	msgbuf_ring_t	*h2dring_rxp_subn;
+	msgbuf_ring_t	*h2dring_ctrl_subn;	/* Cbuf handle for H2D ctrl ring */
+	msgbuf_ring_t	*d2hring_tx_cpln;
+	msgbuf_ring_t	*d2hring_rx_cpln;
+	msgbuf_ring_t	*d2hring_ctrl_cpln;	/* Cbuf handle for D2H ctrl ring */
+	uint32		rx_dataoffset;
+	dhd_mem_map_t	retbuf;
+	dhd_mem_map_t	ioctbuf;	/* For holding ioct request buf */
+	dhd_mb_ring_t	mb_ring_fn;
+
+	uint32		d2h_dma_scratch_buf_len; /* Length of the D2H DMA scratch buffer */
+	dhd_mem_map_t	d2h_dma_scratch_buf;	/* Scratch buffer for the D2H dma rx offset */
+
+	uint32	h2d_dma_writeindx_buf_len; /* For holding dma ringupd buf - submission write */
+	dhd_mem_map_t 	h2d_dma_writeindx_buf;	/* For holding dma ringupd buf - submission write */
+
+	uint32	h2d_dma_readindx_buf_len; /* For holding dma ringupd buf - submission read */
+	dhd_mem_map_t	h2d_dma_readindx_buf;	/* For holding dma ringupd buf - submission read */
+
+	uint32	d2h_dma_writeindx_buf_len; /* For holding dma ringupd buf - completion write */
+	dhd_mem_map_t	d2h_dma_writeindx_buf;	/* For holding dma ringupd buf - completion write */
+
+	uint32	d2h_dma_readindx_buf_len; /* For holding dma ringupd buf - completion read */
+	dhd_mem_map_t	d2h_dma_readindx_buf;	/* For holding dma ringupd buf - completion read */
+
+	dhd_dmaxfer_t	dmaxfer;
+	bool		dmaxfer_in_progress;
+
+	uint16		ioctl_seq_no;
+	uint16		data_seq_no;
+	uint16		ioctl_trans_id;
+	void		*pktid_map_handle;
+	uint16		rx_metadata_offset;
+	uint16		tx_metadata_offset;
+	uint16          rx_cpln_early_upd_idx;
+} dhd_prot_t;
+
+static int dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
+	void *buf, uint len, uint8 action);
+static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
+	void *buf, uint len, uint8 action);
+static int dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf);
+
+static int dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd);
+static int dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count);
+static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt);
+static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen);
+static void dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len);
+static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len);
+static int dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len);
+
+static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void* dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+	uint16 msglen, uint16 *alloced);
+static int dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf,
+	int ifidx);
+static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid);
+static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid);
+static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
+static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
+	uint destdelay, dhd_dmaxfer_t *dma);
+static void dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void *buf, uint16 msglen);
+static void dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
+static void dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
+static void dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
+
+
+
+
+#ifdef DHD_RX_CHAINING
+#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
+	(!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
+	 !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
+	 !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
+	 !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
+	 ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
+	 ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
+	 (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
+
+static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
+static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
+static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
+
+#define DHD_PKT_CTF_MAX_CHAIN_LEN	64
+#endif /* DHD_RX_CHAINING */
+
+static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post);
+static int dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
+static int dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
+
+static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring);
+static void dhd_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static msgbuf_ring_t* prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item,
+	uint16 len_item, uint16 ringid);
+static void* prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced);
+static void dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index);
+static uint16 dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid);
+static void prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 len);
+static void prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring);
+static uint8* prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 *available_len);
+static void prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring);
+
+typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void * buf, uint16 msglen);
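+/* D2H message dispatch table, indexed by the msg_type field of cmn_msg_hdr_t;
+ * NULL slots are message types with no dedicated handler here. The table
+ * holds exactly DHD_PROT_FUNCS entries.
+ */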
+static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
+	NULL,
+	dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
+	dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
+	NULL,
+	dhd_prot_process_flow_ring_create_response, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
+	NULL,
+	dhd_prot_process_flow_ring_delete_response, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
+	NULL,
+	dhd_prot_process_flow_ring_flush_response, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
+	NULL,
+	dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
+	NULL,
+	dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
+	NULL,
+	dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
+	NULL,
+	dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
+	NULL,
+	dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */
+	NULL,
+	dhdmsgbuf_dmaxfer_compare, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
+	NULL,
+};
+
+/*
+ * +---------------------------------------------------------------------------+
+ * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
+ * The packet id map, also includes storage for some packet parameters that
+ * may be saved. A native packet pointer along with the parameters may be saved
+ * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
+ * and the metadata may be retrieved using the previously allocated packet id.
+ * +---------------------------------------------------------------------------+
+ */
+#define MAX_PKTID_ITEMS     (3072) /* Maximum number of pktids supported */
+
+typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
+
+/* Construct a packet id mapping table, returning an opaque map handle */
+static dhd_pktid_map_handle_t *dhd_pktid_map_init(void *osh, uint32 num_items);
+
+/* Destroy a packet id mapping table, freeing all packets active in the table */
+static void dhd_pktid_map_fini(dhd_pktid_map_handle_t *map);
+
+/* Determine number of pktids that are available */
+static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *map);
+
+/* Allocate a unique pktid against which a pkt and some metadata is saved */
+static INLINE uint32 dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle,
+                                           void *pkt);
+static INLINE void dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt,
+                       uint32 nkey, dmaaddr_t physaddr, uint32 len, uint8 dma);
+static uint32 dhd_pktid_map_alloc(dhd_pktid_map_handle_t *map, void *pkt,
+                                  dmaaddr_t physaddr, uint32 len, uint8 dma);
+
+/* Return an allocated pktid, retrieving previously saved pkt and metadata */
+static void *dhd_pktid_map_free(dhd_pktid_map_handle_t *map, uint32 id,
+                                dmaaddr_t *physaddr, uint32 *len);
+
+/* Packet metadata saved in packet id mapper */
+typedef struct dhd_pktid_item {
+	bool        inuse;    /* tag an item to be in use */
+	uint8       dma;      /* map direction: flush or invalidate */
+	uint16      len;      /* length of mapped packet's buffer */
+	void        *pkt;     /* opaque native pointer to a packet */
+	dmaaddr_t   physaddr; /* physical address of mapped packet's buffer */
+} dhd_pktid_item_t;
+
+typedef struct dhd_pktid_map {
+    void        *osh;
+    int         items;    /* total items in map */
+    int         avail;    /* total available items */
+    int         failures; /* lockers unavailable count */
+    uint32      keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */
+    dhd_pktid_item_t lockers[0];           /* metadata storage */
+} dhd_pktid_map_t;
+
+/*
+ * PktId (Locker) #0 is never allocated and is considered invalid.
+ *
+ * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
+ * depleted pktid pool and must not be used by the caller.
+ *
+ * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
+ */
+#define DHD_PKTID_INVALID               (0U)
+
+#define DHD_PKTID_ITEM_SZ               (sizeof(dhd_pktid_item_t))
+#define DHD_PKTID_MAP_SZ(items)         (sizeof(dhd_pktid_map_t) + \
+	                                     (DHD_PKTID_ITEM_SZ * ((items) + 1)))
+
+#define NATIVE_TO_PKTID_INIT(osh, items) dhd_pktid_map_init((osh), (items))
+#define NATIVE_TO_PKTID_FINI(map)        dhd_pktid_map_fini(map)
+#define NATIVE_TO_PKTID_CLEAR(map)       dhd_pktid_map_clear(map)
+
+#define NATIVE_TO_PKTID_RSV(map, pkt)    dhd_pktid_map_reserve((map), (pkt))
+#define NATIVE_TO_PKTID_SAVE(map, pkt, nkey, pa, len, dma) \
+	dhd_pktid_map_save((map), (void *)(pkt), (nkey), (pa), (uint32)(len), (uint8)dma)
+#define NATIVE_TO_PKTID(map, pkt, pa, len, dma) \
+	dhd_pktid_map_alloc((map), (void *)(pkt), (pa), (uint32)(len), (uint8)dma)
+
+#define PKTID_TO_NATIVE(map, pktid, pa, len) \
+	dhd_pktid_map_free((map), (uint32)(pktid), \
+	                   (dmaaddr_t *)&(pa), (uint32 *)&(len))
+
+#define PKTID_AVAIL(map)                 dhd_pktid_map_avail_cnt(map)
+
+/*
+ * +---------------------------------------------------------------------------+
+ * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
+ *
+ * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS].
+ *
+ * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
+ * packet id is returned. This unique packet id may be used to retrieve the
+ * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
+ * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
+ * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
+ *
+ * Implementation Note:
+ * Convert this into a <key,locker> abstraction and place into bcmutils !
+ * Locker abstraction should treat contents as opaque storage, and a
+ * callback should be registered to handle inuse lockers on destructor.
+ *
+ * +---------------------------------------------------------------------------+
+ */
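+
+/*
+ * Minimal usage sketch (illustrative only; the real call sites are in the tx
+ * post and completion paths below). Here 'map', 'pkt', 'pa' and 'len' stand
+ * for a pktid map handle, a native packet, its DMA address and mapped length:
+ *
+ *	uint32 pktid = NATIVE_TO_PKTID(map, pkt, pa, len, DMA_TX);
+ *	if (pktid == DHD_PKTID_INVALID)
+ *		the pool is depleted: do not post, the caller keeps the packet;
+ *	...
+ *	pkt = PKTID_TO_NATIVE(map, pktid, pa, len);	releases pktid for reuse
+ */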
+
+/* Allocate and initialize a mapper of num_items <numbered_key, locker> */
+static dhd_pktid_map_handle_t *
+dhd_pktid_map_init(void *osh, uint32 num_items)
+{
+	uint32 nkey;
+	dhd_pktid_map_t *map;
+	uint32 dhd_pktid_map_sz;
+
+	ASSERT((num_items >= 1) && num_items <= MAX_PKTID_ITEMS);
+	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
+
+	if ((map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz)) == NULL) {
+		DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
+		           __FUNCTION__, __LINE__, dhd_pktid_map_sz));
+		return NULL;
+	}
+	bzero(map, dhd_pktid_map_sz);
+
+	map->osh = osh;
+	map->items = num_items;
+	map->avail = num_items;
+
+	map->lockers[DHD_PKTID_INVALID].inuse = TRUE; /* tag locker #0 as inuse */
+
+	for (nkey = 1; nkey <= num_items; nkey++) { /* locker #0 is reserved */
+		map->keys[nkey] = nkey; /* populate with unique keys */
+		map->lockers[nkey].inuse = FALSE;
+	}
+
+	return (dhd_pktid_map_handle_t *)map; /* opaque handle */
+}
+
+/*
+ * Retrieve all allocated keys and free all <numbered_key, locker>.
+ * Freeing implies: unmapping the buffers and freeing the native packet
+ * This could have been a callback registered with the pktid mapper.
+ */
+static void
+dhd_pktid_map_fini(dhd_pktid_map_handle_t *handle)
+{
+	void *osh;
+	int nkey;
+	dhd_pktid_map_t *map;
+	uint32 dhd_pktid_map_sz;
+	dhd_pktid_item_t *locker;
+
+	if (handle == NULL)
+		return;
+
+	map = (dhd_pktid_map_t *)handle;
+	osh = map->osh;
+	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
+
+	nkey = 1; /* skip reserved KEY #0, and start from 1 */
+	locker = &map->lockers[nkey];
+
+	for (; nkey <= map->items; nkey++, locker++) {
+		if (locker->inuse == TRUE) { /* numbered key still in use */
+			locker->inuse = FALSE; /* force open the locker */
+
+			{   /* This could be a callback registered with dhd_pktid_map */
+				DMA_UNMAP(osh, locker->physaddr, locker->len,
+				          locker->dma, 0, 0);
+				PKTFREE(osh, (ulong*)locker->pkt, FALSE);
+			}
+		}
+	}
+
+	MFREE(osh, handle, dhd_pktid_map_sz);
+}
+
+static void
+dhd_pktid_map_clear(dhd_pktid_map_handle_t *handle)
+{
+	void *osh;
+	int nkey;
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+
+	DHD_TRACE(("%s\n", __FUNCTION__));
+
+	if (handle == NULL)
+		return;
+
+	map = (dhd_pktid_map_t *)handle;
+	osh = map->osh;
+	map->failures = 0;
+
+	nkey = 1; /* skip reserved KEY #0, and start from 1 */
+	locker = &map->lockers[nkey];
+
+	for (; nkey <= map->items; nkey++, locker++) {
+		map->keys[nkey] = nkey; /* populate with unique keys */
+		if (locker->inuse == TRUE) { /* numbered key still in use */
+			locker->inuse = FALSE; /* force open the locker */
+			DHD_TRACE(("%s free id%d\n", __FUNCTION__, nkey));
+			DMA_UNMAP(osh, (uint32)locker->physaddr, locker->len,
+				locker->dma, 0, 0);
+			PKTFREE(osh, (ulong*)locker->pkt, FALSE);
+		}
+	}
+	map->avail = map->items;
+}
+
+/* Get the pktid free count */
+static INLINE uint32 BCMFASTPATH
+dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
+{
+	dhd_pktid_map_t *map;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	return map->avail;
+}
+
+/*
+ * Allocate locker, save pkt contents, and return the locker's numbered key.
+ * dhd_pktid_map_alloc() is not reentrant; serialization is the caller's responsibility.
+ * Caller must treat a returned value DHD_PKTID_INVALID as a failure case,
+ * implying a depleted pool of pktids.
+ */
+static INLINE uint32
+dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle, void *pkt)
+{
+	uint32 nkey;
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	if (map->avail <= 0) { /* no more pktids to allocate */
+		map->failures++;
+		DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
+		return DHD_PKTID_INVALID; /* failed alloc request */
+	}
+	ASSERT(map->avail <= map->items);
+
+	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
+	map->avail--;
+
+	locker = &map->lockers[nkey]; /* save packet metadata in locker */
+	locker->inuse = TRUE; /* reserve this locker */
+	locker->pkt = pkt;
+
+	ASSERT(nkey != DHD_PKTID_INVALID);
+	return nkey; /* return locker's numbered key */
+}
+
+static INLINE void
+dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt, uint32 nkey,
+                   dmaaddr_t physaddr, uint32 len, uint8 dma)
+{
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= (uint32)map->items));
+
+	locker = &map->lockers[nkey];
+	ASSERT(locker->pkt == pkt);
+
+	locker->dma = dma; /* store contents in locker */
+	locker->physaddr = physaddr;
+	locker->len = (uint16)len; /* 16bit len */
+}
+
+static uint32 BCMFASTPATH
+dhd_pktid_map_alloc(dhd_pktid_map_handle_t *handle, void *pkt,
+                    dmaaddr_t physaddr, uint32 len, uint8 dma)
+{
+	uint32 nkey = dhd_pktid_map_reserve(handle, pkt);
+	if (nkey != DHD_PKTID_INVALID) {
+		dhd_pktid_map_save(handle, pkt, nkey, physaddr, len, dma);
+	}
+	return nkey;
+}
+
+/*
+ * Given a numbered key, return the locker contents.
+ * dhd_pktid_map_free() is not reentrant; serialization is the caller's responsibility.
+ * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
+ * value. Only a previously allocated pktid may be freed.
+ */
+static void * BCMFASTPATH
+dhd_pktid_map_free(dhd_pktid_map_handle_t *handle, uint32 nkey,
+                   dmaaddr_t *physaddr, uint32 *len)
+{
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+
+	ASSERT(handle != NULL);
+
+	map = (dhd_pktid_map_t *)handle;
+	ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= (uint32)map->items));
+
+	locker = &map->lockers[nkey];
+
+	if (locker->inuse == FALSE) { /* Debug check for cloned numbered key */
+		DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
+		           __FUNCTION__, __LINE__, nkey));
+		ASSERT(locker->inuse != FALSE);
+		return NULL;
+	}
+
+	map->avail++;
+	map->keys[map->avail] = nkey; /* make this numbered key available */
+	locker->inuse = FALSE; /* open and free Locker */
+
+	*physaddr = locker->physaddr; /* return contents of locker */
+	*len = (uint32)locker->len;
+
+	return locker->pkt;
+}
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+int dhd_prot_attach(dhd_pub_t *dhd)
+{
+	uint alloced = 0;
+
+	dhd_prot_t *prot;
+
+	/* Allocate prot structure */
+	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
+		sizeof(dhd_prot_t)))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(prot, 0, sizeof(*prot));
+
+	prot->osh = dhd->osh;
+	dhd->prot = prot;
+
+	/* DMA'ing of ring update indices supported? FALSE by default */
+	dhd->dma_d2h_ring_upd_support = FALSE;
+	dhd->dma_h2d_ring_upd_support = FALSE;
+
+	/* Ring Allocations */
+	/* 1.0	 H2D	TXPOST ring */
+	if (!(prot->h2dring_txp_subn = prot_ring_attach(prot, "h2dtxp",
+		H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
+		BCMPCIE_H2D_TXFLOWRINGID))) {
+		DHD_ERROR(("%s: kmalloc for H2D    TXPOST ring  failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* 2.0	 H2D	RXPOST ring */
+	if (!(prot->h2dring_rxp_subn = prot_ring_attach(prot, "h2drxp",
+		H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
+		BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT))) {
+		DHD_ERROR(("%s: kmalloc for H2D    RXPOST ring  failed\n", __FUNCTION__));
+		goto fail;
+
+	}
+
+	/* 3.0	 H2D	CTRL_SUBMISSION ring */
+	if (!(prot->h2dring_ctrl_subn = prot_ring_attach(prot, "h2dctrl",
+		H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
+		BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT))) {
+		DHD_ERROR(("%s: kmalloc for H2D    CTRL_SUBMISSION ring failed\n",
+			__FUNCTION__));
+		goto fail;
+
+	}
+
+	/* 4.0	 D2H	TX_COMPLETION ring */
+	if (!(prot->d2hring_tx_cpln = prot_ring_attach(prot, "d2htxcpl",
+		D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
+		BCMPCIE_D2H_MSGRING_TX_COMPLETE))) {
+		DHD_ERROR(("%s: kmalloc for D2H    TX_COMPLETION ring failed\n",
+			__FUNCTION__));
+		goto fail;
+
+	}
+
+	/* 5.0	 D2H	RX_COMPLETION ring */
+	if (!(prot->d2hring_rx_cpln = prot_ring_attach(prot, "d2hrxcpl",
+		D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
+		BCMPCIE_D2H_MSGRING_RX_COMPLETE))) {
+		DHD_ERROR(("%s: kmalloc for D2H    RX_COMPLETION ring failed\n",
+			__FUNCTION__));
+		goto fail;
+
+	}
+
+	/* 6.0	 D2H	CTRL_COMPLETION ring */
+	if (!(prot->d2hring_ctrl_cpln = prot_ring_attach(prot, "d2hctrl",
+		D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
+		BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE))) {
+		DHD_ERROR(("%s: kmalloc for D2H    CTRL_COMPLETION ring failed\n",
+			__FUNCTION__));
+		goto fail;
+	}
+
+	/* Return buffer for ioctl */
+	prot->retbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, DMA_ALIGN_LEN,
+		&alloced, &prot->retbuf.pa, &prot->retbuf.dmah);
+	if (prot->retbuf.va ==  NULL) {
+		ASSERT(0);
+		return BCME_NOMEM;
+	}
+
+	ASSERT(MODX((unsigned long)prot->retbuf.va, DMA_ALIGN_LEN) == 0);
+	bzero(prot->retbuf.va, IOCT_RETBUF_SIZE);
+	OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
+
+	/* IOCTL request buffer */
+	prot->ioctbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, DMA_ALIGN_LEN,
+		&alloced, &prot->ioctbuf.pa, &prot->ioctbuf.dmah);
+
+	if (prot->ioctbuf.va ==  NULL) {
+		ASSERT(0);
+		return BCME_NOMEM;
+	}
+
+	ASSERT(MODX((unsigned long)prot->ioctbuf.va, DMA_ALIGN_LEN) == 0);
+	bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+
+	/* Scratch buffer for dma rx offset */
+	prot->d2h_dma_scratch_buf_len = DMA_D2H_SCRATCH_BUF_LEN;
+	prot->d2h_dma_scratch_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, DMA_D2H_SCRATCH_BUF_LEN,
+		DMA_ALIGN_LEN, &alloced, &prot->d2h_dma_scratch_buf.pa,
+		&prot->d2h_dma_scratch_buf.dmah);
+
+	if (prot->d2h_dma_scratch_buf.va == NULL) {
+		ASSERT(0);
+		return BCME_NOMEM;
+	}
+	ASSERT(MODX((unsigned long)prot->d2h_dma_scratch_buf.va, DMA_ALIGN_LEN) == 0);
+	bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+	OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+
+
+	/* PKTID handle INIT */
+	prot->pktid_map_handle = NATIVE_TO_PKTID_INIT(dhd->osh, MAX_PKTID_ITEMS);
+	if (prot->pktid_map_handle == NULL) {
+		ASSERT(0);
+		return BCME_NOMEM;
+	}
+
+	prot->dmaxfer.srcmem.va = NULL;
+	prot->dmaxfer.destmem.va = NULL;
+	prot->dmaxfer_in_progress = FALSE;
+
+	prot->rx_metadata_offset = 0;
+	prot->tx_metadata_offset = 0;
+
+#ifdef DHD_RX_CHAINING
+	dhd_rxchain_reset(&prot->rxchain);
+#endif
+
+	return 0;
+
+fail:
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+	if (prot != NULL)
+		dhd_prot_detach(dhd);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	return BCME_NOMEM;
+}
+
+/* Initialize a host memory block used for DMA'ing ring indices */
+int
+dhd_prot_init_index_dma_block(dhd_pub_t *dhd, uint8 type, uint32 length)
+{
+	uint alloced = 0;
+
+	dhd_prot_t *prot = dhd->prot;
+	uint32 dma_block_size = 4 * length;
+
+	if (prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return BCME_ERROR;
+	}
+
+	switch (type) {
+		case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER:
+			/* ring update dma buffer for submission write */
+			prot->h2d_dma_writeindx_buf_len = dma_block_size;
+			prot->h2d_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+				dma_block_size, DMA_ALIGN_LEN, &alloced,
+				&prot->h2d_dma_writeindx_buf.pa,
+				&prot->h2d_dma_writeindx_buf.dmah);
+
+			if (prot->h2d_dma_writeindx_buf.va == NULL) {
+				return BCME_NOMEM;
+			}
+
+			ASSERT(ISALIGNED(prot->h2d_dma_writeindx_buf.va, 4));
+			bzero(prot->h2d_dma_writeindx_buf.va, dma_block_size);
+			OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va, dma_block_size);
+			DHD_ERROR(("H2D_WRITEINDX_ARRAY_HOST: %d-bytes "
+				"inited for dma'ing h2d-w indices\n",
+				prot->h2d_dma_writeindx_buf_len));
+			break;
+
+		case HOST_TO_DNGL_DMA_READINDX_BUFFER:
+			/* ring update dma buffer for submission read */
+			prot->h2d_dma_readindx_buf_len = dma_block_size;
+			prot->h2d_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+				dma_block_size, DMA_ALIGN_LEN, &alloced,
+				&prot->h2d_dma_readindx_buf.pa,
+				&prot->h2d_dma_readindx_buf.dmah);
+			if (prot->h2d_dma_readindx_buf.va == NULL) {
+				return BCME_NOMEM;
+			}
+
+			ASSERT(ISALIGNED(prot->h2d_dma_readindx_buf.va, 4));
+			bzero(prot->h2d_dma_readindx_buf.va, dma_block_size);
+			OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va, dma_block_size);
+			DHD_ERROR(("H2D_READINDX_ARRAY_HOST %d-bytes "
+				"inited for dma'ing h2d-r indices\n",
+				prot->h2d_dma_readindx_buf_len));
+			break;
+
+		case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER:
+			/* ring update dma buffer for completion write */
+			prot->d2h_dma_writeindx_buf_len = dma_block_size;
+			prot->d2h_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+				dma_block_size, DMA_ALIGN_LEN, &alloced,
+				&prot->d2h_dma_writeindx_buf.pa,
+				&prot->d2h_dma_writeindx_buf.dmah);
+
+			if (prot->d2h_dma_writeindx_buf.va == NULL) {
+				return BCME_NOMEM;
+			}
+
+			ASSERT(ISALIGNED(prot->d2h_dma_writeindx_buf.va, 4));
+			bzero(prot->d2h_dma_writeindx_buf.va, dma_block_size);
+			OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va, dma_block_size);
+			DHD_ERROR(("D2H_WRITEINDX_ARRAY_HOST %d-bytes "
+				"inited for dma'ing d2h-w indices\n",
+				prot->d2h_dma_writeindx_buf_len));
+			break;
+
+		case DNGL_TO_HOST_DMA_READINDX_BUFFER:
+			/* ring update dma buffer for completion read */
+			prot->d2h_dma_readindx_buf_len = dma_block_size;
+			prot->d2h_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+				dma_block_size, DMA_ALIGN_LEN, &alloced,
+				&prot->d2h_dma_readindx_buf.pa,
+				&prot->d2h_dma_readindx_buf.dmah);
+
+			if (prot->d2h_dma_readindx_buf.va == NULL) {
+				return BCME_NOMEM;
+			}
+
+			ASSERT(ISALIGNED(prot->d2h_dma_readindx_buf.va, 4));
+			bzero(prot->d2h_dma_readindx_buf.va, dma_block_size);
+			OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va, dma_block_size);
+			DHD_ERROR(("D2H_READINDX_ARRAY_HOST %d-bytes "
+				"inited for dma'ing d2h-r indices\n",
+				prot->d2h_dma_readindx_buf_len));
+			break;
+
+		default:
+			DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
+			return BCME_BADOPTION;
+	}
+
+	return BCME_OK;
+
+}
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+void dhd_prot_detach(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	/* Stop the protocol module */
+	if (dhd->prot) {
+
+		/* free up scratch buffer */
+		if (prot->d2h_dma_scratch_buf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_scratch_buf.va,
+			DMA_D2H_SCRATCH_BUF_LEN, prot->d2h_dma_scratch_buf.pa,
+			prot->d2h_dma_scratch_buf.dmah);
+			prot->d2h_dma_scratch_buf.va = NULL;
+		}
+		/* free up ring upd buffer for submission writes */
+		if (prot->h2d_dma_writeindx_buf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_writeindx_buf.va,
+			  prot->h2d_dma_writeindx_buf_len, prot->h2d_dma_writeindx_buf.pa,
+			  prot->h2d_dma_writeindx_buf.dmah);
+			prot->h2d_dma_writeindx_buf.va = NULL;
+		}
+
+		/* free up ring upd buffer for submission reads */
+		if (prot->h2d_dma_readindx_buf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_readindx_buf.va,
+			  prot->h2d_dma_readindx_buf_len, prot->h2d_dma_readindx_buf.pa,
+			  prot->h2d_dma_readindx_buf.dmah);
+			prot->h2d_dma_readindx_buf.va = NULL;
+		}
+
+		/* free up ring upd buffer for completion writes */
+		if (prot->d2h_dma_writeindx_buf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_writeindx_buf.va,
+			  prot->d2h_dma_writeindx_buf_len, prot->d2h_dma_writeindx_buf.pa,
+			  prot->d2h_dma_writeindx_buf.dmah);
+			prot->d2h_dma_writeindx_buf.va = NULL;
+		}
+
+		/* free up ring upd buffer for completion reads */
+		if (prot->d2h_dma_readindx_buf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_readindx_buf.va,
+			  prot->d2h_dma_readindx_buf_len, prot->d2h_dma_readindx_buf.pa,
+			  prot->d2h_dma_readindx_buf.dmah);
+			prot->d2h_dma_readindx_buf.va = NULL;
+		}
+
+		/* ioctl return buffer */
+		if (prot->retbuf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->retbuf.va,
+			IOCT_RETBUF_SIZE, dhd->prot->retbuf.pa, dhd->prot->retbuf.dmah);
+			dhd->prot->retbuf.va = NULL;
+		}
+
+		/* ioctl request buffer */
+		if (prot->ioctbuf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->ioctbuf.va,
+			IOCT_RETBUF_SIZE, dhd->prot->ioctbuf.pa, dhd->prot->ioctbuf.dmah);
+
+			dhd->prot->ioctbuf.va = NULL;
+		}
+
+
+		/* 1.0	 H2D	TXPOST ring */
+		dhd_prot_ring_detach(dhd, prot->h2dring_txp_subn);
+		/* 2.0	 H2D	RXPOST ring */
+		dhd_prot_ring_detach(dhd, prot->h2dring_rxp_subn);
+		/* 3.0	 H2D	CTRL_SUBMISSION ring */
+		dhd_prot_ring_detach(dhd, prot->h2dring_ctrl_subn);
+		/* 4.0	 D2H	TX_COMPLETION ring */
+		dhd_prot_ring_detach(dhd, prot->d2hring_tx_cpln);
+		/* 5.0	 D2H	RX_COMPLETION ring */
+		dhd_prot_ring_detach(dhd, prot->d2hring_rx_cpln);
+		/* 6.0	 D2H	CTRL_COMPLETION ring */
+		dhd_prot_ring_detach(dhd, prot->d2hring_ctrl_cpln);
+
+		NATIVE_TO_PKTID_FINI(dhd->prot->pktid_map_handle);
+
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+		dhd->prot = NULL;
+	}
+}
+
+void
+dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
+{
+	dhd_prot_t *prot = dhd->prot;
+	prot->rx_dataoffset = rx_offset;
+}
+
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+int dhd_sync_with_dongle(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	wlc_rev_info_t revinfo;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Post event buffer after shim layer is attached */
+	ret = dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+	if (ret < 0)
+		goto done;
+
+	dhd_process_cid_mac(dhd, TRUE);
+
+	ret = dhd_preinit_ioctls(dhd);
+
+	if (!ret)
+		dhd_process_cid_mac(dhd, FALSE);
+
+	/* Always assumes wl for now */
+	dhd->iswl = TRUE;
+done:
+	return ret;
+}
+
+/* This function does all necessary initialization needed
+* for IOCTL/IOVAR path
+*/
+int dhd_prot_init(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Max pkts in ring */
+	prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;
+
+	DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count));
+
+	/* Read max rx packets supported by dongle */
+	dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
+	if (prot->max_rxbufpost == 0) {
+		/* This would happen if the dongle firmware is not */
+		/* using the latest shared structure template */
+		prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
+	}
+	DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
+
+	prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
+	prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
+
+	prot->active_tx_count = 0;
+	prot->data_seq_no = 0;
+	prot->ioctl_seq_no = 0;
+	prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
+
+	prot->ioctl_trans_id = 1;
+
+	/* Register the interrupt function upfront */
+	/* remove corerev checks in data path */
+	prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
+
+	/* Initialise rings */
+	/* 1.0	 H2D	TXPOST ring */
+	if (dhd_bus_is_txmode_push(dhd->bus)) {
+		dhd_ring_init(dhd, prot->h2dring_txp_subn);
+	}
+
+	/* 2.0	 H2D	RXPOST ring */
+	dhd_ring_init(dhd, prot->h2dring_rxp_subn);
+	/* 3.0	 H2D	CTRL_SUBMISSION ring */
+	dhd_ring_init(dhd, prot->h2dring_ctrl_subn);
+	/* 4.0	 D2H	TX_COMPLETION ring */
+	dhd_ring_init(dhd, prot->d2hring_tx_cpln);
+	/* 5.0	 D2H	RX_COMPLETION ring */
+	dhd_ring_init(dhd, prot->d2hring_rx_cpln);
+	/* 6.0	 D2H	CTRL_COMPLETION ring */
+	dhd_ring_init(dhd, prot->d2hring_ctrl_cpln);
+
+	/* init the scratch buffer */
+	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.pa,
+		sizeof(prot->d2h_dma_scratch_buf.pa), DNGL_TO_HOST_DMA_SCRATCH_BUFFER, 0);
+	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf_len,
+		sizeof(prot->d2h_dma_scratch_buf_len), DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN, 0);
+
+	/* If supported by the host, indicate the memory block
+	 * for completion writes / submission reads to shared space
+	 */
+	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+		dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_writeindx_buf.pa,
+			sizeof(prot->d2h_dma_writeindx_buf.pa),
+			DNGL_TO_HOST_DMA_WRITEINDX_BUFFER, 0);
+		dhd_bus_cmn_writeshared(dhd->bus, &prot->h2d_dma_readindx_buf.pa,
+			sizeof(prot->h2d_dma_readindx_buf.pa),
+			HOST_TO_DNGL_DMA_READINDX_BUFFER, 0);
+	}
+
+	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
+		dhd_bus_cmn_writeshared(dhd->bus, &prot->h2d_dma_writeindx_buf.pa,
+			sizeof(prot->h2d_dma_writeindx_buf.pa),
+			HOST_TO_DNGL_DMA_WRITEINDX_BUFFER, 0);
+		dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_readindx_buf.pa,
+			sizeof(prot->d2h_dma_readindx_buf.pa),
+			DNGL_TO_HOST_DMA_READINDX_BUFFER, 0);
+
+	}
+
+	ret = dhd_msgbuf_rxbuf_post(dhd);
+	ret = dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+
+	return ret;
+}
+
+#define DHD_DBG_SHOW_METADATA	0
+#if DHD_DBG_SHOW_METADATA
+static void BCMFASTPATH
+dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
+{
+	uint8 tlv_t;
+	uint8 tlv_l;
+	uint8 *tlv_v = (uint8 *)ptr;
+
+	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
+		return;
+
+	len -= BCMPCIE_D2H_METADATA_HDRLEN;
+	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
+
+	while (len > TLV_HDR_LEN) {
+		tlv_t = tlv_v[TLV_TAG_OFF];
+		tlv_l = tlv_v[TLV_LEN_OFF];
+
+		len -= TLV_HDR_LEN;
+		tlv_v += TLV_HDR_LEN;
+		if (len < tlv_l)
+			break;
+		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
+			break;
+
+		switch (tlv_t) {
+		case WLFC_CTL_TYPE_TXSTATUS:
+			bcm_print_bytes("METADATA TX_STATUS", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_RSSI:
+			bcm_print_bytes("METADATA RX_RSSI", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
+			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
+			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_RX_STAMP:
+			bcm_print_bytes("METADATA RX_TIMESTAMP", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_TRANS_ID:
+			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_COMP_TXSTATUS:
+			bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
+			break;
+
+		default:
+			bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
+			break;
+		}
+
+		len -= tlv_l;
+		tlv_v += tlv_l;
+	}
+}
+#endif /* DHD_DBG_SHOW_METADATA */
+
+static INLINE void BCMFASTPATH
+dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid)
+{
+	void *PKTBUF;
+	dmaaddr_t pa;
+	uint32 pa_len;
+	PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len);
+
+	if (PKTBUF) {
+		DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0);
+		PKTFREE(dhd->osh, PKTBUF, FALSE);
+	}
+	return;
+}
+
+static INLINE void * BCMFASTPATH
+dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid)
+{
+	void *PKTBUF;
+	dmaaddr_t pa;
+	uint32 pa_len;
+	PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len);
+	if (PKTBUF) {
+		DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, 0);
+	}
+
+	return PKTBUF;
+}
+
+static int BCMFASTPATH
+dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int16 fillbufs;
+	uint16 cnt = 64;
+	int retcount = 0;
+
+	fillbufs = prot->max_rxbufpost - prot->rxbufpost;
+	while (fillbufs > 0) {
+		cnt--;
+		if (cnt == 0) {
+			/* find a better way to reschedule rx buf post if space not available */
+			DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
+			DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
+			break;
+		}
+
+		/* Post in a burst of at most RX_BUF_BURST buffers at a time */
+		fillbufs = MIN(fillbufs, RX_BUF_BURST);
+
+		/* Post buffers */
+		retcount = dhd_prot_rxbufpost(dhd, fillbufs);
+
+		if (retcount > 0) {
+			prot->rxbufpost += (uint16)retcount;
+
+			/* how many more to post */
+			fillbufs = prot->max_rxbufpost - prot->rxbufpost;
+		} else {
+			/* Make sure we don't run loop any further */
+			fillbufs = 0;
+		}
+	}
+
+	return 0;
+}
+
+/* Post count no of rx buffers down to dongle */
+static int BCMFASTPATH
+dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count)
+{
+	void *p;
+	uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+	uint8 *rxbuf_post_tmp;
+	host_rxbuf_post_t *rxbuf_post;
+	void* msg_start;
+	dmaaddr_t physaddr;
+	uint32 pktlen;
+	dhd_prot_t *prot = dhd->prot;
+	msgbuf_ring_t * ring = prot->h2dring_rxp_subn;
+	uint8 i = 0;
+	uint16 alloced = 0;
+	unsigned long flags;
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	/* Claim space for 'count' no of messages */
+	msg_start = (void *)dhd_alloc_ring_space(dhd, ring, count, &alloced);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	if (msg_start == NULL) {
+		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
+		return -1;
+	}
+	/* if msg_start != NULL, we should have alloced space for at least 1 item */
+	ASSERT(alloced > 0);
+
+	rxbuf_post_tmp = (uint8*)msg_start;
+
+	/* loop through each message */
+	for (i = 0; i < alloced; i++) {
+		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
+		/* Create a rx buffer */
+		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
+			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
+			return -1;
+		}
+
+		pktlen = PKTLEN(dhd->osh, p);
+		physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+		if (PHYSADDRISZERO(physaddr)) {
+			if (RING_WRITE_PTR(ring) < alloced - i)
+				RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - alloced + i;
+			else
+				RING_WRITE_PTR(ring) -= alloced - i;
+			alloced = i;
+			DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+			PKTFREE(dhd->osh, p, FALSE);
+			DHD_ERROR(("Invalid phyaddr 0\n"));
+			ASSERT(0);
+			break;
+		}
+
+		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
+		pktlen = PKTLEN(dhd->osh, p);
+
+		/* CMN msg header */
+		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
+		rxbuf_post->cmn_hdr.if_id = 0;
+
+		/* get the lock before calling NATIVE_TO_PKTID */
+		DHD_GENERAL_LOCK(dhd, flags);
+
+		rxbuf_post->cmn_hdr.request_id =
+			htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr,
+			pktlen, DMA_RX));
+
+		/* free lock */
+		DHD_GENERAL_UNLOCK(dhd, flags);
+
+		if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
+			if (RING_WRITE_PTR(ring) < alloced - i)
+				RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - alloced + i;
+			else
+				RING_WRITE_PTR(ring) -= alloced - i;
+			alloced = i;
+			DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+			PKTFREE(dhd->osh, p, FALSE);
+			DHD_ERROR(("Pktid pool depleted.\n"));
+			break;
+		}
+
+		rxbuf_post->data_buf_len = htol16((uint16)pktlen);
+		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+		rxbuf_post->data_buf_addr.low_addr =
+			htol32(PHYSADDRLO(physaddr) + prot->rx_metadata_offset);
+
+		if (prot->rx_metadata_offset) {
+			rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
+			rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+			rxbuf_post->metadata_buf_addr.low_addr  = htol32(PHYSADDRLO(physaddr));
+		} else {
+			rxbuf_post->metadata_buf_len = 0;
+			rxbuf_post->metadata_buf_addr.high_addr = 0;
+			rxbuf_post->metadata_buf_addr.low_addr  = 0;
+		}
+
+		/* Move rxbuf_post_tmp to next item */
+		rxbuf_post_tmp = rxbuf_post_tmp + RING_LEN_ITEMS(ring);
+	}
+	/* Update the write pointer in TCM & ring bell */
+	if (alloced > 0)
+		prot_ring_write_complete(dhd, prot->h2dring_rxp_subn, msg_start, alloced);
+
+	return alloced;
+}
+
+static int
+dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf)
+{
+	void *p;
+	uint16 pktsz;
+	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
+	dmaaddr_t physaddr;
+	uint32 pktlen;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 alloced = 0;
+	unsigned long flags;
+
+	if (event_buf) {
+		/* Allocate packet for event buffer post */
+		pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+	} else {
+		/* Allocate packet for ctrl/ioctl buffer post */
+		pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
+	}
+
+	if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
+		DHD_ERROR(("%s:%d: PKTGET for ctrl rxbuf failed\n", __FUNCTION__, __LINE__));
+		return -1;
+	}
+
+	pktlen = PKTLEN(dhd->osh, p);
+	physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+	if (PHYSADDRISZERO(physaddr)) {
+
+		DHD_ERROR(("Invalid phyaddr 0\n"));
+		ASSERT(0);
+		goto free_pkt_return;
+	}
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+	if (rxbuf_post == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
+			__FUNCTION__, __LINE__));
+		DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+		goto free_pkt_return;
+	}
+
+	/* CMN msg header */
+	if (event_buf)
+		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST;
+	else
+		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST;
+	rxbuf_post->cmn_hdr.if_id = 0;
+
+	rxbuf_post->cmn_hdr.request_id =
+		htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr, pktlen, DMA_RX));
+
+	if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
+		if (RING_WRITE_PTR(prot->h2dring_ctrl_subn) == 0)
+			RING_WRITE_PTR(prot->h2dring_ctrl_subn) =
+				RING_MAX_ITEM(prot->h2dring_ctrl_subn) - 1;
+		else
+			RING_WRITE_PTR(prot->h2dring_ctrl_subn)--;
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+		goto free_pkt_return;
+	}
+
+	rxbuf_post->cmn_hdr.flags = 0;
+	rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
+	rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+	rxbuf_post->host_buf_addr.low_addr  = htol32(PHYSADDRLO(physaddr));
+
+	/* Update the write pointer in TCM & ring bell */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, rxbuf_post,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return 1;
+
+free_pkt_return:
+	PKTFREE(dhd->osh, p, FALSE);
+
+	return -1;
+}
+
+static uint16
+dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post)
+{
+	uint32 i = 0;
+	int32 ret_val;
+
+	DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf));
+	while (i < max_to_post) {
+		ret_val  = dhd_prot_rxbufpost_ctrl(dhd, event_buf);
+		if (ret_val < 0)
+			break;
+		i++;
+	}
+	DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf));
+	return (uint16)i;
+}
+
+static int
+dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint16 retcnt = 0;
+
+	DHD_INFO(("ioctl resp buf post\n"));
+	retcnt = dhd_msgbuf_rxbuf_post_ctrlpath(dhd, FALSE,
+		prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted);
+	prot->cur_ioctlresp_bufs_posted += retcnt;
+	return 0;
+}
+
+static int
+dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, TRUE,
+		prot->max_eventbufpost - prot->cur_event_bufs_posted);
+	return 0;
+}
+
+int BCMFASTPATH
+dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Process all the messages - DTOH direction */
+	while (TRUE) {
+		uint8 *src_addr;
+		uint16 src_len;
+
+		/* Store current read pointer */
+		/* Read pointer will be updated in prot_early_upd_rxcpln_read_idx */
+		prot_store_rxcpln_read_idx(dhd, prot->d2hring_rx_cpln);
+
+		/* Get the message from ring */
+		src_addr = prot_get_src_addr(dhd, prot->d2hring_rx_cpln, &src_len);
+		if (src_addr == NULL)
+			break;
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(src_addr);
+
+		if (dhd_prot_process_msgtype(dhd, prot->d2hring_rx_cpln, src_addr,
+			src_len) != BCME_OK) {
+			prot_upd_read_idx(dhd, prot->d2hring_rx_cpln);
+			DHD_ERROR(("%s: Error at  process rxpl msgbuf of len %d\n",
+				__FUNCTION__, src_len));
+		}
+	}
+
+	return 0;
+}
+
+void
+dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flow_id, void *msgring_info)
+{
+	uint16 r_index = 0;
+	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring_info;
+
+	/* Update read pointer */
+	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+		r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx);
+		ring->ringstate->r_offset = r_index;
+	}
+
+	DHD_TRACE(("flow %d, write %d read %d \n\n", flow_id, RING_WRITE_PTR(ring),
+		RING_READ_PTR(ring)));
+
+	/* Need more logic here, but for now use it directly */
+	dhd_bus_schedule_queue(dhd->bus, flow_id, TRUE);
+}
+
+
+int BCMFASTPATH
+dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Process all the messages - DTOH direction */
+	while (TRUE) {
+		uint8 *src_addr;
+		uint16 src_len;
+
+		src_addr = prot_get_src_addr(dhd, prot->d2hring_tx_cpln, &src_len);
+		if (src_addr == NULL)
+			break;
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(src_addr);
+
+		if (dhd_prot_process_msgtype(dhd, prot->d2hring_tx_cpln, src_addr,
+			src_len) != BCME_OK) {
+			DHD_ERROR(("%s: Error at  process txcmpl msgbuf of len %d\n",
+				__FUNCTION__, src_len));
+		}
+
+		/* Write to dngl rd ptr */
+		prot_upd_read_idx(dhd, prot->d2hring_tx_cpln);
+	}
+
+	return 0;
+}
+
+int BCMFASTPATH
+dhd_prot_process_ctrlbuf(dhd_pub_t * dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Process all the messages - DTOH direction */
+	while (TRUE) {
+		uint8 *src_addr;
+		uint16 src_len;
+		src_addr = prot_get_src_addr(dhd, prot->d2hring_ctrl_cpln, &src_len);
+
+		if (src_addr == NULL) {
+			break;
+		}
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(src_addr);
+		if (dhd_prot_process_msgtype(dhd, prot->d2hring_ctrl_cpln, src_addr,
+			src_len) != BCME_OK) {
+			DHD_ERROR(("%s: Error at  process ctrlmsgbuf of len %d\n",
+				__FUNCTION__, src_len));
+		}
+
+		/* Write to dngl rd ptr */
+		prot_upd_read_idx(dhd, prot->d2hring_ctrl_cpln);
+	}
+
+	return 0;
+}
+
+static int BCMFASTPATH
+dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint32 cur_dma_len = 0;
+	int ret = BCME_OK;
+
+	DHD_INFO(("%s: process msgbuf of len %d\n", __FUNCTION__, len));
+
+	while (len > 0) {
+		ASSERT(len > (sizeof(cmn_msg_hdr_t) + prot->rx_dataoffset));
+		if (prot->rx_dataoffset) {
+			cur_dma_len = *(uint32 *) buf;
+			ASSERT(cur_dma_len <= len);
+			buf += prot->rx_dataoffset;
+			len -= (uint16)prot->rx_dataoffset;
+		}
+		else {
+			cur_dma_len = len;
+		}
+		if (dhd_process_msgtype(dhd, ring, buf, (uint16)cur_dma_len) != BCME_OK) {
+			DHD_ERROR(("%s: Error at  process msg of dmalen %d\n",
+				__FUNCTION__, cur_dma_len));
+			ret = BCME_ERROR;
+		}
+
+		len -= (uint16)cur_dma_len;
+		buf += cur_dma_len;
+	}
+	return ret;
+}
+
+#define PCIE_M2M_D2H_DMA_WAIT_TRIES     256
+#define PCIE_D2H_RESET_MARK             0xdeadbeef
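+/*
+ * Handlers below stamp each processed D2H entry with PCIE_D2H_RESET_MARK.
+ * If the last word of a freshly read entry still carries that mark, the
+ * dongle's DMA of the new message has not landed yet, so re-invalidate the
+ * cache and poll a bounded number of times before giving up.
+ */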
+void dhd_msgbuf_d2h_check_cmplt(msgbuf_ring_t *ring, void *msg)
+{
+	uint32 tries;
+	uint32 *marker = (uint32 *)msg + RING_LEN_ITEMS(ring) / sizeof(uint32) - 1;
+
+	for (tries = 0; tries < PCIE_M2M_D2H_DMA_WAIT_TRIES; tries++) {
+		if (*(volatile uint32 *)marker != PCIE_D2H_RESET_MARK)
+			return;
+		OSL_CACHE_INV(msg, RING_LEN_ITEMS(ring));
+	}
+
+	/* only print error for data ring */
+	if (ring->idx == BCMPCIE_D2H_MSGRING_TX_COMPLETE ||
+		ring->idx == BCMPCIE_D2H_MSGRING_RX_COMPLETE)
+		DHD_ERROR(("%s: stale msgbuf content after %d retries\n",
+			__FUNCTION__, tries));
+}
+
+static int BCMFASTPATH
+dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len)
+{
+	uint16 pktlen = len;
+	uint16 msglen;
+	uint8 msgtype;
+	cmn_msg_hdr_t *msg = NULL;
+	int ret = BCME_OK;
+	uint8 *buf_head = buf;
+
+	ASSERT(ring && ring->ringmem);
+	msglen = RING_LEN_ITEMS(ring);
+	if (msglen == 0) {
+		DHD_ERROR(("%s: ringidx %d, msglen is %d, pktlen is %d \n",
+			__FUNCTION__, ring->idx, msglen, pktlen));
+		return BCME_ERROR;
+	}
+
+	while (pktlen > 0) {
+		msg = (cmn_msg_hdr_t *)buf;
+
+		dhd_msgbuf_d2h_check_cmplt(ring, msg);
+
+		msgtype = msg->msg_type;
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(buf + msglen);
+
+		DHD_INFO(("msgtype %d, msglen is %d, pktlen is %d \n",
+			msgtype, msglen, pktlen));
+		if (msgtype == MSG_TYPE_LOOPBACK) {
+			bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, msglen);
+			DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", msglen));
+		}
+
+		ASSERT(msgtype < DHD_PROT_FUNCS);
+		if (table_lookup[msgtype]) {
+			table_lookup[msgtype](dhd, buf, msglen);
+		}
+
+		if (pktlen < msglen) {
+			ret = BCME_ERROR;
+			goto done;
+		}
+		pktlen = pktlen - msglen;
+		buf = buf + msglen;
+
+		if (msgtype == MSG_TYPE_RX_CMPLT)
+			prot_early_upd_rxcpln_read_idx(dhd,
+				dhd->prot->d2hring_rx_cpln);
+	}
+done:
+	OSL_CACHE_FLUSH(buf_head, len - pktlen);
+
+#ifdef DHD_RX_CHAINING
+	dhd_rxchain_commit(dhd);
+#endif
+
+	return ret;
+}
+
+static void
+dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	pcie_ring_status_t * ring_status = (pcie_ring_status_t *)buf;
+	DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, w_offset %d \n",
+		ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status,
+		ring_status->compl_hdr.flow_ring_id, ring_status->write_idx));
+	/* How do we track this to pair it with ??? */
+	return;
+}
+
+static void
+dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	pcie_gen_status_t * gen_status = (pcie_gen_status_t *)buf;
+	DHD_ERROR(("gen status: request_id %d, status 0x%04x, flow ring %d \n",
+		gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
+		gen_status->compl_hdr.flow_ring_id));
+
+	/* How do we track this to pair it with ??? */
+	return;
+}
+
+static void
+dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	ioctl_req_ack_msg_t * ioct_ack = (ioctl_req_ack_msg_t *)buf;
+
+	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
+		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
+		ioct_ack->compl_hdr.flow_ring_id));
+	if (ioct_ack->compl_hdr.status != 0)  {
+		DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
+	}
+
+	memset(buf, 0, msglen);
+	ioct_ack->marker = PCIE_D2H_RESET_MARK;
+}
+static void
+dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	uint16 status;
+	uint32 resp_len = 0;
+	uint32 pkt_id, xt_id;
+	ioctl_comp_resp_msg_t * ioct_resp = (ioctl_comp_resp_msg_t *)buf;
+
+	resp_len = ltoh16(ioct_resp->resp_len);
+	xt_id = ltoh16(ioct_resp->trans_id);
+	pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
+	status = ioct_resp->compl_hdr.status;
+
+	memset(buf, 0, msglen);
+	ioct_resp->marker = PCIE_D2H_RESET_MARK;
+
+	DHD_CTL(("IOCTL_COMPLETE: pktid %x xtid %d status %x resplen %d\n",
+		pkt_id, xt_id, status, resp_len));
+
+	dhd_bus_update_retlen(dhd->bus, sizeof(ioctl_comp_resp_msg_t), pkt_id, status, resp_len);
+	dhd_os_ioctl_resp_wake(dhd);
+}
+
+static void BCMFASTPATH
+dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	dhd_prot_t *prot = dhd->prot;
+	host_txbuf_cmpl_t * txstatus;
+	unsigned long flags;
+	uint32 pktid;
+	void *pkt;
+
+	/* locks required to protect circular buffer accesses */
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	txstatus = (host_txbuf_cmpl_t *)buf;
+	pktid = ltoh32(txstatus->cmn_hdr.request_id);
+
+	DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
+	if (prot->active_tx_count)
+		prot->active_tx_count--;
+	else
+		DHD_ERROR(("Extra packets are freed\n"));
+
+	ASSERT(pktid != 0);
+	pkt = dhd_prot_packet_get(dhd, pktid);
+	if (pkt) {
+#if defined(BCMPCIE)
+		dhd_txcomplete(dhd, pkt, true);
+#endif 
+
+#if DHD_DBG_SHOW_METADATA
+		if (dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
+			uchar *ptr;
+			/* The Ethernet header of TX frame was copied and removed.
+			 * Here, move the data pointer forward by Ethernet header size.
+			 */
+			PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
+			ptr = PKTDATA(dhd->osh, pkt)  - (dhd->prot->tx_metadata_offset);
+			bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
+			dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
+		}
+#endif /* DHD_DBG_SHOW_METADATA */
+		PKTFREE(dhd->osh, pkt, TRUE);
+	}
+
+	memset(buf, 0, msglen);
+	txstatus->marker = PCIE_D2H_RESET_MARK;
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return;
+}
+
+static void
+dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len)
+{
+	wlevent_req_msg_t *evnt;
+	uint32 bufid;
+	uint16 buflen;
+	int ifidx = 0;
+	void* pkt;
+	unsigned long flags;
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Event complete header */
+	evnt = (wlevent_req_msg_t *)buf;
+	bufid = ltoh32(evnt->cmn_hdr.request_id);
+	buflen = ltoh16(evnt->event_data_len);
+
+	ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
+
+	/* Post another rxbuf to the device */
+	if (prot->cur_event_bufs_posted)
+		prot->cur_event_bufs_posted--;
+	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+	memset(buf, 0, len);
+	evnt->marker = PCIE_D2H_RESET_MARK;
+
+	/* locks required to protect pktid_map */
+	DHD_GENERAL_LOCK(dhd, flags);
+	pkt = dhd_prot_packet_get(dhd, ltoh32(bufid));
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	if (!pkt)
+		return;
+
+	/* DMA RX offset updated through shared area */
+	if (dhd->prot->rx_dataoffset)
+		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+
+	PKTSETLEN(dhd->osh, pkt, buflen);
+
+	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
+}
+
+static void BCMFASTPATH
+dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+	host_rxbuf_cmpl_t *rxcmplt_h;
+	uint16 data_offset;             /* offset at which data starts */
+	void * pkt;
+	unsigned long flags;
+	static uint8 current_phase = 0;
+	uint ifidx;
+
+	/* RXCMPLT HDR */
+	rxcmplt_h = (host_rxbuf_cmpl_t *)buf;
+
+	/* Post another set of rxbufs to the device */
+	dhd_prot_return_rxbuf(dhd, 1);
+
+	/* offset from which data starts is populated in rxstatus0 */
+	data_offset = ltoh16(rxcmplt_h->data_offset);
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	pkt = dhd_prot_packet_get(dhd, ltoh32(rxcmplt_h->cmn_hdr.request_id));
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	if (!pkt) {
+		return;
+	}
+
+	DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
+		ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len),
+		rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
+		ltoh16(rxcmplt_h->metadata_len)));
+
+#if DHD_DBG_SHOW_METADATA
+	if (dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) {
+		uchar *ptr;
+		ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset);
+		/* header followed by data */
+		bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len);
+		dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len);
+	}
+#endif /* DHD_DBG_SHOW_METADATA */
+
+	if (current_phase !=  rxcmplt_h->cmn_hdr.flags) {
+		current_phase = rxcmplt_h->cmn_hdr.flags;
+	}
+	if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)
+		DHD_INFO(("D11 frame rxed \n"));
+	/* data_offset from buf start */
+	if (data_offset) {
+		/* data offset given from dongle after split rx */
+		PKTPULL(dhd->osh, pkt, data_offset); /* data offset */
+	} else {
+		/* DMA RX offset updated through shared area */
+		if (dhd->prot->rx_dataoffset)
+			PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+	}
+	/* Actual length of the packet */
+	PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len));
+
+	ifidx = rxcmplt_h->cmn_hdr.if_id;
+	memset(buf, 0, msglen);
+	rxcmplt_h->marker = PCIE_D2H_RESET_MARK;
+
+#ifdef DHD_RX_CHAINING
+	/* Chain the packets */
+	dhd_rxchain_frame(dhd, pkt, ifidx);
+#else /* ! DHD_RX_CHAINING */
+	/* offset from which data starts is populated in rxstatus0 */
+	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
+#endif /* ! DHD_RX_CHAINING */
+
+}
+
+/* Stop protocol: sync w/dongle state. */
+void dhd_prot_stop(dhd_pub_t *dhd)
+{
+	/* nothing to do for pcie */
+}
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+void BCMFASTPATH
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
+{
+	return;
+}
+
+uint
+dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
+{
+	return 0;
+}
+
+
+#define PKTBUF pktbuf
+
+int BCMFASTPATH
+dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
+{
+	unsigned long flags;
+	dhd_prot_t *prot = dhd->prot;
+	host_txbuf_post_t *txdesc = NULL;
+	dmaaddr_t physaddr, meta_physaddr;
+	uint8 *pktdata;
+	uint16 pktlen;
+	uint32 pktid;
+	uint8	prio;
+	uint16 flowid = 0;
+	uint16 alloced = 0;
+	uint16	headroom;
+
+	msgbuf_ring_t *msg_ring;
+
+	if (!dhd_bus_is_txmode_push(dhd->bus)) {
+		flow_ring_table_t *flow_ring_table;
+		flow_ring_node_t *flow_ring_node;
+
+		flowid = (uint16)DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(PKTBUF));
+
+		flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+		flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+
+		msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+	} else {
+		msg_ring = prot->h2dring_txp_subn;
+	}
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	/* Create a unique 32-bit packet id */
+	pktid = NATIVE_TO_PKTID_RSV(dhd->prot->pktid_map_handle, PKTBUF);
+	if (pktid == DHD_PKTID_INVALID) {
+		DHD_ERROR(("Pktid pool depleted.\n"));
+		/*
+		 * If we return an error here, the caller will queue the packet
+		 * again, so we only free the skb allocated in the DMA zone.
+		 * Since we have not freed the original SKB yet, the caller can
+		 * requeue the same packet.
+		 */
+		goto err_no_res_pktfree;
+	}
+
+	/* Reserve space in the circular buffer */
+	txdesc = (host_txbuf_post_t *)dhd_alloc_ring_space(dhd,
+		msg_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+	if (txdesc == NULL) {
+		DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
+			__FUNCTION__, __LINE__, prot->active_tx_count));
+		/* Free up the PKTID */
+		PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, physaddr,
+			pktlen);
+		goto err_no_res_pktfree;
+	}
+
+	/* Extract the data pointer and length information */
+	pktdata = PKTDATA(dhd->osh, PKTBUF);
+	pktlen  = (uint16)PKTLEN(dhd->osh, PKTBUF);
+
+	/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
+	bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
+
+	/* Extract the ethernet header and adjust the data pointer and length */
+	pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+	pktlen -= ETHER_HDR_LEN;
+
+	/* Map the data pointer to a DMA-able address */
+	physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
+	if ((PHYSADDRHI(physaddr) == 0) && (PHYSADDRLO(physaddr) == 0)) {
+		DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
+		ASSERT(0);
+	}
+
+	/* No need to lock. Save the rest of the packet's metadata */
+	NATIVE_TO_PKTID_SAVE(dhd->prot->pktid_map_handle, PKTBUF, pktid,
+	                     physaddr, pktlen, DMA_TX);
+
+#ifdef TXP_FLUSH_NITEMS
+	if (msg_ring->pend_items_count == 0)
+		msg_ring->start_addr = (void *)txdesc;
+	msg_ring->pend_items_count++;
+#endif
+
+	/* Form the Tx descriptor message buffer */
+
+	/* Common message hdr */
+	txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
+	txdesc->cmn_hdr.request_id = htol32(pktid);
+	txdesc->cmn_hdr.if_id = ifidx;
+	txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
+	prio = (uint8)PKTPRIO(PKTBUF);
+
+
+	txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
+	txdesc->seg_cnt = 1;
+
+	txdesc->data_len = htol16(pktlen);
+	txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+	txdesc->data_buf_addr.low_addr  = htol32(PHYSADDRLO(physaddr));
+
+	/* Move data pointer to keep ether header in local PKTBUF for later reference */
+	PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+
+	/* Handle Tx metadata */
+	headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
+	if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
+		DHD_ERROR(("No headroom for Metadata tx %d %d\n",
+		prot->tx_metadata_offset, headroom));
+
+	if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
+		DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
+
+		/* Adjust the data pointer to account for meta data in DMA_MAP */
+		PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+		meta_physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
+			prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
+		if (PHYSADDRISZERO(meta_physaddr)) {
+			DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
+			ASSERT(0);
+		}
+
+		/* Adjust the data pointer back to original value */
+		PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+
+		txdesc->metadata_buf_len = prot->tx_metadata_offset;
+		txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_physaddr));
+		txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_physaddr));
+	} else {
+		txdesc->metadata_buf_len = htol16(0);
+		txdesc->metadata_buf_addr.high_addr = 0;
+		txdesc->metadata_buf_addr.low_addr = 0;
+	}
+
+
+	DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
+		txdesc->cmn_hdr.request_id));
+
+	/* Update the write pointer in TCM & ring bell */
+#ifdef TXP_FLUSH_NITEMS
+	/* Flush if we have either hit the txp_threshold or if this msg is */
+	/* occupying the last slot in the flow_ring - before wrap around.  */
+	if ((msg_ring->pend_items_count == prot->txp_threshold) ||
+		((uint8 *) txdesc == (uint8 *) HOST_RING_END(msg_ring))) {
+		dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
+	}
+#else
+	prot_ring_write_complete(dhd, msg_ring, txdesc, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+#endif
+
+	prot->active_tx_count++;
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return BCME_OK;
+
+err_no_res_pktfree:
+	DHD_GENERAL_UNLOCK(dhd, flags);
+	return BCME_NORESOURCE;
+
+}
+
+/* Flush any pending tx post descriptors for a flow ring. Pass in_lock = TRUE
+ * when the caller already holds the general lock.
+ */
+void BCMFASTPATH
+dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
+{
+#ifdef TXP_FLUSH_NITEMS
+	unsigned long flags = 0;
+	flow_ring_table_t *flow_ring_table;
+	flow_ring_node_t *flow_ring_node;
+	msgbuf_ring_t *msg_ring;
+
+
+	if (!in_lock) {
+		DHD_GENERAL_LOCK(dhd, flags);
+	}
+
+	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+	msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+
+	/* Update the write pointer in TCM & ring bell */
+	if (msg_ring->pend_items_count) {
+		prot_ring_write_complete(dhd, msg_ring, msg_ring->start_addr,
+			msg_ring->pend_items_count);
+		msg_ring->pend_items_count = 0;
+		msg_ring->start_addr = NULL;
+	}
+
+	if (!in_lock) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+	}
+#endif /* TXP_FLUSH_NITEMS */
+}
+
+#undef PKTBUF	/* Only defined in the above routine */
+int BCMFASTPATH
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
+{
+	return 0;
+}
+
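+/* Account for rx buffers consumed by the dongle and repost fresh rx buffers
+ * once the number of outstanding buffers drops below the refill threshold.
+ */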
+static void BCMFASTPATH
+dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	if (prot->rxbufpost >= rxcnt) {
+		prot->rxbufpost -= rxcnt;
+	} else {
+		/* ASSERT(0); */
+		prot->rxbufpost = 0;
+	}
+
+	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
+		dhd_msgbuf_rxbuf_post(dhd);
+
+	return;
+}
+
+/* Use protocol to issue ioctl to dongle */
+int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = -1;
+	uint8 action;
+
+	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		goto done;
+	}
+
+	if (dhd->busstate == DHD_BUS_SUSPEND) {
+		DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+	if (len > WLC_IOCTL_MAXLEN)
+		goto done;
+
+	if (prot->pending == TRUE) {
+		DHD_ERROR(("packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+			ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+			(unsigned long)prot->lastcmd));
+		if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+			DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+		}
+		goto done;
+	}
+
+	prot->pending = TRUE;
+	prot->lastcmd = ioc->cmd;
+	action = ioc->set;
+
+
+	if (action & WL_IOCTL_ACTION_SET) {
+		ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+	} else {
+		ret = dhdmsgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+		if (ret > 0)
+			ioc->used = ret;
+	}
+	/* Too many programs assume ioctl() returns 0 on success */
+	if (ret >= 0)
+		ret = 0;
+	else {
+		DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret));
+		dhd->dongle_error = ret;
+	}
+
+	/* Intercept the wme_dp ioctl here */
+	if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+		int slen, val = 0;
+
+		slen = strlen("wme_dp") + 1;
+		if (len >= (int)(slen + sizeof(int)))
+			bcopy(((char *)buf + slen), &val, sizeof(int));
+		dhd->wme_dp = (uint8) ltoh32(val);
+	}
+
+
+	prot->pending = FALSE;
+
+done:
+	return ret;
+
+}
+
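+/* Post a loopback request on the H2D control submission ring. The message is
+ * filled with an incrementing byte pattern so the echoed data can be checked.
+ */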
+int
+dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
+{
+	unsigned long flags;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 alloced = 0;
+
+	ioct_reqst_hdr_t *ioct_rqst;
+
+	uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
+	uint16 msglen = len + hdrlen;
+
+
+	if (msglen  > MSGBUF_MAX_MSG_SIZE)
+		msglen = MSGBUF_MAX_MSG_SIZE;
+
+	msglen = align(msglen, DMA_ALIGN_LEN);
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	ioct_rqst = (ioct_reqst_hdr_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+	if (ioct_rqst == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return 0;
+	}
+
+	{
+		uint8 *ptr;
+		uint16 i;
+
+		ptr = (uint8 *)ioct_rqst;
+		for (i = 0; i < msglen; i++) {
+			ptr[i] = i % 256;
+		}
+	}
+
+
+	/* Common msg buf hdr */
+	ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
+	ioct_rqst->msg.if_id = 0;
+
+	bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
+
+	/* Update the write pointer in TCM & ring bell */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return 0;
+}
+
+void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma)
+{
+	if (dma == NULL)
+		return;
+
+	if (dma->srcmem.va) {
+		DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va,
+			dma->len, dma->srcmem.pa, dma->srcmem.dmah);
+		dma->srcmem.va = NULL;
+	}
+	if (dma->destmem.va) {
+		DMA_FREE_CONSISTENT(dhd->osh, dma->destmem.va,
+			dma->len + 8, dma->destmem.pa, dma->destmem.dmah);
+		dma->destmem.va = NULL;
+	}
+}
+
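+/* Allocate DMA-coherent source and destination buffers for a loopback DMA
+ * transfer test, fill the source with an incrementing byte pattern and zero
+ * the destination.
+ */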
+int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
+	uint srcdelay, uint destdelay, dhd_dmaxfer_t *dma)
+{
+	uint i;
+
+	if (!dma)
+		return BCME_ERROR;
+
+	/* First free up existing buffers */
+	dmaxfer_free_dmaaddr(dhd, dma);
+
+	dma->srcmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len, DMA_ALIGN_LEN,
+	&i, &dma->srcmem.pa, &dma->srcmem.dmah);
+	if (dma->srcmem.va ==  NULL) {
+		return BCME_NOMEM;
+	}
+
+	/* Populate source with a pattern */
+	for (i = 0; i < len; i++) {
+		((uint8*)dma->srcmem.va)[i] = i % 256;
+	}
+	OSL_CACHE_FLUSH(dma->srcmem.va, len);
+
+	dma->destmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len + 8, DMA_ALIGN_LEN,
+	&i, &dma->destmem.pa, &dma->destmem.dmah);
+	if (dma->destmem.va ==  NULL) {
+		DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va,
+			dma->len, dma->srcmem.pa, dma->srcmem.dmah);
+		dma->srcmem.va = NULL;
+		return BCME_NOMEM;
+	}
+
+
+	/* Clear the destination buffer */
+	bzero(dma->destmem.va, len + 8);
+	OSL_CACHE_FLUSH(dma->destmem.va, len + 8);
+
+	dma->len = len;
+	dma->srcdelay = srcdelay;
+	dma->destdelay = destdelay;
+
+	return BCME_OK;
+}
+
+static void
+dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	OSL_CACHE_INV(prot->dmaxfer.destmem.va, prot->dmaxfer.len);
+	if (prot->dmaxfer.srcmem.va && prot->dmaxfer.destmem.va) {
+		if (memcmp(prot->dmaxfer.srcmem.va,
+			prot->dmaxfer.destmem.va,
+			prot->dmaxfer.len)) {
+			bcm_print_bytes("XFER SRC: ",
+				prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
+			bcm_print_bytes("XFER DEST: ",
+				prot->dmaxfer.destmem.va, prot->dmaxfer.len);
+		}
+		else {
+			DHD_INFO(("DMA successful\n"));
+		}
+	}
+	dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+	dhd->prot->dmaxfer_in_progress = FALSE;
+}
+
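+/* Kick off a host/dongle DMA loopback transfer: prepare the host buffers and
+ * post a MSG_TYPE_LPBK_DMAXFER request describing them on the control ring.
+ * The source/destination contents can later be compared with
+ * dhdmsgbuf_dmaxfer_compare().
+ */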
+int
+dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay)
+{
+	unsigned long flags;
+	int ret = BCME_OK;
+	dhd_prot_t *prot = dhd->prot;
+	pcie_dma_xfer_params_t *dmap;
+	uint32 xferlen = len > DMA_XFER_LEN_LIMIT ? DMA_XFER_LEN_LIMIT : len;
+	uint16 msglen = sizeof(pcie_dma_xfer_params_t);
+	uint16 alloced = 0;
+
+	if (prot->dmaxfer_in_progress) {
+		DHD_ERROR(("DMA is in progress...\n"));
+		return ret;
+	}
+	prot->dmaxfer_in_progress = TRUE;
+	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
+		&prot->dmaxfer)) != BCME_OK) {
+		prot->dmaxfer_in_progress = FALSE;
+		return ret;
+	}
+
+
+	if (msglen  > MSGBUF_MAX_MSG_SIZE)
+		msglen = MSGBUF_MAX_MSG_SIZE;
+
+	msglen = align(msglen, DMA_ALIGN_LEN);
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	dmap = (pcie_dma_xfer_params_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+	if (dmap == NULL) {
+		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+		prot->dmaxfer_in_progress = FALSE;
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
+	dmap->cmn_hdr.request_id = 0x1234;
+
+	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
+	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
+	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.destmem.pa));
+	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.destmem.pa));
+	dmap->xfer_len = htol32(prot->dmaxfer.len);
+	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
+	dmap->destdelay = htol32(prot->dmaxfer.destdelay);
+
+	/* Update the write pointer in TCM & ring bell */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, dmap,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	DHD_ERROR(("DMA Started...\n"));
+
+	return BCME_OK;
+}
+
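+/* GET path of an ioctl: answer "bcmerror"/"bcmerrorstr" from the locally
+ * cached dongle error, otherwise post an ioctl request to the dongle and wait
+ * for its completion.
+ */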
+static int
+dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	int ret = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Respond "bcmerror" and "bcmerrorstr" with local cache */
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!strcmp((char *)buf, "bcmerrorstr"))
+		{
+			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+			goto done;
+		}
+		else if (!strcmp((char *)buf, "bcmerror"))
+		{
+			*(int *)buf = dhd->dongle_error;
+			goto done;
+		}
+	}
+
+	ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
+
+	DHD_INFO(("ACTION %d ifidx %d cmd %d len %d \n",
+		action, ifidx, cmd, len));
+
+	/* wait for interrupt and get first fragment */
+	ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va);
+
+done:
+	return ret;
+}
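+
+/* Wait for the ioctl completion from the dongle, repost an ioctl response
+ * buffer, copy the response payload (if any) into the caller's buffer and
+ * return the completion status.
+ */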
+static int
+dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf)
+{
+	dhd_prot_t *prot = dhd->prot;
+	ioctl_comp_resp_msg_t  ioct_resp;
+	void* pkt;
+	int retlen;
+	int msgbuf_len = 0;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (prot->cur_ioctlresp_bufs_posted)
+		prot->cur_ioctlresp_bufs_posted--;
+
+	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+
+	retlen = dhd_bus_rxctl(dhd->bus, (uchar*)&ioct_resp, msgbuf_len);
+	if (retlen <= 0) {
+		DHD_ERROR(("IOCTL request failed with error code %d\n", retlen));
+		return retlen;
+	}
+	DHD_INFO(("ioctl resp retlen %d status %d, resp_len %d, pktid %d\n",
+		retlen, ioct_resp.compl_hdr.status, ioct_resp.resp_len,
+		ioct_resp.cmn_hdr.request_id));
+	if (ioct_resp.resp_len != 0) {
+		DHD_GENERAL_LOCK(dhd, flags);
+		pkt = dhd_prot_packet_get(dhd, ioct_resp.cmn_hdr.request_id);
+		DHD_GENERAL_UNLOCK(dhd, flags);
+
+		DHD_INFO(("ioctl ret buf %p retlen %d status %x \n", pkt, retlen,
+			ioct_resp.compl_hdr.status));
+		/* get ret buf */
+		if ((buf) && (pkt)) {
+			/* bcopy(PKTDATA(dhd->osh, pkt), buf, ioct_resp.resp_len); */
+			/* ioct_resp.resp_len could have been changed to make it > 8 bytes */
+			bcopy(PKTDATA(dhd->osh, pkt), buf, len);
+		}
+		if (pkt) {
+			PKTFREE(dhd->osh, pkt, FALSE);
+		}
+	} else {
+		DHD_GENERAL_LOCK(dhd, flags);
+		dhd_prot_packet_free(dhd, ioct_resp.cmn_hdr.request_id);
+		DHD_GENERAL_UNLOCK(dhd, flags);
+	}
+
+	return (int)(ioct_resp.compl_hdr.status);
+}
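+
+/* SET path of an ioctl: fill the shared ioctl request buffer and wait for the
+ * completion from the dongle.
+ */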
+static int
+dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	int ret = 0;
+
+	DHD_TRACE(("%s: Enter \n", __FUNCTION__));
+	DHD_TRACE(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return -EIO;
+	}
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (dhd->hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	/* Fill up msgbuf for ioctl req */
+	ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
+
+	DHD_INFO(("ACTION %d ifidx %d cmd %d len %d \n",
+		action, ifidx, cmd, len));
+
+	ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va);
+
+	return ret;
+}
+/* Handles a protocol control response asynchronously */
+int dhd_prot_ctl_complete(dhd_pub_t *dhd)
+{
+	return 0;
+}
+
+/* Check for and handle local prot-specific iovar commands */
+int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
+                             void *params, int plen, void *arg, int len, bool set)
+{
+	return BCME_UNSUPPORTED;
+}
+
+/* Add prot dump output to a buffer */
+void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+
+}
+
+/* Update local copy of dongle statistics */
+void dhd_prot_dstats(dhd_pub_t *dhd)
+{
+	return;
+}
+
+int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+	uint reorder_info_len, void **pkt, uint32 *free_buf_count)
+{
+	return 0;
+}
+/* post a dummy message to interrupt dongle */
+/* used to process cons commands */
+int
+dhd_post_dummy_msg(dhd_pub_t *dhd)
+{
+	unsigned long flags;
+	hostevent_hdr_t *hevent = NULL;
+	uint16 alloced = 0;
+
+	dhd_prot_t *prot = dhd->prot;
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	hevent = (hostevent_hdr_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+	if (hevent == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return -1;
+	}
+
+	/* CMN msg header */
+	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
+	hevent->msg.if_id = 0;
+
+	/* Event payload */
+	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
+
+	/* Since we are filling the data directly into the bufptr obtained
+	 * from the msgbuf, we can directly call the write_complete
+	 */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, hevent,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return 0;
+}
+
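+/* Reserve space for nitems in a submission ring. If the ring appears full,
+ * refresh the cached read pointer (from the DMA'd index block or from TCM)
+ * and retry the allocation once.
+ */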
+static void * BCMFASTPATH
+dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced)
+{
+	void * ret_buf;
+	uint16 r_index = 0;
+
+	/* Alloc space for nitems in the ring */
+	ret_buf = prot_get_ring_space(ring, nitems, alloced);
+
+	if (ret_buf == NULL) {
+		/* if alloc failed, invalidate cached read ptr */
+		if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+			r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx);
+			ring->ringstate->r_offset = r_index;
+		} else
+			dhd_bus_cmn_readshared(dhd->bus, &(RING_READ_PTR(ring)),
+				RING_READ_PTR, ring->idx);
+
+		/* Try allocating once more */
+		ret_buf = prot_get_ring_space(ring, nitems, alloced);
+
+		if (ret_buf == NULL) {
+			DHD_INFO(("%s: Ring space not available  \n", ring->name));
+			return NULL;
+		}
+	}
+
+	/* Return alloced space */
+	return ret_buf;
+}
+
+#define DHD_IOCTL_REQ_PKTID	0xFFFE
+
+/* Non-inline ioctl request */
+/* Form an ioctl request first as per the ioctptr_reqst_hdr_t header in the circular buffer */
+/* Form a separate request buffer where a 4 byte cmn header is added in the front */
+/* buf contents from the parent function are copied to the remaining section of this buffer */
+static int
+dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
+{
+	dhd_prot_t *prot = dhd->prot;
+	ioctl_req_msg_t *ioct_rqst;
+	void * ioct_buf;	/* For ioctl payload */
+	uint16  rqstlen, resplen;
+	unsigned long flags;
+	uint16 alloced = 0;
+
+	rqstlen = len;
+	resplen = len;
+
+	/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
+	/* 8K allocation of dongle buffer fails */
+	/* dhd doesn't give separate input & output buf lens */
+	/* so making the assumption that input length can never be more than 1.5k */
+	rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	/* Request for cbuf space */
+	ioct_rqst = (ioctl_req_msg_t*)dhd_alloc_ring_space(dhd, prot->h2dring_ctrl_subn,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,	&alloced);
+	if (ioct_rqst == NULL) {
+		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return -1;
+	}
+
+	/* Common msg buf hdr */
+	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
+	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
+	ioct_rqst->cmn_hdr.flags = 0;
+	ioct_rqst->cmn_hdr.request_id = DHD_IOCTL_REQ_PKTID;
+
+	ioct_rqst->cmd = htol32(cmd);
+	ioct_rqst->output_buf_len = htol16(resplen);
+	ioct_rqst->trans_id = prot->ioctl_trans_id ++;
+
+	/* populate ioctl buffer info */
+	ioct_rqst->input_buf_len = htol16(rqstlen);
+	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
+	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
+	/* copy ioct payload */
+	ioct_buf = (void *) prot->ioctbuf.va;
+
+	if (buf)
+		memcpy(ioct_buf, buf, len);
+
+	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
+
+	if ((ulong)ioct_buf % DMA_ALIGN_LEN)
+		DHD_ERROR(("host ioct address unaligned !!!!! \n"));
+
+	DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
+		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
+		ioct_rqst->trans_id));
+
+	/* upd wrt ptr and raise interrupt */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return 0;
+}
+
+/* Packet to PacketID mapper */
+typedef struct {
+	ulong native;
+	dmaaddr_t pa;
+	uint32 pa_len;
+	uchar dma;
+} pktid_t;
+
+typedef struct {
+	void	*osh;
+	void	*mwbmap_hdl;
+	pktid_t *pktid_list;
+	uint32	count;
+} pktid_map_t;
+
+
+void *pktid_map_init(void *osh, uint32 count)
+{
+	pktid_map_t *handle;
+
+	handle = (pktid_map_t *) MALLOC(osh, sizeof(pktid_map_t));
+	if (handle == NULL) {
+		printf("%s:%d: MALLOC failed for size %d\n",
+			__FUNCTION__, __LINE__, (uint32) sizeof(pktid_map_t));
+		return NULL;
+	}
+	handle->osh = osh;
+	handle->count = count;
+	handle->mwbmap_hdl = bcm_mwbmap_init(osh, count);
+	if (handle->mwbmap_hdl == NULL) {
+		printf("%s:%d: bcm_mwbmap_init failed for count %d\n",
+			__FUNCTION__, __LINE__, count);
+		MFREE(osh, handle, sizeof(pktid_map_t));
+		return NULL;
+	}
+
+	handle->pktid_list = (pktid_t *) MALLOC(osh, sizeof(pktid_t) * (count+1));
+	if (handle->pktid_list == NULL) {
+		printf("%s:%d: MALLOC failed for count %d / total = %d\n",
+			__FUNCTION__, __LINE__, count, (uint32) sizeof(pktid_t) * count);
+		bcm_mwbmap_fini(osh, handle->mwbmap_hdl);
+		MFREE(osh, handle, sizeof(pktid_map_t));
+		return NULL;
+	}
+
+	return handle;
+}
+
+void
+pktid_map_uninit(void *pktid_map_handle)
+{
+	pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
+	uint32 ix;
+
+	if (handle != NULL) {
+		void *osh = handle->osh;
+		for (ix = 0; ix < MAX_PKTID_ITEMS; ix++)
+		{
+			if (!bcm_mwbmap_isfree(handle->mwbmap_hdl, ix)) {
+				/* Mark the slot as free */
+				bcm_mwbmap_free(handle->mwbmap_hdl, ix);
+				/*
+				 * Here we can do DMA unmapping for 32 bit also.
+				 * Since this is in the removal path, it will not
+				 * affect performance.
+				 */
+				DMA_UNMAP(osh, handle->pktid_list[ix+1].pa,
+					(uint) handle->pktid_list[ix+1].pa_len,
+					handle->pktid_list[ix+1].dma, 0, 0);
+				PKTFREE(osh, (unsigned long*)handle->pktid_list[ix+1].native, TRUE);
+			}
+		}
+		bcm_mwbmap_fini(osh, handle->mwbmap_hdl);
+		MFREE(osh, handle->pktid_list, sizeof(pktid_t) * (handle->count+1));
+		MFREE(osh, handle, sizeof(pktid_map_t));
+	}
+	return;
+}
+
+uint32 BCMFASTPATH
+pktid_map_unique(void *pktid_map_handle, void *pkt, dmaaddr_t physaddr, uint32 physlen, uint32 dma)
+{
+	uint32 id;
+	pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
+
+	if (handle == NULL) {
+		printf("%s:%d: Error !!! pktid_map_unique called without initing pktid_map\n",
+			__FUNCTION__, __LINE__);
+		return 0;
+	}
+	id = bcm_mwbmap_alloc(handle->mwbmap_hdl);
+	if (id == BCM_MWBMAP_INVALID_IDX) {
+		printf("%s:%d: bcm_mwbmap_alloc failed. Free Count = %d\n",
+			__FUNCTION__, __LINE__, bcm_mwbmap_free_cnt(handle->mwbmap_hdl));
+		return 0;
+	}
+
+	/* id=0 is invalid as we use this for error checking in the dongle */
+	id += 1;
+	handle->pktid_list[id].native = (ulong) pkt;
+	handle->pktid_list[id].pa     = physaddr;
+	handle->pktid_list[id].pa_len = (uint32) physlen;
+	handle->pktid_list[id].dma = (uchar)dma;
+
+	return id;
+}
+
+void * BCMFASTPATH
+pktid_get_packet(void *pktid_map_handle, uint32 id, dmaaddr_t *physaddr, uint32 *physlen)
+{
+	void *native = NULL;
+	pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
+	if (handle == NULL) {
+		printf("%s:%d: Error !!! pktid_get_packet called without initing pktid_map\n",
+			__FUNCTION__, __LINE__);
+		return NULL;
+	}
+
+	/* Debug check */
+	if (bcm_mwbmap_isfree(handle->mwbmap_hdl, (id-1))) {
+		printf("%s:%d: Error !!!. slot (%d/0x%04x) free but the app is using it.\n",
+			__FUNCTION__, __LINE__, (id-1), (id-1));
+		return NULL;
+	}
+
+	native = (void *) handle->pktid_list[id].native;
+	*physaddr = handle->pktid_list[id].pa;
+	*physlen  = (uint32) handle->pktid_list[id].pa_len;
+
+	/* Mark the slot as free */
+	bcm_mwbmap_free(handle->mwbmap_hdl, (id-1));
+
+	return native;
+}
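+
+/* Allocate and initialise a message buffer ring: ring info, DMA-coherent ring
+ * memory (each item is pre-marked with PCIE_D2H_RESET_MARK) and ring state.
+ */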
+static msgbuf_ring_t*
+prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item, uint16 len_item, uint16 ringid)
+{
+	uint alloced = 0;
+	msgbuf_ring_t *ring;
+	dmaaddr_t physaddr;
+	uint16 size, cnt;
+	uint32 *marker;
+
+	ASSERT(name);
+	BCM_REFERENCE(physaddr);
+
+	/* allocate ring info */
+	ring = MALLOC(prot->osh, sizeof(msgbuf_ring_t));
+	if (ring == NULL) {
+		ASSERT(0);
+		return NULL;
+	}
+	bzero(ring, sizeof(*ring));
+
+	/* Init name */
+	strncpy(ring->name, name, sizeof(ring->name) - 1);
+
+	/* Ringid in the order given in bcmpcie.h */
+	ring->idx = ringid;
+
+	/* init ringmem */
+	ring->ringmem = MALLOC(prot->osh, sizeof(ring_mem_t));
+	if (ring->ringmem == NULL)
+		goto fail;
+	bzero(ring->ringmem, sizeof(*ring->ringmem));
+
+	ring->ringmem->max_item = max_item;
+	ring->ringmem->len_items = len_item;
+	size = max_item * len_item;
+
+	/* Ring memory allocation */
+	ring->ring_base.va = DMA_ALLOC_CONSISTENT(prot->osh, size, DMA_ALIGN_LEN,
+		&alloced, &ring->ring_base.pa, &ring->ring_base.dmah);
+
+	if (ring->ring_base.va == NULL)
+		goto fail;
+	ring->ringmem->base_addr.high_addr = htol32(PHYSADDRHI(ring->ring_base.pa));
+	ring->ringmem->base_addr.low_addr = htol32(PHYSADDRLO(ring->ring_base.pa));
+
+	ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0);
+	bzero(ring->ring_base.va, size);
+	for (cnt = 0; cnt < max_item; cnt++) {
+		marker = (uint32 *)ring->ring_base.va +
+			(cnt + 1) * len_item / sizeof(uint32) - 1;
+		*marker = PCIE_D2H_RESET_MARK;
+	}
+	OSL_CACHE_FLUSH((void *) ring->ring_base.va, size);
+
+	/* Ring state init */
+	ring->ringstate	= MALLOC(prot->osh, sizeof(ring_state_t));
+	if (ring->ringstate == NULL)
+		goto fail;
+	bzero(ring->ringstate, sizeof(*ring->ringstate));
+
+	DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
+		"ring start %p buf phys addr  %x:%x \n",
+		ring->name, ring->ringmem->max_item, ring->ringmem->len_items,
+		size, ring->ring_base.va, ring->ringmem->base_addr.high_addr,
+		ring->ringmem->base_addr.low_addr));
+	return ring;
+fail:
+	if (ring->ring_base.va) {
+		PHYSADDRHISET(physaddr, ring->ringmem->base_addr.high_addr);
+		PHYSADDRLOSET(physaddr, ring->ringmem->base_addr.low_addr);
+		size = ring->ringmem->max_item * ring->ringmem->len_items;
+		DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa, NULL);
+		ring->ring_base.va = NULL;
+	}
+	if (ring->ringmem)
+		MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t));
+	MFREE(prot->osh, ring, sizeof(msgbuf_ring_t));
+	ASSERT(0);
+	return NULL;
+}
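+
+/* Publish the ring base address, max item count and item length to the dongle
+ * through the shared area.
+ */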
+static void
+dhd_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+	/* update buffer address of ring */
+	dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->base_addr,
+		sizeof(ring->ringmem->base_addr), RING_BUF_ADDR, ring->idx);
+
+	/* Update max items possible in ring */
+	dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->max_item,
+		sizeof(ring->ringmem->max_item), RING_MAX_ITEM, ring->idx);
+
+	/* Update length of each item in the ring */
+	dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->len_items,
+		sizeof(ring->ringmem->len_items), RING_LEN_ITEMS, ring->idx);
+
+	/* ring inited */
+	ring->inited = TRUE;
+}
+static void
+dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+	dmaaddr_t phyaddr;
+	uint16 size;
+	dhd_prot_t *prot = dhd->prot;
+
+	BCM_REFERENCE(phyaddr);
+
+	if (ring == NULL)
+		return;
+
+
+	if (ring->ringmem == NULL) {
+		DHD_ERROR(("%s: ring->ringmem is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	ring->inited = FALSE;
+
+	PHYSADDRHISET(phyaddr, ring->ringmem->base_addr.high_addr);
+	PHYSADDRLOSET(phyaddr, ring->ringmem->base_addr.low_addr);
+	size = ring->ringmem->max_item * ring->ringmem->len_items;
+	/* Free up ring */
+	if (ring->ring_base.va) {
+		DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa,
+			ring->ring_base.dmah);
+		ring->ring_base.va = NULL;
+	}
+
+	/* Free up ring mem space */
+	if (ring->ringmem) {
+		MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t));
+		ring->ringmem = NULL;
+	}
+
+	/* Free up ring state info */
+	if (ring->ringstate) {
+		MFREE(prot->osh, ring->ringstate, sizeof(ring_state_t));
+		ring->ringstate = NULL;
+	}
+
+	/* free up ring info */
+	MFREE(prot->osh, ring, sizeof(msgbuf_ring_t));
+}
+/* Assumes only one index is updated at a time */
+static void *BCMFASTPATH
+prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced)
+{
+	void *ret_ptr = NULL;
+	uint16 ring_avail_cnt;
+
+	ASSERT(nitems <= RING_MAX_ITEM(ring));
+
+	ring_avail_cnt = CHECK_WRITE_SPACE(RING_READ_PTR(ring), RING_WRITE_PTR(ring),
+		RING_MAX_ITEM(ring));
+
+	if (ring_avail_cnt == 0) {
+		DHD_INFO(("RING space not available on ring %s for %d items \n",
+			ring->name, nitems));
+		DHD_INFO(("write %d read %d \n\n", RING_WRITE_PTR(ring),
+			RING_READ_PTR(ring)));
+		return NULL;
+	}
+	*alloced = MIN(nitems, ring_avail_cnt);
+
+	/* Return next available space */
+	ret_ptr = (char*)HOST_RING_BASE(ring) + (RING_WRITE_PTR(ring) * RING_LEN_ITEMS(ring));
+
+	/* Update write pointer */
+	if ((RING_WRITE_PTR(ring) + *alloced) == RING_MAX_ITEM(ring))
+		RING_WRITE_PTR(ring) = 0;
+	else if ((RING_WRITE_PTR(ring) + *alloced) < RING_MAX_ITEM(ring))
+		RING_WRITE_PTR(ring) += *alloced;
+	else {
+		/* Should never hit this */
+		ASSERT(0);
+		return NULL;
+	}
+
+	return ret_ptr;
+}
+
+static void BCMFASTPATH
+prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 nitems)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	/* cache flush */
+	OSL_CACHE_FLUSH(p, RING_LEN_ITEMS(ring) * nitems);
+
+	/* update write pointer */
+	/* If dma'ing h2d indices are supported
+	 * update the values in the host memory
+	 * o/w update the values in TCM
+	 */
+	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+		dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX,
+			ring->idx, (uint16)RING_WRITE_PTR(ring));
+	else
+		dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(ring)),
+			sizeof(uint16), RING_WRITE_PTR, ring->idx);
+
+	/* raise h2d interrupt */
+	prot->mb_ring_fn(dhd->bus, RING_WRITE_PTR(ring));
+}
+
+/* If dma'ing h2d indices are supported
+ * this function updates the indices in
+ * the host memory
+ */
+static void
+dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	uint32 *ptr = NULL;
+	uint16 offset = 0;
+
+	switch (type) {
+		case H2D_DMA_WRITEINDX:
+			ptr = (uint32 *)(prot->h2d_dma_writeindx_buf.va);
+
+			/* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
+			 * but in host memory their indices start
+			 * after H2D Common Rings
+			 */
+			if (ringid >= BCMPCIE_COMMON_MSGRINGS)
+				offset = ringid - BCMPCIE_COMMON_MSGRINGS +
+					BCMPCIE_H2D_COMMON_MSGRINGS;
+			else
+				offset = ringid;
+			ptr += offset;
+
+			*ptr = htol16(new_index);
+
+			/* cache flush */
+			OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va,
+				prot->h2d_dma_writeindx_buf_len);
+
+			break;
+
+		case D2H_DMA_READINDX:
+			ptr = (uint32 *)(prot->d2h_dma_readindx_buf.va);
+
+			/* D2H rings start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
+			offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
+			ptr += offset;
+
+			*ptr = htol16(new_index);
+			/* cache flush */
+			OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va,
+				prot->d2h_dma_readindx_buf_len);
+
+			break;
+
+		default:
+			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+				__FUNCTION__));
+
+			break;
+	}
+	DHD_TRACE(("%s: Data 0x%p, ringId %d, new_index %d\n",
+		__FUNCTION__, ptr, ringid, new_index));
+}
+
+
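+/* Read a DMA'd (host memory) read/write index for the given ring, invalidating
+ * the cache first. Flow ring indices are stored after the H2D common ring
+ * indices; D2H ring indices are offset by BCMPCIE_H2D_COMMON_MSGRINGS.
+ */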
+static uint16
+dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid)
+{
+	uint32 *ptr = NULL;
+	uint16 data = 0;
+	uint16 offset = 0;
+
+	switch (type) {
+		case H2D_DMA_WRITEINDX:
+			OSL_CACHE_INV((void *)dhd->prot->h2d_dma_writeindx_buf.va,
+				dhd->prot->h2d_dma_writeindx_buf_len);
+			ptr = (uint32 *)(dhd->prot->h2d_dma_writeindx_buf.va);
+
+			/* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
+			 * but in host memory their indices start
+			 * after H2D Common Rings
+			 */
+			if (ringid >= BCMPCIE_COMMON_MSGRINGS)
+				offset = ringid - BCMPCIE_COMMON_MSGRINGS +
+					BCMPCIE_H2D_COMMON_MSGRINGS;
+			else
+				offset = ringid;
+			ptr += offset;
+
+			data = LTOH16((uint16)*ptr);
+			break;
+
+		case H2D_DMA_READINDX:
+			OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va,
+				dhd->prot->h2d_dma_readindx_buf_len);
+			ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va);
+
+			/* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
+			 * but in host memory their indices start
+			 * after H2D Common Rings
+			 */
+			if (ringid >= BCMPCIE_COMMON_MSGRINGS)
+				offset = ringid - BCMPCIE_COMMON_MSGRINGS +
+					BCMPCIE_H2D_COMMON_MSGRINGS;
+			else
+				offset = ringid;
+			ptr += offset;
+
+			data = LTOH16((uint16)*ptr);
+			break;
+
+		case D2H_DMA_WRITEINDX:
+			OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va,
+				dhd->prot->d2h_dma_writeindx_buf_len);
+			ptr = (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va);
+
+			/* D2H rings start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
+			offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
+			ptr += offset;
+
+			data = LTOH16((uint16)*ptr);
+			break;
+
+		case D2H_DMA_READINDX:
+			OSL_CACHE_INV((void *)dhd->prot->d2h_dma_readindx_buf.va,
+				dhd->prot->d2h_dma_readindx_buf_len);
+			ptr = (uint32 *)(dhd->prot->d2h_dma_readindx_buf.va);
+
+			/* D2H rings start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
+			offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
+			ptr += offset;
+
+			data = LTOH16((uint16)*ptr);
+			break;
+
+		default:
+			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+				__FUNCTION__));
+
+			break;
+	}
+	DHD_TRACE(("%s: Data 0x%p, data %d\n", __FUNCTION__, ptr, data));
+	return (data);
+}
+
+/* D2H direction: get next space to read from */
+static uint8*
+prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t * ring, uint16* available_len)
+{
+	uint16 w_ptr;
+	uint16 r_ptr;
+	uint16 depth;
+	void* ret_addr = NULL;
+	uint16 d2h_w_index = 0;
+
+	DHD_TRACE(("%s: h2d_dma_readindx_buf %p, d2h_dma_writeindx_buf %p\n",
+		__FUNCTION__, (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va),
+		(uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va)));
+
+	/* refresh the cached write pointer from the DMA'd index block or TCM */
+	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+		/* DMAing write/read indices supported */
+		d2h_w_index = dhd_get_dmaed_index(dhd, D2H_DMA_WRITEINDX, ring->idx);
+		ring->ringstate->w_offset = d2h_w_index;
+	} else
+		dhd_bus_cmn_readshared(dhd->bus,
+			&(RING_WRITE_PTR(ring)), RING_WRITE_PTR, ring->idx);
+
+	w_ptr = ring->ringstate->w_offset;
+	r_ptr = ring->ringstate->r_offset;
+	depth = ring->ringmem->max_item;
+
+	/* check for avail space */
+	*available_len = READ_AVAIL_SPACE(w_ptr, r_ptr, depth);
+	if (*available_len == 0)
+		return NULL;
+
+	ASSERT(*available_len <= ring->ringmem->max_item);
+
+	/* if space available, calculate address to be read */
+	ret_addr = (char*)ring->ring_base.va + (r_ptr * ring->ringmem->len_items);
+
+	/* update read pointer */
+	if ((ring->ringstate->r_offset + *available_len) >= ring->ringmem->max_item)
+		ring->ringstate->r_offset = 0;
+	else
+		ring->ringstate->r_offset += *available_len;
+
+	ASSERT(ring->ringstate->r_offset < ring->ringmem->max_item);
+
+	/* convert index to bytes */
+	*available_len = *available_len * ring->ringmem->len_items;
+
+	/* return read address */
+	return ret_addr;
+}
+static void
+prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+	/* update read index */
+	/* If dma'ing h2d indices supported
+	 * update the r -indices in the
+	 * host memory o/w in TCM
+	 */
+	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+		dhd_set_dmaed_index(dhd, D2H_DMA_READINDX,
+			ring->idx, (uint16)RING_READ_PTR(ring));
+	else
+		dhd_bus_cmn_writeshared(dhd->bus, &(RING_READ_PTR(ring)),
+			sizeof(uint16), RING_READ_PTR, ring->idx);
+}
+
+static void
+prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+	dhd_prot_t *prot;
+
+	if (!dhd || !dhd->prot)
+		return;
+
+	prot = dhd->prot;
+	prot->rx_cpln_early_upd_idx = RING_READ_PTR(ring);
+}
+
+static void
+prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+	dhd_prot_t *prot;
+
+	if (!dhd || !dhd->prot)
+		return;
+
+	prot = dhd->prot;
+
+	if (prot->rx_cpln_early_upd_idx == RING_READ_PTR(ring))
+		return;
+
+	if (++prot->rx_cpln_early_upd_idx >= RING_MAX_ITEM(ring))
+		prot->rx_cpln_early_upd_idx = 0;
+
+	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+		dhd_set_dmaed_index(dhd, D2H_DMA_READINDX,
+			ring->idx, (uint16)prot->rx_cpln_early_upd_idx);
+	else
+		dhd_bus_cmn_writeshared(dhd->bus, &(prot->rx_cpln_early_upd_idx),
+			sizeof(uint16), RING_READ_PTR, ring->idx);
+}
+
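+/* Create an H2D tx flow ring: attach a new msgbuf ring for the flow and send a
+ * MSG_TYPE_FLOW_RING_CREATE request describing it on the control submission
+ * ring.
+ */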
+int
+dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_flowring_create_request_t *flow_create_rqst;
+	msgbuf_ring_t *msgbuf_flow_info;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 hdrlen = sizeof(tx_flowring_create_request_t);
+	uint16 msglen = hdrlen;
+	unsigned long flags;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	uint16 alloced = 0;
+
+	if (!(msgbuf_flow_info = prot_ring_attach(prot, "h2dflr",
+		H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
+		BCMPCIE_H2D_TXFLOWRINGID +
+		(flow_ring_node->flowid - BCMPCIE_H2D_COMMON_MSGRINGS)))) {
+		DHD_ERROR(("%s: kmalloc for H2D TX Flow ring failed\n", __FUNCTION__));
+		return BCME_NOMEM;
+	}
+	/* Save the new ring's handle in the flow ring node */
+	flow_ring_node->prot_info = (void *)msgbuf_flow_info;
+
+	/* align it to 4 bytes, so that all start addresses from cbuf are 4 byte aligned */
+	msglen = align(msglen, DMA_ALIGN_LEN);
+
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	/* Request for ring buffer space */
+	flow_create_rqst = (tx_flowring_create_request_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+	if (flow_create_rqst == NULL) {
+		DHD_ERROR(("%s: No space in control ring for Flow create req\n", __FUNCTION__));
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return BCME_NOMEM;
+	}
+	msgbuf_flow_info->inited = TRUE;
+
+	/* Common msg buf hdr */
+	flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
+	flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_create_rqst->msg.request_id = htol16(0); /* TBD */
+
+	/* Update flow create message */
+	flow_create_rqst->tid = flow_ring_node->flow_info.tid;
+	flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
+	memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
+	flow_create_rqst->flow_ring_ptr.low_addr = msgbuf_flow_info->ringmem->base_addr.low_addr;
+	flow_create_rqst->flow_ring_ptr.high_addr = msgbuf_flow_info->ringmem->base_addr.high_addr;
+	flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM);
+	flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
+	bcm_ether_ntoa((struct ether_addr *)flow_ring_node->flow_info.da, eabuf);
+	DHD_ERROR(("%s Send Flow create Req: flow ID %d for peer %s prio %d ifindex %d\n",
+		__FUNCTION__, flow_ring_node->flowid, eabuf, flow_ring_node->flow_info.tid,
+		flow_ring_node->flow_info.ifindex));
+
+	/* upd wrt ptr and raise interrupt */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_create_rqst,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+	/* If dma'ing indices supported
+	 * update the w-index in host memory o/w in TCM
+	 */
+	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+		dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX,
+			msgbuf_flow_info->idx, (uint16)RING_WRITE_PTR(msgbuf_flow_info));
+	else
+		dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(msgbuf_flow_info)),
+			sizeof(uint16), RING_WRITE_PTR, msgbuf_flow_info->idx);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return BCME_OK;
+}
+
+static void
+dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+	tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)buf;
+
+	DHD_ERROR(("%s Flow create Response status = %d Flow %d\n", __FUNCTION__,
+		flow_create_resp->cmplt.status, flow_create_resp->cmplt.flow_ring_id));
+
+	dhd_bus_flow_ring_create_response(dhd->bus, flow_create_resp->cmplt.flow_ring_id,
+		flow_create_resp->cmplt.status);
+}
+
+void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
+{
+	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+	dhd_prot_ring_detach(dhd, flow_ring);
+	DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
+}
+
+void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
+	struct bcmstrbuf *strbuf)
+{
+	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+	uint16 rd, wrt;
+	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_READ_PTR, flow_ring->idx);
+	dhd_bus_cmn_readshared(dhd->bus, &wrt, RING_WRITE_PTR, flow_ring->idx);
+	bcm_bprintf(strbuf, "RD %d WR %d\n", rd, wrt);
+}
+
+void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+	bcm_bprintf(strbuf, "CtrlPost: ");
+	dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_ctrl_subn, strbuf);
+	bcm_bprintf(strbuf, "CtrlCpl: ");
+	dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_ctrl_cpln, strbuf);
+	bcm_bprintf(strbuf, "RxPost: ");
+	bcm_bprintf(strbuf, "RBP %d ", dhd->prot->rxbufpost);
+	dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_rxp_subn, strbuf);
+	bcm_bprintf(strbuf, "RxCpl: ");
+	dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_rx_cpln, strbuf);
+	if (dhd_bus_is_txmode_push(dhd->bus)) {
+		bcm_bprintf(strbuf, "TxPost: ");
+		dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_txp_subn, strbuf);
+	}
+	bcm_bprintf(strbuf, "TxCpl: ");
+	dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_tx_cpln, strbuf);
+	bcm_bprintf(strbuf, "active_tx_count %d	 pktidmap_avail %d\n",
+		dhd->prot->active_tx_count,
+		dhd_pktid_map_avail_cnt(dhd->prot->pktid_map_handle));
+}
+
+int
+dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_flowring_delete_request_t *flow_delete_rqst;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 msglen = sizeof(tx_flowring_delete_request_t);
+	unsigned long flags;
+	uint16 alloced = 0;
+
+	/* align it to 4 bytes, so that all start addresses from cbuf are 4 byte aligned */
+	msglen = align(msglen, DMA_ALIGN_LEN);
+
+	/* Request for ring buffer space */
+	DHD_GENERAL_LOCK(dhd, flags);
+	flow_delete_rqst = (tx_flowring_delete_request_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+	if (flow_delete_rqst == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DHD_ERROR(("%s Flow Delete req failure no ring mem %d \n", __FUNCTION__, msglen));
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
+	flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_delete_rqst->msg.request_id = htol16(0); /* TBD */
+
+	/* Update Delete info */
+	flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	flow_delete_rqst->reason = htol16(BCME_OK);
+
+	DHD_ERROR(("%s sending FLOW RING Delete req msglen %d \n", __FUNCTION__, msglen));
+
+	/* upd wrt ptr and raise interrupt */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_delete_rqst,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return BCME_OK;
+}
+
+static void
+dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+	tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)buf;
+
+	DHD_INFO(("%s Flow Delete Response status = %d \n", __FUNCTION__,
+		flow_delete_resp->cmplt.status));
+
+	dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
+		flow_delete_resp->cmplt.status);
+}
+
+int
+dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_flowring_flush_request_t *flow_flush_rqst;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 msglen = sizeof(tx_flowring_flush_request_t);
+	unsigned long flags;
+	uint16 alloced = 0;
+
+	/* align it to 4 bytes, so that all start addresses from cbuf are 4 byte aligned */
+	msglen = align(msglen, DMA_ALIGN_LEN);
+
+	/* Request for ring buffer space */
+	DHD_GENERAL_LOCK(dhd, flags);
+	flow_flush_rqst = (tx_flowring_flush_request_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+	if (flow_flush_rqst == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DHD_ERROR(("%s Flow Flush req failure no ring mem %d \n", __FUNCTION__, msglen));
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
+	flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_flush_rqst->msg.request_id = htol16(0); /* TBD */
+
+	flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	flow_flush_rqst->reason = htol16(BCME_OK);
+
+	DHD_INFO(("%s sending FLOW RING Flush req msglen %d \n", __FUNCTION__, msglen));
+
+	/* upd wrt ptr and raise interrupt */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_flush_rqst,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return BCME_OK;
+}
+
+static void
+dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+	tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)buf;
+
+	DHD_INFO(("%s Flow Flush Response status = %d \n", __FUNCTION__,
+		flow_flush_resp->cmplt.status));
+
+	dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
+		flow_flush_resp->cmplt.status);
+}
+
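+/* Dump the host memory index blocks used when ring read/write indices are
+ * DMA'd to host memory, into the caller's string buffer.
+ */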
+int
+dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
+{
+	uint32 *ptr;
+	uint32 value;
+	uint32 i;
+	uint8 txpush = 0;
+	uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus, &txpush);
+
+	OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va,
+		dhd->prot->d2h_dma_writeindx_buf_len);
+
+	ptr = (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va);
+
+	bcm_bprintf(b, "\n max_tx_queues %d, txpush mode %d\n", max_h2d_queues, txpush);
+
+	bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
+	ptr++;
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
+
+	if (txpush) {
+		ptr++;
+		value = ltoh32(*ptr);
+		bcm_bprintf(b, "\tH2D TXPOST value 0x%04x\n", value);
+	} else {
+		ptr++;
+		bcm_bprintf(b, "RPTR block Flow rings, 0x%04x\n", ptr);
+		for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
+			value = ltoh32(*ptr);
+			bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
+			ptr++;
+		}
+	}
+
+	OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va,
+		dhd->prot->h2d_dma_readindx_buf_len);
+
+	ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va);
+
+	bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
+	ptr++;
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
+	ptr++;
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
+
+	return 0;
+}
+
+uint32
+dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
+{
+	dhd_prot_t *prot = dhd->prot;
+	if (rx)
+		prot->rx_metadata_offset = (uint16)val;
+	else
+		prot->tx_metadata_offset = (uint16)val;
+	return dhd_prot_metadatalen_get(dhd, rx);
+}
+
+uint32
+dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
+{
+	dhd_prot_t *prot = dhd->prot;
+	if (rx)
+		return prot->rx_metadata_offset;
+	else
+		return prot->tx_metadata_offset;
+}
+
+uint32
+dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+	dhd_prot_t *prot = dhd->prot;
+	if (set)
+		prot->txp_threshold = (uint16)val;
+	val = prot->txp_threshold;
+	return val;
+}
+
+#ifdef DHD_RX_CHAINING
+static INLINE void BCMFASTPATH
+dhd_rxchain_reset(rxchain_info_t *rxchain)
+{
+	rxchain->pkt_count = 0;
+}
+
+static void BCMFASTPATH
+dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
+{
+	uint8 *eh;
+	uint8 prio;
+	dhd_prot_t *prot = dhd->prot;
+	rxchain_info_t *rxchain = &prot->rxchain;
+
+	eh = PKTDATA(dhd->osh, pkt);
+	prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
+
+	/* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
+	/* so that the chain can be handed off to CTF bridge as is. */
+	if (rxchain->pkt_count == 0) {
+		/* First packet in chain */
+		rxchain->pkthead = rxchain->pkttail = pkt;
+
+		/* Keep a copy of ptr to ether_da, ether_sa and prio */
+		rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
+		rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
+		rxchain->h_prio = prio;
+		rxchain->ifidx = ifidx;
+		rxchain->pkt_count++;
+	} else {
+		if (PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
+			rxchain->h_da, rxchain->h_prio)) {
+			/* Same flow - keep chaining */
+			PKTSETCLINK(rxchain->pkttail, pkt);
+			rxchain->pkttail = pkt;
+			rxchain->pkt_count++;
+		} else {
+			/* Different flow - First release the existing chain */
+			dhd_rxchain_commit(dhd);
+
+			/* Create a new chain */
+			rxchain->pkthead = rxchain->pkttail = pkt;
+
+			/* Keep a copy of ptr to ether_da, ether_sa and prio */
+			rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
+			rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
+			rxchain->h_prio = prio;
+			rxchain->ifidx = ifidx;
+			rxchain->pkt_count++;
+		}
+	}
+
+	if ((!ETHER_ISMULTI(rxchain->h_da)) &&
+		((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
+		(((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
+		PKTSETCHAINED(dhd->osh, pkt);
+		PKTCINCRCNT(rxchain->pkthead);
+		PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
+	} else {
+		dhd_rxchain_commit(dhd);
+		return;
+	}
+
+	/* If we have hit the max chain length, dispatch the chain and reset */
+	if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
+		dhd_rxchain_commit(dhd);
+	}
+}
+
+static void BCMFASTPATH
+dhd_rxchain_commit(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	rxchain_info_t *rxchain = &prot->rxchain;
+
+	if (rxchain->pkt_count == 0)
+		return;
+
+	/* Release the packets to dhd_linux */
+	dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
+
+	/* Reset the chain */
+	dhd_rxchain_reset(rxchain);
+}
+#endif /* DHD_RX_CHAINING */
+
+static void
+dhd_prot_ring_clear(msgbuf_ring_t* ring)
+{
+	uint16 size;
+
+	DHD_TRACE(("%s\n", __FUNCTION__));
+
+	size = ring->ringmem->max_item * ring->ringmem->len_items;
+	ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0);
+	OSL_CACHE_INV((void *) ring->ring_base.va, size);
+	bzero(ring->ring_base.va, size);
+
+	OSL_CACHE_FLUSH((void *) ring->ring_base.va, size);
+
+	bzero(ring->ringstate, sizeof(*ring->ringstate));
+}
+
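+/* Reset protocol state when the bus is being stopped: clear all common rings,
+ * the ioctl/retbuf/scratch and index buffers, reset counters, tear down flow
+ * rings and clear the pktid map.
+ */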
+void
+dhd_prot_clear(dhd_pub_t *dhd)
+{
+	struct dhd_prot *prot = dhd->prot;
+
+	DHD_TRACE(("%s\n", __FUNCTION__));
+
+	if (prot == NULL)
+		return;
+
+	if (prot->h2dring_txp_subn)
+		dhd_prot_ring_clear(prot->h2dring_txp_subn);
+	if (prot->h2dring_rxp_subn)
+		dhd_prot_ring_clear(prot->h2dring_rxp_subn);
+	if (prot->h2dring_ctrl_subn)
+		dhd_prot_ring_clear(prot->h2dring_ctrl_subn);
+	if (prot->d2hring_tx_cpln)
+		dhd_prot_ring_clear(prot->d2hring_tx_cpln);
+	if (prot->d2hring_rx_cpln)
+		dhd_prot_ring_clear(prot->d2hring_rx_cpln);
+	if (prot->d2hring_ctrl_cpln)
+		dhd_prot_ring_clear(prot->d2hring_ctrl_cpln);
+
+	if (prot->retbuf.va) {
+		OSL_CACHE_INV((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
+		bzero(prot->retbuf.va, IOCT_RETBUF_SIZE);
+		OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
+	}
+
+	if (prot->ioctbuf.va) {
+		OSL_CACHE_INV((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+		bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+		OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+	}
+
+	if (prot->d2h_dma_scratch_buf.va) {
+		OSL_CACHE_INV((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+		bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+		OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+	}
+
+	if (prot->h2d_dma_readindx_buf.va) {
+		OSL_CACHE_INV((void *)prot->h2d_dma_readindx_buf.va,
+			prot->h2d_dma_readindx_buf_len);
+		bzero(prot->h2d_dma_readindx_buf.va,
+			prot->h2d_dma_readindx_buf_len);
+		OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va,
+			prot->h2d_dma_readindx_buf_len);
+	}
+
+	if (prot->h2d_dma_writeindx_buf.va) {
+		OSL_CACHE_INV((void *)prot->h2d_dma_writeindx_buf.va,
+			prot->h2d_dma_writeindx_buf_len);
+		bzero(prot->h2d_dma_writeindx_buf.va, prot->h2d_dma_writeindx_buf_len);
+		OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va,
+			prot->h2d_dma_writeindx_buf_len);
+	}
+
+	if (prot->d2h_dma_readindx_buf.va) {
+		OSL_CACHE_INV((void *)prot->d2h_dma_readindx_buf.va,
+			prot->d2h_dma_readindx_buf_len);
+		bzero(prot->d2h_dma_readindx_buf.va, prot->d2h_dma_readindx_buf_len);
+		OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va,
+			prot->d2h_dma_readindx_buf_len);
+	}
+
+	if (prot->d2h_dma_writeindx_buf.va) {
+		OSL_CACHE_INV((void *)prot->d2h_dma_writeindx_buf.va,
+			prot->d2h_dma_writeindx_buf_len);
+		bzero(prot->d2h_dma_writeindx_buf.va, prot->d2h_dma_writeindx_buf_len);
+		OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va,
+			prot->d2h_dma_writeindx_buf_len);
+	}
+
+	prot->rx_metadata_offset = 0;
+	prot->tx_metadata_offset = 0;
+
+	prot->rxbufpost = 0;
+	prot->cur_event_bufs_posted = 0;
+	prot->cur_ioctlresp_bufs_posted = 0;
+
+	prot->active_tx_count = 0;
+	prot->data_seq_no = 0;
+	prot->ioctl_seq_no = 0;
+	prot->pending = 0;
+	prot->lastcmd = 0;
+
+	prot->ioctl_trans_id = 1;
+
+	/* dhd_flow_rings_init is located at dhd_bus_start,
+	 *  so when stopping bus, flowrings shall be deleted
+	 */
+	dhd_flow_rings_deinit(dhd);
+	NATIVE_TO_PKTID_CLEAR(prot->pktid_map_handle);
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.c b/drivers/net/wireless/bcmdhd/dhd_pcie.c
new file mode 100644
index 0000000000000000000000000000000000000000..b26ba8596df99e4d6cedbc24e3ea725c037cf0b9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pcie.c
@@ -0,0 +1,4385 @@
+/*
+ * DHD Bus Module for PCIE
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_pcie.c 491657 2014-07-17 06:29:40Z $
+ */
+
+
+/* include files */
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdevs.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <hndpmu.h>
+#include <sbchipc.h>
+#if defined(DHD_DEBUG)
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <dngl_stats.h>
+#include <pcie_core.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_flowring.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+#include <bcmmsgbuf.h>
+#include <pcicfg.h>
+#include <dhd_pcie.h>
+#include <bcmpcie.h>
+#include <bcmendian.h>
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
+#define MAX_NVRAMBUF_SIZE	6144	/* max nvram buf size */
+
+#define ARMCR4REG_BANKIDX	(0x40/sizeof(uint32))
+#define ARMCR4REG_BANKPDA	(0x4C/sizeof(uint32))
+/* Temporary WAR to fix precommit until the sync issue between the trunk & precommit branches is resolved */
+
+int dhd_dongle_memsize;
+int dhd_dongle_ramsize;
+#ifdef DHD_DEBUG
+static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
+static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
+#endif
+static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
+static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
+	const char *name, void *params,
+	int plen, void *arg, int len, int val_size);
+static int dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 intval);
+static int dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus,
+	uint32 len, uint32 srcdelay, uint32 destdelay);
+static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
+static int _dhdpcie_download_firmware(struct dhd_bus *bus);
+static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
+static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
+static void dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
+static void dhdpci_bus_read_frames(dhd_bus_t *bus);
+static int dhdpcie_readshared(dhd_bus_t *bus);
+static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
+static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
+static void dhdpcie_bus_intr_enable(dhd_bus_t *bus);
+static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
+static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
+	bool dongle_isolation, bool reset_flag);
+static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
+static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
+static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
+static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
+static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
+static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
+static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size);
+static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
+static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
+static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
+extern void dhd_dpc_kill(dhd_pub_t *dhdp);
+
+#ifdef BCMEMBEDIMAGE
+static int dhdpcie_download_code_array(dhd_bus_t *bus);
+#endif /* BCMEMBEDIMAGE */
+
+
+
+#define     PCI_VENDOR_ID_BROADCOM          0x14e4
+
+/* IOVar table */
+enum {
+	IOV_INTR = 1,
+	IOV_MEMBYTES,
+	IOV_MEMSIZE,
+	IOV_SET_DOWNLOAD_STATE,
+	IOV_DEVRESET,
+	IOV_VARS,
+	IOV_MSI_SIM,
+	IOV_PCIE_LPBK,
+	IOV_CC_NVMSHADOW,
+	IOV_RAMSIZE,
+	IOV_RAMSTART,
+	IOV_SLEEP_ALLOWED,
+	IOV_PCIE_DMAXFER,
+	IOV_PCIE_SUSPEND,
+	IOV_PCIEREG,
+	IOV_PCIECFGREG,
+	IOV_PCIECOREREG,
+	IOV_PCIESERDESREG,
+	IOV_BAR0_SECWIN_REG,
+	IOV_SBREG,
+	IOV_DONGLEISOLATION,
+	IOV_LTRSLEEPON_UNLOOAD,
+	IOV_RX_METADATALEN,
+	IOV_TX_METADATALEN,
+	IOV_TXP_THRESHOLD,
+	IOV_BUZZZ_DUMP,
+	IOV_DUMP_RINGUPD_BLOCK,
+	IOV_DMA_RINGINDICES,
+	IOV_DB1_FOR_MB,
+	IOV_FLOW_PRIO_MAP
+};
+
+
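+/* Bus-level iovar table: looked up by name in dhd_bus_iovar_op() and
+ * dispatched to dhdpcie_bus_doiovar().
+ */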
+const bcm_iovar_t dhdpcie_iovars[] = {
+	{"intr",	IOV_INTR,	0,	IOVT_BOOL,	0 },
+	{"membytes",	IOV_MEMBYTES,	0,	IOVT_BUFFER,	2 * sizeof(int) },
+	{"memsize",	IOV_MEMSIZE,	0,	IOVT_UINT32,	0 },
+	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0,	IOVT_BOOL,	0 },
+	{"vars",	IOV_VARS,	0,	IOVT_BUFFER,	0 },
+	{"devreset",	IOV_DEVRESET,	0,	IOVT_BOOL,	0 },
+	{"pcie_lpbk",	IOV_PCIE_LPBK,	0,	IOVT_UINT32,	0 },
+	{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, IOVT_BUFFER, 0 },
+	{"ramsize",	IOV_RAMSIZE,	0,	IOVT_UINT32,	0 },
+	{"ramstart",	IOV_RAMSTART,	0,	IOVT_UINT32,	0 },
+	{"pciereg",	IOV_PCIEREG,	0,	IOVT_BUFFER,	2 * sizeof(int32) },
+	{"pciecfgreg",	IOV_PCIECFGREG,	0,	IOVT_BUFFER,	2 * sizeof(int32) },
+	{"pciecorereg",	IOV_PCIECOREREG,	0,	IOVT_BUFFER,	2 * sizeof(int32) },
+	{"pcieserdesreg",	IOV_PCIESERDESREG,	0,	IOVT_BUFFER,	3 * sizeof(int32) },
+	{"bar0secwinreg",	IOV_BAR0_SECWIN_REG,	0,	IOVT_BUFFER,	2 * sizeof(int32) },
+	{"sbreg",	IOV_SBREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"pcie_dmaxfer",	IOV_PCIE_DMAXFER,	0,	IOVT_BUFFER,	3 * sizeof(int32) },
+	{"pcie_suspend", IOV_PCIE_SUSPEND,	0,	IOVT_UINT32,	0 },
+	{"sleep_allowed",	IOV_SLEEP_ALLOWED,	0,	IOVT_BOOL,	0 },
+	{"dngl_isolation", IOV_DONGLEISOLATION,	0,	IOVT_UINT32,	0 },
+	{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD,	0,	IOVT_UINT32,	0 },
+	{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK,	0,	IOVT_BUFFER,	0 },
+	{"dma_ring_indices", IOV_DMA_RINGINDICES,	0,	IOVT_UINT32,	0},
+	{"rx_metadata_len", IOV_RX_METADATALEN,	0,	IOVT_UINT32,	0 },
+	{"tx_metadata_len", IOV_TX_METADATALEN,	0,	IOVT_UINT32,	0 },
+	{"db1_for_mb", IOV_DB1_FOR_MB,	0,	IOVT_UINT32,	0 },
+	{"txp_thresh", IOV_TXP_THRESHOLD,	0,	IOVT_UINT32,	0 },
+	{"buzzz_dump", IOV_BUZZZ_DUMP,		0,	IOVT_UINT32,	0 },
+	{"flow_prio_map", IOV_FLOW_PRIO_MAP,	0,	IOVT_UINT32,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+#define MAX_READ_TIMEOUT	(5 * 1000 * 1000)
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+int
+dhd_bus_register(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	return dhdpcie_bus_register();
+}
+
+void
+dhd_bus_unregister(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhdpcie_bus_unregister();
+	return;
+}
+
+
+/** returns a host virtual address */
+uint32 *
+dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
+{
+	return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size)
+{
+	REG_UNMAP((void*)(uintptr)addr);
+	return;
+}
+
+/**
+ * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
+ * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
+ * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
+ *
+ * 'tcm' is the *host* virtual address at which tcm is mapped.
+ */
+dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm)
+{
+	dhd_bus_t *bus;
+
+	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+	do {
+		if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+			break;
+		}
+		bzero(bus, sizeof(dhd_bus_t));
+		bus->regs = regs;
+		bus->tcm = tcm;
+		bus->osh = osh;
+
+		dll_init(&bus->const_flowring);
+
+		/* Attach pcie shared structure */
+		bus->pcie_sh = MALLOC(osh, sizeof(pciedev_shared_t));
+
+		/* dhd_common_init(osh); */
+
+		if (dhdpcie_dongle_attach(bus)) {
+			DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* software resources */
+		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
+			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+
+			break;
+		}
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		bus->db1_for_mb = TRUE;
+		bus->dhd->hang_report  = TRUE;
+
+		DHD_TRACE(("%s: EXIT SUCCESS\n",
+			__FUNCTION__));
+
+		return bus;
+	} while (0);
+
+	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
+
+	return NULL;
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chip;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+	ASSERT(bus);
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chiprev;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+	return bus->dhd;
+}
+
+void *
+dhd_bus_sih(struct dhd_bus *bus)
+{
+	return (void *)bus->sih;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+	return &bus->txq;
+}
+
+/* Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return  bus->sih->chip;
+}
+
+/* Get Chip Rev ID version */
+uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return bus->sih->chiprev;
+}
+
+/* Get Chip Pkg ID version */
+uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return bus->sih->chippkg;
+}
+
+
+/*
+
+Name:  dhdpcie_bus_isr
+
+Parameters:
+
+1: IN dhd_bus_t *bus   -- handle to the bus private data structure
+
+Return value:
+
+Status (TRUE or FALSE)
+
+Description:
+The interrupt service routine checks the status register,
+disables interrupts and queues the DPC if mailbox interrupts are raised.
+*/
+
+
+int32
+dhdpcie_bus_isr(dhd_bus_t *bus)
+{
+
+	do {
+			DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+			/* verify argument */
+			if (!bus) {
+				DHD_ERROR(("%s: bus is a null pointer, exit\n", __FUNCTION__));
+				break;
+			}
+
+			if (bus->dhd->busstate == DHD_BUS_DOWN) {
+				DHD_ERROR(("%s : bus is down. we have nothing to do\n",
+					__FUNCTION__));
+				break;
+			}
+
+			/*  Overall operation:
+			 *    - Mask further interrupts
+			 *    - Read/ack intstatus
+			 *    - Take action based on bits and state
+			 *    - Reenable interrupts (as per state)
+			 */
+
+			/* Count the interrupt call */
+			bus->intrcount++;
+
+			/* read interrupt status register!! Status bits will be cleared in DPC !! */
+			bus->ipend = TRUE;
+			dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */
+			bus->intdis = TRUE;
+
+#if defined(PCIE_ISR_THREAD)
+
+			DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
+			DHD_OS_WAKE_LOCK(bus->dhd);
+			while (dhd_bus_dpc(bus));
+			DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+			bus->dpc_sched = TRUE;
+			dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
+#endif /* defined(PCIE_ISR_THREAD) */
+
+			DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
+			return TRUE;
+
+	} while (0);
+
+	DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
+	return FALSE;
+}
+
+static bool
+dhdpcie_dongle_attach(dhd_bus_t *bus)
+{
+
+	osl_t *osh = bus->osh;
+	void *regsva = (void*)bus->regs;
+	uint16 devid = bus->cl_devid;
+	uint32 val;
+	sbpcieregs_t *sbpcieregs;
+
+	DHD_TRACE(("%s: ENTER\n",
+		__FUNCTION__));
+
+	bus->alp_only = TRUE;
+	bus->sih = NULL;
+
+	/* Set bar0 window to si_enum_base */
+	dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
+
+	/* si_attach() will provide an SI handle and scan the backplane */
+	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
+	                           &bus->vars, &bus->varsz))) {
+		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+		goto fail;
+	}
+
+
+	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+	sbpcieregs = (sbpcieregs_t*)(bus->regs);
+
+	/* WAR where the BAR1 window may not be sized properly */
+	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
+	val = R_REG(osh, &sbpcieregs->configdata);
+	W_REG(osh, &sbpcieregs->configdata, val);
+
+	/* Get info on the ARM and SOCRAM cores... */
+	/* Should really be qualified by device id */
+	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+	    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+	    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+		bus->armrev = si_corerev(bus->sih);
+	} else {
+		DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+			goto fail;
+		}
+	} else {
+		/* CR4 finds the RAM size from its TCMs rather than from SOCRAM */
+		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
+			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
+			goto fail;
+		}
+		/* also populate base address */
+		switch ((uint16)bus->sih->chip) {
+		case BCM4339_CHIP_ID:
+		case BCM4335_CHIP_ID:
+			bus->dongle_ram_base = CR4_4335_RAM_BASE;
+			break;
+		case BCM4358_CHIP_ID:
+		case BCM4356_CHIP_ID:
+		case BCM4354_CHIP_ID:
+		case BCM43567_CHIP_ID:
+		case BCM43569_CHIP_ID:
+		case BCM4350_CHIP_ID:
+		case BCM43570_CHIP_ID:
+			bus->dongle_ram_base = CR4_4350_RAM_BASE;
+			break;
+		case BCM4360_CHIP_ID:
+			bus->dongle_ram_base = CR4_4360_RAM_BASE;
+			break;
+		case BCM4345_CHIP_ID:
+			bus->dongle_ram_base = CR4_4345_RAM_BASE;
+			break;
+		case BCM43602_CHIP_ID:
+			bus->dongle_ram_base = CR4_43602_RAM_BASE;
+			break;
+		case BCM4349_CHIP_GRPID:
+			bus->dongle_ram_base = CR4_4349_RAM_BASE;
+			break;
+		default:
+			bus->dongle_ram_base = 0;
+			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
+			           __FUNCTION__, bus->dongle_ram_base));
+		}
+	}
+	bus->ramsize = bus->orig_ramsize;
+	if (dhd_dongle_memsize)
+		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
+
+	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
+	           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
+
+	bus->srmemsize = si_socram_srmem_size(bus->sih);
+
+
+	bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
+
+	/* Set the poll and/or interrupt flags */
+	bus->intr = (bool)dhd_intr;
+
+	bus->wait_for_d3_ack = 1;
+	DHD_TRACE(("%s: EXIT: SUCCESS\n",
+		__FUNCTION__));
+	return 0;
+
+fail:
+	if (bus->sih != NULL)
+		si_detach(bus->sih);
+	DHD_TRACE(("%s: EXIT: FAILURE\n",
+		__FUNCTION__));
+	return -1;
+}
+
+int
+dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
+{
+	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
+	return 0;
+}
+int
+dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
+{
+	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
+	return 0;
+}
+
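+		/* Drain the backup queue into the flow ring; if the ring runs out of
+		 * space, reinsert the packet at the head and retry on the next schedule.
+		 */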
+void
+dhdpcie_bus_intr_enable(dhd_bus_t *bus)
+{
+	DHD_TRACE(("enable interrupts\n"));
+
+	if (!bus || !bus->sih)
+		return;
+
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		dhpcie_bus_unmask_interrupt(bus);
+	}
+	else if (bus->sih) {
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
+			bus->def_intmask, bus->def_intmask);
+	}
+}
+
+void
+dhdpcie_bus_intr_disable(dhd_bus_t *bus)
+{
+
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+	if (!bus || !bus->sih)
+		return;
+
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		dhpcie_bus_mask_interrupt(bus);
+	}
+	else if (bus->sih) {
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
+			bus->def_intmask, 0);
+	}
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+
+/* Detach and free everything */
+void
+dhdpcie_bus_release(dhd_bus_t *bus)
+{
+	bool dongle_isolation = FALSE;
+	osl_t *osh = NULL;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+
+		osh = bus->osh;
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			dongle_isolation = bus->dhd->dongle_isolation;
+			dhd_detach(bus->dhd);
+
+			if (bus->intr) {
+				dhdpcie_bus_intr_disable(bus);
+				dhdpcie_free_irq(bus);
+			}
+			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+			dhd_free(bus->dhd);
+			bus->dhd = NULL;
+		}
+
+		/* unmap the regs and tcm here!! */
+		if (bus->regs) {
+			dhdpcie_bus_reg_unmap(osh, (ulong)bus->regs, DONGLE_REG_MAP_SIZE);
+			bus->regs = NULL;
+		}
+		if (bus->tcm) {
+			dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, DONGLE_TCM_MAP_SIZE);
+			bus->tcm = NULL;
+		}
+
+		dhdpcie_bus_release_malloc(bus, osh);
+		/* Detach pcie shared structure */
+		if (bus->pcie_sh)
+			MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
+
+#ifdef DHD_DEBUG
+
+		if (bus->console.buf != NULL)
+			MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+
+		/* Finally free bus info */
+		MFREE(osh, bus, sizeof(dhd_bus_t));
+
+	}
+
+	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+
+}
+
+
+void
+dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+		bus->dhd, bus->dhd->dongle_reset));
+
+	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
+		DHD_TRACE(("%s Exit\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->sih) {
+
+		if (!dongle_isolation)
+			pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
+
+		if (bus->ltrsleep_on_unload) {
+			si_corereg(bus->sih, bus->sih->buscoreidx,
+				OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
+		}
+		si_detach(bus->sih);
+		if (bus->vars && bus->varsz)
+			MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+uint32
+dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
+{
+	uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
+	return data;
+}
+
+/* 32 bit config write */
+void
+dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
+{
+	OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
+}
+
+void
+dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
+{
+	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
+}
+
+void
+dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size =  DONGLE_MIN_MEMSIZE;
+	/* Restrict the memsize to user specified limit */
+	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+		dhd_dongle_memsize, min_size));
+	if ((dhd_dongle_memsize > min_size) &&
+		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_memsize;
+}
+
+void
+dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd && bus->dhd->dongle_reset)
+		return;
+
+	if (bus->vars && bus->varsz) {
+		MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+	return;
+
+}
+
+/* Stop bus module: clear pending frames, disable data flow */
+void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+	uint32 status;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!bus->dhd)
+		return;
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
+		goto done;
+	}
+
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	dhdpcie_bus_intr_disable(bus);
+	status =  dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+	dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
+	if (!dhd_download_fw_on_driverload)
+		dhd_dpc_kill(bus->dhd);
+
+	/* Clear rx control and wake any waiters */
+	bus->rxlen = 0;
+	dhd_os_ioctl_resp_wake(bus->dhd);
+
+done:
+	return;
+}
+
+/* Watchdog timer function */
+bool dhd_bus_watchdog(dhd_pub_t *dhd)
+{
+#ifdef DHD_DEBUG
+	dhd_bus_t *bus;
+	bus = dhd->bus;
+
+
+
+	/* Poll for console output periodically */
+	if (dhd->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+		bus->console.count += dhd_watchdog_ms;
+		if (bus->console.count >= dhd_console_ms) {
+			bus->console.count -= dhd_console_ms;
+			/* Make sure backplane clock is on */
+			if (dhdpcie_bus_readconsole(bus) < 0)
+				dhd_console_ms = 0;	/* On error, stop trying */
+		}
+	}
+#endif /* DHD_DEBUG */
+
+	return FALSE;
+}
+
+
+
+/* Download firmware image and nvram image */
+int
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+                          char *pfw_path, char *pnv_path)
+{
+	int ret;
+
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+
+	ret = dhdpcie_download_firmware(bus, osh);
+
+	return ret;
+}
+
+static int
+dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
+{
+	int ret = 0;
+
+	DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
+		__FUNCTION__, bus->fw_path, bus->nv_path));
+
+	DHD_OS_WAKE_LOCK(bus->dhd);
+
+	ret = _dhdpcie_download_firmware(bus);
+
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+	return ret;
+}
+
+static int
+dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	int len;
+	void *image = NULL;
+	uint8 *memblock = NULL, *memptr;
+
+	DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+	/* Opening the image should succeed if it was actually supplied through a
+	 * registry entry or module parameter.
+	 */
+	image = dhd_os_open_image(pfw_path);
+	if (image == NULL)
+		goto err;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
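+	/* Round the staging pointer up to a DHD_SDALIGN boundary so writes to the
+	 * dongle start aligned.
+	 */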
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	/* Download image */
+	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+		if (len < 0) {
+			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)memptr));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+
+		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, memptr, len);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+
+static int
+dhdpcie_download_nvram(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	uint len;
+	void * image = NULL;
+	char * memblock = NULL;
+	char *bufp;
+	char *pnv_path;
+	bool nvram_file_exists;
+
+	pnv_path = bus->nv_path;
+
+	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+	if (!nvram_file_exists && (bus->nvram_params == NULL))
+		return (0);
+
+	if (nvram_file_exists) {
+		image = dhd_os_open_image(pnv_path);
+		if (image == NULL)
+			goto err;
+	}
+
+	memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+		           __FUNCTION__, MAX_NVRAMBUF_SIZE));
+		goto err;
+	}
+
+	/* Download variables */
+	if (nvram_file_exists) {
+		len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+	}
+	else {
+
+		/* nvram_params is a sequence of NUL-terminated strings, so strlen cannot be used */
+		len = bus->nvram_params_len;
+		ASSERT(len <= MAX_NVRAMBUF_SIZE);
+		memcpy(memblock, bus->nvram_params, len);
+	}
+	if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+		bufp = (char *)memblock;
+		bufp[len] = 0;
+
+		if (nvram_file_exists)
+			len = process_nvram_vars(bufp, len);
+
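+		/* Pad the variable block to a 4-byte boundary and append a final NUL
+		 * before downloading it to the dongle.
+		 */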
+		if (len % 4) {
+			len += 4 - (len % 4);
+		}
+		bufp += len;
+		*bufp++ = 0;
+		if (len)
+			bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error downloading vars: %d\n",
+			           __FUNCTION__, bcmerror));
+		}
+	}
+	else {
+		DHD_ERROR(("%s: error reading nvram file: %d\n",
+		           __FUNCTION__, len));
+		bcmerror = BCME_ERROR;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+
+#ifdef BCMEMBEDIMAGE
+int
+dhdpcie_download_code_array(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	unsigned char *p_dlarray  = NULL;
+	unsigned int dlarray_size = 0;
+	unsigned int downloded_len, remaining_len, len;
+	char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
+	uint8 *memblock = NULL, *memptr;
+
+	downloded_len = 0;
+	remaining_len = 0;
+	len = 0;
+
+	p_dlarray = dlarray;
+	dlarray_size = sizeof(dlarray);
+	p_dlimagename = dlimagename;
+	p_dlimagever  = dlimagever;
+	p_dlimagedate = dlimagedate;
+
+	if ((p_dlarray == 0) ||	(dlarray_size == 0) ||(dlarray_size > bus->ramsize) ||
+		(p_dlimagename == 0) ||	(p_dlimagever  == 0) ||	(p_dlimagedate == 0))
+		goto err;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	while (downloded_len  < dlarray_size) {
+		remaining_len = dlarray_size - downloded_len;
+		if (remaining_len >= MEMBLOCK)
+			len = MEMBLOCK;
+		else
+			len = remaining_len;
+
+		memcpy(memptr, (p_dlarray + downloded_len), len);
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)memptr));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
+		downloded_len += len;
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+				__FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+		offset += MEMBLOCK;
+	}
+
+#ifdef DHD_DEBUG
+	/* Upload and compare the downloaded code */
+	{
+		unsigned char *ularray = NULL;
+		unsigned int uploded_len;
+		uploded_len = 0;
+		bcmerror = -1;
+		ularray = MALLOC(bus->dhd->osh, dlarray_size);
+		if (ularray == NULL)
+			goto upload_err;
+		/* Upload image to verify downloaded contents. */
+		offset = bus->dongle_ram_base;
+		memset(ularray, 0xaa, dlarray_size);
+		while (uploded_len  < dlarray_size) {
+			remaining_len = dlarray_size - uploded_len;
+			if (remaining_len >= MEMBLOCK)
+				len = MEMBLOCK;
+			else
+				len = remaining_len;
+			bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
+				(uint8 *)(ularray + uploded_len), len);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto upload_err;
+			}
+
+			uploded_len += len;
+			offset += MEMBLOCK;
+		}
+
+		if (memcmp(p_dlarray, ularray, dlarray_size)) {
+			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+			goto upload_err;
+
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+upload_err:
+		if (ularray)
+			MFREE(bus->dhd->osh, ularray, dlarray_size);
+	}
+#endif /* DHD_DEBUG */
+err:
+
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+
+static int
+_dhdpcie_download_firmware(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+
+	bool embed = FALSE;	/* download embedded firmware */
+	bool dlok = FALSE;	/* download firmware succeeded */
+
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+		embed = TRUE;
+#else
+		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
+		return 0;
+#endif
+	}
+
+	/* Keep arm in reset */
+	if (dhdpcie_bus_download_state(bus, TRUE)) {
+		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		if (dhdpcie_download_code_file(bus, bus->fw_path)) {
+			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+			embed = TRUE;
+#else
+			goto err;
+#endif
+		}
+		else {
+			embed = FALSE;
+			dlok = TRUE;
+		}
+	}
+
+#ifdef BCMEMBEDIMAGE
+	if (embed) {
+		if (dhdpcie_download_code_array(bus)) {
+			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+			goto err;
+		}
+		else {
+			dlok = TRUE;
+		}
+	}
+#else
+	BCM_REFERENCE(embed);
+#endif
+	if (!dlok) {
+		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* EXAMPLE: nvram_array */
+	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
+	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+
+	/* External nvram takes precedence if specified */
+	if (dhdpcie_download_nvram(bus)) {
+		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* Take arm out of reset */
+	if (dhdpcie_bus_download_state(bus, FALSE)) {
+		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	bcmerror = 0;
+
+err:
+	return bcmerror;
+}
+
+int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	int timeleft;
+	uint rxlen = 0;
+	bool pending;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Wait until control frame is available */
+	timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+	if (timeleft == 0) {
+		DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+		bus->ioct_resp.cmn_hdr.request_id = 0;
+		bus->ioct_resp.compl_hdr.status = 0xffff;
+		bus->rxlen = 0;
+	}
+	rxlen = bus->rxlen;
+	bcopy(&bus->ioct_resp, msg, sizeof(ioctl_comp_resp_msg_t));
+	bus->rxlen = 0;
+
+	if (rxlen) {
+		DHD_CTL(("%s: resumed on rxctl frame, got %d\n", __FUNCTION__, rxlen));
+	} else if (timeleft == 0) {
+		DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+	} else if (pending == TRUE) {
+		DHD_CTL(("%s: canceled\n", __FUNCTION__));
+		return -ERESTARTSYS;
+	} else {
+		DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+	}
+	if (timeleft == 0) {
+		bus->dhd->rxcnt_timeout++;
+		DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout));
+	}
+	else
+		bus->dhd->rxcnt_timeout = 0;
+
+	if (rxlen)
+		bus->dhd->rx_ctlpkts++;
+	else
+		bus->dhd->rx_ctlerrs++;
+
+	if (bus->dhd->rxcnt_timeout >= MAX_CNTL_TX_TIMEOUT)
+		return -ETIMEDOUT;
+
+	if (bus->dhd->dongle_trap_occured)
+		return -EREMOTEIO;
+
+	return rxlen ? (int)rxlen : -EIO;
+
+}
+
+#define CONSOLE_LINE_MAX	192
+
+#ifdef DHD_DEBUG
+static int
+dhdpcie_bus_readconsole(dhd_bus_t *bus)
+{
+	dhd_console_t *c = &bus->console;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, idx, addr;
+	int rv;
+
+	/* Don't do anything until FWREADY updates console address */
+	if (bus->console_addr == 0)
+		return -1;
+
+	/* Read console log struct */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
+
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+		return rv;
+
+	/* Allocate console buffer (one time only) */
+	if (c->buf == NULL) {
+		c->bufsize = ltoh32(c->log.buf_size);
+		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+			return BCME_NOMEM;
+	}
+	idx = ltoh32(c->log.idx);
+
+	/* Protect against corrupt value */
+	if (idx > c->bufsize)
+		return BCME_ERROR;
+
+	/* Skip reading the console buffer if the index pointer has not moved */
+	if (idx == c->last)
+		return BCME_OK;
+
+	/* Read the console buffer */
+	addr = ltoh32(c->log.buf);
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+		return rv;
+
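+	/* Walk the console ring from the last printed offset up to the current
+	 * write index, emitting one line at a time.
+	 */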
+	while (c->last != idx) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			if (c->last == idx) {
+				/* This would output a partial line.  Instead, back up
+				 * the buffer pointer and output this line next time around.
+				 */
+				if (c->last >= n)
+					c->last -= n;
+				else
+					c->last = c->bufsize - n;
+				goto break2;
+			}
+			ch = c->buf[c->last];
+			c->last = (c->last + 1) % c->bufsize;
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			printf("CONSOLE: %s\n", line);
+		}
+	}
+break2:
+
+	return BCME_OK;
+}
+
+static int
+dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+	int bcmerror = 0;
+	uint msize = 512;
+	char *mbuffer = NULL;
+	char *console_buffer = NULL;
+	uint maxstrlen = 256;
+	char *str = NULL;
+	trap_t tr;
+	pciedev_shared_t *pciedev_shared = bus->pcie_sh;
+	struct bcmstrbuf strbuf;
+	uint32 console_ptr, console_size, console_index;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, i, addr;
+	int rv;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (DHD_NOCHECKDIED_ON())
+		return 0;
+
+	if (data == NULL) {
+		/*
+		 * Called after a rx ctrl timeout. "data" is NULL.
+		 * allocate memory to trace the trap or assert.
+		 */
+		size = msize;
+		mbuffer = data = MALLOC(bus->dhd->osh, msize);
+
+		if (mbuffer == NULL) {
+			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+	}
+
+	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+		bcmerror = BCME_NOMEM;
+		goto done;
+	}
+
+	if ((bcmerror = dhdpcie_readshared(bus)) < 0)
+		goto done;
+
+	bcm_binit(&strbuf, data, size);
+
+	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
+	            pciedev_shared->msgtrace_addr, pciedev_shared->console_addr);
+
+	if ((pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+	}
+
+	if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "No trap%s in dongle",
+		          (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
+		          ?"/assrt" :"");
+	} else {
+		if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
+			/* Download assert */
+			bcm_bprintf(&strbuf, "Dongle assert");
+			if (bus->pcie_sh->assert_exp_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+				                                  bus->pcie_sh->assert_exp_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " expr \"%s\"", str);
+			}
+
+			if (bus->pcie_sh->assert_file_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+				                                  bus->pcie_sh->assert_file_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " file \"%s\"", str);
+			}
+
+			bcm_bprintf(&strbuf, " line %d ",  bus->pcie_sh->assert_line);
+		}
+
+		if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
+			bus->dhd->dongle_trap_occured = TRUE;
+			if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+			                                  bus->pcie_sh->trap_addr,
+			                                 (uint8*)&tr, sizeof(trap_t))) < 0)
+				goto done;
+
+			bcm_bprintf(&strbuf,
+			"Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+			            "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+			"r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+			"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+			ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
+			ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
+			ltoh32(bus->pcie_sh->trap_addr),
+			ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
+			ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
+
+			addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
+			if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+				(uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+				goto printbuf;
+
+			addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
+			if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+				(uint8 *)&console_size, sizeof(console_size))) < 0)
+				goto printbuf;
+
+			addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
+			if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+				(uint8 *)&console_index, sizeof(console_index))) < 0)
+				goto printbuf;
+
+			console_ptr = ltoh32(console_ptr);
+			console_size = ltoh32(console_size);
+			console_index = ltoh32(console_index);
+
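+			/* Fetch the dongle console buffer (bounded by CONSOLE_BUFFER_MAX) so
+			 * the crash log can be replayed line by line below.
+			 */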
+			if (console_size > CONSOLE_BUFFER_MAX ||
+				!(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+				goto printbuf;
+
+			if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
+				(uint8 *)console_buffer, console_size)) < 0)
+				goto printbuf;
+
+			for (i = 0, n = 0; i < console_size; i += n + 1) {
+				for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+					ch = console_buffer[(console_index + i + n) % console_size];
+					if (ch == '\n')
+						break;
+					line[n] = ch;
+				}
+
+
+				if (n > 0) {
+					if (line[n - 1] == '\r')
+						n--;
+					line[n] = 0;
+					/* Don't use DHD_ERROR macro since we print
+					 * a lot of information quickly. The macro
+					 * will truncate a lot of the printfs
+					 */
+
+					if (dhd_msg_level & DHD_ERROR_VAL)
+						printf("CONSOLE: %s\n", line);
+				}
+			}
+		}
+	}
+
+printbuf:
+	if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
+		DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+	}
+
+done:
+	if (mbuffer)
+		MFREE(bus->dhd->osh, mbuffer, msize);
+	if (str)
+		MFREE(bus->dhd->osh, str, maxstrlen);
+
+	if (console_buffer)
+		MFREE(bus->dhd->osh, console_buffer, console_size);
+
+	return bcmerror;
+}
+#endif /* DHD_DEBUG */
+
+
+/**
+ * Transfers bytes from host to dongle using pio mode.
+ * Parameter 'address' is a backplane address.
+ */
+static int
+dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
+{
+	int bcmerror = 0;
+	uint dsize;
+	int detect_endian_flag = 0x01;
+	bool little_endian;
+
+	/* Detect endianness. */
+	little_endian = *(char *)&detect_endian_flag;
+
+	/* In remap mode, adjust address beyond socram and redirect
+	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
+	 * is not backplane accessible
+	 */
+
+	/* Determine initial transfer parameters */
+	dsize = sizeof(uint64);
+
+	/* Do the transfer(s) */
+	if (write) {
+		while (size) {
+			if (size >= sizeof(uint64) && little_endian)
+				dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
+			else {
+				dsize = sizeof(uint8);
+				dhdpcie_bus_wtcm8(bus, address, *data);
+			}
+
+			/* Adjust for next transfer (if any) */
+			if ((size -= dsize)) {
+				data += dsize;
+				address += dsize;
+			}
+		}
+	} else {
+		while (size) {
+			if (size >= sizeof(uint64) && little_endian)
+				*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
+			else {
+				dsize = sizeof(uint8);
+				*data = dhdpcie_bus_rtcm8(bus, address);
+			}
+
+			/* Adjust for next transfer (if any) */
+			if ((size -= dsize) > 0) {
+				data += dsize;
+				address += dsize;
+			}
+		}
+	}
+	return bcmerror;
+}
+
+int BCMFASTPATH
+dhd_bus_schedule_queue(struct dhd_bus  *bus, uint16 flow_id, bool txs)
+{
+	flow_ring_node_t *flow_ring_node;
+	int ret = BCME_OK;
+
+	DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
+	/* ASSERT on flow_id */
+	if (flow_id >= bus->max_sub_queues) {
+		DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
+			flow_id, bus->max_sub_queues));
+		return 0;
+	}
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
+
+	{
+		unsigned long flags;
+		void *txp = NULL;
+		flow_queue_t *queue;
+
+		queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+		DHD_QUEUE_LOCK(queue->lock, flags);
+
+		while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+			PKTORPHAN(txp);
+
+#ifdef DHDTCPACK_SUPPRESS
+		dhd_tcpack_check_xmit(bus->dhd, txp);
+#endif /* DHDTCPACK_SUPPRESS */
+			/* Attempt to transfer packet over flow ring */
+
+			ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
+			if (ret != BCME_OK) { /* may not have resources in flow ring */
+				DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
+				dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
+				/* reinsert at head */
+				dhd_flow_queue_reinsert(bus->dhd, queue, txp);
+				DHD_QUEUE_UNLOCK(queue->lock, flags);
+
+				/* If we are able to requeue back, return success */
+				return BCME_OK;
+			}
+		}
+
+		dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
+
+		DHD_QUEUE_UNLOCK(queue->lock, flags);
+	}
+
+	return ret;
+}
+
+/* Send a data frame to the dongle.  Callee disposes of txp. */
+int BCMFASTPATH
+dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
+{
+	unsigned long flags;
+	int ret = BCME_OK;
+	void *txp_pend = NULL;
+	if (!bus->txmode_push) {
+		uint16 flowid;
+		flow_queue_t *queue;
+		flow_ring_node_t *flow_ring_node;
+		if (!bus->dhd->flowid_allocator) {
+			DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
+			goto toss;
+		}
+
+		flowid = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(txp));
+
+		flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+
+		DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
+			__FUNCTION__, flowid, flow_ring_node->status,
+			flow_ring_node->active));
+
+		if ((flowid >= bus->dhd->num_flow_rings) ||
+			(!flow_ring_node->active) ||
+			(flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
+			DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
+				__FUNCTION__, flowid, flow_ring_node->status,
+				flow_ring_node->active));
+			ret = BCME_ERROR;
+			goto toss;
+		}
+
+		queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+		DHD_QUEUE_LOCK(queue->lock, flags);
+
+		if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
+			txp_pend = txp;
+
+		DHD_QUEUE_UNLOCK(queue->lock, flags);
+
+		if (flow_ring_node->status) {
+			DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
+			    __FUNCTION__, flowid, flow_ring_node->status,
+			    flow_ring_node->active));
+			if (txp_pend) {
+				txp = txp_pend;
+				goto toss;
+			}
+			return BCME_OK;
+		}
+		ret = dhd_bus_schedule_queue(bus, flowid, FALSE);
+
+		/* If we have anything pending, try to push into q */
+		if (txp_pend) {
+			DHD_QUEUE_LOCK(queue->lock, flags);
+
+			if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
+				DHD_QUEUE_UNLOCK(queue->lock, flags);
+				txp = txp_pend;
+				goto toss;
+			}
+
+			DHD_QUEUE_UNLOCK(queue->lock, flags);
+		}
+
+		return ret;
+
+	} else { /* bus->txmode_push */
+		return dhd_prot_txdata(bus->dhd, txp, ifidx);
+	}
+
+toss:
+	DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
+	PKTCFREE(bus->dhd->osh, txp, TRUE);
+	return ret;
+}
+
+
+void
+dhd_bus_stop_queue(struct dhd_bus *bus)
+{
+	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+	bus->bus_flowctrl = TRUE;
+}
+
+void
+dhd_bus_start_queue(struct dhd_bus *bus)
+{
+	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+	bus->bus_flowctrl = TRUE;
+}
+
+void
+dhd_bus_update_retlen(dhd_bus_t *bus, uint32 retlen, uint32 pkt_id, uint16 status,
+	uint32 resp_len)
+{
+	bus->rxlen = retlen;
+	bus->ioct_resp.cmn_hdr.request_id = pkt_id;
+	bus->ioct_resp.compl_hdr.status = status;
+	bus->ioct_resp.resp_len = (uint16)resp_len;
+}
+
+#if defined(DHD_DEBUG)
+/* Device console input function */
+int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
+{
+	dhd_bus_t *bus = dhd->bus;
+	uint32 addr, val;
+	int rv;
+	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
+	if (bus->console_addr == 0)
+		return BCME_UNSUPPORTED;
+
+	/* Don't allow input if dongle is in reset */
+	if (bus->dhd->dongle_reset) {
+		dhd_os_sdunlock(bus->dhd);
+		return BCME_NOTREADY;
+	}
+
+	/* Zero cbuf_index */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
+	val = htol32(0);
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Write message into cbuf */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+		goto done;
+
+	/* Write length into vcons_in */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
+	val = htol32(msglen);
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* generate an interrupt to the dongle to indicate that it needs to process a console command */
+	dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
+done:
+	return rv;
+}
+#endif /* defined(DHD_DEBUG) */
+
+/* Process rx frame , Send up the layer to netif */
+void BCMFASTPATH
+dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
+{
+	dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
+}
+
+/** 'offset' is a backplane address */
+void
+dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
+{
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+	*(volatile uint8 *)(bus->tcm + offset) = (uint8)data;
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+}
+
+uint8
+dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
+{
+	volatile uint8 data;
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+#ifdef BCM47XX_ACP_WAR
+	data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
+#else
+	data = *(volatile uint8 *)(bus->tcm + offset);
+#endif
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+	return data;
+}
+
+void
+dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
+{
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+	*(volatile uint32 *)(bus->tcm + offset) = (uint32)data;
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+}
+void
+dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
+{
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+	*(volatile uint16 *)(bus->tcm + offset) = (uint16)data;
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+}
+void
+dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
+{
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+	*(volatile uint64 *)(bus->tcm + offset) = (uint64)data;
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+}
+
+uint16
+dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
+{
+	volatile uint16 data;
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+#ifdef BCM47XX_ACP_WAR
+	data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
+#else
+	data = *(volatile uint16 *)(bus->tcm + offset);
+#endif
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+	return data;
+}
+
+uint32
+dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
+{
+	volatile uint32 data;
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+#ifdef BCM47XX_ACP_WAR
+	data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
+#else
+	data = *(volatile uint32 *)(bus->tcm + offset);
+#endif
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+	return data;
+}
+
+uint64
+dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
+{
+	volatile uint64 data;
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+#ifdef BCM47XX_ACP_WAR
+	data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
+#else
+	data = *(volatile uint64 *)(bus->tcm + offset);
+#endif
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+	return data;
+}
+
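+/* Write a host value into the dongle's shared memory area; 'type' selects
+ * which shared field, ring descriptor member or mailbox word is updated.
+ */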
+void
+dhd_bus_cmn_writeshared(dhd_bus_t *bus, void * data, uint32 len, uint8 type, uint16 ringid)
+{
+	uint64 long_data;
+	ulong tcm_offset;
+	pciedev_shared_t *sh;
+	pciedev_shared_t *shmem = NULL;
+
+	sh = (pciedev_shared_t*)bus->shared_addr;
+
+	DHD_INFO(("%s: writing to msgbuf type %d, len %d\n", __FUNCTION__, type, len));
+
+	switch (type) {
+		case DNGL_TO_HOST_DMA_SCRATCH_BUFFER:
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (ulong)&(sh->host_dma_scratch_buffer);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN :
+			tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len);
+			dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data));
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER:
+			/* ring_info_ptr stored in pcie_sh */
+			shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (ulong)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case HOST_TO_DNGL_DMA_READINDX_BUFFER:
+			/* ring_info_ptr stored in pcie_sh */
+			shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (ulong)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER:
+			/* ring_info_ptr stored in pcie_sh */
+			shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (ulong)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case DNGL_TO_HOST_DMA_READINDX_BUFFER:
+			/* ring_info_ptr stored in pcie_sh */
+			shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (ulong)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case RING_LEN_ITEMS :
+			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+			tcm_offset += OFFSETOF(ring_mem_t, len_items);
+			dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+
+		case RING_MAX_ITEM :
+			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+			tcm_offset += OFFSETOF(ring_mem_t, max_item);
+			dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+
+		case RING_BUF_ADDR :
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+			tcm_offset += OFFSETOF(ring_mem_t, base_addr);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8 *) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case RING_WRITE_PTR :
+			tcm_offset = bus->ring_sh[ringid].ring_state_w;
+			dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+		case RING_READ_PTR :
+			tcm_offset = bus->ring_sh[ringid].ring_state_r;
+			dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+
+		case DTOH_MB_DATA:
+			dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
+				(uint32) HTOL32(*(uint32 *)data));
+			break;
+
+		case HTOD_MB_DATA:
+			dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
+				(uint32) HTOL32(*(uint32 *)data));
+			break;
+		default:
+			break;
+	}
+}
+
+
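+/* Read a field (ring read/write index, mailbox data, host rx buffer limits)
+ * from the dongle's shared memory into 'data'.
+ */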
+void
+dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
+{
+	pciedev_shared_t *sh;
+	ulong tcm_offset;
+
+	sh = (pciedev_shared_t*)bus->shared_addr;
+
+	switch (type) {
+		case RING_WRITE_PTR :
+			tcm_offset = bus->ring_sh[ringid].ring_state_w;
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
+			break;
+		case RING_READ_PTR :
+			tcm_offset = bus->ring_sh[ringid].ring_state_r;
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
+			break;
+		case TOTAL_LFRAG_PACKET_CNT :
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
+				(ulong) &sh->total_lfrag_pkt_cnt));
+			break;
+		case HTOD_MB_DATA:
+			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
+			break;
+		case DTOH_MB_DATA:
+			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
+			break;
+		case MAX_HOST_RXBUFS :
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
+				(ulong) &sh->max_host_rxbufs));
+			break;
+		default :
+			break;
+	}
+}
+
+uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
+{
+	return ((pciedev_shared_t*)bus->pcie_sh)->flags;
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                 void *params, int plen, void *arg, int len, bool set)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
+		goto exit;
+	}
+
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+#ifdef BCM_BUZZZ
+#include <bcm_buzzz.h>
+
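+/* Print deltas for the three CR4 performance counters, handling 32-bit
+ * counter wrap and subtracting the instrumentation overhead.
+ */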
+int dhd_buzzz_dump_cntrs3(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
+{
+	int bytes = 0;
+	uint32 ctr, curr[3], prev[3], delta[3];
+
+	/* Compute elapsed counter values per counter event type */
+	for (ctr = 0U; ctr < 3; ctr++) {
+		prev[ctr] = core[ctr];
+		curr[ctr] = *log++;
+		core[ctr] = curr[ctr];  /* saved for next log */
+
+		if (curr[ctr] < prev[ctr])
+			delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
+		else
+			delta[ctr] = (curr[ctr] - prev[ctr]);
+
+		/* Adjust for instrumentation overhead */
+		if (delta[ctr] >= ovhd[ctr])
+			delta[ctr] -= ovhd[ctr];
+		else
+			delta[ctr] = 0;
+
+		bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
+	}
+
+	return bytes;
+}
+
+typedef union cm3_cnts { /* export this in bcm_buzzz.h */
+	uint32 u32;
+	uint8  u8[4];
+	struct {
+		uint8 cpicnt;
+		uint8 exccnt;
+		uint8 sleepcnt;
+		uint8 lsucnt;
+	};
+} cm3_cnts_t;
+
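+/* CM3 variant: a 32-bit cycle count plus four packed 8-bit counters
+ * (cpi/exc/sleep/lsu) and a fold count taken from arg0, combined into an
+ * instruction count estimate.
+ */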
+int dhd_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
+{
+	int bytes = 0;
+
+	uint32 cyccnt, instrcnt;
+	cm3_cnts_t cm3_cnts;
+	uint8 foldcnt;
+
+	{   /* 32bit cyccnt */
+		uint32 curr, prev, delta;
+		prev = core[0]; curr = *log++; core[0] = curr;
+		if (curr < prev)
+			delta = curr + (~0U - prev);
+		else
+			delta = (curr - prev);
+		if (delta >= ovhd[0])
+			delta -= ovhd[0];
+		else
+			delta = 0;
+
+		bytes += sprintf(p + bytes, "%12u ", delta);
+		cyccnt = delta;
+	}
+
+	{	/* Extract the 4 cnts: cpi, exc, sleep and lsu */
+		int i;
+		uint8 max8 = ~0;
+		cm3_cnts_t curr, prev, delta;
+		prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
+		for (i = 0; i < 4; i++) {
+			if (curr.u8[i] < prev.u8[i])
+				delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
+			else
+				delta.u8[i] = (curr.u8[i] - prev.u8[i]);
+			if (delta.u8[i] >= ovhd[i + 1])
+				delta.u8[i] -= ovhd[i + 1];
+			else
+				delta.u8[i] = 0;
+			bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
+		}
+		cm3_cnts.u32 = delta.u32;
+	}
+
+	{   /* Extract the foldcnt from arg0 */
+		uint8 curr, prev, delta, max8 = ~0;
+		buzzz_arg0_t arg0; arg0.u32 = *log;
+		prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
+		if (curr < prev)
+			delta = curr + (max8 - prev);
+		else
+			delta = (curr - prev);
+		if (delta >= ovhd[5])
+			delta -= ovhd[5];
+		else
+			delta = 0;
+		bytes += sprintf(p + bytes, "%4u ", delta);
+		foldcnt = delta;
+	}
+
+	instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
+		                 + cm3_cnts.u8[3]) + foldcnt;
+	if (instrcnt > 0xFFFFFF00)
+		bytes += sprintf(p + bytes, "[%10s] ", "~");
+	else
+		bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
+	return bytes;
+}
+
+int dhd_buzzz_dump_log(char * p, uint32 * core, uint32 * log, buzzz_t * buzzz)
+{
+	int bytes = 0;
+	buzzz_arg0_t arg0;
+	static uint8 * fmt[] = BUZZZ_FMT_STRINGS;
+
+	if (buzzz->counters == 6) {
+		bytes += dhd_buzzz_dump_cntrs6(p, core, buzzz->ovhd, log);
+		log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
+	} else {
+		bytes += dhd_buzzz_dump_cntrs3(p, core, buzzz->ovhd, log);
+		log += 3; /* (3 x 32bit) CR4 */
+	}
+
+	/* Dump the logged arguments using the registered formats */
+	arg0.u32 = *log++;
+
+	switch (arg0.klog.args) {
+		case 0:
+			bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
+			break;
+		case 1:
+		{
+			uint32 arg1 = *log++;
+			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
+			break;
+		}
+		default:
+			printf("Maximum one argument supported\n");
+			break;
+	}
+	bytes += sprintf(p + bytes, "\n");
+
+	return bytes;
+}
+
+void dhd_buzzz_dump(buzzz_t * buzzz_p, void * buffer_p, char * p)
+{
+	int i;
+	uint32 total, part1, part2, log_sz, core[BUZZZ_COUNTERS_MAX];
+	void * log;
+
+	for (i = 0; i < BUZZZ_COUNTERS_MAX; i++)
+		core[i] = 0;
+
+	log_sz = buzzz_p->log_sz;
+
+	part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
+
+	if (buzzz_p->wrap == TRUE) {
+		part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
+		total = (buzzz_p->buffer_sz - BUZZZ_LOGENTRY_MAXSZ) / log_sz;
+	} else {
+		part2 = 0U;
+		total = buzzz_p->count;
+	}
+
+	if (total == 0U) {
+		printf("buzzz_dump total<%u> done\n", total);
+		return;
+	} else {
+		printf("buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
+		       total, part2, part1);
+	}
+
+	if (part2) {   /* with wrap */
+		log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
+		while (part2--) {   /* from cur to end : part2 */
+			p[0] = '\0';
+			dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+			printf("%s", p);
+			log = (void*)((size_t)log + buzzz_p->log_sz);
+		}
+	}
+
+	log = (void*)buffer_p;
+	while (part1--) {
+		p[0] = '\0';
+		dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+		printf("%s", p);
+		log = (void*)((size_t)log + buzzz_p->log_sz);
+	}
+
+	printf("buzzz_dump done.\n");
+}
+
+int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
+{
+	buzzz_t * buzzz_p = NULL;
+	void * buffer_p = NULL;
+	char * page_p = NULL;
+	pciedev_shared_t *sh;
+	int ret = 0;
+
+	if (bus->dhd->busstate != DHD_BUS_DATA) {
+		return BCME_UNSUPPORTED;
+	}
+	if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
+		printf("Page memory allocation failure\n");
+		goto done;
+	}
+	if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(buzzz_t))) == NULL) {
+		printf("Buzzz memory allocation failure\n");
+		goto done;
+	}
+
+	ret = dhdpcie_readshared(bus);
+	if (ret < 0) {
+		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
+		goto done;
+	}
+
+	sh = bus->pcie_sh;
+
+	DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz));
+
+	if (sh->buzzz != 0U) {	/* Fetch and display dongle BUZZZ Trace */
+		dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz,
+		                     (uint8 *)buzzz_p, sizeof(buzzz_t));
+		if (buzzz_p->count == 0) {
+			printf("Empty dongle BUZZZ trace\n\n");
+			goto done;
+		}
+		if (buzzz_p->counters != 3) { /* 3 counters for CR4 */
+			printf("Counters<%u> mismatch\n", buzzz_p->counters);
+			goto done;
+		}
+		/* Allocate memory for trace buffer and format strings */
+		buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
+		if (buffer_p == NULL) {
+			printf("Buffer memory allocation failure\n");
+			goto done;
+		}
+		/* Fetch the trace and format strings */
+		dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log,   /* Trace */
+		                     (uint8 *)buffer_p, buzzz_p->buffer_sz);
+		/* Process and display the trace using formatted output */
+		printf("<#cycle> <#instruction> <#ctr3> <event information>\n");
+		dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
+		printf("----- End of dongle BUZZZ Trace -----\n\n");
+		MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
+	}
+
+done:
+
+	if (page_p)   MFREE(bus->dhd->osh, page_p, 4096);
+	/* Free the trace buffer before buzzz_p, whose buffer_sz field it references */
+	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
+	if (buzzz_p)  MFREE(bus->dhd->osh, buzzz_p, sizeof(buzzz_t));
+
+	return BCME_OK;
+}
+#endif /* BCM_BUZZZ */
+
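+/* TRUE when the chip sits on a PCI bus and its bus core is the PCIe Gen2 core */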
+#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) &&	\
+	((sih)->buscoretype == PCIE2_CORE_ID))
+
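+/* Select the MDIO block (device address) to access on the PCIe Gen2 serdes MDIO interface */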
+static bool
+pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
+{
+	uint mdiodata, mdioctrl, i = 0;
+	uint pcie_serdes_spinwait = 200;
+
+	mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
+	mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
+
+	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl);
+	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata);
+
+	OSL_DELAY(10);
+	/* retry till the transaction is complete */
+	while (i < pcie_serdes_spinwait) {
+		uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA,
+			0, 0);
+		if (!(mdioctrl_read & MDIODATA2_DONE)) {
+			break;
+		}
+		OSL_DELAY(1000);
+		i++;
+	}
+
+	if (i >= pcie_serdes_spinwait) {
+		DHD_ERROR(("pcie2_mdiosetblock: timed out\n"));
+		return FALSE;
+	}
+
+	return TRUE;
+}
+
+
+static int
+pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
+	bool slave_bypass)
+{
+	uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
+	uint32 reg32;
+
+	pcie2_mdiosetblock(bus, physmedia);
+
+	/* enable mdio access to SERDES */
+	mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
+	mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
+
+	if (slave_bypass)
+		mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
+
+	if (!write)
+		mdio_ctrl |= MDIOCTL2_READ;
+
+	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);
+
+	if (write) {
+		reg32 =  PCIE2_MDIO_WR_DATA;
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
+			*val | MDIODATA2_DONE);
+	}
+	else
+		reg32 =  PCIE2_MDIO_RD_DATA;
+
+	/* retry till the transaction is complete */
+	while (i < pcie_serdes_spinwait) {
+		uint done_val =  si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
+		if (!(done_val & MDIODATA2_DONE)) {
+			if (!write) {
+				*val = si_corereg(bus->sih, bus->sih->buscoreidx,
+					PCIE2_MDIO_RD_DATA, 0, 0);
+				*val = *val & MDIODATA2_MASK;
+			}
+			return 0;
+		}
+		OSL_DELAY(1000);
+		i++;
+	}
+	return -1;
+}
+
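+/* Turn WLAN power off (flag == TRUE) or back on (flag == FALSE); when firmware is
+ * downloaded on driver load this simply (re)starts the bus.
+ */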
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	int bcmerror = 0;
+#ifdef CONFIG_ARCH_MSM
+	int retry = POWERUP_MAX_RETRY;
+#endif /* CONFIG_ARCH_MSM */
+
+	if (dhd_download_fw_on_driverload) {
+		bcmerror = dhd_bus_start(dhdp);
+	} else {
+		if (flag == TRUE) { /* Turn off WLAN */
+			/* Removing Power */
+			DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
+			bus->dhd->up = FALSE;
+			if (bus->dhd->busstate != DHD_BUS_DOWN) {
+				dhd_prot_clear(dhdp);
+				dhd_os_wd_timer(dhdp, 0);
+				dhd_bus_stop(bus, TRUE);
+#ifdef CONFIG_ARCH_MSM
+				dhd_bus_release_dongle(bus);
+#endif /* CONFIG_ARCH_MSM */
+				dhdpcie_bus_free_resource(bus);
+				bcmerror = dhdpcie_bus_disable_device(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+#ifdef CONFIG_ARCH_MSM
+				bcmerror = dhdpcie_bus_clock_stop(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: host clock stop failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+#endif /* CONFIG_ARCH_MSM */
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			} else {
+				dhd_prot_clear(dhdp);
+#ifdef CONFIG_ARCH_MSM
+				dhd_bus_release_dongle(bus);
+#endif /* CONFIG_ARCH_MSM */
+				dhdpcie_bus_free_resource(bus);
+				bcmerror = dhdpcie_bus_disable_device(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+
+#ifdef CONFIG_ARCH_MSM
+				bcmerror = dhdpcie_bus_clock_stop(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: host clock stop failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+#endif  /* CONFIG_ARCH_MSM */
+			}
+
+			bus->dhd->dongle_reset = TRUE;
+			DHD_ERROR(("%s:  WLAN OFF Done\n", __FUNCTION__));
+
+		} else { /* Turn on WLAN */
+			if (bus->dhd->busstate == DHD_BUS_DOWN) {
+				/* Powering On */
+				DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
+#ifdef CONFIG_ARCH_MSM
+				while (retry--) {
+					bcmerror = dhdpcie_bus_clock_start(bus);
+					if (!bcmerror) {
+						DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
+							__FUNCTION__));
+						break;
+					}
+					else
+						OSL_SLEEP(10);
+				}
+
+				if (bcmerror && !retry) {
+					DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+#endif /* CONFIG_ARCH_MSM */
+				bcmerror = dhdpcie_bus_enable_device(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: host configuration restore failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+
+				bcmerror = dhdpcie_bus_alloc_resource(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+
+				bcmerror = dhdpcie_bus_dongle_attach(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhdpcie_bus_dongle_attach: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+
+				bus->dhd->dongle_reset = FALSE;
+
+				bcmerror = dhd_bus_start(dhdp);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhd_bus_start: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+
+				bus->dhd->up = TRUE;
+				DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
+			} else {
+				DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
+				goto done;
+			}
+		}
+	}
+done:
+	if (bcmerror)
+		bus->dhd->busstate = DHD_BUS_DOWN;
+
+	return bcmerror;
+}
+
+static int
+dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+                void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	int32 int_val2 = 0;
+	int32 int_val3 = 0;
+	bool bool_val = 0;
+
+	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	if (plen >= (int)sizeof(int_val) * 2)
+		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
+
+	if (plen >= (int)sizeof(int_val) * 3)
+		bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
+		bcmerror = BCME_NOTREADY;
+		goto exit;
+	}
+
+	switch (actionid) {
+
+
+	case IOV_SVAL(IOV_VARS):
+		bcmerror = dhdpcie_downloadvars(bus, arg, len);
+		break;
+
+	case IOV_SVAL(IOV_PCIEREG):
+		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
+			int_val);
+		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
+			int_val2);
+		break;
+
+	case IOV_GVAL(IOV_PCIEREG):
+		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
+			int_val);
+		int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
+			OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_BAR0_SECWIN_REG):
+	{
+		uint32 cur_base, base;
+		uchar *bar0;
+		volatile uint32 *offset;
+		/* set the bar0 secondary window to this */
+		/* read the register value */
+		cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
+		base = int_val & 0xFFFFF000;
+		dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN,  sizeof(uint32), base);
+		bar0 = (uchar *)bus->regs;
+		offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
+		int_val = *offset;
+		bcopy(&int_val, arg, val_size);
+		dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
+	}
+		break;
+	case IOV_SVAL(IOV_BAR0_SECWIN_REG):
+	{
+		uint32 cur_base, base;
+		uchar *bar0;
+		volatile uint32 *offset;
+		/* set the bar0 secondary window to this */
+		/* write the register value */
+		cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
+		base = int_val & 0xFFFFF000;
+		dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN,  sizeof(uint32), base);
+		bar0 = (uchar *)bus->regs;
+		offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
+		*offset = int_val2;
+		bcopy(&int_val2, arg, val_size);
+		dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
+	}
+		break;
+
+	case IOV_SVAL(IOV_PCIECOREREG):
+		si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
+		break;
+	case IOV_GVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, coreidx;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = sdreg.offset;
+		coreidx =  (addr & 0xF000) >> 12;
+
+		int_val = si_corereg(bus->sih, coreidx, (addr & 0xFFF), 0, 0);
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, coreidx;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = sdreg.offset;
+		coreidx =  (addr & 0xF000) >> 12;
+
+		si_corereg(bus->sih, coreidx, (addr & 0xFFF), ~0, sdreg.value);
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_PCIESERDESREG):
+	{
+		uint val;
+		if (!PCIE_GEN2(bus->sih)) {
+			DHD_ERROR(("supported only in pcie gen2\n"));
+			bcmerror = BCME_ERROR;
+			break;
+		}
+		if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) {
+			bcopy(&val, arg, sizeof(int32));
+		}
+		else {
+			DHD_ERROR(("pcie2_mdioop failed.\n"));
+			bcmerror = BCME_ERROR;
+		}
+		break;
+	}
+	case IOV_SVAL(IOV_PCIESERDESREG):
+		if (!PCIE_GEN2(bus->sih)) {
+			DHD_ERROR(("supported only in pcie gen2\n"));
+			bcmerror = BCME_ERROR;
+			break;
+		}
+		if (pcie2_mdioop(bus, int_val, int_val2, TRUE, (uint *)&int_val3, FALSE)) {
+			DHD_ERROR(("pcie2_mdioop failed.\n"));
+			bcmerror = BCME_ERROR;
+		}
+		break;
+	case IOV_GVAL(IOV_PCIECOREREG):
+		int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PCIECFGREG):
+		OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2);
+		break;
+
+	case IOV_GVAL(IOV_PCIECFGREG):
+		int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PCIE_LPBK):
+		bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
+		break;
+
+	case IOV_SVAL(IOV_PCIE_DMAXFER):
+		bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3);
+		break;
+
+	case IOV_GVAL(IOV_PCIE_SUSPEND):
+		int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PCIE_SUSPEND):
+		dhdpcie_bus_suspend(bus, bool_val);
+		break;
+
+	case IOV_GVAL(IOV_MEMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_MEMBYTES):
+	case IOV_GVAL(IOV_MEMBYTES):
+	{
+		uint32 address;		/* absolute backplane address */
+		uint size, dsize;
+		uint8 *data;
+
+		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+		ASSERT(plen >= 2*sizeof(int));
+
+		address = (uint32)int_val;
+		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+		size = (uint)int_val;
+
+		/* Do some validation */
+		dsize = set ? plen - (2 * sizeof(int)) : len;
+		if (dsize < size) {
+			DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+			           __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x, dsize %d\n", __FUNCTION__,
+		          (set ? "write" : "read"), size, address, dsize));
+
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+			if (set && address == bus->dongle_ram_base) {
+				bus->resetinstr = *(((uint32*)params) + 2);
+			}
+		} else {
+		/* If we know about SOCRAM, check for a fit */
+		if ((bus->orig_ramsize) &&
+		    ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
+		{
+			uint8 enable, protect, remap;
+			si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+			if (!enable || protect) {
+				DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+					__FUNCTION__, bus->orig_ramsize, size, address));
+				DHD_ERROR(("%s: socram enable %d, protect %d\n",
+					__FUNCTION__, enable, protect));
+				bcmerror = BCME_BADARG;
+				break;
+			}
+
+			if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
+				uint32 devramsize = si_socdevram_size(bus->sih);
+				if ((address < SOCDEVRAM_ARM_ADDR) ||
+					(address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
+					DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
+						__FUNCTION__, address, size));
+					DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
+						__FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
+					bcmerror = BCME_BADARG;
+					break;
+				}
+				/* move it such that address is real now */
+				address -= SOCDEVRAM_ARM_ADDR;
+				address += SOCDEVRAM_BP_ADDR;
+				DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
+					__FUNCTION__, (set ? "write" : "read"), size, address));
+			} else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
+				/* Can not access remap region while devram remap bit is set
+				 * ROM content would be returned in this case
+				 */
+				DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
+					__FUNCTION__, address));
+				bcmerror = BCME_ERROR;
+				break;
+			}
+		}
+		}
+
+		/* Generate the actual data pointer */
+		data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+		/* Call to do the transfer */
+		bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size);
+
+		break;
+	}
+
+#ifdef BCM_BUZZZ
+	case IOV_GVAL(IOV_BUZZZ_DUMP):
+		bcmerror = dhd_buzzz_dump_dngl(bus);
+		break;
+#endif /* BCM_BUZZZ */
+
+	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+		bcmerror = dhdpcie_bus_download_state(bus, bool_val);
+		break;
+
+	case IOV_GVAL(IOV_RAMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_RAMSTART):
+		int_val = (int32)bus->dongle_ram_base;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_CC_NVMSHADOW):
+	{
+		struct bcmstrbuf dump_b;
+
+		bcm_binit(&dump_b, arg, len);
+		bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
+		break;
+	}
+
+	case IOV_GVAL(IOV_SLEEP_ALLOWED):
+		bool_val = bus->sleep_allowed;
+		bcopy(&bool_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SLEEP_ALLOWED):
+		bus->sleep_allowed = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_DONGLEISOLATION):
+		int_val = bus->dhd->dongle_isolation;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DONGLEISOLATION):
+		bus->dhd->dongle_isolation = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
+		int_val = bus->ltrsleep_on_unload;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
+		bus->ltrsleep_on_unload = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
+	{
+		struct bcmstrbuf dump_b;
+		bcm_binit(&dump_b, arg, len);
+		bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
+		break;
+	}
+	case IOV_GVAL(IOV_DMA_RINGINDICES):
+	{
+		int h2d_support, d2h_support;
+
+		d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0;
+		h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0;
+		int_val = d2h_support | (h2d_support << 1);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_DMA_RINGINDICES):
+		/* Can change it only during initialization/FW download */
+		if (bus->dhd->busstate == DHD_BUS_DOWN) {
+			if ((int_val > 3) || (int_val < 0)) {
+				DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
+				bcmerror = BCME_BADARG;
+			} else {
+				bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
+				bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
+			}
+		} else {
+			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+				__FUNCTION__));
+			bcmerror = BCME_NOTDOWN;
+		}
+		break;
+
+	case IOV_GVAL(IOV_RX_METADATALEN):
+		int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RX_METADATALEN):
+		if (int_val > 64) {
+			bcmerror = BCME_BUFTOOLONG;
+			break;
+		}
+		dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
+		break;
+
+	case IOV_SVAL(IOV_TXP_THRESHOLD):
+		dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
+		break;
+
+	case IOV_GVAL(IOV_TXP_THRESHOLD):
+		int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DB1_FOR_MB):
+		if (int_val)
+			bus->db1_for_mb = TRUE;
+		else
+			bus->db1_for_mb = FALSE;
+		break;
+
+	case IOV_GVAL(IOV_DB1_FOR_MB):
+		if (bus->db1_for_mb)
+			int_val = 1;
+		else
+			int_val = 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_TX_METADATALEN):
+		int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TX_METADATALEN):
+		if (int_val > 64) {
+			bcmerror = BCME_BUFTOOLONG;
+			break;
+		}
+		dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
+		break;
+
+	case IOV_GVAL(IOV_FLOW_PRIO_MAP):
+		int_val = bus->dhd->flow_prio_map_type;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_FLOW_PRIO_MAP):
+		int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	return bcmerror;
+}
+
+/* Issue a loopback request of the given length to the dongle */
+static int
+dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 len)
+{
+	if (bus->dhd == NULL) {
+		DHD_ERROR(("bus not inited\n"));
+		return 0;
+	}
+	if (bus->dhd->prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return 0;
+	}
+	if (bus->dhd->busstate != DHD_BUS_DATA) {
+		DHD_ERROR(("bus not in a ready state for loopback\n"));
+		return 0;
+	}
+	dhdmsgbuf_lpbk_req(bus->dhd, len);
+	return 0;
+}
+
+void
+dhd_bus_set_suspend_resume(dhd_pub_t *dhdp, bool state)
+{
+	struct  dhd_bus *bus = dhdp->bus;
+	if (bus) {
+		dhdpcie_bus_suspend(bus, state);
+	}
+}
+
+int
+dhdpcie_bus_suspend(struct  dhd_bus *bus, bool state)
+{
+
+	int timeleft;
+	bool pending;
+	int rc = 0;
+
+	if (bus->dhd == NULL) {
+		DHD_ERROR(("bus not inited\n"));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) {
+		DHD_ERROR(("bus not in a ready state for suspend/resume\n"));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	if (state == (bus->dhd->busstate == DHD_BUS_SUSPEND)) /* Set to same state */
+		return BCME_OK;
+
+	if (state) {
+#ifdef EXYNOS5433_PCIE_WAR
+		exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+		bus->wait_for_d3_ack = 0;
+		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
+		timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->wait_for_d3_ack, &pending);
+		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+		if (bus->wait_for_d3_ack) {
+			/* Got D3 Ack. Suspend the bus */
+			rc = dhdpcie_pci_suspend_resume(bus->dev, state);
+			bus->dhd->busstate = DHD_BUS_SUSPEND;
+		} else if (timeleft == 0) {
+			DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#ifdef EXYNOS5433_PCIE_WAR
+			exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+			return -ETIMEDOUT;
+		}
+		bus->wait_for_d3_ack = 1;
+	}
+	else {
+		/* Resume */
+		rc = dhdpcie_pci_suspend_resume(bus->dev, state);
+		bus->dhd->busstate = DHD_BUS_DATA;
+
+	}
+#ifdef EXYNOS5433_PCIE_WAR
+	exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+	return rc;
+}
+
+/* Transfers bytes from host to dongle and to host again using DMA */
+static int
+dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay)
+{
+	if (bus->dhd == NULL) {
+		DHD_ERROR(("bus not inited\n"));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->busstate != DHD_BUS_DATA) {
+		DHD_ERROR(("bus not in a ready state for DMA transfer\n"));
+		return BCME_ERROR;
+	}
+
+	if (len < 5 || len > 4194296) {
+		DHD_ERROR(("len is too small or too large\n"));
+		return BCME_ERROR;
+	}
+	return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay);
+}
+
+
+
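+/* Put the dongle into (enter == TRUE) or take it out of firmware download state */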
+static int
+dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
+{
+	int bcmerror = 0;
+	uint32 *cr4_regs;
+
+	if (!bus->sih)
+		return BCME_ERROR;
+	/* To enter download state, disable ARM and reset SOCRAM.
+	 * To exit download state, simply reset ARM (default is RAM boot).
+	 */
+	if (enter) {
+		bus->alp_only = TRUE;
+
+		/* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */
+		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+
+		if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		if (cr4_regs == NULL) { /* no CR4 present on chip */
+			si_core_disable(bus->sih, 0);
+
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			si_core_reset(bus->sih, 0, 0);
+
+
+			/* Clear the top bit of memory */
+			if (bus->ramsize) {
+				uint32 zeros = 0;
+				if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
+				                     (uint8*)&zeros, 4) < 0) {
+					bcmerror = BCME_ERROR;
+					goto fail;
+				}
+			}
+		} else {
+			/* For CR4,
+			 * Halt ARM
+			 * Remove ARM reset
+			 * Read RAM base address [0x18_0000]
+			 * [next] Download firmware
+			 * [done at else] Populate the reset vector
+			 * [done at else] Remove ARM halt
+			*/
+			/* Halt ARM & remove reset */
+			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+			if (bus->sih->chip == BCM43602_CHIP_ID) {
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+			}
+			/* reset last 4 bytes of RAM address. to be used for shared area */
+			dhdpcie_init_shared_addr(bus);
+		}
+	} else {
+		if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			if (!si_iscoreup(bus->sih)) {
+				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+
+			/* Enable remap before ARM reset but after vars.
+			 * No backplane access in remap mode
+			 */
+
+			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+
+			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+		} else {
+			if (bus->sih->chip == BCM43602_CHIP_ID) {
+				/* Firmware crashes on SOCSRAM access when core is in reset */
+				if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+					DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
+						__FUNCTION__));
+					bcmerror = BCME_ERROR;
+					goto fail;
+				}
+				si_core_reset(bus->sih, 0, 0);
+				si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+			}
+
+			/* write vars */
+			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+
+
+			/* switch back to arm core again */
+			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			/* write address 0 with reset instruction */
+			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
+				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+
+			/* now remove reset and halt and continue to run CR4 */
+		}
+
+		si_core_reset(bus->sih, 0, 0);
+
+		/* Allow HT Clock now that the ARM is running. */
+		bus->alp_only = FALSE;
+
+		bus->dhd->busstate = DHD_BUS_LOAD;
+	}
+
+fail:
+	/* Always return to PCIE core */
+	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+
+	return bcmerror;
+}
+
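+/* Copy the NVRAM variables to the top of dongle RAM and write the length/checksum
+ * token into the last word.
+ */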
+static int
+dhdpcie_bus_write_vars(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+	uint32 varsize, phys_size;
+	uint32 varaddr;
+	uint8 *vbuffer;
+	uint32 varsizew;
+#ifdef DHD_DEBUG
+	uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+	varaddr = (bus->ramsize - 4) - varsize;
+
+	varaddr += bus->dongle_ram_base;
+
+	if (bus->vars) {
+
+		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+		if (!vbuffer)
+			return BCME_NOMEM;
+
+		bzero(vbuffer, varsize);
+		bcopy(bus->vars, vbuffer, bus->varsz);
+		/* Write the vars list */
+		bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+
+		/* Implement read back and verify later */
+#ifdef DHD_DEBUG
+		/* Verify NVRAM bytes */
+		DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+		if (!nvram_ularray) {
+			MFREE(bus->dhd->osh, vbuffer, varsize);
+			return BCME_NOMEM;
+		}
+
+		/* Upload image to verify downloaded contents. */
+		memset(nvram_ularray, 0xaa, varsize);
+
+		/* Read the vars list to temp buffer for comparison */
+		bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+		if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, varsize, varaddr));
+		}
+
+		/* Compare the org NVRAM with the one read from RAM */
+		if (memcmp(vbuffer, nvram_ularray, varsize)) {
+			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+			__FUNCTION__));
+
+		MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+		MFREE(bus->dhd->osh, vbuffer, varsize);
+	}
+
+	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+
+	phys_size += bus->dongle_ram_base;
+
+	/* adjust to the user specified RAM */
+	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+		phys_size, bus->ramsize));
+	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+		varaddr, varsize));
+	varsize = ((phys_size - 4) - varaddr);
+
+	/*
+	 * Determine the length token:
+	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+	 */
+	if (bcmerror) {
+		varsizew = 0;
+		bus->nvram_csm = varsizew;
+	} else {
+		varsizew = varsize / 4;
+		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+		bus->nvram_csm = varsizew;
+		varsizew = htol32(varsizew);
+	}
+
+	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+	/* Write the length token to the last word */
+	bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
+		(uint8*)&varsizew, 4);
+
+	return bcmerror;
+}
+
+int
+dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+	int bcmerror = BCME_OK;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Basic sanity checks */
+	if (bus->dhd->up) {
+		bcmerror = BCME_NOTDOWN;
+		goto err;
+	}
+	if (!len) {
+		bcmerror = BCME_BUFTOOSHORT;
+		goto err;
+	}
+
+	/* Free the old ones and replace with passed variables */
+	if (bus->vars)
+		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+	bus->vars = MALLOC(bus->dhd->osh, len);
+	bus->varsz = bus->vars ? len : 0;
+	if (bus->vars == NULL) {
+		bcmerror = BCME_NOMEM;
+		goto err;
+	}
+
+	/* Copy the passed variables, which should include the terminating double-null */
+	bcopy(arg, bus->vars, bus->varsz);
+err:
+	return bcmerror;
+}
+
+/* Add bus dump output to a buffer */
+void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	uint16 flowid;
+	flow_ring_node_t *flow_ring_node;
+
+	dhd_prot_print_info(dhdp, strbuf);
+	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
+		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+		if (flow_ring_node->active) {
+			bcm_bprintf(strbuf, "Flow:%d IF %d Prio %d  Qlen %d ",
+				flow_ring_node->flowid, flow_ring_node->flow_info.ifindex,
+				flow_ring_node->flow_info.tid, flow_ring_node->queue.len);
+			dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf);
+		}
+	}
+}
+
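+/* Walk the list of active tx flow rings and let the protocol layer update each one */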
+static void
+dhd_update_txflowrings(dhd_pub_t *dhd)
+{
+	dll_t *item, *next;
+	flow_ring_node_t *flow_ring_node;
+	struct dhd_bus *bus = dhd->bus;
+
+	for (item = dll_head_p(&bus->const_flowring);
+	         !dll_end(&bus->const_flowring, item); item = next) {
+		next = dll_next_p(item);
+
+		flow_ring_node = dhd_constlist_to_flowring(item);
+		ASSERT(flow_ring_node->active);
+		dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
+	}
+}
+
+
+/* Mailbox ringbell Function */
+static void
+dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
+{
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		DHD_ERROR(("mailbox communication not supported\n"));
+		return;
+	}
+	if (bus->db1_for_mb)  {
+		/* this is a pcie core register, not the config register */
+		DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
+	}
+	else {
+		DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
+		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+	}
+}
+
+/* doorbell ring Function */
+void
+dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
+{
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
+	} else {
+		/* this is a pcie core register, not the config register */
+		DHD_INFO(("writing a door bell to the device\n"));
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox, ~0, 0x12345678);
+	}
+}
+
+static void
+dhd_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
+{
+	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
+}
+
+static void
+dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
+{
+	uint32 w;
+	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
+	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
+}
+
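+/* Select the doorbell-ring routine that matches the PCIe core revision and cache
+ * its register address for the fast path.
+ */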
+dhd_mb_ring_t
+dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
+{
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+			PCIMailBoxInt);
+		if (bus->pcie_mb_intr_addr) {
+			bus->pcie_mb_intr_osh = si_osh(bus->sih);
+			return dhd_bus_ringbell_oldpcie;
+		}
+	} else {
+		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+			PCIH2D_MailBox);
+		if (bus->pcie_mb_intr_addr) {
+			bus->pcie_mb_intr_osh = si_osh(bus->sih);
+			return dhd_bus_ringbell_fast;
+		}
+	}
+	return dhd_bus_ringbell;
+}
+
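+/* Bus DPC: read and clear interrupt status, then process mailbox and message-ring
+ * events; returns TRUE if a reschedule is wanted.
+ */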
+bool BCMFASTPATH
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+	uint32 intstatus = 0;
+	uint32 newstatus = 0;
+	bool resched = FALSE;	  /* Flag indicating resched wanted */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+		bus->intstatus = 0;
+		return 0;
+	}
+
+	if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
+		resched = TRUE;
+		DHD_ERROR(("%s : pcie is still in suspend state!!!\n", __FUNCTION__));
+		OSL_DELAY(20 * 1000); /* 20ms */
+		return resched;
+	}
+
+	intstatus = bus->intstatus;
+
+	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
+		(bus->sih->buscorerev == 2)) {
+		newstatus =  dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, newstatus);
+		/* Merge new bits with previous */
+		intstatus |= newstatus;
+		bus->intstatus = 0;
+		if (intstatus & I_MB) {
+			dhdpcie_bus_process_mailbox_intr(bus, intstatus);
+		}
+	} else {
+		/* this is a PCIE core register..not a config register... */
+		newstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+		intstatus |= (newstatus & bus->def_intmask);
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, intstatus, intstatus);
+		if (intstatus & bus->def_intmask) {
+			dhdpcie_bus_process_mailbox_intr(bus, intstatus);
+			intstatus &= ~bus->def_intmask;
+		}
+	}
+
+	dhdpcie_bus_intr_enable(bus);
+	return resched;
+
+}
+
+
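+/* Write a host-to-dongle mailbox word into shared memory and ring the doorbell;
+ * if a previous word is still pending, wait briefly for the dongle to consume it.
+ */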
+static void
+dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
+{
+	uint32 cur_h2d_mb_data = 0;
+
+	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
+
+	if (cur_h2d_mb_data != 0) {
+		uint32 i = 0;
+		DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data));
+		while ((i++ < 100) && cur_h2d_mb_data) {
+			OSL_DELAY(10);
+			dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
+		}
+		if (i >= 100)
+			DHD_ERROR(("waited 1ms for the dngl to ack the previous mb transaction\n"));
+	}
+
+	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), HTOD_MB_DATA, 0);
+	dhd_bus_gen_devmb_intr(bus);
+
+	if (h2d_mb_data == H2D_HOST_D3_INFORM)
+		DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
+}
+
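+/* Read and clear the dongle-to-host mailbox word, then handle deep-sleep requests,
+ * D3 ACKs and firmware-halt notifications.
+ */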
+static void
+dhdpcie_handle_mb_data(dhd_bus_t *bus)
+{
+	uint32 d2h_mb_data = 0;
+	uint32 zero = 0;
+	dhd_bus_cmn_readshared(bus, &d2h_mb_data, DTOH_MB_DATA, 0);
+	if (!d2h_mb_data)
+		return;
+
+	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), DTOH_MB_DATA, 0);
+
+	DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
+	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
+		/* what should we do */
+		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
+		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+		DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
+	}
+	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
+		/* what should we do */
+		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
+	}
+	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
+		/* what should we do */
+		DHD_INFO_HW4(("%s D2H_MB_DATA: Received D3 ACK\n", __FUNCTION__));
+		if (!bus->wait_for_d3_ack) {
+			bus->wait_for_d3_ack = 1;
+			dhd_os_ioctl_resp_wake(bus->dhd);
+		}
+	}
+	if (d2h_mb_data & D2H_DEV_FWHALT)  {
+		DHD_INFO(("FW trap has happened\n"));
+#ifdef DHD_DEBUG
+		dhdpcie_checkdied(bus, NULL, 0);
+#endif
+		bus->dhd->busstate = DHD_BUS_DOWN;
+	}
+}
+
+static void
+dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
+{
+
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		/* Msg stream interrupt */
+		if (intstatus & I_BIT1) {
+			dhdpci_bus_read_frames(bus);
+		} else if (intstatus & I_BIT0) {
+			/* do nothing for Now */
+		}
+	}
+	else {
+		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
+			dhdpcie_handle_mb_data(bus);
+		if (intstatus & PCIE_MB_D2H_MB_MASK) {
+				dhdpci_bus_read_frames(bus);
+		}
+	}
+}
+
+/* Decode dongle to host message stream */
+static void
+dhdpci_bus_read_frames(dhd_bus_t *bus)
+{
+	/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
+	DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */
+
+	dhd_prot_process_ctrlbuf(bus->dhd);
+
+	/* update the flow ring cpls */
+	dhd_update_txflowrings(bus->dhd);
+
+	dhd_prot_process_msgbuf_txcpl(bus->dhd);
+
+	dhd_prot_process_msgbuf_rxcpl(bus->dhd);
+
+	DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */
+}
+
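+/* Locate and read the pciedev_shared_t structure that the dongle publishes at the
+ * top of its RAM, then pull out the ring and mailbox information.
+ */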
+static int
+dhdpcie_readshared(dhd_bus_t *bus)
+{
+	uint32 addr = 0;
+	int rv, w_init, r_init;
+	uint32 shaddr = 0;
+	pciedev_shared_t *sh = bus->pcie_sh;
+	dhd_timeout_t tmo;
+
+	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+	/* start a timer for 5 seconds */
+	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
+
+	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
+		/* Read last word in memory to determine address of sdpcm_shared structure */
+		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
+	}
+
+	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
+		(addr > shaddr)) {
+		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
+			__FUNCTION__, addr));
+		DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
+		return BCME_ERROR;
+	} else {
+		bus->shared_addr = (ulong)addr;
+		DHD_ERROR(("PCIe shared addr read took %u usec "
+			"before dongle is ready\n", tmo.elapsed));
+	}
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
+		sizeof(pciedev_shared_t))) < 0) {
+		DHD_ERROR(("Failed to read PCIe shared struct, "
+			"size read %d < %d\n", rv, (int)sizeof(pciedev_shared_t)));
+		return rv;
+	}
+
+	/* Endianness */
+	sh->flags = ltoh32(sh->flags);
+	sh->trap_addr = ltoh32(sh->trap_addr);
+	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+	sh->assert_line = ltoh32(sh->assert_line);
+	sh->console_addr = ltoh32(sh->console_addr);
+	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+	sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
+	sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
+	/* load bus console address */
+
+#ifdef DHD_DEBUG
+	bus->console_addr = sh->console_addr;
+#endif
+
+	/* Read the dma rx offset */
+	bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
+	dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
+
+	DHD_ERROR(("DMA RX offset from shared Area %d\n", bus->dma_rxoffset));
+
+	if ((sh->flags & PCIE_SHARED_VERSION_MASK) > PCIE_SHARED_VERSION) {
+		DHD_ERROR(("%s: pcie_shared version %d in dhd "
+		           "is older than pciedev_shared version %d in dongle\n",
+		           __FUNCTION__, PCIE_SHARED_VERSION,
+		           sh->flags & PCIE_SHARED_VERSION_MASK));
+		return BCME_ERROR;
+	}
+	if ((sh->flags & PCIE_SHARED_VERSION_MASK) >= 4) {
+		if (sh->flags & PCIE_SHARED_TXPUSH_SPRT) {
+#ifdef DHDTCPACK_SUPPRESS
+			/* Do not use tcpack suppress as packets don't stay in queue */
+			dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+#endif
+			bus->txmode_push = TRUE;
+		} else
+			bus->txmode_push = FALSE;
+	}
+	DHD_ERROR(("bus->txmode_push is set to %d\n", bus->txmode_push));
+
+	/* Does the FW support DMA'ing r/w indices */
+	if (sh->flags & PCIE_SHARED_DMA_INDEX) {
+
+		DHD_ERROR(("%s: Host supports DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
+			__FUNCTION__,
+			(DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0),
+			(DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0)));
+
+	} else if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ||
+	           DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
+
+#ifdef BCM_INDX_DMA
+		DHD_ERROR(("%s: Incompatible FW. FW does not support DMAing indices\n",
+			__FUNCTION__));
+		return BCME_ERROR;
+#endif
+		DHD_ERROR(("%s: Host supports DMAing indices but FW does not\n",
+			__FUNCTION__));
+		bus->dhd->dma_d2h_ring_upd_support = FALSE;
+		bus->dhd->dma_h2d_ring_upd_support = FALSE;
+	}
+
+
+	/* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
+	{
+		ring_info_t  ring_info;
+
+		if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
+			(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
+			return rv;
+
+		bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
+		bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
+
+
+		bus->max_sub_queues = ltoh16(ring_info.max_sub_queues);
+
+		/* If both FW and Host support DMA'ing indices, allocate memory and notify FW
+		 * The max_sub_queues is read from FW initialized ring_info
+		 */
+		if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
+			w_init = dhd_prot_init_index_dma_block(bus->dhd,
+				HOST_TO_DNGL_DMA_WRITEINDX_BUFFER,
+				bus->max_sub_queues);
+			r_init = dhd_prot_init_index_dma_block(bus->dhd,
+				DNGL_TO_HOST_DMA_READINDX_BUFFER,
+				BCMPCIE_D2H_COMMON_MSGRINGS);
+
+			if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
+				DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices. "
+						"Host will use w/r indices in TCM\n",
+						__FUNCTION__));
+				bus->dhd->dma_h2d_ring_upd_support = FALSE;
+			}
+		}
+
+		if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) {
+			w_init = dhd_prot_init_index_dma_block(bus->dhd,
+				DNGL_TO_HOST_DMA_WRITEINDX_BUFFER,
+				BCMPCIE_D2H_COMMON_MSGRINGS);
+			r_init = dhd_prot_init_index_dma_block(bus->dhd,
+				HOST_TO_DNGL_DMA_READINDX_BUFFER,
+				bus->max_sub_queues);
+
+			if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
+				DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices. "
+						"Host will use w/r indices in TCM\n",
+						__FUNCTION__));
+				bus->dhd->dma_d2h_ring_upd_support = FALSE;
+			}
+		}
+
+		/* read ringmem and ringstate ptrs from shared area and store in host variables */
+		dhd_fillup_ring_sharedptr_info(bus, &ring_info);
+
+		bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
+		DHD_INFO(("ring_info\n"));
+
+		DHD_ERROR(("max H2D queues %d\n", ltoh16(ring_info.max_sub_queues)));
+
+		DHD_INFO(("mail box address\n"));
+		DHD_INFO(("h2d_mb_data_ptr_addr 0x%04x\n", bus->h2d_mb_data_ptr_addr));
+		DHD_INFO(("d2h_mb_data_ptr_addr 0x%04x\n", bus->d2h_mb_data_ptr_addr));
+	}
+	return BCME_OK;
+}
+/* Read ring mem and ring state ptr info from the shared area in TCM */
+static void
+dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
+{
+	uint16 i = 0;
+	uint16 j = 0;
+	uint32 tcm_memloc;
+	uint32	d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
+
+	/* Ring mem ptr info */
+	/* Allocated in the order
+		H2D_MSGRING_CONTROL_SUBMIT              0
+		H2D_MSGRING_RXPOST_SUBMIT               1
+		D2H_MSGRING_CONTROL_COMPLETE            2
+		D2H_MSGRING_TX_COMPLETE                 3
+		D2H_MSGRING_RX_COMPLETE                 4
+		TX_FLOW_RING				5
+	*/
+
+	{
+		/* ringmemptr holds start of the mem block address space */
+		tcm_memloc = ltoh32(ring_info->ringmem_ptr);
+
+		/* Find out ringmem ptr for each common ring */
+		for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
+			bus->ring_sh[i].ring_mem_addr = tcm_memloc;
+			/* Update mem block */
+			tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
+			DHD_INFO(("ring id %d ring mem addr 0x%04x \n",
+				i, bus->ring_sh[i].ring_mem_addr));
+		}
+
+		/* Tx flow Ring */
+		if (bus->txmode_push) {
+			bus->ring_sh[i].ring_mem_addr = tcm_memloc;
+			DHD_INFO(("TX ring ring id %d ring mem addr 0x%04x \n",
+				i, bus->ring_sh[i].ring_mem_addr));
+		}
+	}
+
+	/* Ring state mem ptr info */
+	{
+		d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
+		d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
+		h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
+		h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
+		/* Store h2d common ring write/read pointers */
+		for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
+			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+			/* update mem block */
+			h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
+			h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
+
+			DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i,
+				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+		}
+		/* Store d2h common ring write/read pointers */
+		for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
+			bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
+			bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
+
+			/* update mem block */
+			d2h_w_idx_ptr = d2h_w_idx_ptr + sizeof(uint32);
+			d2h_r_idx_ptr = d2h_r_idx_ptr + sizeof(uint32);
+
+			DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
+				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+		}
+
+		/* Store txflow ring write/read pointers */
+		if (bus->txmode_push) {
+			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+			DHD_INFO(("txflow : idx %d write %x read %x \n", i,
+				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+		} else {
+			for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS);
+				i++, j++)
+			{
+				bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+				bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+				/* update mem block */
+				h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
+				h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
+
+				DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i,
+					bus->ring_sh[i].ring_state_w,
+					bus->ring_sh[i].ring_state_r));
+			}
+		}
+	}
+}
+/* Initialize bus module: prepare for communication w/dongle */
+int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	int  ret = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(bus->dhd);
+	if (!bus->dhd)
+		return 0;
+
+	/* Make sure we're talking to the core. */
+	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+	ASSERT(bus->reg != NULL);
+
+	/* before opening up bus for data transfer, check if shared area is intact */
+	ret = dhdpcie_readshared(bus);
+	if (ret < 0) {
+		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
+		return ret;
+	}
+
+
+	/* Make sure we're talking to the core. */
+	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+	ASSERT(bus->reg != NULL);
+
+	/* Set bus state according to enable result */
+	dhdp->busstate = DHD_BUS_DATA;
+
+	/* Enable the interrupt after device is up */
+	dhdpcie_bus_intr_enable(bus);
+
+	/* bcmsdh_intr_unmask(bus->sdh); */
+
+	return ret;
+
+}
+
+
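+/* Clear the last word of dongle RAM; the dongle later writes the address of its
+ * pciedev_shared_t structure there (see dhdpcie_readshared).
+ */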
+static void
+dhdpcie_init_shared_addr(dhd_bus_t *bus)
+{
+	uint32 addr = 0;
+	uint32 val = 0;
+	addr = bus->dongle_ram_base + bus->ramsize - 4;
+	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
+}
+
+
+bool
+dhdpcie_chipmatch(uint16 vendor, uint16 device)
+{
+	if (vendor != PCI_VENDOR_ID_BROADCOM) {
+		DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
+			vendor, device));
+		return (-ENODEV);
+	}
+
+	if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
+		(device == BCM4350_D11AC5G_ID) || BCM4350_CHIP(device))
+		return 0;
+
+	if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
+		(device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
+		(device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
+		(device == BCM4345_D11AC5G_ID) || (device == BCM4345_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
+		(device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID))
+		return 0;
+
+	if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
+		(device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID))
+		return 0;
+
+	if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
+		(device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
+		(device == BCM4358_D11AC5G_ID) || (device == BCM4358_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
+		(device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
+		return 0;
+	if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
+		(device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
+		return 0;
+	if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
+		(device == BCM4359_D11AC5G_ID) || (device == BCM4359_CHIP_ID))
+		return 0;
+
+
+	DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
+	return (-ENODEV);
+}
+
+
+/*
+Name:  dhdpcie_cc_nvmshadow
+
+Description:
+A shadow of the OTP/SPROM exists in the ChipCommon region
+between 0x800 and 0xBFF (backplane addresses 0x1800_0800 to 0x1800_0BFF).
+The strapping option (SPROM vs. OTP), the presence of OTP/SPROM and its size
+can also be read from the ChipCommon registers.
+*/
+
+static int
+dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
+{
+	uint16 dump_offset = 0;
+	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
+
+	/* Table for 65nm OTP Size (in bits) */
+	int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
+
+	volatile uint16 *nvm_shadow;
+
+	uint cur_coreid;
+	uint chipc_corerev;
+	chipcregs_t *chipcregs;
+
+
+	/* Save the current core */
+	cur_coreid = si_coreid(bus->sih);
+	/* Switch to ChipC */
+	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
+	chipc_corerev = si_corerev(bus->sih);
+
+	/* Check ChipcommonCore Rev */
+	if (chipc_corerev < 44) {
+		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
+		return BCME_UNSUPPORTED;
+	}
+
+	/* Check ChipID */
+	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) &&
+		((uint16)bus->sih->chip != BCM4345_CHIP_ID)) {
+		DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n",
+			__FUNCTION__));
+		return BCME_UNSUPPORTED;
+	}
+
+	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
+	if (chipcregs->sromcontrol & SRC_PRESENT) {
+		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
+		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
+					>> SRC_SIZE_SHIFT))) * 1024;
+		bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
+	}
+
+	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
+		bcm_bprintf(b, "\nOTP Present");
+
+		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
+			== OTPL_WRAP_TYPE_40NM) {
+			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
+			otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
+				        >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
+			bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+		} else {
+			/* This part is untested since newer chips have 40nm OTP */
+			otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
+				        >> CC_CAP_OTPSIZE_SHIFT];
+			bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+			DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
+				__FUNCTION__));
+		}
+	}
+
+	if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
+		((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
+		DHD_ERROR(("%s: SPROM and OTP could not be found \n",
+			__FUNCTION__));
+		return BCME_NOTFOUND;
+	}
+
+	/* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
+	if ((chipcregs->sromcontrol & SRC_OTPSEL) &&
+		(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
+
+		bcm_bprintf(b, "OTP Strap selected.\n"
+		               "\nOTP Shadow in ChipCommon:\n");
+
+		dump_size = otp_size / 16 ; /* 16bit words */
+
+	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
+		(chipcregs->sromcontrol & SRC_PRESENT)) {
+
+		bcm_bprintf(b, "SPROM Strap selected\n"
+				"\nSPROM Shadow in ChipCommon:\n");
+
+		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
+		/* dump_size in 16bit words */
+		dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
+	}
+	else {
+		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
+			__FUNCTION__));
+		return BCME_NOTFOUND;
+	}
+
+	if (bus->regs == NULL) {
+		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
+		return BCME_NOTREADY;
+	} else {
+	    bcm_bprintf(b, "\n Offset:");
+
+	    /* Point to the SPROM/OTP shadow in ChipCommon */
+	    nvm_shadow = chipcregs->sromotp;
+
+	   /*
+	    * Read 16 bits / iteration.
+	    * dump_size & dump_offset in 16-bit words
+	    */
+	    while (dump_offset < dump_size) {
+		if (dump_offset % 2 == 0)
+			/* Print the offset in the shadow space in Bytes */
+			bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
+
+		bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
+		dump_offset += 0x1;
+	    }
+	}
+
+	/* Switch back to the original core */
+	si_setcore(bus->sih, cur_coreid, 0);
+
+	return BCME_OK;
+}
+
+
+uint8 BCMFASTPATH
+dhd_bus_is_txmode_push(dhd_bus_t *bus)
+{
+	return bus->txmode_push;
+}
+
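+/* Tear down a tx flow ring: release the protocol-layer ring state, free the
+ * flowid, drop any packets still queued on the ring and unlink it from the
+ * constructed flow-ring list.
+ */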
+void dhd_bus_clean_flow_ring(dhd_bus_t *bus, uint16 flowid)
+{
+	void *pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	queue = &flow_ring_node->queue;
+
+	/* Call Flow ring clean up */
+	dhd_prot_clean_flow_ring(bus->dhd, flow_ring_node->prot_info);
+	dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
+	                flow_ring_node->flowid);
+
+	/* clean up BUS level info */
+	DHD_QUEUE_LOCK(queue->lock, flags);
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+	 * when there is a newly coming packet from network stack.
+	 */
+	dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+	/* Flush all pending packets in the queue, if any */
+	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+		PKTFREE(bus->dhd->osh, pkt, TRUE);
+	}
+	ASSERT(flow_queue_empty(queue));
+
+	DHD_QUEUE_UNLOCK(queue->lock, flags);
+
+	flow_ring_node->active = FALSE;
+
+	dll_delete(&flow_ring_node->list);
+}
+
+/*
+ * Allocate a Flow ring buffer,
+ * Init Ring buffer,
+ * Send Msg to device about flow ring creation
+*/
+int
+dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
+{
+	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
+
+	DHD_INFO(("%s :Flow create\n", __FUNCTION__));
+
+	/* Send Msg to device about flow ring creation */
+	dhd_prot_flow_ring_create(bus->dhd, flow_ring_node);
+
+	flow_ring_node->status = FLOW_RING_STATUS_PENDING;
+
+	dll_prepend(&bus->const_flowring, &flow_ring_node->list);
+
+	return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
+{
+	flow_ring_node_t *flow_ring_node;
+
+	DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Flow create Response failure error status = %d \n",
+		     __FUNCTION__, status));
+		/* Call Flow clean up */
+		dhd_bus_clean_flow_ring(bus, flowid);
+		return;
+	}
+
+	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+
+	dhd_bus_schedule_queue(bus, flowid, FALSE);
+
+	return;
+}
+
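+/* Handle a flow ring delete request: flush any packets still queued on the
+ * ring, then ask the dongle to delete it and mark the ring delete-pending.
+ */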
+int
+dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
+{
+	void * pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
+
+	flow_ring_node = (flow_ring_node_t *)arg;
+
+	if (flow_ring_node->status & FLOW_RING_STATUS_DELETE_PENDING) {
+		DHD_ERROR(("%s :Delete Pending\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+	DHD_QUEUE_LOCK(queue->lock, flags);
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+	 * when there is a newly coming packet from network stack.
+	 */
+	dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+	/* Flush all pending packets in the queue, if any */
+	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+		PKTFREE(bus->dhd->osh, pkt, TRUE);
+	}
+	ASSERT(flow_queue_empty(queue));
+
+	DHD_QUEUE_UNLOCK(queue->lock, flags);
+
+	/* Send Msg to device about flow ring deletion */
+	dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
+
+	flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
+	return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
+{
+	flow_ring_node_t *flow_ring_node;
+
+	DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
+		    __FUNCTION__, status));
+		return;
+	}
+	/* Call Flow clean up */
+	dhd_bus_clean_flow_ring(bus, flowid);
+
+	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+	flow_ring_node->active = FALSE;
+	return;
+
+}
+
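+/* Handle a flow ring flush request: drop all packets queued on the ring and
+ * ask the dongle to flush it, marking the ring flush-pending.
+ */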
+int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
+{
+	void *pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
+
+	flow_ring_node = (flow_ring_node_t *)arg;
+	queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+	DHD_QUEUE_LOCK(queue->lock, flags);
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+	 * when there is a newly coming packet from network stack.
+	 */
+	dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+	/* Flush all pending packets in the queue, if any */
+	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+		PKTFREE(bus->dhd->osh, pkt, TRUE);
+	}
+	ASSERT(flow_queue_empty(queue));
+
+	DHD_QUEUE_UNLOCK(queue->lock, flags);
+
+	/* Send Msg to device about flow ring flush */
+	dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
+
+	flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
+	return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
+{
+	flow_ring_node_t *flow_ring_node;
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
+		    __FUNCTION__, status));
+		return;
+	}
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+	return;
+}
+
+uint32
+dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush)
+{
+	if (bus->txmode_push)
+		*txpush = 1;
+	else
+		*txpush = 0;
+	return bus->max_sub_queues;
+}
+
+int
+dhdpcie_bus_clock_start(struct dhd_bus *bus)
+{
+	return dhdpcie_start_host_pcieclock(bus);
+}
+
+int
+dhdpcie_bus_clock_stop(struct dhd_bus *bus)
+{
+	return dhdpcie_stop_host_pcieclock(bus);
+}
+
+int
+dhdpcie_bus_disable_device(struct dhd_bus *bus)
+{
+	return dhdpcie_disable_device(bus);
+}
+
+int
+dhdpcie_bus_enable_device(struct dhd_bus *bus)
+{
+	return dhdpcie_enable_device(bus);
+}
+
+int
+dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
+{
+	return dhdpcie_alloc_resource(bus);
+}
+
+void
+dhdpcie_bus_free_resource(struct dhd_bus *bus)
+{
+	dhdpcie_free_resource(bus);
+}
+
+bool
+dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
+{
+	return dhdpcie_dongle_attach(bus);
+}
+
+int
+dhd_bus_release_dongle(struct dhd_bus *bus)
+{
+	bool dongle_isolation;
+	osl_t		*osh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		osh = bus->osh;
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			dongle_isolation = bus->dhd->dongle_isolation;
+			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.h b/drivers/net/wireless/bcmdhd/dhd_pcie.h
new file mode 100644
index 0000000000000000000000000000000000000000..5ea6c42f15795f67b6f745bb502377808bade6ac
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pcie.h
@@ -0,0 +1,168 @@
+/*
+ * Linux DHD Bus Module for PCIE
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_pcie.h 491657 2014-07-17 06:29:40Z $
+ */
+
+
+#ifndef dhd_pcie_h
+#define dhd_pcie_h
+
+#include <bcmpcie.h>
+#include <hnd_cons.h>
+
+/* defines */
+
+#define PCMSGBUF_HDRLEN 0
+#define DONGLE_REG_MAP_SIZE (32 * 1024)
+#define DONGLE_TCM_MAP_SIZE (4096 * 1024)
+#define DONGLE_MIN_MEMSIZE (128 * 1024)
+#ifdef DHD_DEBUG
+#define DHD_PCIE_SUCCESS 0
+#define DHD_PCIE_FAILURE 1
+#endif /* DHD_DEBUG */
+#define	REMAP_ENAB(bus)			((bus)->remap)
+#define	REMAP_ISADDR(bus, a)		(((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+
+#define MAX_DHD_TX_FLOWS	256
+
+/* user defined data structures */
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX	192
+#define CONSOLE_BUFFER_MAX	2024
+
+
+typedef struct dhd_console {
+	 uint		count;	/* Poll interval msec counter */
+	 uint		log_addr;		 /* Log struct address (fixed) */
+	 hnd_log_t	 log;			 /* Log struct (host copy) */
+	 uint		 bufsize;		 /* Size of log buffer */
+	 uint8		 *buf;			 /* Log buffer (host copy) */
+	 uint		 last;			 /* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
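+/* Per-ring shared memory bookkeeping: dongle address of the ring memory
+ * plus the addresses of the write and read state words for that ring.
+ */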
+typedef struct ring_sh_info {
+	uint32 ring_mem_addr;
+	uint32 ring_state_w;
+	uint32 ring_state_r;
+} ring_sh_info_t;
+
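+/* Per-device PCIe bus state kept by the DHD host driver */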
+typedef struct dhd_bus {
+	dhd_pub_t	*dhd;
+	struct pci_dev  *dev;		/* pci device handle */
+	dll_t       const_flowring; /* constructed list of tx flowring queues */
+
+	si_t		*sih;			/* Handle for SI calls */
+	char		*vars;			/* Variables (from CIS and/or other) */
+	uint		varsz;			/* Size of variables buffer */
+	uint32		sbaddr;			/* Current SB window pointer (-1, invalid) */
+	sbpcieregs_t	*reg;			/* Registers for PCIE core */
+
+	uint		armrev;			/* CPU core revision */
+	uint		ramrev;			/* SOCRAM core revision */
+	uint32		ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		orig_ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		srmemsize;		/* Size of SRMEM */
+
+	uint32		bus;			/* gSPI or SDIO bus */
+	uint32		intstatus;		/* Intstatus bits (events) pending */
+	bool		dpc_sched;		/* Indicates DPC schedule (intrpt rcvd) */
+	bool		fcstate;		/* State of dongle flow-control */
+
+	uint16		cl_devid;		/* cached devid for dhdsdio_probe_attach() */
+	char		*fw_path;		/* module_param: path to firmware image */
+	char		*nv_path;		/* module_param: path to nvram vars file */
+	char		*nvram_params;		/* user specified nvram params. */
+	int		nvram_params_len;
+
+	struct pktq	txq;			/* Queue length used for flow-control */
+
+	uint		rxlen;			/* Length of valid data in buffer */
+
+
+	bool		intr;			/* Use interrupts */
+	bool		ipend;			/* Device interrupt is pending */
+	bool		intdis;			/* Interrupts disabled by isr */
+	uint		intrcount;		/* Count of device interrupt callbacks */
+	uint		lastintrs;		/* Count as of last watchdog timer */
+
+#ifdef DHD_DEBUG
+	dhd_console_t	console;		/* Console output polling support */
+	uint		console_addr;		/* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+	bool		alp_only;		/* Don't use HT clock (ALP only) */
+
+	bool		remap;		/* Contiguous 1MB RAM: 512K socram + 512K devram
+					 * Available with socram rev 16
+					 * Remap region not DMA-able
+					 */
+	uint32		resetinstr;
+	uint32		dongle_ram_base;
+
+	ulong		shared_addr;
+	pciedev_shared_t	*pcie_sh;
+	bool bus_flowctrl;
+	ioctl_comp_resp_msg_t	ioct_resp;
+	uint32		dma_rxoffset;
+	volatile char	*regs;		/* pci device memory va */
+	volatile char	*tcm;		/* pci device memory va */
+	osl_t		*osh;
+	uint32		nvram_csm;	/* Nvram checksum */
+	uint16		pollrate;
+	uint16  polltick;
+
+	uint32  *pcie_mb_intr_addr;
+	void    *pcie_mb_intr_osh;
+	bool	sleep_allowed;
+
+	/* version 3 shared struct related info start */
+	ring_sh_info_t	ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS];
+	uint8	h2d_ring_count;
+	uint8	d2h_ring_count;
+	uint32  ringmem_ptr;
+	uint32  ring_state_ptr;
+
+	uint32 d2h_dma_scratch_buffer_mem_addr;
+
+	uint32 h2d_mb_data_ptr_addr;
+	uint32 d2h_mb_data_ptr_addr;
+	/* version 3 shared struct related info end */
+
+	uint32 def_intmask;
+	bool	ltrsleep_on_unload;
+	uint	wait_for_d3_ack;
+	uint8	txmode_push;
+	uint32 max_sub_queues;
+	bool	db1_for_mb;
+
+} dhd_bus_t;
+
+/* function declarations */
+
+extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size);
+extern int dhdpcie_bus_register(void);
+extern void dhdpcie_bus_unregister(void);
+extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);
+
+extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm);
+extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
+extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
+extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus);
+extern void dhdpcie_bus_release(struct dhd_bus *bus);
+extern int32 dhdpcie_bus_isr(struct dhd_bus *bus);
+extern void dhdpcie_free_irq(dhd_bus_t *bus);
+extern int dhdpcie_bus_suspend(struct  dhd_bus *bus, bool state);
+extern int dhdpcie_pci_suspend_resume(struct pci_dev *dev, bool state);
+extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus);
+extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus);
+extern int dhdpcie_disable_device(dhd_bus_t *bus);
+extern int dhdpcie_enable_device(dhd_bus_t *bus);
+extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
+extern void dhdpcie_free_resource(dhd_bus_t *bus);
+
+extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
+#endif /* dhd_pcie_h */
diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c b/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c
new file mode 100644
index 0000000000000000000000000000000000000000..706a59e245f1070ac5c670c49786267445333c98
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c
@@ -0,0 +1,859 @@
+/*
+ * Linux DHD Bus Module for PCIE
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_pcie_linux.c 491657 2014-07-17 06:29:40Z $
+ */
+
+
+/* include files */
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdevs.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <hndpmu.h>
+#include <sbchipc.h>
+#if defined(DHD_DEBUG)
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <dngl_stats.h>
+#include <pcie_core.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <bcmmsgbuf.h>
+#include <pcicfg.h>
+#include <dhd_pcie.h>
+#include <dhd_linux.h>
+#ifdef CONFIG_ARCH_MSM
+#include <mach/msm_pcie.h>
+#endif
+
+#define PCI_CFG_RETRY 		10
+#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN 	24		/* Mem. filename length */
+
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+	struct sk_buff *s = (struct sk_buff *)(p); \
+	ASSERT(OSL_PKTTAG_SZ == 32); \
+	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
+	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+
+
+/* user defined data structures  */
+
+typedef struct dhd_pc_res {
+	uint32 bar0_size;
+	void* bar0_addr;
+	uint32 bar1_size;
+	void* bar1_addr;
+} pci_config_res, *pPci_config_res;
+
+typedef bool (*dhdpcie_cb_fn_t)(void *);
+
+typedef struct dhdpcie_info
+{
+	dhd_bus_t	*bus;
+	osl_t 			*osh;
+	struct pci_dev  *dev;		/* pci device handle */
+	volatile char 	*regs;		/* pci device memory va */
+	volatile char 	*tcm;		/* pci device memory va */
+	uint32			tcm_size;	/* pci device memory size */
+	struct pcos_info *pcos_info;
+	uint16		last_intrstatus;	/* to cache intrstatus */
+	int	irq;
+	char pciname[32];
+	struct pci_saved_state* state;
+} dhdpcie_info_t;
+
+
+struct pcos_info {
+	dhdpcie_info_t *pc;
+	spinlock_t lock;
+	wait_queue_head_t intr_wait_queue;
+	struct timer_list tuning_timer;
+	int tuning_timer_exp;
+	atomic_t timer_enab;
+	struct tasklet_struct tuning_tasklet;
+};
+
+
+/* function declarations */
+static int __devinit
+dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev);
+static int dhdpcie_init(struct pci_dev *pdev);
+static irqreturn_t dhdpcie_isr(int irq, void *arg);
+/* OS Routine functions for PCI suspend/resume */
+
+static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
+static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state);
+static int dhdpcie_pci_resume(struct pci_dev *dev);
+static int dhdpcie_resume_dev(struct pci_dev *dev);
+static int dhdpcie_suspend_dev(struct pci_dev *dev);
+static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
+	{ vendor: 0x14e4,
+	device: PCI_ANY_ID,
+	subvendor: PCI_ANY_ID,
+	subdevice: PCI_ANY_ID,
+	class: PCI_CLASS_NETWORK_OTHER << 8,
+	class_mask: 0xffff00,
+	driver_data: 0,
+	},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
+
+static struct pci_driver dhdpcie_driver = {
+	node:		{},
+	name:		"pcieh",
+	id_table:	dhdpcie_pci_devid,
+	probe:		dhdpcie_pci_probe,
+	remove:		dhdpcie_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	save_state:	NULL,
+#endif
+	suspend:	dhdpcie_pci_suspend,
+	resume:		dhdpcie_pci_resume,
+};
+
+int dhdpcie_init_succeeded = FALSE;
+
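+/* Enable or disable PME# generation in the device's PCI PM control/status register */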
+static void dhdpcie_pme_active(struct pci_dev *pdev, bool enable)
+{
+	uint16 pmcsr;
+
+	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
+	/* Clear PME Status by writing 1 to it and enable PME# */
+	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
+	if (!enable)
+		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+
+	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr);
+}
+
+static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state)
+{
+	int ret = 0;
+	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+	dhd_bus_t *bus = NULL;
+
+	if (pch) {
+		bus = pch->bus;
+	}
+
+	/* When firmware is not loaded, only suspend/resume the PCI bus itself */
+	if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) &&
+		!bus->dhd->dongle_reset) {
+		ret = dhdpcie_pci_suspend_resume(bus->dev, state);
+		return ret;
+	}
+
+	if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)||
+		(bus->dhd->busstate == DHD_BUS_DATA))) {
+
+		ret = dhdpcie_bus_suspend(bus, state);
+	}
+	return ret;
+}
+
+static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state)
+{
+	BCM_REFERENCE(state);
+	return dhdpcie_set_suspend_resume(pdev, TRUE);
+}
+
+static int dhdpcie_pci_resume(struct pci_dev *pdev)
+{
+	return dhdpcie_set_suspend_resume(pdev, FALSE);
+}
+
+static int dhdpcie_suspend_dev(struct pci_dev *dev)
+{
+	int ret;
+	DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+	dhdpcie_pme_active(dev, TRUE);
+	pci_save_state(dev);
+	pci_enable_wake(dev, PCI_D0, TRUE);
+	pci_disable_device(dev);
+	ret = pci_set_power_state(dev, PCI_D3hot);
+	if (ret) {
+		DHD_ERROR(("%s: pci_set_power_state error %d\n",
+			__FUNCTION__, ret));
+	}
+	return ret;
+}
+
+static int dhdpcie_resume_dev(struct pci_dev *dev)
+{
+	int err = 0;
+	DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+	pci_restore_state(dev);
+	err = pci_enable_device(dev);
+	if (err) {
+		printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
+		return err;
+	}
+	pci_set_master(dev);
+	err = pci_set_power_state(dev, PCI_D0);
+	if (err) {
+		printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
+		return err;
+	}
+	dhdpcie_pme_active(dev, FALSE);
+	return err;
+}
+
+int dhdpcie_pci_suspend_resume(struct pci_dev *dev, bool state)
+{
+	int rc;
+
+	if (state)
+		rc = dhdpcie_suspend_dev(dev);
+	else
+		rc = dhdpcie_resume_dev(dev);
+	return rc;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+static int dhdpcie_device_scan(struct device *dev, void *data)
+{
+	struct pci_dev *pcidev;
+	int *cnt = data;
+
+	pcidev = container_of(dev, struct pci_dev, dev);
+	if (pcidev->vendor != 0x14e4)
+		return 0;
+
+	DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
+	*cnt += 1;
+	if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
+		DHD_ERROR(("Broadcom PCI Device 0x%04x is already claimed by driver %s\n",
+			pcidev->device, pcidev->driver->name));
+
+	return 0;
+}
+#endif /* LINUX_VERSION >= 2.6.0 */
+
+int
+dhdpcie_bus_register(void)
+{
+	int error = 0;
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	if (!(error = pci_module_init(&dhdpcie_driver)))
+		return 0;
+
+	DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#else
+	if (!(error = pci_register_driver(&dhdpcie_driver))) {
+		bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
+		if (!error) {
+			DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
+		} else if (!dhdpcie_init_succeeded) {
+			DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
+		} else {
+			return 0;
+		}
+
+		pci_unregister_driver(&dhdpcie_driver);
+		error = BCME_ERROR;
+	}
+#endif /* LINUX_VERSION < 2.6.0 */
+
+	return error;
+}
+
+
+void
+dhdpcie_bus_unregister(void)
+{
+	pci_unregister_driver(&dhdpcie_driver);
+}
+
+int __devinit
+dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+
+	if (dhdpcie_chipmatch(pdev->vendor, pdev->device)) {
+		DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
+		return -ENODEV;
+	}
+	printf("PCI_PROBE:  bus %X, slot %X,vendor %X, device %X"
+		"(good PCI location)\n", pdev->bus->number,
+		PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);
+
+	if (dhdpcie_init(pdev)) {
+		DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
+	return 0;
+}
+
+int
+dhdpcie_detach(dhdpcie_info_t *pch)
+{
+	if (pch) {
+		osl_t *osh = pch->osh;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (!dhd_download_fw_on_driverload)
+			pci_load_and_free_saved_state(pch->dev, &pch->state);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+		MFREE(osh, pch, sizeof(dhdpcie_info_t));
+	}
+	return 0;
+}
+
+
+void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev)
+{
+	osl_t *osh = NULL;
+	dhdpcie_info_t *pch = NULL;
+	dhd_bus_t *bus = NULL;
+
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+	pch = pci_get_drvdata(pdev);
+	bus = pch->bus;
+	osh = pch->osh;
+
+	dhdpcie_bus_release(bus);
+	pci_disable_device(pdev);
+	/* pcie info detach */
+	dhdpcie_detach(pch);
+	/* osl detach */
+	osl_detach(osh);
+
+	dhdpcie_init_succeeded = FALSE;
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+
+	return;
+}
+
+/* Request the Linux IRQ and register the interrupt handler */
+int
+dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
+{
+	dhd_bus_t *bus = dhdpcie_info->bus;
+	struct pci_dev *pdev = dhdpcie_info->bus->dev;
+
+	snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
+	    "dhdpcie:%s", pci_name(pdev));
+	if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
+	                dhdpcie_info->pciname, bus) < 0) {
+			DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+			return -1;
+	}
+
+	DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
+
+
+	return 0; /* SUCCESS */
+}
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PRINTF_RESOURCE	"0x%016llx"
+#else
+#define PRINTF_RESOURCE	"0x%08x"
+#endif
+
+/*
+
+Name:  dhdpcie_get_resource
+
+Parameters:
+
+1: dhdpcie_info_t *dhdpcie_info -- Linux-specific PCIe info structure for this device
+
+Return value:
+
+int   - Status (0 on success, -1 on failure)
+
+Description:
+Enables the PCI device, reads the BAR0/BAR1 resources from PCI configuration space
+and maps them into the dhdpcie_info structure.
+
+ */
+int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
+{
+	phys_addr_t  bar0_addr, bar1_addr;
+	ulong bar1_size;
+	struct pci_dev *pdev = NULL;
+	pdev = dhdpcie_info->dev;
+	do {
+		if (pci_enable_device(pdev)) {
+			printf("%s: Cannot enable PCI device\n", __FUNCTION__);
+			break;
+		}
+		pci_set_master(pdev);
+		bar0_addr = pci_resource_start(pdev, 0);	/* Bar-0 mapped address */
+		bar1_addr = pci_resource_start(pdev, 2);	/* Bar-1 mapped address */
+
+		/* read Bar-1 mapped memory range */
+		bar1_size = pci_resource_len(pdev, 2);
+
+		if ((bar1_size == 0) || (bar1_addr == 0)) {
+			printf("%s: BAR1 Not enabled for this device  size(%ld),"
+				" addr(0x"PRINTF_RESOURCE")\n",
+				__FUNCTION__, bar1_size, bar1_addr);
+			goto err;
+		}
+
+		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
+		dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
+
+		if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
+			DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
+			break;
+		}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (!dhd_download_fw_on_driverload) {
+			/* Back up the PCIe configuration space so it can be
+			 * restored across Wi-Fi on/off cycles when the driver
+			 * is built in
+			 */
+			pci_save_state(pdev);
+			dhdpcie_info->state = pci_store_saved_state(pdev);
+
+			if (dhdpcie_info->state == NULL) {
+				DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
+					__FUNCTION__));
+				REG_UNMAP(dhdpcie_info->regs);
+				REG_UNMAP(dhdpcie_info->tcm);
+				pci_disable_device(pdev);
+				break;
+			}
+		}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
+		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+		return 0; /* SUCCESS  */
+	} while (0);
+err:
+	return -1;  /* FAILURE */
+}
+
+int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
+{
+
+	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+	do {
+		/* define it here only!! */
+		if (dhdpcie_get_resource (dhdpcie_info)) {
+			DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
+			break;
+		}
+		DHD_TRACE(("%s:Exit - SUCCESS \n",
+			__FUNCTION__));
+
+		return 0; /* SUCCESS */
+
+	} while (0);
+
+	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+	return -1; /* FAILURE */
+
+}
+
+int dhdpcie_init(struct pci_dev *pdev)
+{
+
+	osl_t 				*osh = NULL;
+	dhd_bus_t 			*bus = NULL;
+	dhdpcie_info_t		*dhdpcie_info =  NULL;
+	wifi_adapter_info_t	*adapter = NULL;
+
+	do {
+		/* osl attach */
+		if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+			DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* initialize static buffer */
+		adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
+			PCI_SLOT(pdev->devfn));
+		if (adapter != NULL)
+			DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
+		else
+			DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
+		osl_static_mem_init(osh, adapter);
+
+		/* allocate the Linux-specific PCIe structure here */
+		if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
+			DHD_ERROR(("%s: MALLOC of dhdpcie_info_t failed\n", __FUNCTION__));
+			break;
+		}
+		bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
+		dhdpcie_info->osh = osh;
+		dhdpcie_info->dev = pdev;
+
+		/* Find the PCI resources, verify the vendor and device ID,
+		 * map the BAR regions and IRQ, and update the structures
+		 */
+		if (dhdpcie_scan_resource(dhdpcie_info)) {
+			DHD_ERROR(("%s: dhdpcie_scan_resource failed\n", __FUNCTION__));
+
+			break;
+		}
+
+		/* Bus initialization */
+		bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm);
+		if (!bus) {
+			DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_info->bus = bus;
+		dhdpcie_info->bus->dev = pdev;
+
+		if (bus->intr) {
+			/* Register interrupt callback, but mask it (not operational yet). */
+			DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
+			dhdpcie_bus_intr_disable(bus);
+
+			if (dhdpcie_request_irq(dhdpcie_info)) {
+				DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+				break;
+			}
+		} else {
+			bus->pollrate = 1;
+			DHD_INFO(("%s: PCIe interrupt function is NOT registered "
+				"due to polling mode\n", __FUNCTION__));
+		}
+
+		if (dhd_download_fw_on_driverload) {
+			if (dhd_bus_start(bus->dhd)) {
+				DHD_ERROR(("%s: dhd_bus_start() failed\n", __FUNCTION__));
+				break;
+			}
+		}
+
+		/* set private data for pci_dev */
+		pci_set_drvdata(pdev, dhdpcie_info);
+
+		/* Attach to the OS network interface */
+		DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
+		if (dhd_register_if(bus->dhd, 0, TRUE)) {
+			DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_init_succeeded = TRUE;
+
+		DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
+		return 0;  /* return  SUCCESS  */
+
+	} while (0);
+	/* reverse the initialization in order in case of error */
+
+	if (bus)
+		dhdpcie_bus_release(bus);
+
+	if (dhdpcie_info)
+		dhdpcie_detach(dhdpcie_info);
+	pci_disable_device(pdev);
+	if (osh)
+		osl_detach(osh);
+
+	dhdpcie_init_succeeded = FALSE;
+
+	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+	return -1; /* return FAILURE  */
+}
+
+/* Free Linux irq */
+void
+dhdpcie_free_irq(dhd_bus_t *bus)
+{
+	struct pci_dev *pdev = NULL;
+
+	DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
+	if (bus) {
+		pdev = bus->dev;
+		free_irq(pdev->irq, bus);
+	}
+	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+	return;
+}
+
+/*
+
+Name:  dhdpcie_isr
+
+Parameters:
+
+1: IN int irq   -- interrupt vector
+2: IN void *arg      -- handle to the private bus data structure
+
+Return value:
+
+Status (TRUE if the interrupt was handled, FALSE otherwise)
+
+Description:
+Interrupt service routine: checks the status register, disables the
+interrupt and schedules the DPC if a mailbox interrupt is raised.
+*/
+
+
+irqreturn_t
+dhdpcie_isr(int irq, void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)arg;
+	if (dhdpcie_bus_isr(bus))
+		return TRUE;
+	else
+		return FALSE;
+}
+
+int
+dhdpcie_start_host_pcieclock(dhd_bus_t *bus)
+{
+	int ret = 0;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	int options = 0;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL)
+		return BCME_ERROR;
+
+	if (bus->dev == NULL)
+		return BCME_ERROR;
+
+#if defined(CONFIG_ARCH_MSM)
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	if (bus->islinkdown) {
+		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
+	}
+	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+		NULL, NULL, options);
+	if (bus->islinkdown && !ret) {
+		msm_pcie_recover_config(bus->dev);
+		if (bus->dhd)
+			DHD_OS_WAKE_UNLOCK(bus->dhd);
+		bus->islinkdown = FALSE;
+	}
+#else
+	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+		NULL, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	if (ret) {
+		DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
+		goto done;
+	}
+
+done:
+#endif /* CONFIG_ARCH_MSM */
+	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
+	return ret;
+}
+
+int
+dhdpcie_stop_host_pcieclock(dhd_bus_t *bus)
+{
+	int ret = 0;
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	int options = 0;
+#endif
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL)
+		return BCME_ERROR;
+
+	if (bus->dev == NULL)
+		return BCME_ERROR;
+
+#if defined(CONFIG_ARCH_MSM)
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	if (bus->islinkdown)
+		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
+
+	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND,	bus->dev->bus->number,
+		NULL, NULL, options);
+#else
+	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND,	bus->dev->bus->number,
+		NULL, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	if (ret) {
+		DHD_ERROR(("Failed to stop PCIe link\n"));
+		goto done;
+	}
+done:
+#endif /* CONFIG_ARCH_MSM */
+	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
+	return ret;
+}
+
+int
+dhdpcie_disable_device(dhd_bus_t *bus)
+{
+	if (bus == NULL)
+		return BCME_ERROR;
+
+	if (bus->dev == NULL)
+		return BCME_ERROR;
+
+	pci_disable_device(bus->dev);
+
+	return 0;
+}
+
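+/* Re-enable the PCIe device: restore the saved configuration space
+ * (kernels >= 3.0), enable the device and turn on bus mastering.
+ */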
+int
+dhdpcie_enable_device(dhd_bus_t *bus)
+{
+	int ret = BCME_ERROR;
+	dhdpcie_info_t *pch;
+
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL)
+		return BCME_ERROR;
+
+	if (bus->dev == NULL)
+		return BCME_ERROR;
+
+	pch = pci_get_drvdata(bus->dev);
+	if (pch == NULL)
+		return BCME_ERROR;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	if (pci_load_saved_state(bus->dev, pch->state))
+		pci_disable_device(bus->dev);
+	else {
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+		pci_restore_state(bus->dev);
+		ret = pci_enable_device(bus->dev);
+		if (!ret)
+			pci_set_master(bus->dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+	if (ret)
+		pci_disable_device(bus->dev);
+
+	return ret;
+}
+
+int
+dhdpcie_alloc_resource(dhd_bus_t *bus)
+{
+	dhdpcie_info_t *dhdpcie_info;
+	phys_addr_t bar0_addr, bar1_addr;
+	ulong bar1_size;
+
+	do {
+		if (bus == NULL) {
+			DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		if (bus->dev == NULL) {
+			DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_info = pci_get_drvdata(bus->dev);
+		if (dhdpcie_info == NULL) {
+			DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		bar0_addr = pci_resource_start(bus->dev, 0);	/* Bar-0 mapped address */
+		bar1_addr = pci_resource_start(bus->dev, 2);	/* Bar-1 mapped address */
+
+		/* read Bar-1 mapped memory range */
+		bar1_size = pci_resource_len(bus->dev, 2);
+
+		if ((bar1_size == 0) || (bar1_addr == 0)) {
+			printf("%s: BAR1 Not enabled for this device size(%ld),"
+				" addr(0x"PRINTF_RESOURCE")\n",
+				__FUNCTION__, bar1_size, bar1_addr);
+			break;
+		}
+
+		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+		if (!dhdpcie_info->regs) {
+			DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
+			break;
+		}
+
+		bus->regs = dhdpcie_info->regs;
+		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
+		dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
+		if (!dhdpcie_info->tcm) {
+			DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
+			REG_UNMAP(dhdpcie_info->regs);
+			bus->regs = NULL;
+			break;
+		}
+
+		bus->tcm = dhdpcie_info->tcm;
+
+		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
+		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+		return 0;
+	} while (0);
+
+	return BCME_ERROR;
+}
+
+void
+dhdpcie_free_resource(dhd_bus_t *bus)
+{
+	dhdpcie_info_t *dhdpcie_info;
+
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	dhdpcie_info = pci_get_drvdata(bus->dev);
+	if (dhdpcie_info == NULL) {
+		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->regs) {
+		REG_UNMAP(dhdpcie_info->regs);
+		bus->regs = NULL;
+	}
+
+	if (bus->tcm) {
+		REG_UNMAP(dhdpcie_info->tcm);
+		bus->tcm = NULL;
+	}
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.c b/drivers/net/wireless/bcmdhd/dhd_pno.c
new file mode 100644
index 0000000000000000000000000000000000000000..d3f9ad7782ddf3d400b357c501d8ece06133fe46
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pno.c
@@ -0,0 +1,1874 @@
+/*
+ * Broadcom Dongle Host Driver (DHD)
+ * Preferred Network Offload and Wi-Fi Location Service (WLS) code.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_pno.c 423669 2013-09-18 13:01:55Z yangj$
+ */
+#ifdef PNO_SUPPORT
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+
+#include <proto/bcmevent.h>
+#include <dhd.h>
+#include <dhd_pno.h>
+#include <dhd_dbg.h>
+
+#ifdef __BIG_ENDIAN
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif /* __BIG_ENDIAN */
+
+#define NULL_CHECK(p, s, err)  \
+			do { \
+				if (!(p)) { \
+					printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+					err = BCME_ERROR; \
+					return err; \
+				} \
+			} while (0)
+#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state)
+#define PNO_BESTNET_LEN 1024
+#define PNO_ON 1
+#define PNO_OFF 0
+#define CHANNEL_2G_MAX 14
+#define MAX_NODE_CNT 5
+#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE)
+#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000)  \
+						- (uint32)(timestamp2/1000)))
+
+#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====")
+#define TIME_MIN_DIFF 5
+static inline bool
+is_dfs(uint16 channel)
+{
+	if (channel >= 52 && channel <= 64)			/* class 2 */
+		return TRUE;
+	else if (channel >= 100 && channel <= 140)	/* class 4 */
+		return TRUE;
+	else
+		return FALSE;
+}
+int
+dhd_pno_clean(dhd_pub_t *dhd)
+{
+	int pfn = 0;
+	int err;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	/* Disable PNO */
+	err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn(error : %d)\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	_pno_state->pno_status = DHD_PNO_DISABLED;
+	err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n",
+			__FUNCTION__, err));
+	}
+exit:
+	return err;
+}
+
+static int
+_dhd_pno_suspend(dhd_pub_t *dhd)
+{
+	int err;
+	int suspend = 1;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err));
+		goto exit;
+
+	}
+	_pno_state->pno_status = DHD_PNO_SUSPEND;
+exit:
+	return err;
+}
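+/* Enable or disable PNO in firmware via the "pfn" iovar; legacy PNO is
+ * rejected while associated, and disabling clears the host-side PNO mode.
+ */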
+static int
+_dhd_pno_enable(dhd_pub_t *dhd, int enable)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (enable & 0xfffe) {
+		DHD_ERROR(("%s invalid value\n", __FUNCTION__));
+		err = BCME_BADARG;
+		goto exit;
+	}
+	if (!dhd_support_sta_mode(dhd)) {
+		DHD_ERROR(("PNO is not allowed for non-STA mode"));
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (enable) {
+		if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
+			dhd_is_associated(dhd, NULL, NULL)) {
+			DHD_ERROR(("%s Legacy PNO mode cannot be enabled "
+				"in assoc mode, ignore it\n", __FUNCTION__));
+			err = BCME_BADOPTION;
+			goto exit;
+		}
+	}
+	/* Enable/Disable PNO */
+	err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn\n", __FUNCTION__));
+		goto exit;
+	}
+	_pno_state->pno_status = (enable)?
+		DHD_PNO_ENABLED : DHD_PNO_DISABLED;
+	if (!enable)
+		_pno_state->pno_mode = DHD_PNO_NONE_MODE;
+
+	DHD_PNO(("%s set pno as %s\n",
+		__FUNCTION__, enable ? "Enable" : "Disable"));
+exit:
+	return err;
+}
+
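+/* Build a wl_pfn_param_t for the requested mode (legacy, batch or hotlist,
+ * including the combined-scan cases) and program it with the "pfn_set" iovar;
+ * for batch mode the firmware-limited mscan value is returned instead of err.
+ */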
+static int
+_dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t mode)
+{
+	int err = BCME_OK;
+	wl_pfn_param_t pfn_param;
+	dhd_pno_params_t *_params;
+	dhd_pno_status_info_t *_pno_state;
+	bool combined_scan = FALSE;
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	memset(&pfn_param, 0, sizeof(pfn_param));
+
+	/* set pfn parameters */
+	pfn_param.version = htod32(PFN_VERSION);
+	pfn_param.flags = ((PFN_LIST_ORDER << SORT_CRITERIA_BIT) |
+		(ENABLE << IMMEDIATE_SCAN_BIT) | (ENABLE << REPORT_SEPERATELY_BIT));
+	if (mode == DHD_PNO_LEGACY_MODE) {
+		/* check and set extra pno params */
+		if ((pno_params->params_legacy.pno_repeat != 0) ||
+			(pno_params->params_legacy.pno_freq_expo_max != 0)) {
+			pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+			pfn_param.repeat = (uchar) (pno_params->params_legacy.pno_repeat);
+			pfn_param.exp = (uchar) (pno_params->params_legacy.pno_freq_expo_max);
+		}
+		/* set up pno scan fr */
+		if (pno_params->params_legacy.scan_fr != 0)
+			pfn_param.scan_freq = htod32(pno_params->params_legacy.scan_fr);
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			DHD_PNO(("will enable combined scan with BATCHING SCAN MODE\n"));
+			mode |= DHD_PNO_BATCH_MODE;
+			combined_scan = TRUE;
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			DHD_PNO(("will enable combined scan with HOTLIST SCAN MODE\n"));
+			mode |= DHD_PNO_HOTLIST_MODE;
+			combined_scan = TRUE;
+		}
+	}
+	if (mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		/* Scan frequency of 30 sec */
+		pfn_param.scan_freq = htod32(30);
+		/* slow adapt scan is off by default */
+		pfn_param.slow_freq = htod32(0);
+		/* RSSI margin of 30 dBm */
+		pfn_param.rssi_margin = htod16(30);
+		/* Network timeout 60 sec */
+		pfn_param.lost_network_timeout = htod32(60);
+		/* best n = 2 by default */
+		pfn_param.bestn = DEFAULT_BESTN;
+		/* mscan m=0 by default, so not record best networks by default */
+		pfn_param.mscan = DEFAULT_MSCAN;
+		/*  default repeat = 10 */
+		pfn_param.repeat = DEFAULT_REPEAT;
+		/* by default, maximum scan interval = 2^2
+		 * scan_freq when adaptive scan is turned on
+		 */
+		pfn_param.exp = DEFAULT_EXP;
+		if (mode == DHD_PNO_BATCH_MODE) {
+			/* In case of BATCH SCAN */
+			if (pno_params->params_batch.bestn)
+				pfn_param.bestn = pno_params->params_batch.bestn;
+			if (pno_params->params_batch.scan_fr)
+				pfn_param.scan_freq = htod32(pno_params->params_batch.scan_fr);
+			if (pno_params->params_batch.mscan)
+				pfn_param.mscan = pno_params->params_batch.mscan;
+			/* enable broadcast scan */
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		} else if (mode == DHD_PNO_HOTLIST_MODE) {
+			/* In case of HOTLIST SCAN */
+			if (pno_params->params_hotlist.scan_fr)
+				pfn_param.scan_freq = htod32(pno_params->params_hotlist.scan_fr);
+			pfn_param.bestn = 0;
+			pfn_param.repeat = 0;
+			/* enable broadcast scan */
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		}
+		if (combined_scan) {
+			/* Disable Adaptive Scan */
+			pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT));
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+			pfn_param.repeat = 0;
+			pfn_param.exp = 0;
+			if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+				/* In case of Legacy PNO + BATCH SCAN */
+				_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+				if (_params->params_batch.bestn)
+					pfn_param.bestn = _params->params_batch.bestn;
+				if (_params->params_batch.scan_fr)
+					pfn_param.scan_freq = htod32(_params->params_batch.scan_fr);
+				if (_params->params_batch.mscan)
+					pfn_param.mscan = _params->params_batch.mscan;
+			} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+				/* In case of Legacy PNO + HOTLIST SCAN */
+				_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+				if (_params->params_hotlist.scan_fr)
+				pfn_param.scan_freq = htod32(_params->params_hotlist.scan_fr);
+				pfn_param.bestn = 0;
+				pfn_param.repeat = 0;
+			}
+		}
+	}
+	if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) ||
+		pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) {
+		DHD_ERROR(("%s pno freq(%d sec) is not valid \n",
+			__FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+		err = BCME_BADARG;
+		goto exit;
+	}
+	if (mode == DHD_PNO_BATCH_MODE) {
+		int _tmp = pfn_param.bestn;
+		/* set bestn to calculate the max mscan which firmware supports */
+		err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 1);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__));
+			goto exit;
+		}
+		/* get max mscan which the firmware supports */
+		err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 0);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__));
+			goto exit;
+		}
+		DHD_PNO((" returned mscan : %d, set bestn : %d\n", _tmp, pfn_param.bestn));
+		pfn_param.mscan = MIN(pfn_param.mscan, _tmp);
+	}
+	err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_set\n", __FUNCTION__));
+		goto exit;
+	}
+	/* need to return mscan if this is for batch scan instead of err */
+	err = (mode == DHD_PNO_BATCH_MODE)? pfn_param.mscan : err;
+exit:
+	return err;
+}
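+/* Push each PNO SSID to firmware with a "pfn_add" iovar; zero-length
+ * (broadcast) SSIDs are rejected.
+ */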
+static int
+_dhd_pno_add_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssids_list, int nssid)
+{
+	int err = BCME_OK;
+	int i = 0;
+	wl_pfn_t pfn_element;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nssid) {
+		NULL_CHECK(ssids_list, "ssid list is NULL", err);
+	}
+	memset(&pfn_element, 0, sizeof(pfn_element));
+	{
+		int j;
+		for (j = 0; j < nssid; j++) {
+			DHD_PNO(("%d: scan  for  %s size = %d\n", j,
+				ssids_list[j].SSID, ssids_list[j].SSID_len));
+		}
+	}
+	/* Check for broadcast ssid */
+	for (i = 0; i < nssid; i++) {
+		if (!ssids_list[i].SSID_len) {
+			DHD_ERROR(("%d: Broadcast SSID is illegal for PNO setting\n", i));
+			err = BCME_ERROR;
+			goto exit;
+		}
+	}
+	/* set all pfn ssid */
+	for (i = 0; i < nssid; i++) {
+		pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE);
+		pfn_element.auth = (DOT11_OPEN_SYSTEM);
+		pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY);
+		pfn_element.wsec = htod32(0);
+		pfn_element.infra = htod32(1);
+		pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT);
+		memcpy((char *)pfn_element.ssid.SSID, ssids_list[i].SSID,
+			ssids_list[i].SSID_len);
+		pfn_element.ssid.SSID_len = ssids_list[i].SSID_len;
+		err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_element,
+			sizeof(pfn_element), 1);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
+			goto exit;
+		}
+	}
+exit:
+	return err;
+}
+/* qsort compare function */
+static int
+_dhd_pno_cmpfunc(const void *a, const void *b)
+{
+	return (*(uint16*)a - *(uint16*)b);
+}
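+/* Merge two sorted channel lists into d_chan_list, dropping duplicates;
+ * *nchan returns the number of merged channels.
+ */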
+static int
+_dhd_pno_chan_merge(uint16 *d_chan_list, int *nchan,
+	uint16 *chan_list1, int nchan1, uint16 *chan_list2, int nchan2)
+{
+	int err = BCME_OK;
+	int i = 0, j = 0, k = 0;
+	uint16 tmp;
+	NULL_CHECK(d_chan_list, "d_chan_list is NULL", err);
+	NULL_CHECK(nchan, "nchan is NULL", err);
+	NULL_CHECK(chan_list1, "chan_list1 is NULL", err);
+	NULL_CHECK(chan_list2, "chan_list2 is NULL", err);
+	/* chan_list1 and chan_list2 should be sorted at first */
+	while (i < nchan1 && j < nchan2) {
+		tmp = chan_list1[i] < chan_list2[j]?
+			chan_list1[i++] : chan_list2[j++];
+		for (; i < nchan1 && chan_list1[i] == tmp; i++);
+		for (; j < nchan2 && chan_list2[j] == tmp; j++);
+		d_chan_list[k++] = tmp;
+	}
+
+	while (i < nchan1) {
+		tmp = chan_list1[i++];
+		for (; i < nchan1 && chan_list1[i] == tmp; i++);
+		d_chan_list[k++] = tmp;
+	}
+
+	while (j < nchan2) {
+		tmp = chan_list2[j++];
+		for (; j < nchan2 && chan_list2[j] == tmp; j++);
+		d_chan_list[k++] = tmp;
+
+	}
+	*nchan = k;
+	return err;
+}
+static int
+_dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list,
+	int *nchan, uint8 band, bool skip_dfs)
+{
+	int err = BCME_OK;
+	int i, j;
+	uint32 chan_buf[WL_NUMCHANNELS + 1];
+	wl_uint32_list_t *list;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (*nchan) {
+		NULL_CHECK(d_chan_list, "d_chan_list is NULL", err);
+	}
+	list = (wl_uint32_list_t *) (void *)chan_buf;
+	list->count = htod32(WL_NUMCHANNELS);
+	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0);
+	if (err < 0) {
+		DHD_ERROR(("failed to get channel list (err: %d)\n", err));
+		goto exit;
+	}
+	for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) {
+		if (band == WLC_BAND_2G) {
+			if (dtoh32(list->element[i]) > CHANNEL_2G_MAX)
+				continue;
+		} else if (band == WLC_BAND_5G) {
+			if (dtoh32(list->element[i]) <= CHANNEL_2G_MAX)
+				continue;
+			if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+				continue;
+
+		} else { /* All channels */
+			if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+				continue;
+		}
+		d_chan_list[j++] = dtoh32(list->element[i]);
+	}
+	*nchan = j;
+exit:
+	return err;
+}
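+/* Serialize the expired batch scan results into a text buffer of key=value
+ * records (scancount=/bssid=/ssid=/freq=/level=/age=/dist=/distSd=), freeing
+ * each entry as it is written; returns the number of bytes written.
+ */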
+static int
+_dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batch,
+	char *buf, int nbufsize)
+{
+	int err = BCME_OK;
+	int bytes_written = 0, nreadsize = 0;
+	int t_delta = 0;
+	int nleftsize = nbufsize;
+	uint8 cnt = 0;
+	char *bp = buf;
+	char eabuf[ETHER_ADDR_STR_LEN];
+#ifdef PNO_DEBUG
+	char *_base_bp;
+	char msg[150];
+#endif
+	dhd_pno_bestnet_entry_t *iter, *next;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	NULL_CHECK(params_batch, "params_batch is NULL", err);
+	if (nbufsize > 0)
+		NULL_CHECK(buf, "buf is NULL", err);
+	/* initialize the buffer */
+	memset(buf, 0, nbufsize);
+	DHD_PNO(("%s enter \n", __FUNCTION__));
+	/* # of scans */
+	if (!params_batch->get_batch.batch_started) {
+		bp += nreadsize = sprintf(bp, "scancount=%d\n",
+			params_batch->get_batch.expired_tot_scan_cnt);
+		nleftsize -= nreadsize;
+		params_batch->get_batch.batch_started = TRUE;
+	}
+	DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt));
+	/* preestimate scan count until which scan result this report is going to end */
+	list_for_each_entry_safe(siter, snext,
+		&params_batch->get_batch.expired_scan_results_list, list) {
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			/* if left_size is less than bestheader total size , stop this */
+			if (nleftsize <=
+				(phead->tot_size + phead->tot_cnt * ENTRY_OVERHEAD))
+				goto exit;
+			/* increase scan count */
+			cnt++;
+			/* # best of each scan */
+			DHD_PNO(("\n<loop : %d, apcount %d>\n", cnt - 1, phead->tot_cnt));
+			/* attribute of the scan */
+			if (phead->reason & PNO_STATUS_ABORT_MASK) {
+				bp += nreadsize = sprintf(bp, "trunc\n");
+				nleftsize -= nreadsize;
+			}
+			list_for_each_entry_safe(iter, next,
+				&phead->entry_list, list) {
+				t_delta = jiffies_to_msecs(jiffies - iter->recorded_time);
+#ifdef PNO_DEBUG
+				_base_bp = bp;
+				memset(msg, 0, sizeof(msg));
+#endif
+				/* BSSID info */
+				bp += nreadsize = sprintf(bp, "bssid=%s\n",
+				bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf));
+				nleftsize -= nreadsize;
+				/* SSID */
+				bp += nreadsize = sprintf(bp, "ssid=%s\n", iter->SSID);
+				nleftsize -= nreadsize;
+				/* channel */
+				bp += nreadsize = sprintf(bp, "freq=%d\n",
+				wf_channel2mhz(iter->channel,
+				iter->channel <= CH_MAX_2G_CHANNEL?
+				WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+				nleftsize -= nreadsize;
+				/* RSSI */
+				bp += nreadsize = sprintf(bp, "level=%d\n", iter->RSSI);
+				nleftsize -= nreadsize;
+				/* add the time consumed in Driver to the timestamp of firmware */
+				iter->timestamp += t_delta;
+				bp += nreadsize = sprintf(bp, "age=%d\n", iter->timestamp);
+				nleftsize -= nreadsize;
+				/* RTT0 */
+				bp += nreadsize = sprintf(bp, "dist=%d\n",
+				(iter->rtt0 == 0)? -1 : iter->rtt0);
+				nleftsize -= nreadsize;
+				/* RTT1 */
+				bp += nreadsize = sprintf(bp, "distSd=%d\n",
+				(iter->rtt0 == 0)? -1 : iter->rtt1);
+				nleftsize -= nreadsize;
+				bp += nreadsize = sprintf(bp, "%s", AP_END_MARKER);
+				nleftsize -= nreadsize;
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+#ifdef PNO_DEBUG
+				memcpy(msg, _base_bp, bp - _base_bp);
+				DHD_PNO(("Entry : \n%s", msg));
+#endif
+			}
+			bp += nreadsize = sprintf(bp, "%s", SCAN_END_MARKER);
+			DHD_PNO(("%s", SCAN_END_MARKER));
+			nleftsize -= nreadsize;
+			pprev = phead;
+			/* reset the header */
+			siter->bestnetheader = phead = phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+
+			siter->cnt_header--;
+		}
+		if (phead == NULL) {
+			/* we store all entry in this scan , so it is ok to delete */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+exit:
+	if (cnt < params_batch->get_batch.expired_tot_scan_cnt) {
+		DHD_ERROR(("Buffer size is too small to save all batch entries,"
+			" cnt : %d (remaining_scan_cnt): %d\n",
+			cnt, params_batch->get_batch.expired_tot_scan_cnt - cnt));
+	}
+	params_batch->get_batch.expired_tot_scan_cnt -= cnt;
+	/* set FALSE only if the link list  is empty after returning the data */
+	if (list_empty(&params_batch->get_batch.expired_scan_results_list)) {
+		params_batch->get_batch.batch_started = FALSE;
+		bp += sprintf(bp, "%s", RESULTS_END_MARKER);
+		DHD_PNO(("%s", RESULTS_END_MARKER));
+		DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__));
+	}
+	/* return used memory in buffer */
+	bytes_written = (int32)(bp - buf);
+	return bytes_written;
+}
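+/* Free stored batch scan results; with only_last set, only the last node on
+ * the list is removed. Returns the number of best-network headers freed.
+ */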
+static int
+_dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool only_last)
+{
+	int err = BCME_OK;
+	int removed_scan_cnt = 0;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	dhd_pno_bestnet_entry_t *iter, *next;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(head, "head is NULL", err);
+	NULL_CHECK(head->next, "head->next is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	list_for_each_entry_safe(siter, snext,
+		head, list) {
+		if (only_last) {
+			/* in case that we need to delete only last one */
+			if (!list_is_last(&siter->list, head)) {
+				/* skip if the one is not last */
+				continue;
+			}
+		}
+		/* delete all data belong if the one is last */
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			removed_scan_cnt++;
+			list_for_each_entry_safe(iter, next,
+			&phead->entry_list, list) {
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+			}
+			pprev = phead;
+			phead = phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+		}
+		if (phead == NULL) {
+			/* it is ok to delete top node */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+	return removed_scan_cnt;
+}
+
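+/* Program the PNO scan channel list into firmware via the "pfn_cfg" iovar */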
+static int
+_dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan)
+{
+	int err = BCME_OK;
+	int i = 0;
+	wl_pfn_cfg_t pfncfg_param;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nchan) {
+		NULL_CHECK(channel_list, "channel_list is NULL", err);
+	}
+	DHD_PNO(("%s enter :  nchan : %d\n", __FUNCTION__, nchan));
+	memset(&pfncfg_param, 0, sizeof(wl_pfn_cfg_t));
+	/* Setup default values */
+	pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET);
+	pfncfg_param.channel_num = htod32(0);
+
+	for (i = 0; i < nchan && nchan < WL_NUMCHANNELS; i++)
+		pfncfg_param.channel_list[i] = channel_list[i];
+
+	pfncfg_param.channel_num = htod32(nchan);
+	err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
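+/* Reset the parameter block for the given PNO mode and free any stored
+ * SSID/BSSID lists or batch results it holds.
+ */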
+static int
+_dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mode_t mode)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL\n", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	mutex_lock(&_pno_state->pno_mutex);
+	switch (mode) {
+	case DHD_PNO_LEGACY_MODE: {
+		struct dhd_pno_ssid *iter, *next;
+		if (params->params_legacy.nssid > 0) {
+			list_for_each_entry_safe(iter, next,
+				&params->params_legacy.ssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+		}
+		params->params_legacy.nssid = 0;
+		params->params_legacy.scan_fr = 0;
+		params->params_legacy.pno_freq_expo_max = 0;
+		params->params_legacy.pno_repeat = 0;
+		params->params_legacy.nchan = 0;
+		memset(params->params_legacy.chan_list, 0,
+			sizeof(params->params_legacy.chan_list));
+		break;
+	}
+	case DHD_PNO_BATCH_MODE: {
+		params->params_batch.scan_fr = 0;
+		params->params_batch.mscan = 0;
+		params->params_batch.nchan = 0;
+		params->params_batch.rtt = 0;
+		params->params_batch.bestn = 0;
+		params->params_batch.nchan = 0;
+		params->params_batch.band = WLC_BAND_AUTO;
+		memset(params->params_batch.chan_list, 0,
+			sizeof(params->params_batch.chan_list));
+		params->params_batch.get_batch.batch_started = FALSE;
+		params->params_batch.get_batch.buf = NULL;
+		params->params_batch.get_batch.bufsize = 0;
+		params->params_batch.get_batch.reason = 0;
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.scan_results_list, FALSE);
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.expired_scan_results_list, FALSE);
+		params->params_batch.get_batch.tot_scan_cnt = 0;
+		params->params_batch.get_batch.expired_tot_scan_cnt = 0;
+		params->params_batch.get_batch.top_node_cnt = 0;
+		INIT_LIST_HEAD(&params->params_batch.get_batch.scan_results_list);
+		INIT_LIST_HEAD(&params->params_batch.get_batch.expired_scan_results_list);
+		break;
+	}
+	case DHD_PNO_HOTLIST_MODE: {
+		struct dhd_pno_bssid *iter, *next;
+		if (params->params_hotlist.nbssid > 0) {
+			list_for_each_entry_safe(iter, next,
+				&params->params_hotlist.bssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+		}
+		params->params_hotlist.scan_fr = 0;
+		params->params_hotlist.nbssid = 0;
+		params->params_hotlist.nchan = 0;
+		params->params_hotlist.band = WLC_BAND_AUTO;
+		memset(params->params_hotlist.chan_list, 0,
+			sizeof(params->params_hotlist.chan_list));
+		break;
+	}
+	default:
+		DHD_ERROR(("%s : unknown mode : %d\n", __FUNCTION__, mode));
+		break;
+	}
+	mutex_unlock(&_pno_state->pno_mutex);
+	return err;
+}
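+/*
+ * Download the hotlist BSSIDs to the firmware via the "pfn_add_bssid" iovar.
+ */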
+static int
+_dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nbssid) {
+		NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err);
+	}
+	err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)p_pfn_bssid,
+		sizeof(wl_pfn_bssid_t) * nbssid, 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_add_bssid\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
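+/*
+ * Stop legacy (SSID based) PNO. If batch or hotlist scanning is still active,
+ * the cached batching data is pulled from the firmware first and the
+ * remaining mode is reprogrammed after dhd_pno_clean().
+ */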
+int
+dhd_pno_stop_for_ssid(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	uint32 mode = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	wl_pfn_bssid_t *p_pfn_bssid;
+	NULL_CHECK(dhd, "dev is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) {
+		DHD_ERROR(("%s : LEGACY PNO MODE is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+	/* restart Batch mode  if the batch mode is on */
+	if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* save current pno_mode before calling dhd_pno_clean */
+		mode = _pno_state->pno_mode;
+		dhd_pno_clean(dhd);
+		/* restore previous pno_mode */
+		_pno_state->pno_mode = mode;
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			/* restart BATCH SCAN */
+			err = dhd_pno_set_for_batch(dhd, &_params->params_batch);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+				DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			/* restart HOTLIST SCAN */
+			struct dhd_pno_bssid *iter, *next;
+			wl_pfn_bssid_t *ptmp_bssid;
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) *
+			_params->params_hotlist.nbssid, GFP_KERNEL);
+			if (p_pfn_bssid == NULL) {
+				DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+				" (count: %d)",
+					__FUNCTION__, _params->params_hotlist.nbssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				goto exit;
+			}
+			/* convert dhd_pno_bssid to wl_pfn_bssid,
+			 * using a temp pointer so p_pfn_bssid keeps
+			 * pointing at the start of the array
+			 */
+			ptmp_bssid = p_pfn_bssid;
+			list_for_each_entry_safe(iter, next,
+			&_params->params_hotlist.bssid_list, list) {
+				memcpy(&ptmp_bssid->macaddr,
+				&iter->macaddr, ETHER_ADDR_LEN);
+				ptmp_bssid->flags = iter->flags;
+				ptmp_bssid++;
+			}
+			err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	return err;
+}
+
+int
+dhd_pno_enable(dhd_pub_t *dhd, int enable)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	return (_dhd_pno_enable(dhd, enable));
+}
+
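+/*
+ * Start legacy PNO scanning for the given SSID list. If batch or hotlist
+ * scanning is already running, the channel lists of both modes are merged
+ * before the firmware is reprogrammed.
+ */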
+int
+dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssid_list, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+	struct dhd_pno_ssid *_pno_ssid;
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	dhd_pno_status_info_t *_pno_state;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int32 tot_nchan = 0;
+	int err = BCME_OK;
+	int i;
+	int mode = 0;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	DHD_PNO(("%s enter : scan_fr :%d, pno_repeat :%d,"
+			"pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__,
+			scan_fr, pno_repeat, pno_freq_expo_max, nchan));
+
+	_params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		DHD_ERROR(("%s : Legacy PNO mode was already started, "
+			"will disable previous one to start new one\n", __FUNCTION__));
+		err = dhd_pno_stop_for_ssid(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	_pno_state->pno_mode |= DHD_PNO_LEGACY_MODE;
+	err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	memset(_chan_list, 0, sizeof(_chan_list));
+	tot_nchan = nchan;
+	if (tot_nchan > 0 && channel_list) {
+		for (i = 0; i < nchan; i++)
+		_params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i];
+	}
+	if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		DHD_PNO(("BATCH SCAN is on progress in firmware\n"));
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = _dhd_pno_enable(dhd, PNO_OFF);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+		/* use superset of channel list between two mode */
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			_params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			if (_params2->params_batch.nchan > 0 && nchan > 0) {
+				err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+					&_params2->params_batch.chan_list[0],
+					_params2->params_batch.nchan,
+					&channel_list[0], nchan);
+				if (err < 0) {
+					DHD_ERROR(("%s : failed to merge channel list"
+					" between legacy and batch\n",
+						__FUNCTION__));
+					goto exit;
+				}
+			}  else {
+				DHD_PNO(("superset channel will use"
+				" all channels in firmware\n"));
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			_params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			if (_params2->params_hotlist.nchan > 0 && nchan > 0) {
+				err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+					&_params2->params_hotlist.chan_list[0],
+					_params2->params_hotlist.nchan,
+					&channel_list[0], nchan);
+				if (err < 0) {
+					DHD_ERROR(("%s : failed to merge channel list"
+					" between legacy and hotlist\n",
+						__FUNCTION__));
+					goto exit;
+				}
+			}
+		}
+	}
+	_params->params_legacy.scan_fr = scan_fr;
+	_params->params_legacy.pno_repeat = pno_repeat;
+	_params->params_legacy.pno_freq_expo_max = pno_freq_expo_max;
+	_params->params_legacy.nchan = nchan;
+	_params->params_legacy.nssid = nssid;
+	INIT_LIST_HEAD(&_params->params_legacy.ssid_list);
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) {
+		DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err));
+		goto exit;
+	}
+	if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) {
+		DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid));
+		goto exit;
+	}
+	for (i = 0; i < nssid; i++) {
+		_pno_ssid = kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL);
+		if (_pno_ssid == NULL) {
+			DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n",
+				__FUNCTION__));
+			err = BCME_NOMEM;
+			goto exit;
+		}
+		_pno_ssid->SSID_len = ssid_list[i].SSID_len;
+		memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len);
+		list_add_tail(&_pno_ssid->list, &_params->params_legacy.ssid_list);
+
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+	return err;
+}
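+/*
+ * Start batch scanning. On success the return value is the number of
+ * scans (mscan) the firmware can buffer, as reported by _dhd_pno_set().
+ */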
+int
+dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params)
+{
+	int err = BCME_OK;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int rem_nchan = 0, tot_nchan = 0;
+	int mode = 0, mscan = 0;
+	int i = 0;
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	dhd_pno_status_info_t *_pno_state;
+	wlc_ssid_t *p_ssid_list = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	NULL_CHECK(batch_params, "batch_params is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		_pno_state->pno_mode |= DHD_PNO_BATCH_MODE;
+		err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n",
+				__FUNCTION__));
+			goto exit;
+		}
+	} else {
+		/* batch mode is already started */
+		return -EBUSY;
+	}
+	_params->params_batch.scan_fr = batch_params->scan_fr;
+	_params->params_batch.bestn = batch_params->bestn;
+	_params->params_batch.mscan = (batch_params->mscan)?
+		batch_params->mscan : DEFAULT_BATCH_MSCAN;
+	_params->params_batch.nchan = batch_params->nchan;
+	memcpy(_params->params_batch.chan_list, batch_params->chan_list,
+		sizeof(_params->params_batch.chan_list));
+
+	memset(_chan_list, 0, sizeof(_chan_list));
+
+	rem_nchan = ARRAYSIZE(batch_params->chan_list) - batch_params->nchan;
+	if (batch_params->band == WLC_BAND_2G || batch_params->band == WLC_BAND_5G) {
+		/* get a valid channel list based on band B or A */
+		err = _dhd_pno_get_channels(dhd,
+		&_params->params_batch.chan_list[batch_params->nchan],
+		&rem_nchan, batch_params->band, FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+				__FUNCTION__, batch_params->band));
+			goto exit;
+		}
+		/* now we need to update nchan because rem_chan has valid channel count */
+		_params->params_batch.nchan += rem_nchan;
+		/* need to sort channel list */
+		sort(_params->params_batch.chan_list, _params->params_batch.nchan,
+			sizeof(_params->params_batch.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+	}
+#ifdef PNO_DEBUG
+{
+		DHD_PNO(("Channel list : "));
+		for (i = 0; i < _params->params_batch.nchan; i++) {
+			DHD_PNO(("%d ", _params->params_batch.chan_list[i]));
+		}
+		DHD_PNO(("\n"));
+}
+#endif
+	if (_params->params_batch.nchan) {
+		/* copy the channel list into local array */
+		memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list));
+		tot_nchan = _params->params_batch.nchan;
+	}
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		struct dhd_pno_ssid *iter, *next;
+		DHD_PNO(("PNO SSID is on progress in firmware\n"));
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = _dhd_pno_enable(dhd, PNO_OFF);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+		/* Use the superset for channelist between two mode */
+		_params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+		if (_params2->params_legacy.nchan > 0 && _params->params_batch.nchan > 0) {
+			err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+				&_params2->params_legacy.chan_list[0],
+				_params2->params_legacy.nchan,
+				&_params->params_batch.chan_list[0], _params->params_batch.nchan);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to merge channel list"
+				" between legacy and batch\n",
+					__FUNCTION__));
+				goto exit;
+			}
+		} else {
+			DHD_PNO(("superset channel will use all channels in firmware\n"));
+		}
+		p_ssid_list = kzalloc(sizeof(wlc_ssid_t) *
+							_params2->params_legacy.nssid, GFP_KERNEL);
+		if (p_ssid_list == NULL) {
+			DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)",
+				__FUNCTION__, _params2->params_legacy.nssid));
+			err = BCME_ERROR;
+			_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+			goto exit;
+		}
+		i = 0;
+		/* convert dhd_pno_ssid to wlc_ssid_t */
+		list_for_each_entry_safe(iter, next, &_params2->params_legacy.ssid_list, list) {
+			p_ssid_list[i].SSID_len = iter->SSID_len;
+			memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len);
+			i++;
+		}
+		if ((err = _dhd_pno_add_ssid(dhd, p_ssid_list,
+			_params2->params_legacy.nssid)) < 0) {
+			DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+			goto exit;
+		}
+	}
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_BATCH_MODE)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	} else {
+		/* we need to return mscan */
+		mscan = err;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+	else {
+		/* return #max scan firmware can do */
+		err = mscan;
+	}
+	if (p_ssid_list)
+		kfree(p_ssid_list);
+	return err;
+}
+
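+/*
+ * Pull buffered batching results from the firmware ("pfnlbest" iovar) into
+ * the host-side scan result lists and, if a buffer was supplied, convert
+ * them into the string format expected by the upper layer.
+ */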
+static int
+_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+{
+	int err = BCME_OK;
+	int i, j;
+	uint32 timestamp = 0;
+	dhd_pno_params_t *_params = NULL;
+	dhd_pno_status_info_t *_pno_state = NULL;
+	wl_pfn_lscanresults_t *plbestnet = NULL;
+	wl_pfn_lnet_info_t *plnetinfo;
+	dhd_pno_bestnet_entry_t *pbestnet_entry;
+	dhd_pno_best_header_t *pbestnetheader = NULL;
+	dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext;
+	bool allocate_header = FALSE;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	mutex_lock(&_pno_state->pno_mutex);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	if (buf && bufsize) {
+		if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) {
+			/* need to check whether we have cached data or not */
+			DHD_PNO(("%s: have cached batching data in Driver\n",
+				__FUNCTION__));
+			/* convert to results format */
+			goto convert_format;
+		} else {
+			/* this is a first try to get batching results */
+			if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
+				/* move the scan_results_list to expired_scan_results_lists */
+				list_for_each_entry_safe(siter, snext,
+					&_params->params_batch.get_batch.scan_results_list, list) {
+					list_move_tail(&siter->list,
+					&_params->params_batch.get_batch.expired_scan_results_list);
+				}
+				_params->params_batch.get_batch.top_node_cnt = 0;
+				_params->params_batch.get_batch.expired_tot_scan_cnt =
+					_params->params_batch.get_batch.tot_scan_cnt;
+				_params->params_batch.get_batch.tot_scan_cnt = 0;
+				goto convert_format;
+			}
+		}
+	}
+	/* create dhd_pno_scan_results_t whenever we got event WLC_E_PFN_BEST_BATCHING */
+	pscan_results = (dhd_pno_scan_results_t *)MALLOC(dhd->osh, SCAN_RESULTS_SIZE);
+	if (pscan_results == NULL) {
+		err = BCME_NOMEM;
+		DHD_ERROR(("failed to allocate dhd_pno_scan_results_t\n"));
+		goto exit;
+	}
+	pscan_results->bestnetheader = NULL;
+	pscan_results->cnt_header = 0;
+	/* add the element into the list if total node cnt is less than MAX_NODE_CNT */
+	if (_params->params_batch.get_batch.top_node_cnt < MAX_NODE_CNT) {
+		list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+		_params->params_batch.get_batch.top_node_cnt++;
+	} else {
+		int _removed_scan_cnt;
+		/* remove oldest one and add new one */
+		DHD_PNO(("%s : Remove oldest node and add new one\n", __FUNCTION__));
+		_removed_scan_cnt = _dhd_pno_clear_all_batch_results(dhd,
+			&_params->params_batch.get_batch.scan_results_list, TRUE);
+		_params->params_batch.get_batch.tot_scan_cnt -= _removed_scan_cnt;
+		list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+
+	}
+	plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
+	if (plbestnet == NULL) {
+		err = BCME_NOMEM;
+		DHD_ERROR(("failed to allocate buffer for bestnet\n"));
+		goto exit;
+	}
+	memset(plbestnet, 0, PNO_BESTNET_LEN);
+	while (plbestnet->status != PFN_COMPLETE) {
+		memset(plbestnet, 0, PNO_BESTNET_LEN);
+		err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0);
+		if (err < 0) {
+			if (err == BCME_EPERM) {
+				DHD_ERROR(("we cannot get the batching data "
+					"during scanning in firmware, try again\n,"));
+				msleep(500);
+				continue;
+			} else {
+				DHD_ERROR(("%s : failed to execute pfnlbest (err :%d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+		DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version,
+			plbestnet->status, plbestnet->count));
+		if (plbestnet->version != PFN_SCANRESULT_VERSION) {
+			err = BCME_VERSION;
+			DHD_ERROR(("bestnet version(%d) is mismatch with Driver version(%d)\n",
+				plbestnet->version, PFN_SCANRESULT_VERSION));
+			goto exit;
+		}
+		plnetinfo = plbestnet->netinfo;
+		for (i = 0; i < plbestnet->count; i++) {
+			pbestnet_entry = (dhd_pno_bestnet_entry_t *)
+			MALLOC(dhd->osh, BESTNET_ENTRY_SIZE);
+			if (pbestnet_entry == NULL) {
+				err = BCME_NOMEM;
+				DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
+				goto exit;
+			}
+			memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE);
+			pbestnet_entry->recorded_time = jiffies; /* record the current time */
+			/* create header for the first entry */
+			allocate_header = (i == 0)? TRUE : FALSE;
+			/* check whether the new generation is started or not */
+			if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp)
+				> TIME_MIN_DIFF))
+				allocate_header = TRUE;
+			timestamp = plnetinfo->timestamp;
+			if (allocate_header) {
+				pbestnetheader = (dhd_pno_best_header_t *)
+				MALLOC(dhd->osh, BEST_HEADER_SIZE);
+				if (pbestnetheader == NULL) {
+					err = BCME_NOMEM;
+					if (pbestnet_entry)
+						MFREE(dhd->osh, pbestnet_entry,
+						BESTNET_ENTRY_SIZE);
+					DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
+					goto exit;
+				}
+				memset(pbestnetheader, 0, BEST_HEADER_SIZE);
+				/* increase total cnt of bestnet header */
+				pscan_results->cnt_header++;
+				/* need to record the reason to call dhd_pno_get_for_batch */
+				if (reason)
+					pbestnetheader->reason = (ENABLE << reason);
+				/* initialize the head of linked list */
+				INIT_LIST_HEAD(&(pbestnetheader->entry_list));
+				/* link the pbestnet header into the existing list */
+				if (pscan_results->bestnetheader == NULL)
+					/* In case of header */
+					pscan_results->bestnetheader = pbestnetheader;
+				else {
+					dhd_pno_best_header_t *head = pscan_results->bestnetheader;
+					pscan_results->bestnetheader = pbestnetheader;
+					pbestnetheader->next = head;
+				}
+			}
+			/* fills the best network info */
+			pbestnet_entry->channel = plnetinfo->pfnsubnet.channel;
+			pbestnet_entry->RSSI = plnetinfo->RSSI;
+			if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+				/* the partial scan flag means this scan was
+				 * aborted by another scan
+				 */
+				DHD_PNO(("This scan is aborted\n"));
+				pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT);
+			}
+			pbestnet_entry->rtt0 = plnetinfo->rtt0;
+			pbestnet_entry->rtt1 = plnetinfo->rtt1;
+			pbestnet_entry->timestamp = plnetinfo->timestamp;
+			pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len;
+			memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID,
+				pbestnet_entry->SSID_len);
+			memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+			/* add the element into list */
+			list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list);
+			/* increase best entry count */
+			pbestnetheader->tot_cnt++;
+			pbestnetheader->tot_size += BESTNET_ENTRY_SIZE;
+			DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1));
+			DHD_PNO(("\tSSID : "));
+			for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++)
+				DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j]));
+			DHD_PNO(("\n"));
+			DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
+				plnetinfo->pfnsubnet.BSSID.octet[0],
+				plnetinfo->pfnsubnet.BSSID.octet[1],
+				plnetinfo->pfnsubnet.BSSID.octet[2],
+				plnetinfo->pfnsubnet.BSSID.octet[3],
+				plnetinfo->pfnsubnet.BSSID.octet[4],
+				plnetinfo->pfnsubnet.BSSID.octet[5]));
+			DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+				plnetinfo->pfnsubnet.channel,
+				plnetinfo->RSSI, plnetinfo->timestamp));
+			DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0, plnetinfo->rtt1));
+			plnetinfo++;
+		}
+	}
+	if (pscan_results->cnt_header == 0) {
+		/* In case that we didn't get any data from the firmware
+		 * Remove the current scan_result list from get_batch.scan_results_list.
+		 */
+		DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n"));
+		list_del(&pscan_results->list);
+		MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE);
+		_params->params_batch.get_batch.top_node_cnt--;
+	} else {
+		/* increase total scan count using current scan count */
+		_params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header;
+	}
+
+	if (buf && bufsize) {
+		/* This is a first try to get batching results */
+		if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
+			/* move the scan_results_list to expired_scan_results_lists */
+			list_for_each_entry_safe(siter, snext,
+				&_params->params_batch.get_batch.scan_results_list, list) {
+				list_move_tail(&siter->list,
+					&_params->params_batch.get_batch.expired_scan_results_list);
+			}
+			/* reset global values after moving to expired list */
+			_params->params_batch.get_batch.top_node_cnt = 0;
+			_params->params_batch.get_batch.expired_tot_scan_cnt =
+				_params->params_batch.get_batch.tot_scan_cnt;
+			_params->params_batch.get_batch.tot_scan_cnt = 0;
+		}
+convert_format:
+		err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize);
+		if (err < 0) {
+			DHD_ERROR(("failed to convert the data into upper layer format\n"));
+			goto exit;
+		}
+	}
+exit:
+	if (plbestnet)
+		MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN);
+	if (_params) {
+		_params->params_batch.get_batch.buf = NULL;
+		_params->params_batch.get_batch.bufsize = 0;
+		_params->params_batch.get_batch.bytes_written = err;
+	}
+	mutex_unlock(&_pno_state->pno_mutex);
+	if (waitqueue_active(&_pno_state->get_batch_done.wait))
+		complete(&_pno_state->get_batch_done);
+	return err;
+}
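+/* Workqueue handler: performs the actual batch retrieval in process context. */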
+static void
+_dhd_pno_get_batch_handler(struct work_struct *work)
+{
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pub_t *dhd;
+	struct dhd_pno_batch_params *params_batch;
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = container_of(work, struct dhd_pno_status_info, work);
+	dhd = _pno_state->dhd;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+	params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+	_dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf,
+		params_batch->get_batch.bufsize, params_batch->get_batch.reason);
+
+}
+
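+/*
+ * Public entry for retrieving batch results: schedules the retrieval work
+ * and waits for it to complete, then returns the number of bytes written.
+ */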
+int
+dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+{
+	int err = BCME_OK;
+	char *pbuf = buf;
+	dhd_pno_status_info_t *_pno_state;
+	struct dhd_pno_batch_params *params_batch;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+		/* guard against internal callers that pass a NULL buffer */
+		if (pbuf && bufsize) {
+			memset(pbuf, 0, bufsize);
+			pbuf += sprintf(pbuf, "scancount=%d\n", 0);
+			sprintf(pbuf, "%s", RESULTS_END_MARKER);
+			err = strlen(buf);
+		}
+		goto exit;
+	}
+	params_batch->get_batch.buf = buf;
+	params_batch->get_batch.bufsize = bufsize;
+	params_batch->get_batch.reason = reason;
+	params_batch->get_batch.bytes_written = 0;
+	schedule_work(&_pno_state->work);
+	wait_for_completion(&_pno_state->get_batch_done);
+	err = params_batch->get_batch.bytes_written;
+exit:
+	return err;
+}
+
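+/*
+ * Stop batch scanning and reinitialize its profile. If legacy or hotlist
+ * scanning was also active, the firmware is reprogrammed for that mode.
+ */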
+int
+dhd_pno_stop_for_batch(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	int mode = 0;
+	int i = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	wl_pfn_bssid_t *p_pfn_bssid;
+	wlc_ssid_t *p_ssid_list = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n",
+			__FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+	if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) {
+		mode = _pno_state->pno_mode;
+		dhd_pno_clean(dhd);
+		_pno_state->pno_mode = mode;
+		/* restart Legacy PNO if the Legacy PNO is on */
+		if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			struct dhd_pno_legacy_params *_params_legacy;
+			struct dhd_pno_ssid *iter, *next;
+			_params_legacy =
+				&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+			p_ssid_list = kzalloc(sizeof(wlc_ssid_t) *
+				_params_legacy->nssid, GFP_KERNEL);
+			if (p_ssid_list == NULL) {
+				DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)",
+					__FUNCTION__, _params_legacy->nssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+				goto exit;
+			}
+			i = 0;
+			/* convert dhd_pno_ssid to wlc_ssid_t */
+			list_for_each_entry_safe(iter, next, &_params_legacy->ssid_list, list) {
+				p_ssid_list[i].SSID_len = iter->SSID_len;
+				memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len);
+				i++;
+			}
+			err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid,
+				_params_legacy->scan_fr, _params_legacy->pno_repeat,
+				_params_legacy->pno_freq_expo_max, _params_legacy->chan_list,
+				_params_legacy->nchan);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+				DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			struct dhd_pno_bssid *iter, *next;
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) *
+				_params->params_hotlist.nbssid, GFP_KERNEL);
+			if (p_pfn_bssid == NULL) {
+				DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+					" (count: %d)",
+					__FUNCTION__, _params->params_hotlist.nbssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				goto exit;
+			}
+			i = 0;
+			/* convert dhd_pno_bssid to wl_pfn_bssid */
+			list_for_each_entry_safe(iter, next,
+				&_params->params_hotlist.bssid_list, list) {
+				memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN);
+				p_pfn_bssid[i].flags = iter->flags;
+				i++;
+			}
+			err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+	if (p_ssid_list)
+		kfree(p_ssid_list);
+	return err;
+}
+
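+/*
+ * Start hotlist (BSSID based) scanning. The channel list is merged with the
+ * legacy PNO channel list when legacy PNO is already running.
+ */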
+int
+dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params)
+{
+	int err = BCME_OK;
+	int i;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int rem_nchan = 0;
+	int tot_nchan = 0;
+	int mode = 0;
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	struct dhd_pno_bssid *_pno_bssid;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	NULL_CHECK(hotlist_params, "hotlist_params is NULL", err);
+	NULL_CHECK(p_pfn_bssid, "p_pfn_bssid is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	_params = &_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS];
+	if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) {
+		_pno_state->pno_mode |= DHD_PNO_HOTLIST_MODE;
+		err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_HOTLIST_MODE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n",
+				__FUNCTION__));
+			goto exit;
+		}
+	}
+	_params->params_hotlist.nchan = hotlist_params->nchan;
+	_params->params_hotlist.scan_fr = hotlist_params->scan_fr;
+	if (hotlist_params->nchan)
+		memcpy(_params->params_hotlist.chan_list, hotlist_params->chan_list,
+			sizeof(_params->params_hotlist.chan_list));
+	memset(_chan_list, 0, sizeof(_chan_list));
+
+	rem_nchan = ARRAYSIZE(hotlist_params->chan_list) - hotlist_params->nchan;
+	if (hotlist_params->band == WLC_BAND_2G || hotlist_params->band == WLC_BAND_5G) {
+		/* get a valid channel list based on band B or A */
+		err = _dhd_pno_get_channels(dhd,
+		&_params->params_hotlist.chan_list[hotlist_params->nchan],
+		&rem_nchan, hotlist_params->band, FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+				__FUNCTION__, hotlist_params->band));
+			goto exit;
+		}
+		/* now we need to update nchan because rem_chan has valid channel count */
+		_params->params_hotlist.nchan += rem_nchan;
+		/* need to sort channel list */
+		sort(_params->params_hotlist.chan_list, _params->params_hotlist.nchan,
+			sizeof(_params->params_hotlist.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+	}
+#ifdef PNO_DEBUG
+{
+		int i;
+		DHD_PNO(("Channel list : "));
+		for (i = 0; i < _params->params_hotlist.nchan; i++) {
+			DHD_PNO(("%d ", _params->params_hotlist.chan_list[i]));
+		}
+		DHD_PNO(("\n"));
+}
+#endif
+	if (_params->params_hotlist.nchan) {
+		/* copy the channel list into local array */
+		memcpy(_chan_list, _params->params_hotlist.chan_list,
+			sizeof(_chan_list));
+		tot_nchan = _params->params_hotlist.nchan;
+	}
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			DHD_PNO(("PNO SSID is on progress in firmware\n"));
+			/* store current pno_mode before disabling pno */
+			mode = _pno_state->pno_mode;
+			err = _dhd_pno_enable(dhd, PNO_OFF);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+				goto exit;
+			}
+			/* restore the previous mode */
+			_pno_state->pno_mode = mode;
+			/* Use the superset for channelist between two mode */
+			_params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+			if (_params2->params_legacy.nchan > 0 &&
+				_params->params_hotlist.nchan > 0) {
+				err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+					&_params2->params_legacy.chan_list[0],
+					_params2->params_legacy.nchan,
+					&_params->params_hotlist.chan_list[0],
+					_params->params_hotlist.nchan);
+				if (err < 0) {
+					DHD_ERROR(("%s : failed to merge channel list"
+						"between legacy and hotlist\n",
+						__FUNCTION__));
+					goto exit;
+				}
+			}
+
+	}
+
+	INIT_LIST_HEAD(&(_params->params_hotlist.bssid_list));
+
+	err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, hotlist_params->nbssid);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_HOTLIST_MODE)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	for (i = 0; i < hotlist_params->nbssid; i++) {
+		_pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL);
+		NULL_CHECK(_pno_bssid, "_pfn_bssid is NULL", err);
+		memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN);
+		_pno_bssid->flags = p_pfn_bssid[i].flags;
+		list_add_tail(&_pno_bssid->list, &_params->params_hotlist.bssid_list);
+	}
+	_params->params_hotlist.nbssid = hotlist_params->nbssid;
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+	return err;
+}
+
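+/*
+ * Stop hotlist scanning. Any remaining legacy or batch mode is restarted
+ * after the firmware state has been cleaned.
+ */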
+int
+dhd_pno_stop_for_hotlist(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	uint32 mode = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	wlc_ssid_t *p_ssid_list = NULL;
+	int i = 0;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n",
+			__FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) {
+		DHD_ERROR(("%s : Hotlist MODE is not enabled\n",
+			__FUNCTION__));
+		goto exit;
+	}
+	_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+
+	if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_BATCH_MODE)) {
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* save current pno_mode before calling dhd_pno_clean */
+		mode = _pno_state->pno_mode;
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+		/* restore previous pno mode */
+		_pno_state->pno_mode = mode;
+		if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			/* restart Legacy PNO Scan */
+			struct dhd_pno_legacy_params *_params_legacy;
+			struct dhd_pno_ssid *iter, *next;
+			_params_legacy =
+			&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+			p_ssid_list =
+			kzalloc(sizeof(wlc_ssid_t) * _params_legacy->nssid, GFP_KERNEL);
+			if (p_ssid_list == NULL) {
+				DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)",
+					__FUNCTION__, _params_legacy->nssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+				goto exit;
+			}
+			/* convert dhd_pno_ssid to wlc_ssid_t,
+			 * indexing so p_ssid_list keeps pointing at the array start
+			 */
+			i = 0;
+			list_for_each_entry_safe(iter, next, &_params_legacy->ssid_list, list) {
+				p_ssid_list[i].SSID_len = iter->SSID_len;
+				memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len);
+				i++;
+			}
+			err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid,
+				_params_legacy->scan_fr, _params_legacy->pno_repeat,
+				_params_legacy->pno_freq_expo_max, _params_legacy->chan_list,
+				_params_legacy->nchan);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+				DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			/* restart Batching Scan */
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			/* restart BATCH SCAN */
+			err = dhd_pno_set_for_batch(dhd, &_params->params_batch);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+				DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n",
+					__FUNCTION__,  err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	if (p_ssid_list)
+		kfree(p_ssid_list);
+	return err;
+}
+
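+/*
+ * Handle PFN events from the firmware. WLC_E_PFN_BEST_BATCHING schedules the
+ * batch retrieval work; hotlist found/lost events are not implemented yet.
+ */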
+int
+dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
+{
+	int err = BCME_OK;
+	uint status, event_type, flags, datalen;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	event_type = ntoh32(event->event_type);
+	flags = ntoh16(event->flags);
+	status = ntoh32(event->status);
+	datalen = ntoh32(event->datalen);
+	DHD_PNO(("%s enter : event_type :%d\n", __FUNCTION__, event_type));
+	switch (event_type) {
+	case WLC_E_PFN_BSSID_NET_FOUND:
+	case WLC_E_PFN_BSSID_NET_LOST:
+		/* TODO : need to implement event logic using generic netlink */
+		break;
+	case WLC_E_PFN_BEST_BATCHING:
+	{
+		struct dhd_pno_batch_params *params_batch;
+		params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+		if (!waitqueue_active(&_pno_state->get_batch_done.wait)) {
+			DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__));
+			params_batch->get_batch.buf = NULL;
+			params_batch->get_batch.bufsize = 0;
+			params_batch->get_batch.reason = PNO_STATUS_EVENT;
+			schedule_work(&_pno_state->work);
+		} else
+			DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING"
+				"will skip this event\n", __FUNCTION__));
+		break;
+	}
+	default:
+		DHD_ERROR(("unknown event : %d\n", event_type));
+	}
+exit:
+	return err;
+}
+
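+/*
+ * Allocate and initialize the per-adapter PNO state, including the batch
+ * retrieval work item, and probe the firmware for WLS (batching) support.
+ */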
+int dhd_pno_init(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	UNUSED_PARAMETER(_dhd_pno_suspend);
+	if (dhd->pno_state)
+		goto exit;
+	dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t));
+	NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err);
+	memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t));
+	/* need to check whether current firmware support batching and hotlist scan */
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_pno_state->wls_supported = TRUE;
+	_pno_state->dhd = dhd;
+	mutex_init(&_pno_state->pno_mutex);
+	INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler);
+	init_completion(&_pno_state->get_batch_done);
+	err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, 0);
+	if (err == BCME_UNSUPPORTED) {
+		_pno_state->wls_supported = FALSE;
+		DHD_INFO(("Current firmware doesn't support"
+			" Android Location Service\n"));
+	}
+exit:
+	return err;
+}
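+/* Free the PNO state: release cached lists and cancel pending work. */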
+int dhd_pno_deinit(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	NULL_CHECK(_pno_state, "pno_state is NULL", err);
+	/* may need to free legacy ssid_list */
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+	}
+
+	if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+		/* clear resource if the BATCH MODE is on */
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+	}
+	cancel_work_sync(&_pno_state->work);
+	MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t));
+	dhd->pno_state = NULL;
+	return err;
+}
+#endif /* PNO_SUPPORT */
diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.h b/drivers/net/wireless/bcmdhd/dhd_pno.h
new file mode 100644
index 0000000000000000000000000000000000000000..1980476fb9dcb9324b0097a70a7921c86a5e6696
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pno.h
@@ -0,0 +1,243 @@
+/*
+ * Header file of Broadcom Dongle Host Driver (DHD)
+ * Preferred Network Offload code and Wi-Fi Location Service (WLS) code.
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_pno.h 423669 2013-09-18 13:01:55Z $
+ */
+
+#ifndef __DHD_PNO_H__
+#define __DHD_PNO_H__
+
+#if defined(PNO_SUPPORT)
+#define PNO_TLV_PREFIX			'S'
+#define PNO_TLV_VERSION			'1'
+#define PNO_TLV_SUBTYPE_LEGACY_PNO '2'
+#define PNO_TLV_RESERVED		'0'
+
+#define PNO_BATCHING_SET "SET"
+#define PNO_BATCHING_GET "GET"
+#define PNO_BATCHING_STOP "STOP"
+
+#define PNO_PARAMS_DELIMETER " "
+#define PNO_PARAM_CHANNEL_DELIMETER ","
+#define PNO_PARAM_VALUE_DELLIMETER '='
+#define PNO_PARAM_SCANFREQ "SCANFREQ"
+#define PNO_PARAM_BESTN	"BESTN"
+#define PNO_PARAM_MSCAN "MSCAN"
+#define PNO_PARAM_CHANNEL "CHANNEL"
+#define PNO_PARAM_RTT "RTT"
+
+#define PNO_TLV_TYPE_SSID_IE		'S'
+#define PNO_TLV_TYPE_TIME		'T'
+#define PNO_TLV_FREQ_REPEAT		'R'
+#define PNO_TLV_FREQ_EXPO_MAX		'M'
+
+#define MAXNUM_SSID_PER_ADD	16
+#define MAXNUM_PNO_PARAMS 2
+#define PNO_TLV_COMMON_LENGTH	1
+#define DEFAULT_BATCH_MSCAN 16
+
+#define RESULTS_END_MARKER "----\n"
+#define SCAN_END_MARKER "####\n"
+#define AP_END_MARKER "====\n"
+
+enum scan_status {
+	/* SCAN ABORT by other scan */
+	PNO_STATUS_ABORT,
+	/* whether RTT is present or not */
+	PNO_STATUS_RTT_PRESENCE,
+	/* Disable PNO by Driver */
+	PNO_STATUS_DISABLE,
+	/* NORMAL BATCHING GET */
+	PNO_STATUS_NORMAL,
+	/* WLC_E_PFN_BEST_BATCHING */
+	PNO_STATUS_EVENT,
+	PNO_STATUS_MAX
+};
+#define PNO_STATUS_ABORT_MASK 0x0001
+#define PNO_STATUS_RTT_MASK 0x0002
+#define PNO_STATUS_DISABLE_MASK 0x0004
+#define PNO_STATUS_OOM_MASK 0x0010
+
+enum index_mode {
+	INDEX_OF_LEGACY_PARAMS,
+	INDEX_OF_BATCH_PARAMS,
+	INDEX_OF_HOTLIST_PARAMS,
+	INDEX_MODE_MAX
+};
+enum dhd_pno_status {
+	DHD_PNO_DISABLED,
+	DHD_PNO_ENABLED,
+	DHD_PNO_SUSPEND
+};
+typedef struct cmd_tlv {
+	char prefix;
+	char version;
+	char subtype;
+	char reserved;
+} cmd_tlv_t;
+typedef enum dhd_pno_mode {
+	/* Wi-Fi Legacy PNO Mode */
+	DHD_PNO_NONE_MODE = 0,
+	DHD_PNO_LEGACY_MODE = (1 << (0)),
+	/* Wi-Fi Android BATCH SCAN Mode */
+	DHD_PNO_BATCH_MODE = (1 << (1)),
+	/* Wi-Fi Android Hotlist SCAN Mode */
+	DHD_PNO_HOTLIST_MODE = (1 << (2))
+} dhd_pno_mode_t;
+struct dhd_pno_ssid {
+	uint32		SSID_len;
+	uchar		SSID[DOT11_MAX_SSID_LEN];
+	struct list_head list;
+};
+struct dhd_pno_bssid {
+	struct ether_addr	macaddr;
+	/* Bit4: suppress_lost, Bit3: suppress_found */
+	uint16			flags;
+	struct list_head list;
+};
+typedef struct dhd_pno_bestnet_entry {
+	struct ether_addr BSSID;
+	uint8	SSID_len;
+	uint8	SSID[DOT11_MAX_SSID_LEN];
+	int8	RSSI;
+	uint8	channel;
+	uint32	timestamp;
+	uint16	rtt0; /* distance_cm based on RTT */
+	uint16	rtt1; /* distance_cm based on sample standard deviation */
+	unsigned long recorded_time;
+	struct list_head list;
+} dhd_pno_bestnet_entry_t;
+#define BESTNET_ENTRY_SIZE (sizeof(dhd_pno_bestnet_entry_t))
+
+typedef struct dhd_pno_bestnet_header {
+	struct dhd_pno_bestnet_header *next;
+	uint8 reason;
+	uint32 tot_cnt;
+	uint32 tot_size;
+	struct list_head entry_list;
+} dhd_pno_best_header_t;
+#define BEST_HEADER_SIZE (sizeof(dhd_pno_best_header_t))
+
+typedef struct dhd_pno_scan_results {
+	dhd_pno_best_header_t *bestnetheader;
+	uint8 cnt_header;
+	struct list_head list;
+} dhd_pno_scan_results_t;
+#define SCAN_RESULTS_SIZE (sizeof(dhd_pno_scan_results_t))
+
+struct dhd_pno_get_batch_info {
+	/* info related to get batch */
+	char *buf;
+	bool batch_started;
+	uint32 tot_scan_cnt;
+	uint32 expired_tot_scan_cnt;
+	uint32 top_node_cnt;
+	uint32 bufsize;
+	uint32 bytes_written;
+	int reason;
+	struct list_head scan_results_list;
+	struct list_head expired_scan_results_list;
+};
+struct dhd_pno_legacy_params {
+	uint16 scan_fr;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	int pno_repeat;
+	int pno_freq_expo_max;
+	int nssid;
+	struct list_head ssid_list;
+};
+struct dhd_pno_batch_params {
+	int32 scan_fr;
+	uint8 bestn;
+	uint8 mscan;
+	uint8 band;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	uint16 rtt;
+	struct dhd_pno_get_batch_info get_batch;
+};
+struct dhd_pno_hotlist_params {
+	uint8 band;
+	int32 scan_fr;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	uint16 nbssid;
+	struct list_head bssid_list;
+};
+typedef union dhd_pno_params {
+	struct dhd_pno_legacy_params params_legacy;
+	struct dhd_pno_batch_params params_batch;
+	struct dhd_pno_hotlist_params params_hotlist;
+} dhd_pno_params_t;
+typedef struct dhd_pno_status_info {
+	dhd_pub_t *dhd;
+	struct work_struct work;
+	struct mutex pno_mutex;
+	struct completion get_batch_done;
+	bool wls_supported; /* wifi location service supported or not */
+	enum dhd_pno_status pno_status;
+	enum dhd_pno_mode pno_mode;
+	dhd_pno_params_t pno_params_arr[INDEX_MODE_MAX];
+	struct list_head head_list;
+} dhd_pno_status_info_t;
+
+/* wrapper functions */
+extern int
+dhd_dev_pno_enable(struct net_device *dev, int enable);
+
+extern int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev);
+
+extern int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+	uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+extern int
+dhd_dev_pno_set_for_batch(struct net_device *dev,
+	struct dhd_pno_batch_params *batch_params);
+
+extern int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize);
+
+extern int
+dhd_dev_pno_stop_for_batch(struct net_device *dev);
+
+extern int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params);
+
+/* dhd pno functions */
+extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd);
+extern int dhd_pno_enable(dhd_pub_t *dhd, int enable);
+extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssid_list, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params);
+
+extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason);
+
+
+extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd);
+
+extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params);
+
+extern int dhd_pno_stop_for_hotlist(dhd_pub_t *dhd);
+
+extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
+extern int dhd_pno_init(dhd_pub_t *dhd);
+extern int dhd_pno_deinit(dhd_pub_t *dhd);
+#endif /* PNO_SUPPORT */
+
+#if (defined(NDISVER) && (NDISVER >= 0x0630)) && defined(PNO_SUPPORT)
+extern int dhd_pno_cfg(dhd_pub_t *dhd, wl_pfn_cfg_t *pcfg);
+extern int dhd_pno_suspend(dhd_pub_t *dhd, int pfn_suspend);
+extern int dhd_pno_set_add(dhd_pub_t *dhd, wl_pfn_t *netinfo, int nssid, ushort scan_fr,
+	ushort slowscan_fr, uint8 pno_repeat, uint8 pno_freq_expo_max, int16 flags);
+extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pno_clean(dhd_pub_t *dhd);
+#endif /* (defined(NDISVER) && (NDISVER >= 0x0630)) && defined(PNO_SUPPORT) */
+#endif /* __DHD_PNO_H__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_proto.h b/drivers/net/wireless/bcmdhd/dhd_proto.h
new file mode 100644
index 0000000000000000000000000000000000000000..a44eacffe921af3f9e190fb2b7c822fc130a77ec
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_proto.h
@@ -0,0 +1,125 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_proto.h 490409 2014-07-10 16:34:27Z $
+ */
+
+#ifndef _dhd_proto_h_
+#define _dhd_proto_h_
+
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+
+#ifndef IOCTL_RESP_TIMEOUT
+#define IOCTL_RESP_TIMEOUT  2000  /* in milliseconds, default value for production FW */
+#endif /* IOCTL_RESP_TIMEOUT */
+
+#ifndef MFG_IOCTL_RESP_TIMEOUT
+#define MFG_IOCTL_RESP_TIMEOUT  20000  /* in milliseconds, default value for MFG FW */
+#endif /* MFG_IOCTL_RESP_TIMEOUT */
+
+/*
+ * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int dhd_prot_attach(dhd_pub_t *dhdp);
+
+/* Initializes the index block for dma'ing indices */
+extern int dhd_prot_init_index_dma_block(dhd_pub_t *dhdp, uint8 type, uint32 length);
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+extern void dhd_prot_detach(dhd_pub_t *dhdp);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int dhd_sync_with_dongle(dhd_pub_t *dhdp);
+
+/* Protocol initialization needed for IOCTL/IOVAR path */
+extern int dhd_prot_init(dhd_pub_t *dhd);
+
+/* Stop protocol: sync w/dongle state. */
+extern void dhd_prot_stop(dhd_pub_t *dhdp);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+extern uint dhd_prot_hdrlen(dhd_pub_t *, void *txp);
+
+/* Remove any protocol-specific data header. */
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len);
+
+/* Use protocol to issue ioctl to dongle */
+extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len);
+
+/* Handles a protocol control response asynchronously */
+extern int dhd_prot_ctl_complete(dhd_pub_t *dhd);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                             void *params, int plen, void *arg, int len, bool set);
+
+/* Add prot dump output to a buffer */
+extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Update local copy of dongle statistics */
+extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+
+extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen);
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+	uint reorder_info_len, void **pkt, uint32 *free_buf_count);
+
+#ifdef BCMPCIE
+extern int dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd);
+extern int dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd);
+extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd);
+extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd);
+extern int dhd_post_dummy_msg(dhd_pub_t *dhd);
+extern int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len);
+extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset);
+extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx);
+extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay);
+
+extern int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info);
+extern int dhd_post_tx_ring_item(dhd_pub_t *dhd, void *PKTBUF, uint8 ifindex);
+extern int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b);
+extern uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx);
+extern uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx);
+extern void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
+	struct bcmstrbuf *strbuf);
+extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf);
+extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info);
+extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id, bool in_lock);
+extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern void dhd_prot_clear(dhd_pub_t *dhd);
+
+#endif /* BCMPCIE */
+
+/********************************
+ * For version-string expansion *
+ */
+#if defined(BDC)
+#define DHD_PROTOCOL "bdc"
+#elif defined(CDC)
+#define DHD_PROTOCOL "cdc"
+#else
+#define DHD_PROTOCOL "unknown"
+#endif /* proto */
+
+#endif /* _dhd_proto_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c
new file mode 100644
index 0000000000000000000000000000000000000000..34dc4cdf5383c51ce4b58658ac844b7abe8adbf0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c
@@ -0,0 +1,8580 @@
+/*
+ * DHD Bus Module for SDIO
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_sdio.c 489913 2014-07-08 18:57:48Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmsdh.h>
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <siutils.h>
+#include <hndpmu.h>
+#include <hndsoc.h>
+#include <bcmsdpcm.h>
+#if defined(DHD_DEBUG)
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <sbchipc.h>
+#include <sbhnddma.h>
+
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <bcmsdbus.h>
+
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+bool dhd_mp_halting(dhd_pub_t *dhdp);
+extern void bcmsdh_waitfor_iodrain(void *sdh);
+extern void bcmsdh_reject_ioreqs(void *sdh, bool reject);
+extern bool  bcmsdh_fatal_error(void *sdh);
+
+#ifndef DHDSDIO_MEM_DUMP_FNAME
+#define DHDSDIO_MEM_DUMP_FNAME         "mem_dump"
+#endif
+
+#define QLEN		(1024) /* bulk rx and tx queue lengths */
+#define FCHI		(QLEN - 10)
+#define FCLOW		(FCHI / 2)
+#define PRIOMASK	7
+
+#define TXRETRIES	2	/* # of retries for tx frames */
+#define READ_FRM_CNT_RETRIES	3
+#ifndef DHD_RXBOUND
+#define DHD_RXBOUND	50	/* Default for max rx frames in one scheduling */
+#endif
+
+#ifndef DHD_TXBOUND
+#define DHD_TXBOUND	20	/* Default for max tx frames in one scheduling */
+#endif
+
+#define DHD_TXMINMAX	1	/* Max tx frames if rx still pending */
+
+#define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
+#define MAX_NVRAMBUF_SIZE	4096	/* max nvram buf size */
+#define MAX_DATA_BUF	(64 * 1024)	/* Must be large enough to hold biggest possible glom */
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD   32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN	(SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#define SDPCM_HDRLEN_TXGLOM	(SDPCM_HDRLEN + SDPCM_HWEXT_LEN)
+#define MAX_TX_PKTCHAIN_CNT	SDPCM_MAXGLOM_SIZE
+
+#ifdef SDTEST
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ	32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ	2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define DHD_WAIT_F2RDY	3000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at 1 second (1000000us).
+ */
+#if (PMU_MAX_TRANSITION_DLY <= 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+#endif
+
+/* Hooks for a custom threshold limiting the number of tx frames handled during rx processing */
+#define DEFAULT_TXINRX_THRES    0
+#ifndef CUSTOM_TXINRX_THRES
+#define CUSTOM_TXINRX_THRES     DEFAULT_TXINRX_THRES
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* Packet free applicable unconditionally for sdio and sdspi.  Conditional if
+ * bufpool was present for gspi bus.
+ */
+#define PKTFREE2()		if ((bus->bus != SPI_BUS) || bus->usebufpool) \
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+DEFINE_MUTEX(_dhd_sdio_mutex_lock_);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif /* MULTIPLE_SUPPLICANT */
+
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX	192
+#define CONSOLE_BUFFER_MAX	2024
+typedef struct dhd_console {
+	uint		count;			/* Poll interval msec counter */
+	uint		log_addr;		/* Log struct address (fixed) */
+	hnd_log_t	log;			/* Log struct (host copy) */
+	uint		bufsize;		/* Size of log buffer */
+	uint8		*buf;			/* Log buffer (host copy) */
+	uint		last;			/* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
+
+#define	REMAP_ENAB(bus)			((bus)->remap)
+#define	REMAP_ISADDR(bus, a)		(((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+#define	KSO_ENAB(bus)			((bus)->kso)
+#define	SR_ENAB(bus)			((bus)->_srenab)
+#define	SLPAUTO_ENAB(bus)		((SR_ENAB(bus)) && ((bus)->_slpauto))
+#define	MIN_RSRC_ADDR			(SI_ENUM_BASE + 0x618)
+#define	MIN_RSRC_SR			0x3
+#define	CORE_CAPEXT_ADDR		(SI_ENUM_BASE + 0x64c)
+#define	CORE_CAPEXT_SR_SUPPORTED_MASK	(1 << 1)
+#define RCTL_MACPHY_DISABLE_MASK	(1 << 26)
+#define RCTL_LOGIC_DISABLE_MASK		(1 << 27)
+
+#define	OOB_WAKEUP_ENAB(bus)		((bus)->_oobwakeup)
+#define	GPIO_DEV_SRSTATE		16	/* Host gpio17 mapped to device gpio0 SR state */
+#define	GPIO_DEV_SRSTATE_TIMEOUT	320000	/* 320ms */
+#define	GPIO_DEV_WAKEUP			17	/* Host gpio17 mapped to device gpio1 wakeup */
+#define	CC_CHIPCTRL2_GPIO1_WAKEUP	(1  << 0)
+#define	CC_CHIPCTRL3_SR_ENG_ENABLE	(1  << 2)
+#define OVERFLOW_BLKSZ512_WM		96
+#define OVERFLOW_BLKSZ512_MES		80
+
+#define CC_PMUCC3	(0x3)
+/* Private data for SDIO bus interaction */
+typedef struct dhd_bus {
+	dhd_pub_t	*dhd;
+
+	bcmsdh_info_t	*sdh;			/* Handle for BCMSDH calls */
+	si_t		*sih;			/* Handle for SI calls */
+	char		*vars;			/* Variables (from CIS and/or other) */
+	uint		varsz;			/* Size of variables buffer */
+	uint32		sbaddr;			/* Current SB window pointer (-1, invalid) */
+
+	sdpcmd_regs_t	*regs;			/* Registers for SDIO core */
+	uint		sdpcmrev;		/* SDIO core revision */
+	uint		armrev;			/* CPU core revision */
+	uint		ramrev;			/* SOCRAM core revision */
+	uint32		ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		orig_ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		srmemsize;		/* Size of SRMEM */
+
+	uint32		bus;			/* gSPI or SDIO bus */
+	uint32		bus_num;		/* bus number */
+	uint32		slot_num;		/* slot ID */
+	uint32		hostintmask;	/* Copy of Host Interrupt Mask */
+	uint32		intstatus;		/* Intstatus bits (events) pending */
+	bool		dpc_sched;		/* Indicates DPC schedule (intrpt rcvd) */
+	bool		fcstate;		/* State of dongle flow-control */
+
+	uint16		cl_devid;		/* cached devid for dhdsdio_probe_attach() */
+	char		*fw_path;		/* module_param: path to firmware image */
+	char		*nv_path;		/* module_param: path to nvram vars file */
+	const char      *nvram_params;		/* user specified nvram params. */
+
+	uint		blocksize;		/* Block size of SDIO transfers */
+	uint		roundup;		/* Max roundup limit */
+
+	struct pktq	txq;			/* Queue length used for flow-control */
+	uint8		flowcontrol;		/* per prio flow control bitmask */
+	uint8		tx_seq;			/* Transmit sequence number (next) */
+	uint8		tx_max;			/* Maximum transmit sequence allowed */
+
+	uint8		hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+	uint8		*rxhdr;			/* Header of current rx frame (in hdrbuf) */
+	uint16		nextlen;		/* Next Read Len from last header */
+	uint8		rx_seq;			/* Receive sequence number (expected) */
+	bool		rxskip;			/* Skip receive (awaiting NAK ACK) */
+
+	void		*glomd;			/* Packet containing glomming descriptor */
+	void		*glom;			/* Packet chain for glommed superframe */
+	uint		glomerr;		/* Glom packet read errors */
+
+	uint8		*rxbuf;			/* Buffer for receiving control packets */
+	uint		rxblen;			/* Allocated length of rxbuf */
+	uint8		*rxctl;			/* Aligned pointer into rxbuf */
+	uint8		*databuf;		/* Buffer for receiving big glom packet */
+	uint8		*dataptr;		/* Aligned pointer into databuf */
+	uint		rxlen;			/* Length of valid data in buffer */
+
+	uint8		sdpcm_ver;		/* Bus protocol reported by dongle */
+
+	bool		intr;			/* Use interrupts */
+	bool		poll;			/* Use polling */
+	bool		ipend;			/* Device interrupt is pending */
+	bool		intdis;			/* Interrupts disabled by isr */
+	uint 		intrcount;		/* Count of device interrupt callbacks */
+	uint		lastintrs;		/* Count as of last watchdog timer */
+	uint		spurious;		/* Count of spurious interrupts */
+	uint		pollrate;		/* Ticks between device polls */
+	uint		polltick;		/* Tick counter */
+	uint		pollcnt;		/* Count of active polls */
+
+#ifdef DHD_DEBUG
+	dhd_console_t	console;		/* Console output polling support */
+	uint		console_addr;		/* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+	uint		regfails;		/* Count of R_REG/W_REG failures */
+
+	uint		clkstate;		/* State of sd and backplane clock(s) */
+	bool		activity;		/* Activity flag for clock down */
+	int32		idletime;		/* Control for activity timeout */
+	int32		idlecount;		/* Activity timeout counter */
+	int32		idleclock;		/* How to set bus driver when idle */
+	int32		sd_divisor;		/* Speed control to bus driver */
+	int32		sd_mode;		/* Mode control to bus driver */
+	int32		sd_rxchain;		/* If bcmsdh api accepts PKT chains */
+	bool		use_rxchain;		/* If dhd should use PKT chains */
+	bool		sleeping;		/* Is SDIO bus sleeping? */
+	wait_queue_head_t bus_sleep;
+	uint		rxflow_mode;		/* Rx flow control mode */
+	bool		rxflow;			/* Is rx flow control on */
+	uint		prev_rxlim_hit;		/* Is prev rx limit exceeded (per dpc schedule) */
+	bool		alp_only;		/* Don't use HT clock (ALP only) */
+	/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
+	bool		usebufpool;
+	int32		txinrx_thres;	/* num of in-queued pkts */
+	int32		dotxinrx;	/* tx first in dhdsdio_readframes */
+#ifdef SDTEST
+	/* external loopback */
+	bool		ext_loop;
+	uint8		loopid;
+
+	/* pktgen configuration */
+	uint		pktgen_freq;		/* Ticks between bursts */
+	uint		pktgen_count;		/* Packets to send each burst */
+	uint		pktgen_print;		/* Bursts between count displays */
+	uint		pktgen_total;		/* Stop after this many */
+	uint		pktgen_minlen;		/* Minimum packet data len */
+	uint		pktgen_maxlen;		/* Maximum packet data len */
+	uint		pktgen_mode;		/* Configured mode: tx, rx, or echo */
+	uint		pktgen_stop;		/* Number of tx failures causing stop */
+
+	/* active pktgen fields */
+	uint		pktgen_tick;		/* Tick counter for bursts */
+	uint		pktgen_ptick;		/* Burst counter for printing */
+	uint		pktgen_sent;		/* Number of test packets generated */
+	uint		pktgen_rcvd;		/* Number of test packets received */
+	uint		pktgen_prev_time;	/* Time at which previous stats were printed */
+	uint		pktgen_prev_sent;	/* Number of test packets generated when
+						 * previous stats were printed
+						 */
+	uint		pktgen_prev_rcvd;	/* Number of test packets received when
+						 * previous stats were printed
+						 */
+	uint		pktgen_fail;		/* Number of failed send attempts */
+	uint16		pktgen_len;		/* Length of next packet to send */
+#define PKTGEN_RCV_IDLE     (0)
+#define PKTGEN_RCV_ONGOING  (1)
+	uint16		pktgen_rcv_state;		/* receive state */
+	uint		pktgen_rcvd_rcvsession;	/* test pkts rcvd per rcv session. */
+#endif /* SDTEST */
+
+	/* Some additional counters */
+	uint		tx_sderrs;		/* Count of tx attempts with sd errors */
+	uint		fcqueued;		/* Tx packets that got queued */
+	uint		rxrtx;			/* Count of rtx requests (NAK to dongle) */
+	uint		rx_toolong;		/* Receive frames too long to receive */
+	uint		rxc_errors;		/* SDIO errors when reading control frames */
+	uint		rx_hdrfail;		/* SDIO errors on header reads */
+	uint		rx_badhdr;		/* Bad received headers (roosync?) */
+	uint		rx_badseq;		/* Mismatched rx sequence number */
+	uint		fc_rcvd;		/* Number of flow-control events received */
+	uint		fc_xoff;		/* Number which turned on flow-control */
+	uint		fc_xon;			/* Number which turned off flow-control */
+	uint		rxglomfail;		/* Failed deglom attempts */
+	uint		rxglomframes;		/* Number of glom frames (superframes) */
+	uint		rxglompkts;		/* Number of packets from glom frames */
+	uint		f2rxhdrs;		/* Number of header reads */
+	uint		f2rxdata;		/* Number of frame data reads */
+	uint		f2txdata;		/* Number of f2 frame writes */
+	uint		f1regdata;		/* Number of f1 register accesses */
+#ifdef DHDENABLE_TAILPAD
+	uint		tx_tailpad_chain;	/* Number of tail padding by chaining pad_pkt */
+	uint		tx_tailpad_pktget;	/* Number of tail padding by new PKTGET */
+#endif /* DHDENABLE_TAILPAD */
+	uint8		*ctrl_frame_buf;
+	uint32		ctrl_frame_len;
+	bool		ctrl_frame_stat;
+	uint32		rxint_mode;	/* rx interrupt mode */
+	bool		remap;		/* Contiguous 1MB RAM: 512K socram + 512K devram
+					 * Available with socram rev 16
+					 * Remap region not DMA-able
+					 */
+	bool		kso;
+	bool		_slpauto;
+	bool		_oobwakeup;
+	bool		_srenab;
+	bool        readframes;
+	bool        reqbussleep;
+	uint32		resetinstr;
+	uint32		dongle_ram_base;
+
+	void		*glom_pkt_arr[SDPCM_MAXGLOM_SIZE];	/* Array of pkts for glomming */
+	uint32		txglom_cnt;	/* Number of pkts in the glom array */
+	uint32		txglom_total_len;	/* Total length of pkts in glom array */
+	bool		txglom_enable;	/* Flag to indicate whether tx glom is enabled/disabled */
+	uint32		txglomsize;	/* Glom size limitation */
+#ifdef DHDENABLE_TAILPAD
+	void		*pad_pkt;
+#endif /* DHDENABLE_TAILPAD */
+} dhd_bus_t;
+
+/* clkstate */
+#define CLK_NONE	0
+#define CLK_SDONLY	1
+#define CLK_PENDING	2	/* Not used yet */
+#define CLK_AVAIL	3
+
+#define DHD_NOPMU(dhd)	(FALSE)
+
+#ifdef DHD_DEBUG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* DHD_DEBUG */
+
+/* Deferred transmit */
+const uint dhd_deferred_tx = 1;
+
+extern uint dhd_watchdog_ms;
+
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+
+/* Tx/Rx bounds */
+uint dhd_txbound;
+uint dhd_rxbound;
+uint dhd_txminmax = DHD_TXMINMAX;
+
+/* override the RAM size if possible */
+#define DONGLE_MIN_RAMSIZE (128 * 1024)
+int dhd_dongle_ramsize;
+
+uint dhd_doflow = TRUE;
+uint dhd_dpcpoll = FALSE;
+
+module_param(dhd_doflow, uint, 0644);
+module_param(dhd_dpcpoll, uint, 0644);
+
+static bool dhd_alignctl;
+
+static bool sd1idle;
+
+static bool retrydata;
+#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
+
+static uint watermark = 8;
+static uint mesbusyctrl = 0;
+static const uint firstread = DHD_FIRSTREAD;
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Force even SD lengths (some host controllers mess up on odd bytes) */
+static bool forcealign;
+
+#define ALIGNMENT  4
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
+#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
+#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
+#define PKTALIGN(osh, p, len, align)					\
+	do {								\
+		uintptr datalign;						\
+		datalign = (uintptr)PKTDATA((osh), (p));		\
+		datalign = ROUNDUP(datalign, (align)) - datalign;	\
+		ASSERT(datalign < (align));				\
+		ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign));	\
+		if (datalign)						\
+			PKTPULL((osh), (p), (uint)datalign);			\
+		PKTSETLEN((osh), (p), (len));				\
+	} while (0)
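+
+/* Worked example of the PKTALIGN() arithmetic above (illustrative numbers only):
+ * if PKTDATA() returns 0x1002 and align is 4, then
+ * datalign = ROUNDUP(0x1002, 4) - 0x1002 = 2, so the data pointer is pulled
+ * forward by 2 bytes and the length trimmed to 'len', leaving the payload
+ * 4-byte aligned for the SDIO transfer.
+ */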
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+/* To check if there's window offered */
+#define DATAOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) > 1) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* To check if there's window offered for ctrl frame */
+#define TXCTLOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* Number of pkts available in dongle for data RX */
+#define DATABUFCNT(bus) \
+	((uint8)(bus->tx_max - bus->tx_seq) - 1)
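+
+/* Worked example of the flow-control window arithmetic above (sequence numbers
+ * made up for illustration): with tx_max = 0x05 and tx_seq = 0xFE the 8-bit
+ * difference (uint8)(0x05 - 0xFE) is 0x07, so DATAOK() and TXCTLOK() both pass
+ * and DATABUFCNT() reports 6 free dongle buffers.  If tx_seq ever ran ahead of
+ * tx_max, the top bit of the difference would be set and both checks would fail.
+ */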
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhd_bus_t *bus! */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		regvar = R_REG(bus->dhd->osh, regaddr); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) { \
+			DHD_ERROR(("%s: FAILED " #regvar " READ, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+			regvar = 0; \
+		} \
+	} \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		W_REG(bus->dhd->osh, regaddr, regval); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) \
+			DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+	} \
+} while (0)
+
+#define BUS_WAKE(bus) \
+	do { \
+		bus->idlecount = 0; \
+		if ((bus)->sleeping) \
+			dhdsdio_bussleep((bus), FALSE); \
+	} while (0);
+
+/*
+ * pktavail interrupts from dongle to host can be managed in 3 different ways
+ * whenever there is a packet available in dongle to transmit to host.
+ *
+ * Mode 0:	Dongle writes the software host mailbox and host is interrupted.
+ * Mode 1:	(sdiod core rev >= 4)
+ *		Device sets a new bit in the intstatus whenever there is a packet
+ *		available in fifo.  Host can't clear this specific status bit until all the
+ *		packets are read from the FIFO.  No need to ack dongle intstatus.
+ * Mode 2:	(sdiod core rev >= 4)
+ *		Device sets a bit in the intstatus, and host acks this by writing
+ *		one to this bit.  Dongle won't generate anymore packet interrupts
+ *		until host reads all the packets from the dongle and reads a zero to
+ *		figure that there are no more packets.  No need to disable host ints.
+ *		Need to ack the intstatus.
+ */
+
+#define SDIO_DEVICE_HMB_RXINT		0	/* default old way */
+#define SDIO_DEVICE_RXDATAINT_MODE_0	1	/* from sdiod rev 4 */
+#define SDIO_DEVICE_RXDATAINT_MODE_1	2	/* from sdiod rev 4 */
+
+
+#define FRAME_AVAIL_MASK(bus) 	\
+	((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL)
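+
+/* i.e. in the default SDIO_DEVICE_HMB_RXINT mode the host-mailbox frame
+ * indication is treated as "packet available", while in either RXDATAINT mode
+ * the I_XMTDATA_AVAIL status bit is used instead.
+ */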
+
+#define DHD_BUS			SDIO_BUS
+
+#define PKT_AVAILABLE(bus, intstatus)	((intstatus) & (FRAME_AVAIL_MASK(bus)))
+
+#define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
+
+#define GSPI_PR55150_BAILOUT
+
+#ifdef SDTEST
+static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
+static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count);
+#endif
+
+#ifdef DHD_DEBUG
+static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size);
+static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror);
+#endif /* DHD_DEBUG */
+
+static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
+static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
+
+static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_disconnect(void *ptr);
+static bool dhdsdio_chipmatch(uint16 chipid);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
+                                 void * regsva, uint16  devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation,
+	bool reset_flag);
+
+static void dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size);
+static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry);
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt);
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+	int prev_chain_total_len, bool last_chained_pkt,
+	int *pad_pkt_len, void **new_pkt);
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt);
+
+static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_firmware(dhd_bus_t *bus);
+
+static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
+static int dhdsdio_download_nvram(dhd_bus_t *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(dhd_bus_t *bus);
+#endif
+static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep);
+static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok);
+static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus);
+
+#ifdef WLMEDIA_HTSF
+#include <htsf.h>
+extern uint32 dhd_get_htsf(void *dhd, int ifidx);
+#endif /* WLMEDIA_HTSF */
+
+static void
+dhdsdio_tune_fifoparam(struct dhd_bus *bus)
+{
+	int err;
+	uint8 devctl, wm, mes;
+
+	if (bus->sih->buscorerev >= 15) {
+		/* See .ppt in PR for these recommended values */
+		if (bus->blocksize == 512) {
+			wm = OVERFLOW_BLKSZ512_WM;
+			mes = OVERFLOW_BLKSZ512_MES;
+		} else {
+			mes = bus->blocksize/4;
+			wm = bus->blocksize/4;
+		}
+
+		watermark = wm;
+		mesbusyctrl = mes;
+	} else {
+		DHD_INFO(("skip fifotune: SdioRev(%d) is lower than minimal requested ver\n",
+			bus->sih->buscorerev));
+		return;
+	}
+
+	/* Update watermark */
+	if (wm > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err);
+
+		devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+	}
+
+	/* Update MES */
+	if (mes > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+			(mes | SBSDIO_MESBUSYCTRL_ENAB), &err);
+	}
+
+	DHD_INFO(("Apply overflow WAR: 0x%02x 0x%02x 0x%02x\n",
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, &err)));
+}
+
+static void
+dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size =  DONGLE_MIN_RAMSIZE;
+	/* Restrict the ramsize to user specified limit */
+	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+		dhd_dongle_ramsize, min_size));
+	if ((dhd_dongle_ramsize > min_size) &&
+		(dhd_dongle_ramsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_ramsize;
+}
+
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+	int err = 0;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+	                 (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+		                 (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+		                 (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+	return err;
+}
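+
+/* For illustration (address value is hypothetical): programming a backplane
+ * window base of 0x18000000 with the function above writes (0x18000000 >> 8) to
+ * SBADDRLOW, (0x18000000 >> 16) to SBADDRMID and (0x18000000 >> 24) to
+ * SBADDRHIGH, each masked down to the bits that register actually implements,
+ * so the three function-1 registers together hold the upper bits of the window
+ * base address.
+ */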
+
+
+#ifdef USE_OOB_GPIO1
+static int
+dhdsdio_oobwakeup_init(dhd_bus_t *bus)
+{
+	uint32 val, addr, data;
+
+	bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP);
+
+	addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+
+	/* Set device for gpio1 wakeup */
+	bcmsdh_reg_write(bus->sdh, addr, 4, 2);
+	val = bcmsdh_reg_read(bus->sdh, data, 4);
+	val |= CC_CHIPCTRL2_GPIO1_WAKEUP;
+	bcmsdh_reg_write(bus->sdh, data, 4, val);
+
+	bus->_oobwakeup = TRUE;
+
+	return 0;
+}
+#endif /* USE_OOB_GPIO1 */
+
+/*
+ * Query if FW is in SR mode
+ */
+static bool
+dhdsdio_sr_cap(dhd_bus_t *bus)
+{
+	bool cap = FALSE;
+	uint32  core_capext, addr, data;
+
+	if (bus->sih->chip == BCM43430_CHIP_ID) {
+		/* check if fw initialized sr engine */
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, sr_control1);
+		if (bcmsdh_reg_read(bus->sdh, addr, 4) != 0)
+			cap = TRUE;
+
+		return cap;
+	}
+	if (bus->sih->chip == BCM4324_CHIP_ID) {
+			addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+			data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+			bcmsdh_reg_write(bus->sdh, addr, 4, 3);
+			core_capext = bcmsdh_reg_read(bus->sdh, data, 4);
+	} else if (bus->sih->chip == BCM4330_CHIP_ID) {
+			core_capext = FALSE;
+	} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+		(bus->sih->chip == BCM4339_CHIP_ID) ||
+		(bus->sih->chip == BCM43349_CHIP_ID) ||
+		(bus->sih->chip == BCM4345_CHIP_ID) ||
+		(bus->sih->chip == BCM4354_CHIP_ID) ||
+		(bus->sih->chip == BCM4356_CHIP_ID) ||
+		(bus->sih->chip == BCM4358_CHIP_ID) ||
+		(BCM4349_CHIP(bus->sih->chip))		||
+		(bus->sih->chip == BCM4350_CHIP_ID)) {
+		core_capext = TRUE;
+	} else {
+			core_capext = bcmsdh_reg_read(bus->sdh, CORE_CAPEXT_ADDR, 4);
+			core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK);
+	}
+	if (!(core_capext))
+		return FALSE;
+
+	if (bus->sih->chip == BCM4324_CHIP_ID) {
+		/* FIX: Should change to query SR control register instead */
+		cap = TRUE;
+	} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+		(bus->sih->chip == BCM4339_CHIP_ID) ||
+		(bus->sih->chip == BCM43349_CHIP_ID) ||
+		(bus->sih->chip == BCM4345_CHIP_ID) ||
+		(bus->sih->chip == BCM4354_CHIP_ID) ||
+		(bus->sih->chip == BCM4356_CHIP_ID) ||
+		(bus->sih->chip == BCM4358_CHIP_ID) ||
+		(bus->sih->chip == BCM4350_CHIP_ID)) {
+		uint32 enabval = 0;
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+		data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+		bcmsdh_reg_write(bus->sdh, addr, 4, CC_PMUCC3);
+		enabval = bcmsdh_reg_read(bus->sdh, data, 4);
+
+		if ((bus->sih->chip == BCM4350_CHIP_ID) ||
+			(bus->sih->chip == BCM4345_CHIP_ID) ||
+			(bus->sih->chip == BCM4354_CHIP_ID) ||
+			(bus->sih->chip == BCM4356_CHIP_ID) ||
+			(bus->sih->chip == BCM4358_CHIP_ID))
+			enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE;
+
+		if (enabval)
+			cap = TRUE;
+	} else {
+		data = bcmsdh_reg_read(bus->sdh,
+			SI_ENUM_BASE + OFFSETOF(chipcregs_t, retention_ctl), 4);
+		if ((data & (RCTL_MACPHY_DISABLE_MASK | RCTL_LOGIC_DISABLE_MASK)) == 0)
+			cap = TRUE;
+	}
+
+	return cap;
+}
+
+static int
+dhdsdio_srwar_init(dhd_bus_t *bus)
+{
+#if !defined(NDISVER) || (NDISVER < 0x0630)
+	bcmsdh_gpio_init(bus->sdh);
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
+
+#ifdef USE_OOB_GPIO1
+	dhdsdio_oobwakeup_init(bus);
+#endif
+
+
+	return 0;
+}
+
+static int
+dhdsdio_sr_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2))
+		dhdsdio_srwar_init(bus);
+
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
+		1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err);
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+
+	/* Add CMD14 Support */
+	dhdsdio_devcap_set(bus,
+		(SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT));
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+		SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err);
+
+	bus->_slpauto = dhd_slpauto ? TRUE : FALSE;
+
+	bus->_srenab = TRUE;
+
+	return 0;
+}
+
+/*
+ * FIX: Be sure KSO bit is enabled
+ * Currently, it's defaulting to 0 which should be 1.
+ */
+static int
+dhdsdio_clk_kso_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	/* set flag */
+	bus->kso = TRUE;
+
+	/*
+	 * Enable KeepSdioOn (KSO) bit for normal operation
+	 * Default is 0 (4334A0) so set it. Fixed in B0.
+	 */
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL);
+	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err);
+		if (err)
+			DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err));
+	}
+
+	return 0;
+}
+
+#define KSO_DBG(x)
+#define KSO_WAIT_US 50
+#define KSO_WAIT_MS 1
+#define KSO_SLEEP_RETRY_COUNT 20
+#define ERROR_BCME_NODEVICE_MAX 1
+
+#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
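+/* Assuming the 1000000us PMU_MAX_TRANSITION_DLY enforced earlier in this file
+ * and the 50us KSO_WAIT_US above, this evaluates to 20000 polls, i.e. roughly
+ * one second of retrying (plus the longer KSO_WAIT_MS sleeps taken every
+ * KSO_SLEEP_RETRY_COUNT attempts).
+ */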
+static int
+dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
+{
+	uint8 wr_val = 0, rd_val, cmp_val, bmask;
+	int err = 0;
+	int try_cnt = 0;
+
+	KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR")));
+
+	wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+
+	if (on) {
+		cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |  SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
+		bmask = cmp_val;
+
+		OSL_SLEEP(3);
+	} else {
+		/* Put device to sleep, turn off  KSO  */
+		cmp_val = 0;
+		bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
+	}
+
+	do {
+		rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+		if (((rd_val & bmask) == cmp_val) && !err)
+			break;
+
+		KSO_DBG(("%s> KSO wr/rd retry:%d, ERR:%x \n", __FUNCTION__, try_cnt, err));
+
+		if (((try_cnt + 1) % KSO_SLEEP_RETRY_COUNT) == 0) {
+			OSL_SLEEP(KSO_WAIT_MS);
+		} else
+			OSL_DELAY(KSO_WAIT_US);
+
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
+
+
+	if (try_cnt > 2)
+		KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n",
+			__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
+
+	if (try_cnt > MAX_KSO_ATTEMPTS)  {
+		DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n",
+			__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
+	}
+	return err;
+}
+
+static int
+dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0;
+
+	if (on == FALSE) {
+
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+		dhdsdio_clk_kso_enab(bus, FALSE);
+	} else {
+		DHD_ERROR(("%s: KSO enable\n", __FUNCTION__));
+
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+
+		dhdsdio_clk_kso_enab(bus, TRUE);
+
+		DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__,
+			dhdsdio_sleepcsr_get(bus)));
+	}
+
+	bus->kso = on;
+	BCM_REFERENCE(err);
+
+	return 0;
+}
+
+static uint8
+dhdsdio_sleepcsr_get(dhd_bus_t *bus)
+{
+	int err = 0;
+	uint8 val = 0;
+
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+	if (err)
+		DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err));
+
+	return val;
+}
+
+uint8
+dhdsdio_devcap_get(dhd_bus_t *bus)
+{
+	return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL);
+}
+
+static int
+dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap)
+{
+	int err = 0;
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err);
+	if (err)
+		DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err));
+
+	return 0;
+}
+
+static int
+dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0, retry;
+	uint8 val;
+
+	retry = 0;
+	if (on == TRUE) {
+		/* Enter Sleep */
+
+		/* Be sure we request clk before going to sleep
+		 * so we can wake-up with clk request already set;
+		 * otherwise the device can go back to sleep immediately
+		 */
+		if (!SLPAUTO_ENAB(bus))
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		else {
+			val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+			if ((val & SBSDIO_CSR_MASK) == 0) {
+				DHD_ERROR(("%s: No clock before enter sleep:0x%x\n",
+					__FUNCTION__, val));
+
+				/* Reset clock request */
+				bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					SBSDIO_ALP_AVAIL_REQ, &err);
+				DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__,
+					bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+			}
+		}
+
+		DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, TRUE);
+#else
+		err = dhdsdio_clk_kso_enab(bus, FALSE);
+		if (OOB_WAKEUP_ENAB(bus))
+		{
+#if !defined(NDISVER) || (NDISVER < 0x0630)
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE);  /* GPIO_1 is off */
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
+		}
+#endif /* USE_CMD14 */
+	} else {
+		/* Exit Sleep */
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+#if !defined(NDISVER) || (NDISVER < 0x0630)
+
+		if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) {
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) != TRUE),
+				GPIO_DEV_SRSTATE_TIMEOUT);
+
+			if (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) == FALSE) {
+				DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n"));
+			}
+		}
+#endif
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, FALSE);
+		if (SLPAUTO_ENAB(bus) && (err != 0)) {
+			OSL_DELAY(10000);
+			DHD_TRACE(("%s: Resync device sleep\n", __FUNCTION__));
+
+			/* Toggle sleep to resync with host and device */
+			err = bcmsdh_sleep(bus->sdh, TRUE);
+			OSL_DELAY(10000);
+			err = bcmsdh_sleep(bus->sdh, FALSE);
+
+			if (err) {
+				OSL_DELAY(10000);
+				DHD_ERROR(("%s: CMD14 exit failed again!\n", __FUNCTION__));
+
+				/* Toggle sleep to resync with host and device */
+				err = bcmsdh_sleep(bus->sdh, TRUE);
+				OSL_DELAY(10000);
+				err = bcmsdh_sleep(bus->sdh, FALSE);
+				if (err) {
+					DHD_ERROR(("%s: CMD14 exit failed twice!\n", __FUNCTION__));
+					DHD_ERROR(("%s: FATAL: Device non-response!\n",
+						__FUNCTION__));
+					err = 0;
+				}
+			}
+		}
+#else
+		if (OOB_WAKEUP_ENAB(bus))
+		{
+#if !defined(NDISVER) || (NDISVER < 0x0630)
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE);  /* GPIO_1 is on */
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
+		}
+		do {
+			err = dhdsdio_clk_kso_enab(bus, TRUE);
+			if (err)
+				OSL_SLEEP(10);
+		} while ((err != 0) && (++retry < 3));
+
+		if (err != 0) {
+			DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry));
+			err = 0; /* continue anyway */
+		}
+#endif /* !USE_CMD14 */
+
+		if (err == 0) {
+			uint8 csr;
+
+			/* Wait for device ready during transition to wake-up */
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(((csr = dhdsdio_sleepcsr_get(bus)) &
+				SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) !=
+				(SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (20000));
+
+			DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr));
+
+			if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) {
+				DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n",
+					__FUNCTION__, csr));
+				err = BCME_NODEVICE;
+			}
+
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+				SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) !=
+				(SBSDIO_HT_AVAIL)), (10000));
+
+			DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr));
+			if (!err && ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL)) {
+				DHD_ERROR(("%s:ERROR: device NOT Ready! 0x%x\n",
+					__FUNCTION__, csr));
+				err = BCME_NODEVICE;
+			}
+		}
+	}
+
+	/* Update if successful */
+	if (err == 0)
+		bus->kso = on ? FALSE : TRUE;
+	else {
+		DHD_ERROR(("%s: Sleep request failed: kso:%d on:%d err:%d\n",
+			__FUNCTION__, bus->kso, on, err));
+		if (!on && retry > 2)
+			bus->kso = FALSE;
+	}
+
+	return err;
+}
+
+/* Turn backplane clock on or off */
+static int
+dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+{
+#define HT_AVAIL_ERROR_MAX 10
+	static int ht_avail_error = 0;
+	int err;
+	uint8 clkctl, clkreq, devctl;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	clkctl = 0;
+	sdh = bus->sdh;
+
+
+	if (!KSO_ENAB(bus))
+		return BCME_OK;
+
+	if (SLPAUTO_ENAB(bus)) {
+		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
+		return BCME_OK;
+	}
+
+	if (on) {
+		/* Request HT Avail */
+		clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+
+
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+		if (err) {
+			ht_avail_error++;
+			if (ht_avail_error < HT_AVAIL_ERROR_MAX) {
+				DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+			else if (ht_avail_error == HT_AVAIL_ERROR_MAX) {
+				dhd_os_send_hang_message(bus->dhd);
+			}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
+			return BCME_ERROR;
+		} else {
+			ht_avail_error = 0;
+		}
+
+
+		/* Check current status */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+
+#if !defined(OOB_INTR_ONLY)
+		/* Go to pending and await interrupt if appropriate */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+			/* Allow only clock-available interrupt */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			DHD_INFO(("CLKCTL: set PENDING\n"));
+			bus->clkstate = CLK_PENDING;
+			return BCME_OK;
+		} else
+#endif /* !defined (OOB_INTR_ONLY) */
+		{
+			if (bus->clkstate == CLK_PENDING) {
+				/* Cancel CA-only interrupt filter */
+				devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+				devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			}
+		}
+
+		/* Otherwise, wait here (polling) for HT Avail */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+			                                    SBSDIO_FUNC1_CHIPCLKCSR, &err)),
+			          !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
+		}
+		if (err) {
+			DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
+			           __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
+			return BCME_ERROR;
+		}
+
+		/* Mark clock available */
+		bus->clkstate = CLK_AVAIL;
+		DHD_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(DHD_DEBUG)
+		if (bus->alp_only == TRUE) {
+#if !defined(BCMLXSDMMC)
+			if (!SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
+			}
+#endif /* !defined(BCMLXSDMMC) */
+		} else {
+			if (SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
+			}
+		}
+#endif /* defined (DHD_DEBUG) */
+
+		bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+		bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+	} else {
+		clkreq = 0;
+
+		if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+		}
+
+		bus->clkstate = CLK_SDONLY;
+		if (!SR_ENAB(bus)) {
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+			DHD_INFO(("CLKCTL: turned OFF\n"));
+			if (err) {
+				DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+	}
+	return BCME_OK;
+}
+
+/* Change idle/active SD state */
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+	int err;
+	int32 iovalue;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (on) {
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			/* Turn on clock and restore mode */
+			iovalue = 1;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			iovalue = bus->sd_mode;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error changing sd_mode: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Restore clock speed */
+			iovalue = bus->sd_divisor;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_SDONLY;
+	} else {
+		/* Stop or slow the SD clock itself */
+		if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+			DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+			           __FUNCTION__, bus->sd_divisor, bus->sd_mode));
+			return BCME_ERROR;
+		}
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			if (sd1idle) {
+				/* Change to SD1 mode and turn off clock */
+				iovalue = 1;
+				err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+				                      &iovalue, sizeof(iovalue), TRUE);
+				if (err) {
+					DHD_ERROR(("%s: error changing sd_clock: %d\n",
+					           __FUNCTION__, err));
+					return BCME_ERROR;
+				}
+			}
+
+			iovalue = 0;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error disabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Set divisor to idle value */
+			iovalue = bus->idleclock;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error changing sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_NONE;
+	}
+
+	return BCME_OK;
+}
+
+/* Transition SD and backplane clock readiness */
+static int
+dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+{
+	int ret = BCME_OK;
+#ifdef DHD_DEBUG
+	uint oldstate = bus->clkstate;
+#endif /* DHD_DEBUG */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Early exit if we're already there */
+	if (bus->clkstate == target) {
+		if (target == CLK_AVAIL) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+			bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+		}
+		return ret;
+	}
+
+	switch (target) {
+	case CLK_AVAIL:
+		/* Make sure SD clock is available */
+		if (bus->clkstate == CLK_NONE)
+			dhdsdio_sdclk(bus, TRUE);
+		/* Now request HT Avail on the backplane */
+		ret = dhdsdio_htclk(bus, TRUE, pendok);
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+			bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+		}
+		break;
+
+	case CLK_SDONLY:
+		/* Remove HT request, or bring up SD clock */
+		if (bus->clkstate == CLK_NONE)
+			ret = dhdsdio_sdclk(bus, TRUE);
+		else if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		else
+			DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+			           bus->clkstate, target));
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+		}
+		break;
+
+	case CLK_NONE:
+		/* Make sure to remove HT request */
+		if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		/* Now remove the SD clock */
+		ret = dhdsdio_sdclk(bus, FALSE);
+#ifdef DHD_DEBUG
+		if (dhd_console_ms == 0)
+#endif /* DHD_DEBUG */
+		if (bus->poll == 0)
+			dhd_os_wd_timer(bus->dhd, 0);
+		break;
+	}
+#ifdef DHD_DEBUG
+	DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+	return ret;
+}
+
+static int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+	int err = 0;
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+	          (sleep ? "SLEEP" : "WAKE"),
+	          (bus->sleeping ? "SLEEP" : "WAKE")));
+
+	if (bus->dhd->hang_was_sent)
+		return BCME_ERROR;
+
+	/* Done if we're already in the requested state */
+	if (sleep == bus->sleeping)
+		return BCME_OK;
+
+	/* Going to sleep: set the alarm and turn off the lights... */
+	if (sleep) {
+		/* Don't sleep if something is pending */
+		if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+			return BCME_BUSY;
+
+
+		if (!SLPAUTO_ENAB(bus)) {
+			/* Disable SDIO interrupts (no longer interested) */
+			bcmsdh_intr_disable(bus->sdh);
+
+			/* Make sure the controller has the bus up */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Tell device to start using OOB wakeup */
+			W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+			if (retries > retry_limit)
+				DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+			/* Turn off our contribution to the HT clock request */
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+				SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+			/* Isolate the bus */
+			if (bus->sih->chip != BCM4329_CHIP_ID &&
+				bus->sih->chip != BCM4319_CHIP_ID) {
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+					SBSDIO_DEVCTL_PADS_ISO, NULL);
+			}
+		} else {
+			/* Leave interrupts enabled since device can exit sleep and
+			 * interrupt host
+			 */
+			err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */);
+		}
+
+		/* Change state */
+		bus->sleeping = TRUE;
+		wake_up(&bus->bus_sleep);
+	} else {
+		/* Waking up: bus power up is ok, set local state */
+
+		if (!SLPAUTO_ENAB(bus)) {
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err);
+
+			/* Force pad isolation off if possible (in case power never toggled) */
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+
+			/* Make sure the controller has the bus up */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Send misc interrupt to indicate OOB not needed */
+			W_SDREG(0, &regs->tosbmailboxdata, retries);
+			if (retries <= retry_limit)
+				W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+			if (retries > retry_limit)
+				DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+			/* Make sure we have SD bus access */
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+			/* Enable interrupts again */
+			if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+				bus->intdis = FALSE;
+				bcmsdh_intr_enable(bus->sdh);
+			}
+		} else {
+			err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */);
+		}
+
+		if (err == 0) {
+			/* Change state */
+			bus->sleeping = FALSE;
+		}
+	}
+
+	return err;
+}
+
+
+#if defined(OOB_INTR_ONLY)
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(HW_OOB)
+	bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (enable == TRUE) {
+
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+	} else {
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+	}
+
+	/* Turn off our contribution to the HT clock request */
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* !defined(HW_OOB) */
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+	int ret = BCME_ERROR;
+	osl_t *osh;
+	uint datalen, prec;
+#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
+	uint8 *dump_data;
+	uint16 protocol;
+#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	osh = bus->dhd->osh;
+	datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+	/* Push the test header if doing loopback */
+	if (bus->ext_loop) {
+		uint8* data;
+		PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+		data = PKTDATA(osh, pkt);
+		*data++ = SDPCM_TEST_ECHOREQ;
+		*data++ = (uint8)bus->loopid++;
+		*data++ = (datalen >> 0);
+		*data++ = (datalen >> 8);
+		datalen += SDPCM_TEST_HDRLEN;
+	}
+#else /* SDTEST */
+	BCM_REFERENCE(datalen);
+#endif /* SDTEST */
+
+#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
+	dump_data = PKTDATA(osh, pkt);
+	dump_data += 4; /* skip 4 bytes header */
+	protocol = (dump_data[12] << 8) | dump_data[13];
+
+	if (protocol == ETHER_TYPE_802_1X) {
+		DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
+			dump_data[14], dump_data[15], dump_data[30]));
+	}
+#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
+
+#if defined(DHD_TX_DUMP) && defined(DHD_TX_FULL_DUMP)
+	{
+		int i;
+		DHD_ERROR(("TX DUMP\n"));
+
+		for (i = 0; i < (datalen - 4); i++) {
+			DHD_ERROR(("%02X ", dump_data[i]));
+			if ((i & 15) == 15)
+				printf("\n");
+		}
+		DHD_ERROR(("\n"));
+	}
+#endif /* DHD_TX_DUMP && DHD_TX_FULL_DUMP */
+
+	prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+
+	/* Check for existing queue, current flow-control, pending event, or pending clock */
+	if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+	    (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+	    (bus->clkstate != CLK_AVAIL)) {
+		bool deq_ret;
+		int pkq_len;
+
+		DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__, pktq_len(&bus->txq)));
+		bus->fcqueued++;
+
+		/* Priority based enq */
+		dhd_os_sdlock_txq(bus->dhd);
+		deq_ret = dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec);
+		dhd_os_sdunlock_txq(bus->dhd);
+
+		if (!deq_ret) {
+#ifdef PROP_TXSTATUS
+			if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) == 0)
+#endif /* PROP_TXSTATUS */
+			{
+#ifdef DHDTCPACK_SUPPRESS
+				if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+					DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using\n",
+						__FUNCTION__, __LINE__));
+					dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+				}
+#endif /* DHDTCPACK_SUPPRESS */
+				dhd_txcomplete(bus->dhd, pkt, FALSE);
+				PKTFREE(osh, pkt, TRUE);
+			}
+			ret = BCME_NORESOURCE;
+		} else
+			ret = BCME_OK;
+
+		dhd_os_sdlock_txq(bus->dhd);
+		pkq_len = pktq_len(&bus->txq);
+		dhd_os_sdunlock_txq(bus->dhd);
+		if (pkq_len >= FCHI) {
+			bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+			wlfc_enabled = (dhd_wlfc_flowcontrol(bus->dhd, ON, FALSE) !=
+				WLFC_UNSUPPORTED);
+#endif
+			if (!wlfc_enabled && dhd_doflow) {
+				dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+			}
+		}
+
+#ifdef DHD_DEBUG
+		dhd_os_sdlock_txq(bus->dhd);
+		if (pktq_plen(&bus->txq, prec) > qcount[prec])
+			qcount[prec] = pktq_plen(&bus->txq, prec);
+		dhd_os_sdunlock_txq(bus->dhd);
+#endif
+
+		/* Schedule DPC if needed to send queued packet(s) */
+		if (dhd_deferred_tx && !bus->dpc_sched) {
+			bus->dpc_sched = TRUE;
+			dhd_sched_dpc(bus->dhd);
+		}
+	} else {
+		int chan = SDPCM_DATA_CHANNEL;
+
+#ifdef SDTEST
+		chan = (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL);
+#endif
+		/* Lock: we're about to use shared data/code (and SDIO) */
+		dhd_os_sdlock(bus->dhd);
+
+		/* Otherwise, send it now */
+		BUS_WAKE(bus);
+		/* Make sure back plane ht clk is on, no pending allowed */
+		dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+
+		ret = dhdsdio_txpkt(bus, chan, &pkt, 1, TRUE);
+
+		if (ret != BCME_OK)
+			bus->dhd->tx_errors++;
+		else
+			bus->dhd->dstats.tx_bytes += datalen;
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+			bus->activity = FALSE;
+			dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+	}
+
+	return ret;
+}
+
+/* align packet data pointer and packet length to n-byte boundary, process packet headers,
+ * a new packet may be allocated if there is not enough head and/or tail room for padding.
+ * the caller is responsible for updating the glom size in the head packet (when glom is
+ * used)
+ *
+ * pad_pkt_len: returns the length of extra padding needed from the padding packet; this parameter
+ * is only used in tx glom mode
+ *
+ * new_pkt: out, pointer to the new packet allocated due to insufficient head room for alignment
+ * padding, NULL if not needed, the caller is responsible for freeing the new packet
+ *
+ * return: positive value - length of the packet, including head and tail padding
+ *		   negative value - errors
+ */
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+	int prev_chain_total_len, bool last_chained_pkt,
+	int *pad_pkt_len, void **new_pkt)
+{
+	osl_t *osh;
+	uint8 *frame;
+	int pkt_len;
+	int modulo;
+	int head_padding;
+	int tail_padding = 0;
+	uint32 swheader;
+	uint32 swhdr_offset;
+	bool alloc_new_pkt = FALSE;
+	uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+
+	*new_pkt = NULL;
+	osh = bus->dhd->osh;
+
+#ifdef DHDTCPACK_SUPPRESS
+	if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+		DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+			__FUNCTION__, __LINE__));
+		dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+	}
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Add space for the SDPCM hardware/software headers */
+	PKTPUSH(osh, pkt, sdpcm_hdrlen);
+	ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+	frame = (uint8*)PKTDATA(osh, pkt);
+	pkt_len = (uint16)PKTLEN(osh, pkt);
+
+#ifdef WLMEDIA_HTSF
+	frame = (uint8*)PKTDATA(osh, pkt);
+	if (PKTLEN(osh, pkt) >= 100) {
+		htsf_ts = (htsfts_t*) (frame + HTSF_HOSTOFFSET + 12);
+		if (htsf_ts->magic == HTSFMAGIC) {
+			htsf_ts->c20 = get_cycles();
+			htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+#ifdef DHD_DEBUG
+	if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets))
+		tx_packets[PKTPRIO(pkt)]++;
+#endif /* DHD_DEBUG */
+
+	/* align the data pointer, allocate a new packet if there is not enough space (new
+	 * packet data pointer will be aligned thus no padding will be needed)
+	 */
+	head_padding = (ulong)frame % DHD_SDALIGN;
+	if (PKTHEADROOM(osh, pkt) < head_padding) {
+		head_padding = 0;
+		alloc_new_pkt = TRUE;
+	} else {
+		uint cur_chain_total_len;
+		int chain_tail_padding = 0;
+
+		/* All packets need to be aligned by DHD_SDALIGN */
+		modulo = (pkt_len + head_padding) % DHD_SDALIGN;
+		tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+
+		/* Total pkt chain length needs to be aligned by block size,
+		 * unless it is a single pkt chain with total length less than one block size,
+		 * which we prefer sending by byte mode.
+		 *
+		 * Do the chain alignment here if
+		 * 1. This is the last pkt of the chain of multiple pkts or a single pkt.
+		 * 2-1. This chain is of multiple pkts, or
+		 * 2-2. This is a single pkt whose size is longer than one block size.
+		 */
+		cur_chain_total_len = prev_chain_total_len +
+			(head_padding + pkt_len + tail_padding);
+		if (last_chained_pkt && bus->blocksize != 0 &&
+			(cur_chain_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+			modulo = cur_chain_total_len % bus->blocksize;
+			chain_tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+		}
+
+#ifdef DHDENABLE_TAILPAD
+		if (PKTTAILROOM(osh, pkt) < tail_padding) {
+			/* We don't have tail room to align by DHD_SDALIGN */
+			alloc_new_pkt = TRUE;
+			bus->tx_tailpad_pktget++;
+		} else if (PKTTAILROOM(osh, pkt) < tail_padding + chain_tail_padding) {
+			/* We have tail room for tail_padding of this pkt itself, but not for
+			 * total pkt chain alignment by block size.
+			 * Use the padding packet to avoid memory copy if applicable,
+			 * otherwise, just allocate a new pkt.
+			 */
+			if (bus->pad_pkt) {
+				*pad_pkt_len = chain_tail_padding;
+				bus->tx_tailpad_chain++;
+			} else {
+				alloc_new_pkt = TRUE;
+				bus->tx_tailpad_pktget++;
+			}
+		} else
+		/* This last pkt's tailroom is sufficient to hold both tail_padding
+		 * of the pkt itself and chain_tail_padding of total pkt chain
+		 */
+#endif /* DHDENABLE_TAILPAD */
+		tail_padding += chain_tail_padding;
+	}
+
+	DHD_INFO(("%s sdhdr len + orig_pkt_len %d h_pad %d t_pad %d pad_pkt_len %d\n",
+		__FUNCTION__, pkt_len, head_padding, tail_padding, *pad_pkt_len));
+
+	if (alloc_new_pkt) {
+		void *tmp_pkt;
+		int newpkt_size;
+		int cur_total_len;
+
+		ASSERT(*pad_pkt_len == 0);
+
+		DHD_INFO(("%s allocating new packet for padding\n", __FUNCTION__));
+
+		/* head pointer is aligned now, no padding needed */
+		head_padding = 0;
+
+		/* update the tail padding as it depends on the head padding; since a new packet is
+		 * allocated, the head padding is no longer needed and the packet length has changed
+		 */
+
+		cur_total_len = prev_chain_total_len + pkt_len;
+		if (last_chained_pkt && bus->blocksize != 0 &&
+			(cur_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+			modulo = cur_total_len % bus->blocksize;
+			tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+		}
+		else {
+			modulo = pkt_len % DHD_SDALIGN;
+			tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+		}
+
+		newpkt_size = PKTLEN(osh, pkt) + bus->blocksize + DHD_SDALIGN;
+		bus->dhd->tx_realloc++;
+		tmp_pkt = PKTGET(osh, newpkt_size, TRUE);
+		if (tmp_pkt == NULL) {
+			DHD_ERROR(("failed to alloc new %d byte packet\n", newpkt_size));
+			return BCME_NOMEM;
+		}
+		PKTALIGN(osh, tmp_pkt, PKTLEN(osh, pkt), DHD_SDALIGN);
+		bcopy(PKTDATA(osh, pkt), PKTDATA(osh, tmp_pkt), PKTLEN(osh, pkt));
+		*new_pkt = tmp_pkt;
+		pkt = tmp_pkt;
+	}
+
+	if (head_padding)
+		PKTPUSH(osh, pkt, head_padding);
+
+	frame = (uint8*)PKTDATA(osh, pkt);
+	bzero(frame, head_padding + sdpcm_hdrlen);
+	pkt_len = (uint16)PKTLEN(osh, pkt);
+
+	/* the header has the following format
+	 * 4-byte HW frame tag: length, ~length (for glom this is the total length)
+	 *
+	 * 8-byte HW extension flags (glom mode only), as follows:
+	 *			2-byte packet length, excluding HW tag and padding
+	 *			2-byte frame channel and frame flags (e.g. next frame following)
+	 *			2-byte header length
+	 *			2-byte tail padding size
+	 *
+	 * 8-byte SW frame tags, as follows:
+	 *			4-byte flags: host tx seq, channel, data offset
+	 *			4-byte flags: TBD
+	 */
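+
+	/* Illustrative layout (values invented for the example): a non-glom data
+	 * frame with pkt_len 0x0640 stores 0x40 0x06 0xbf 0xf9 in the 4-byte HW
+	 * frame tag (length and ~length, little-endian), and the SW header that
+	 * follows packs the tx sequence number, the channel and the data offset
+	 * (head padding + SDPCM header length) into its first 4-byte flags word.
+	 */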
+
+	swhdr_offset = SDPCM_FRAMETAG_LEN;
+
+	/* hardware frame tag:
+	 *
+	 * in tx-glom mode, the dongle only checks the hardware frame tag in the first
+	 * packet and sees it as the total length of the glom (including tail padding);
+	 * for each packet in the glom, the packet length needs to be updated (see
+	 * PKTSETLEN below)
+	 *
+	 * in non tx-glom mode, PKTLEN still needs to include tail padding, as it is
+	 * referred to in sdioh_request_buffer(). The tail length will be excluded in
+	 * dhdsdio_txpkt_postprocess().
+	 */
+	*(uint16*)frame = (uint16)htol16(pkt_len);
+	*(((uint16*)frame) + 1) = (uint16)htol16(~pkt_len);
+	pkt_len += tail_padding;
+
+	/* hardware extension flags */
+	if (bus->txglom_enable) {
+		uint32 hwheader1;
+		uint32 hwheader2;
+
+		swhdr_offset += SDPCM_HWEXT_LEN;
+		hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) |
+			(last_chained_pkt << 24);
+		hwheader2 = (tail_padding) << 16;
+		htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+		htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+	}
+	PKTSETLEN(osh, pkt, pkt_len);
+
+	/* software frame tags */
+	swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+		| (txseq % SDPCM_SEQUENCE_WRAP) |
+		(((head_padding + sdpcm_hdrlen) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+	htol32_ua_store(swheader, frame + swhdr_offset);
+	htol32_ua_store(0, frame + swhdr_offset + sizeof(swheader));
+
+	return pkt_len;
+}
+
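+/* Undo the per-packet changes made by dhdsdio_txpkt_preprocess() once the SDIO
+ * transfer is done: strip the tail padding from PKTLEN and pull the SDPCM header
+ * plus any head padding, so the original payload is handed back to the upper layer.
+ */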
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt)
+{
+	osl_t *osh;
+	uint8 *frame;
+	int data_offset;
+	int tail_padding;
+	int swhdr_offset = SDPCM_FRAMETAG_LEN + (bus->txglom_enable ? SDPCM_HWEXT_LEN : 0);
+
+	(void)osh;
+	osh = bus->dhd->osh;
+
+	/* restore the pkt buffer pointer, but keep the header pushed by dhd_prot_hdrpush */
+	frame = (uint8*)PKTDATA(osh, pkt);
+
+	DHD_INFO(("%s PKTLEN before postprocess %d",
+		__FUNCTION__, PKTLEN(osh, pkt)));
+
+	/* PKTLEN still includes tail_padding, so exclude it.
+	 * We shall have head_padding + original pkt_len for PKTLEN afterwards.
+	 */
+	if (bus->txglom_enable) {
+		/* txglom pkts have tail_padding length in HW ext header */
+		tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
+		PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding);
+		DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n",
+			tail_padding, PKTLEN(osh, pkt)));
+	} else {
+		/* non-txglom pkts have head_padding + original pkt length in HW frame tag.
+		 * We cannot refer to this field for txglom pkts as the first pkt of the chain will
+		 * have the field for the total length of the chain.
+		 */
+		PKTSETLEN(osh, pkt, *(uint16*)frame);
+		DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n",
+			*(uint16*)frame, PKTLEN(osh, pkt)));
+	}
+
+	data_offset = ltoh32_ua(frame + swhdr_offset);
+	data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
+	/* Get rid of sdpcm header + head_padding */
+	PKTPULL(osh, pkt, data_offset);
+
+	DHD_INFO(("%s data_offset %d, PKTLEN %d\n",
+		__FUNCTION__, data_offset, PKTLEN(osh, pkt)));
+
+	return BCME_OK;
+}
+
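+/* Transmit one packet or a chain of up to num_pkt packets on the given SDPCM
+ * channel: preprocess each packet (alignment, SDPCM headers), link them together,
+ * issue a single dhd_bcmsdh_send_buf(), then post-process and complete the packets.
+ */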
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt)
+{
+	int i;
+	int ret = 0;
+	osl_t *osh;
+	bcmsdh_info_t *sdh;
+	void *pkt = NULL;
+	void *pkt_chain;
+	int total_len = 0;
+	void *head_pkt = NULL;
+	void *prev_pkt = NULL;
+	int pad_pkt_len = 0;
+	int new_pkt_num = 0;
+	void *new_pkts[MAX_TX_PKTCHAIN_CNT];
+	bool wlfc_enabled = FALSE;
+
+	if (bus->dhd->dongle_reset)
+		return BCME_NOTREADY;
+
+	sdh = bus->sdh;
+	osh = bus->dhd->osh;
+	/* init new_pkts[0] to keep some compilers happy; not strictly necessary as we check new_pkt_num */
+	new_pkts[0] = NULL;
+
+	for (i = 0; i < num_pkt; i++) {
+		int pkt_len;
+		bool last_pkt;
+		void *new_pkt = NULL;
+
+		pkt = pkts[i];
+		ASSERT(pkt);
+		last_pkt = (i == num_pkt - 1);
+		pkt_len = dhdsdio_txpkt_preprocess(bus, pkt, chan, bus->tx_seq + i,
+			total_len, last_pkt, &pad_pkt_len, &new_pkt);
+		if (pkt_len <= 0)
+			goto done;
+		if (new_pkt) {
+			pkt = new_pkt;
+			new_pkts[new_pkt_num++] = new_pkt;
+		}
+		total_len += pkt_len;
+
+		PKTSETNEXT(osh, pkt, NULL);
+		/* insert the packet into the list */
+		head_pkt ? PKTSETNEXT(osh, prev_pkt, pkt) : (head_pkt = pkt);
+		prev_pkt = pkt;
+
+	}
+
+	/* Update the HW frame tag (total length) in the first pkt of the glom */
+	if (bus->txglom_enable) {
+		uint8 *frame;
+
+		total_len += pad_pkt_len;
+		frame = (uint8*)PKTDATA(osh, head_pkt);
+		*(uint16*)frame = (uint16)htol16(total_len);
+		*(((uint16*)frame) + 1) = (uint16)htol16(~total_len);
+
+	}
+
+#ifdef DHDENABLE_TAILPAD
+	/* if a padding packet is needed, insert it at the end of the linked list */
+	if (pad_pkt_len) {
+		PKTSETLEN(osh, bus->pad_pkt, pad_pkt_len);
+		PKTSETNEXT(osh, pkt, bus->pad_pkt);
+	}
+#endif /* DHDENABLE_TAILPAD */
+
+	/* dhd_bcmsdh_send_buf ignores the buffer pointer if the packet
+	 * parameter is not NULL; for a non packet chain we pass a NULL pkt pointer
+	 * so it will take the aligned length and buffer pointer.
+	 */
+	pkt_chain = PKTNEXT(osh, head_pkt) ? head_pkt : NULL;
+	ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES);
+	if (ret == BCME_OK)
+		bus->tx_seq = (bus->tx_seq + num_pkt) % SDPCM_SEQUENCE_WRAP;
+
+	/* if a padding packet was needed, remove it from the linked list as it is not a data pkt */
+	if (pad_pkt_len && pkt)
+		PKTSETNEXT(osh, pkt, NULL);
+
+done:
+	pkt = head_pkt;
+	while (pkt) {
+		void *pkt_next = PKTNEXT(osh, pkt);
+		PKTSETNEXT(osh, pkt, NULL);
+		dhdsdio_txpkt_postprocess(bus, pkt);
+		pkt = pkt_next;
+	}
+
+	/* new packets might have been allocated due to insufficient room for padding, but we
+	 * still have to indicate the original packets to the upper layer
+	 */
+	for (i = 0; i < num_pkt; i++) {
+		pkt = pkts[i];
+		wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+		if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt))) {
+			wlfc_enabled = (dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0) !=
+				WLFC_UNSUPPORTED);
+		}
+#endif /* PROP_TXSTATUS */
+		if (!wlfc_enabled) {
+			PKTSETNEXT(osh, pkt, NULL);
+			dhd_txcomplete(bus->dhd, pkt, ret != 0);
+			if (free_pkt)
+				PKTFREE(osh, pkt, TRUE);
+		}
+	}
+
+	for (i = 0; i < new_pkt_num; i++)
+		PKTFREE(osh, new_pkts[i], TRUE);
+
+	return ret;
+}
+
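+/* Dequeue up to maxframes data packets from the precedence-ordered tx queue and
+ * send them (as glom chains when tx glomming is enabled); returns the number of
+ * frames sent and releases host flow control once the queue drops below FCLOW.
+ */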
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+	uint cnt = 0;
+	uint8 tx_prec_map;
+	uint16 txpktqlen = 0;
+	uint32 intstatus = 0;
+	uint retries = 0;
+	osl_t *osh;
+	uint datalen = 0;
+	dhd_pub_t *dhd = bus->dhd;
+	sdpcmd_regs_t *regs = bus->regs;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	osh = dhd->osh;
+	tx_prec_map = ~bus->flowcontrol;
+	for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) {
+		int i;
+		int num_pkt = 1;
+		void *pkts[MAX_TX_PKTCHAIN_CNT];
+		int prec_out;
+
+		dhd_os_sdlock_txq(bus->dhd);
+		if (bus->txglom_enable) {
+			num_pkt = MIN((uint32)DATABUFCNT(bus), (uint32)bus->txglomsize);
+			num_pkt = MIN(num_pkt, ARRAYSIZE(pkts));
+		}
+		num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map));
+		for (i = 0; i < num_pkt; i++) {
+			pkts[i] = pktq_mdeq(&bus->txq, ~bus->flowcontrol, &prec_out);
+			if (!pkts[i]) {
+				DHD_ERROR(("%s: pktq_mlen non-zero when no pkt\n",
+					__FUNCTION__));
+				ASSERT(0);
+				break;
+			}
+			PKTORPHAN(pkts[i]);
+			datalen += PKTLEN(osh, pkts[i]);
+		}
+		dhd_os_sdunlock_txq(bus->dhd);
+
+		if (i == 0)
+			break;
+		if (dhdsdio_txpkt(bus, SDPCM_DATA_CHANNEL, pkts, i, TRUE) != BCME_OK)
+			dhd->tx_errors++;
+		else
+			dhd->dstats.tx_bytes += datalen;
+		cnt += i;
+
+		/* In poll mode, need to check for other events */
+		if (!bus->intr && cnt)
+		{
+			/* Check device status, signal pending interrupt */
+			R_SDREG(intstatus, &regs->intstatus, retries);
+			bus->f2txdata++;
+			if (bcmsdh_regfail(bus->sdh))
+				break;
+			if (intstatus & bus->hostintmask)
+				bus->ipend = TRUE;
+		}
+
+	}
+
+	dhd_os_sdlock_txq(bus->dhd);
+	txpktqlen = pktq_len(&bus->txq);
+	dhd_os_sdunlock_txq(bus->dhd);
+
+	/* Do flow-control if needed */
+	if (dhd->up && (dhd->busstate == DHD_BUS_DATA) && (txpktqlen < FCLOW)) {
+		bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+		wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, OFF, TRUE) != WLFC_UNSUPPORTED);
+#endif
+		if (!wlfc_enabled && dhd_doflow && dhd->txoff) {
+			dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+		}
+	}
+
+	return cnt;
+}
+
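+/* Send a control frame that dhd_bus_txctl() deferred for lack of tx credit;
+ * called from the DPC path. The sequence number stored in the frame is corrected
+ * to the current bus->tx_seq before sending.
+ */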
+static void
+dhdsdio_sendpendctl(dhd_bus_t *bus)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	int ret;
+	uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN;
+
+	if (bus->txglom_enable)
+		frame_seq += SDPCM_HWEXT_LEN;
+
+	if (*frame_seq != bus->tx_seq) {
+		DHD_INFO(("%s IOCTL frame seq lag detected!"
+			" frm_seq:%d != bus->tx_seq:%d, corrected\n",
+			__FUNCTION__, *frame_seq, bus->tx_seq));
+		*frame_seq = bus->tx_seq;
+	}
+
+	ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		(uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+		NULL, NULL, NULL, 1);
+	if (ret == BCME_OK)
+		bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+	bus->ctrl_frame_stat = FALSE;
+	dhd_wait_event_wakeup(bus->dhd);
+}
+
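+/* Transmit a control (ioctl) frame to the dongle: prepend the SDPCM header,
+ * apply alignment and block-size padding, and either send immediately or hand
+ * the frame to the DPC (dhdsdio_sendpendctl) when no tx credit is available.
+ */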
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	static int err_nodevice = 0;
+	uint8 *frame;
+	uint16 len;
+	uint32 swheader;
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint8 doff = 0;
+	int ret = -1;
+	uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Back up the pointer to make room for the bus header */
+	frame = msg - sdpcm_hdrlen;
+	len = (msglen += sdpcm_hdrlen);
+
+	/* Add alignment padding (optional for ctl frames) */
+	if (dhd_alignctl) {
+		if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+			frame -= doff;
+			len += doff;
+			msglen += doff;
+			bzero(frame, doff + sdpcm_hdrlen);
+		}
+		ASSERT(doff < DHD_SDALIGN);
+	}
+	doff += sdpcm_hdrlen;
+
+	/* Round send length to next SDIO block */
+	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+		uint16 pad = bus->blocksize - (len % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize))
+			len += pad;
+	} else if (len % DHD_SDALIGN) {
+		len += DHD_SDALIGN - (len % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (len & (ALIGNMENT - 1)))
+		len = ROUNDUP(len, ALIGNMENT);
+
+	ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+	/* Need to lock here to protect txseq and SDIO tx calls */
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+	*(uint16*)frame = htol16((uint16)msglen);
+	*(((uint16*)frame) + 1) = htol16(~msglen);
+
+	if (bus->txglom_enable) {
+		uint32 hwheader1, hwheader2;
+		/* Software tag: channel, sequence number, data offset */
+		swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+				| bus->tx_seq
+				| ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+		htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
+		htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN
+			+ SDPCM_HWEXT_LEN + sizeof(swheader));
+
+		hwheader1 = (msglen - SDPCM_FRAMETAG_LEN) | (1 << 24);
+		hwheader2 = (len - (msglen)) << 16;
+		htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+		htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+
+		*(uint16*)frame = htol16(len);
+		*(((uint16*)frame) + 1) = htol16(~(len));
+	} else {
+		/* Software tag: channel, sequence number, data offset */
+		swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+		        | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+		htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+		htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+	}
+	if (!TXCTLOK(bus)) {
+		DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+			__FUNCTION__, bus->tx_max, bus->tx_seq));
+		bus->ctrl_frame_stat = TRUE;
+		/* Send from dpc */
+		bus->ctrl_frame_buf = frame;
+		bus->ctrl_frame_len = len;
+
+		if (!bus->dpc_sched) {
+			bus->dpc_sched = TRUE;
+			dhd_sched_dpc(bus->dhd);
+		}
+		if (bus->ctrl_frame_stat) {
+			dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+		}
+
+		if (bus->ctrl_frame_stat == FALSE) {
+			DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+			ret = 0;
+		} else {
+			bus->dhd->txcnt_timeout++;
+			if (!bus->dhd->hang_was_sent) {
+				DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
+					__FUNCTION__, bus->dhd->txcnt_timeout));
+			}
+			ret = -1;
+			bus->ctrl_frame_stat = FALSE;
+			goto done;
+		}
+	}
+
+	bus->dhd->txcnt_timeout = 0;
+	bus->ctrl_frame_stat = TRUE;
+
+	if (ret == -1) {
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+			prhex("Tx Frame", frame, len);
+		} else if (DHD_HDRS_ON()) {
+			prhex("TxHdr", frame, MIN(len, 16));
+		}
+#endif
+		ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                          frame, len, NULL, NULL, NULL, TXRETRIES);
+		if (ret == BCME_OK)
+			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+	}
+	bus->ctrl_frame_stat = FALSE;
+
+done:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	if (ret)
+		bus->dhd->tx_ctlerrs++;
+	else
+		bus->dhd->tx_ctlpkts++;
+
+	if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT)
+		return -ETIMEDOUT;
+
+	if (ret == BCME_NODEVICE)
+		err_nodevice++;
+	else
+		err_nodevice = 0;
+
+	return ret ? err_nodevice >= ERROR_BCME_NODEVICE_MAX ? -ETIMEDOUT : -EIO : 0;
+}
+
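+/* Wait for the dongle's response to a control frame and copy it into msg;
+ * returns the received length, or a negative errno on timeout, pending signal
+ * or dongle trap.
+ */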
+int
+dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	int timeleft;
+	uint rxlen = 0;
+	bool pending;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Wait until control frame is available */
+	timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+
+	dhd_os_sdlock(bus->dhd);
+	rxlen = bus->rxlen;
+	bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+	bus->rxlen = 0;
+	dhd_os_sdunlock(bus->dhd);
+
+	if (rxlen) {
+		DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+			__FUNCTION__, rxlen, msglen));
+	} else if (timeleft == 0) {
+#ifdef DHD_DEBUG
+		uint32 status, retry = 0;
+		R_SDREG(status, &bus->regs->intstatus, retry);
+		DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n",
+			__FUNCTION__, status));
+#else
+		DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#endif /* DHD_DEBUG */
+#ifdef DHD_DEBUG
+			dhd_os_sdlock(bus->dhd);
+			dhdsdio_checkdied(bus, NULL, 0);
+			dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+	} else if (pending == TRUE) {
+		/* signal pending */
+		DHD_ERROR(("%s: signal pending\n", __FUNCTION__));
+		return -EINTR;
+
+	} else {
+		DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+#ifdef DHD_DEBUG
+		dhd_os_sdlock(bus->dhd);
+		dhdsdio_checkdied(bus, NULL, 0);
+		dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+	}
+	if (timeleft == 0) {
+		if (rxlen == 0)
+			bus->dhd->rxcnt_timeout++;
+		DHD_ERROR(("%s: rxcnt_timeout=%d, rxlen=%d\n", __FUNCTION__,
+			bus->dhd->rxcnt_timeout, rxlen));
+	}
+	else
+		bus->dhd->rxcnt_timeout = 0;
+
+	if (rxlen)
+		bus->dhd->rx_ctlpkts++;
+	else
+		bus->dhd->rx_ctlerrs++;
+
+	if (bus->dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT)
+		return -ETIMEDOUT;
+
+	if (bus->dhd->dongle_trap_occured)
+		return -EREMOTEIO;
+
+	return rxlen ? (int)rxlen : -EIO;
+}
+
+/* IOVar table */
+enum {
+	IOV_INTR = 1,
+	IOV_POLLRATE,
+	IOV_SDREG,
+	IOV_SBREG,
+	IOV_SDCIS,
+	IOV_MEMBYTES,
+	IOV_RAMSIZE,
+	IOV_RAMSTART,
+#ifdef DHD_DEBUG
+	IOV_CHECKDIED,
+	IOV_SERIALCONS,
+#endif /* DHD_DEBUG */
+	IOV_SET_DOWNLOAD_STATE,
+	IOV_SOCRAM_STATE,
+	IOV_FORCEEVEN,
+	IOV_SDIOD_DRIVE,
+	IOV_READAHEAD,
+	IOV_SDRXCHAIN,
+	IOV_ALIGNCTL,
+	IOV_SDALIGN,
+	IOV_DEVRESET,
+	IOV_CPU,
+#if defined(USE_SDIOFIFO_IOVAR)
+	IOV_WATERMARK,
+	IOV_MESBUSYCTRL,
+#endif /* USE_SDIOFIFO_IOVAR */
+#ifdef SDTEST
+	IOV_PKTGEN,
+	IOV_EXTLOOP,
+#endif /* SDTEST */
+	IOV_SPROM,
+	IOV_TXBOUND,
+	IOV_RXBOUND,
+	IOV_TXMINMAX,
+	IOV_IDLETIME,
+	IOV_IDLECLOCK,
+	IOV_SD1IDLE,
+	IOV_SLEEP,
+	IOV_DONGLEISOLATION,
+	IOV_KSO,
+	IOV_DEVSLEEP,
+	IOV_DEVCAP,
+	IOV_VARS,
+#ifdef SOFTAP
+	IOV_FWPATH,
+#endif
+	IOV_TXGLOMSIZE,
+	IOV_TXGLOMMODE,
+	IOV_HANGREPORT,
+	IOV_TXINRX_THRES
+};
+
+const bcm_iovar_t dhdsdio_iovars[] = {
+	{"intr",	IOV_INTR,	0,	IOVT_BOOL,	0 },
+	{"sleep",	IOV_SLEEP,	0,	IOVT_BOOL,	0 },
+	{"pollrate",	IOV_POLLRATE,	0,	IOVT_UINT32,	0 },
+	{"idletime",	IOV_IDLETIME,	0,	IOVT_INT32,	0 },
+	{"idleclock",	IOV_IDLECLOCK,	0,	IOVT_INT32,	0 },
+	{"sd1idle",	IOV_SD1IDLE,	0,	IOVT_BOOL,	0 },
+	{"membytes",	IOV_MEMBYTES,	0,	IOVT_BUFFER,	2 * sizeof(int) },
+	{"ramsize",	IOV_RAMSIZE,	0,	IOVT_UINT32,	0 },
+	{"ramstart",	IOV_RAMSTART,	0,	IOVT_UINT32,	0 },
+	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0,	IOVT_BOOL,	0 },
+	{"socram_state",	IOV_SOCRAM_STATE,	0,	IOVT_BOOL,	0 },
+	{"vars",	IOV_VARS,	0,	IOVT_BUFFER,	0 },
+	{"sdiod_drive",	IOV_SDIOD_DRIVE, 0,	IOVT_UINT32,	0 },
+	{"readahead",	IOV_READAHEAD,	0,	IOVT_BOOL,	0 },
+	{"sdrxchain",	IOV_SDRXCHAIN,	0,	IOVT_BOOL,	0 },
+	{"alignctl",	IOV_ALIGNCTL,	0,	IOVT_BOOL,	0 },
+	{"sdalign",	IOV_SDALIGN,	0,	IOVT_BOOL,	0 },
+	{"devreset",	IOV_DEVRESET,	0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"sdreg",	IOV_SDREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sbreg",	IOV_SBREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_cis",	IOV_SDCIS,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+	{"forcealign",	IOV_FORCEEVEN,	0,	IOVT_BOOL,	0 },
+	{"txbound",	IOV_TXBOUND,	0,	IOVT_UINT32,	0 },
+	{"rxbound",	IOV_RXBOUND,	0,	IOVT_UINT32,	0 },
+	{"txminmax",	IOV_TXMINMAX,	0,	IOVT_UINT32,	0 },
+	{"cpu",		IOV_CPU,	0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"checkdied",	IOV_CHECKDIED,	0,	IOVT_BUFFER,	0 },
+	{"serial",	IOV_SERIALCONS,	0,	IOVT_UINT32,	0 },
+#endif /* DHD_DEBUG  */
+#endif /* DHD_DEBUG */
+#ifdef SDTEST
+	{"extloop",	IOV_EXTLOOP,	0,	IOVT_BOOL,	0 },
+	{"pktgen",	IOV_PKTGEN,	0,	IOVT_BUFFER,	sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+#if defined(USE_SDIOFIFO_IOVAR)
+	{"watermark",	IOV_WATERMARK,	0,	IOVT_UINT32,	0 },
+	{"mesbusyctrl",	IOV_MESBUSYCTRL,	0,	IOVT_UINT32,	0 },
+#endif /* USE_SDIOFIFO_IOVAR */
+	{"devcap", IOV_DEVCAP,	0,	IOVT_UINT32,	0 },
+	{"dngl_isolation", IOV_DONGLEISOLATION,	0,	IOVT_UINT32,	0 },
+	{"kso",	IOV_KSO,	0,	IOVT_UINT32,	0 },
+	{"devsleep", IOV_DEVSLEEP,	0,	IOVT_UINT32,	0 },
+#ifdef SOFTAP
+	{"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 },
+#endif
+	{"txglomsize", IOV_TXGLOMSIZE, 0, IOVT_UINT32, 0 },
+	{"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 },
+	{"txinrx_thres", IOV_TXINRX_THRES, 0, IOVT_INT32, 0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
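+/* Print num/div as a ratio with two decimal places (e.g. packets per SDIO
+ * transaction) using integer math only; prints "N/A" when the divisor is zero.
+ */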
+static void
+dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+{
+	uint q1, q2;
+
+	if (!div) {
+		bcm_bprintf(strbuf, "%s N/A", desc);
+	} else {
+		q1 = num / div;
+		q2 = (100 * (num - (q1 * div))) / div;
+		bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
+	}
+}
+
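+/* Dump the SDIO bus state and the transfer/interrupt counters into strbuf. */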
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	bcm_bprintf(strbuf, "Bus SDIO structure:\n");
+	bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
+	            bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
+	bcm_bprintf(strbuf, "fcstate %d qlen %u tx_seq %d, max %d, rxskip %d rxlen %u rx_seq %d\n",
+	            bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+	            bus->rxlen, bus->rx_seq);
+	bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n",
+	            bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
+	bcm_bprintf(strbuf, "pollrate %u pollcnt %u regfails %u\n",
+	            bus->pollrate, bus->pollcnt, bus->regfails);
+
+	bcm_bprintf(strbuf, "\nAdditional counters:\n");
+#ifdef DHDENABLE_TAILPAD
+	bcm_bprintf(strbuf, "tx_tailpad_chain %u tx_tailpad_pktget %u\n",
+	            bus->tx_tailpad_chain, bus->tx_tailpad_pktget);
+#endif /* DHDENABLE_TAILPAD */
+	bcm_bprintf(strbuf, "tx_sderrs %u fcqueued %u rxrtx %u rx_toolong %u rxc_errors %u\n",
+	            bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
+	            bus->rxc_errors);
+	bcm_bprintf(strbuf, "rx_hdrfail %u badhdr %u badseq %u\n",
+	            bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
+	bcm_bprintf(strbuf, "fc_rcvd %u, fc_xoff %u, fc_xon %u\n",
+	            bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+	bcm_bprintf(strbuf, "rxglomfail %u, rxglomframes %u, rxglompkts %u\n",
+	            bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
+	bcm_bprintf(strbuf, "f2rx (hdrs/data) %u (%u/%u), f2tx %u f1regs %u\n",
+	            (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
+	            bus->f2txdata, bus->f1regdata);
+	{
+		dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+		             bus->dhd->rx_packets);
+		dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+		             (bus->f2txdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Total: pkts/f2rw",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
+		bcm_bprintf(strbuf, "\n\n");
+	}
+
+#ifdef SDTEST
+	if (bus->pktgen_count) {
+		bcm_bprintf(strbuf, "pktgen config and count:\n");
+		bcm_bprintf(strbuf, "freq %u count %u print %u total %u min %u len %u\n",
+		            bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
+		            bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
+		bcm_bprintf(strbuf, "send attempts %u rcvd %u fail %u\n",
+		            bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+	}
+#endif /* SDTEST */
+#ifdef DHD_DEBUG
+	bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+	            bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
+	bcm_bprintf(strbuf, "blocksize %u roundup %u\n", bus->blocksize, bus->roundup);
+#endif /* DHD_DEBUG */
+	bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
+	            bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
+}
+
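+/* Reset the bus statistics counters reported by dhd_bus_dump(). */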
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+	bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
+	bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
+	bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+#ifdef DHDENABLE_TAILPAD
+	bus->tx_tailpad_chain = bus->tx_tailpad_pktget = 0;
+#endif /* DHDENABLE_TAILPAD */
+	bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
+	bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
+	bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
+}
+
+#ifdef SDTEST
+static int
+dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+
+	pktgen.version = DHD_PKTGEN_VERSION;
+	pktgen.freq = bus->pktgen_freq;
+	pktgen.count = bus->pktgen_count;
+	pktgen.print = bus->pktgen_print;
+	pktgen.total = bus->pktgen_total;
+	pktgen.minlen = bus->pktgen_minlen;
+	pktgen.maxlen = bus->pktgen_maxlen;
+	pktgen.numsent = bus->pktgen_sent;
+	pktgen.numrcvd = bus->pktgen_rcvd;
+	pktgen.numfail = bus->pktgen_fail;
+	pktgen.mode = bus->pktgen_mode;
+	pktgen.stop = bus->pktgen_stop;
+
+	bcopy(&pktgen, arg, sizeof(pktgen));
+
+	return 0;
+}
+
+static int
+dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+	uint oldcnt, oldmode;
+
+	bcopy(arg, &pktgen, sizeof(pktgen));
+	if (pktgen.version != DHD_PKTGEN_VERSION)
+		return BCME_BADARG;
+
+	oldcnt = bus->pktgen_count;
+	oldmode = bus->pktgen_mode;
+
+	bus->pktgen_freq = pktgen.freq;
+	bus->pktgen_count = pktgen.count;
+	bus->pktgen_print = pktgen.print;
+	bus->pktgen_total = pktgen.total;
+	bus->pktgen_minlen = pktgen.minlen;
+	bus->pktgen_maxlen = pktgen.maxlen;
+	bus->pktgen_mode = pktgen.mode;
+	bus->pktgen_stop = pktgen.stop;
+
+	bus->pktgen_tick = bus->pktgen_ptick = 0;
+	bus->pktgen_prev_time = jiffies;
+	bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
+	bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
+
+	/* Clear counts for a new pktgen (mode change, or was stopped) */
+	if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) {
+		bus->pktgen_sent = bus->pktgen_prev_sent = bus->pktgen_rcvd = 0;
+		bus->pktgen_prev_rcvd = bus->pktgen_fail = 0;
+	}
+
+	return 0;
+}
+#endif /* SDTEST */
+
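+/* Toggle the SOCDEVRAM remap bit via si_socdevram(); remap is disabled for
+ * firmware download and re-enabled before the ARM core is released from reset.
+ */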
+static void
+dhdsdio_devram_remap(dhd_bus_t *bus, bool val)
+{
+	uint8 enable, protect, remap;
+
+	si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+	remap = val ? TRUE : FALSE;
+	si_socdevram(bus->sih, TRUE, &enable, &protect, &remap);
+}
+
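+/* Read or write an arbitrary range of dongle memory over SDIO, moving the
+ * backplane address window as needed and restoring it afterwards.
+ */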
+static int
+dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
+{
+	int bcmerror = 0;
+	uint32 sdaddr;
+	uint dsize;
+
+	/* In remap mode, adjust addresses beyond socram and redirect them
+	 * to devram at SOCDEVRAM_BP_ADDR, since a remap address > orig_ramsize
+	 * is not backplane accessible
+	 */
+	if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) {
+		address -= bus->orig_ramsize;
+		address += SOCDEVRAM_BP_ADDR;
+	}
+
+	/* Determine initial transfer parameters */
+	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+	else
+		dsize = size;
+
+	/* Set the backplane window to include the start address */
+	if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+		DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+		goto xfer_done;
+	}
+
+	/* Do the transfer(s) */
+	while (size) {
+		DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
+		          __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
+		          (address & SBSDIO_SBWINDOW_MASK)));
+		if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) {
+			DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* Adjust for next transfer (if any) */
+		if ((size -= dsize)) {
+			data += dsize;
+			address += dsize;
+			if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+				DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+				break;
+			}
+			sdaddr = 0;
+			dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
+		}
+
+	}
+
+xfer_done:
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
+		DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
+			bcmsdh_cur_sbwad(bus->sdh)));
+	}
+
+	return bcmerror;
+}
+
+#ifdef DHD_DEBUG
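+/* Locate the sdpcm_shared structure via the pointer stored in the last word of
+ * dongle RAM, read it out, fix endianness and sanity-check the version.
+ */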
+static int
+dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+{
+	uint32 addr;
+	int rv, i;
+	uint32 shaddr = 0;
+
+	if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID && !dhdsdio_sr_cap(bus))
+		bus->srmemsize = 0;
+
+	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+	i = 0;
+	do {
+		/* Read last word in memory to determine address of sdpcm_shared structure */
+		if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0)
+			return rv;
+
+		addr = ltoh32(addr);
+
+		DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+
+		/*
+		 * Check if addr is valid.
+		 * NVRAM length at the end of memory should have been overwritten.
+		 */
+		if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+			if ((bus->srmemsize > 0) && (i++ == 0)) {
+				shaddr -= bus->srmemsize;
+			} else {
+				DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
+					__FUNCTION__, addr));
+				return BCME_ERROR;
+			}
+		} else
+			break;
+	} while (i < 2);
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+		return rv;
+
+	/* Endianness */
+	sh->flags = ltoh32(sh->flags);
+	sh->trap_addr = ltoh32(sh->trap_addr);
+	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+	sh->assert_line = ltoh32(sh->assert_line);
+	sh->console_addr = ltoh32(sh->console_addr);
+	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1)
+		return BCME_OK;
+
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+		DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
+		           "is different than sdpcm_shared version %d in dongle\n",
+		           __FUNCTION__, SDPCM_SHARED_VERSION,
+		           sh->flags & SDPCM_SHARED_VERSION_MASK));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+#define CONSOLE_LINE_MAX	192
+
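+/* Poll the dongle's in-memory console ring buffer and print any complete new
+ * lines to the host log; a partial line is deferred until its newline arrives.
+ */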
+static int
+dhdsdio_readconsole(dhd_bus_t *bus)
+{
+	dhd_console_t *c = &bus->console;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, idx, addr;
+	int rv;
+
+	/* Don't do anything until FWREADY updates console address */
+	if (bus->console_addr == 0)
+		return 0;
+
+	if (!KSO_ENAB(bus))
+		return 0;
+
+	/* Read console log struct */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+		return rv;
+
+	/* Allocate console buffer (one time only) */
+	if (c->buf == NULL) {
+		c->bufsize = ltoh32(c->log.buf_size);
+		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+			return BCME_NOMEM;
+	}
+
+	idx = ltoh32(c->log.idx);
+
+	/* Protect against corrupt value */
+	if (idx > c->bufsize)
+		return BCME_ERROR;
+
+	/* Skip reading the console buffer if the index pointer has not moved */
+	if (idx == c->last)
+		return BCME_OK;
+
+	/* Read the console buffer */
+	addr = ltoh32(c->log.buf);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+		return rv;
+
+	while (c->last != idx) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			if (c->last == idx) {
+				/* This would output a partial line.  Instead, back up
+				 * the buffer pointer and output this line next time around.
+				 */
+				if (c->last >= n)
+					c->last -= n;
+				else
+					c->last = c->bufsize - n;
+				goto break2;
+			}
+			ch = c->buf[c->last];
+			c->last = (c->last + 1) % c->bufsize;
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			printf("CONSOLE: %s\n", line);
+#ifdef LOG_INTO_TCPDUMP
+			dhd_sendup_log(bus->dhd, line, n);
+#endif /* LOG_INTO_TCPDUMP */
+		}
+	}
+break2:
+
+	return BCME_OK;
+}
+
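+/* Inspect the dongle after a control-path timeout: read sdpcm_shared, report any
+ * assert or trap (including register state) and dump the firmware console so the
+ * failure can be diagnosed from the host log.
+ */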
+static int
+dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+	int bcmerror = 0;
+	uint msize = 512;
+	char *mbuffer = NULL;
+	char *console_buffer = NULL;
+	uint maxstrlen = 256;
+	char *str = NULL;
+	trap_t tr;
+	sdpcm_shared_t sdpcm_shared;
+	struct bcmstrbuf strbuf;
+	uint32 console_ptr, console_size, console_index;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, i, addr;
+	int rv;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (DHD_NOCHECKDIED_ON())
+		return 0;
+
+	if (data == NULL) {
+		/*
+		 * Called after an rx ctrl timeout; "data" is NULL.
+		 * Allocate memory to trace the trap or assert.
+		 */
+		size = msize;
+		mbuffer = data = MALLOC(bus->dhd->osh, msize);
+		if (mbuffer == NULL) {
+			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+	}
+
+	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+		bcmerror = BCME_NOMEM;
+		goto done;
+	}
+
+	if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0)
+		goto done;
+
+	bcm_binit(&strbuf, data, size);
+
+	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
+	            sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
+
+	if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+	}
+
+	if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "No trap%s in dongle",
+		          (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
+		          ?"/assrt" :"");
+	} else {
+		if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
+			/* Download assert */
+			bcm_bprintf(&strbuf, "Dongle assert");
+			if (sdpcm_shared.assert_exp_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                                 sdpcm_shared.assert_exp_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " expr \"%s\"", str);
+			}
+
+			if (sdpcm_shared.assert_file_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                                 sdpcm_shared.assert_file_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " file \"%s\"", str);
+			}
+
+			bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line);
+		}
+
+		if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+			bus->dhd->dongle_trap_occured = TRUE;
+			if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+			                                 sdpcm_shared.trap_addr,
+			                                 (uint8*)&tr, sizeof(trap_t))) < 0)
+				goto done;
+
+			bcm_bprintf(&strbuf,
+			"Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+			            "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+			"r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+			"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+			ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
+			ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
+			ltoh32(sdpcm_shared.trap_addr),
+			ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
+			ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
+
+			addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+				goto printbuf;
+
+			addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_size, sizeof(console_size))) < 0)
+				goto printbuf;
+
+			addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.idx);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_index, sizeof(console_index))) < 0)
+				goto printbuf;
+
+			console_ptr = ltoh32(console_ptr);
+			console_size = ltoh32(console_size);
+			console_index = ltoh32(console_index);
+
+			if (console_size > CONSOLE_BUFFER_MAX ||
+				!(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+				goto printbuf;
+
+			if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr,
+				(uint8 *)console_buffer, console_size)) < 0)
+				goto printbuf;
+
+			for (i = 0, n = 0; i < console_size; i += n + 1) {
+				for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+					ch = console_buffer[(console_index + i + n) % console_size];
+					if (ch == '\n')
+						break;
+					line[n] = ch;
+				}
+
+
+				if (n > 0) {
+					if (line[n - 1] == '\r')
+						n--;
+					line[n] = 0;
+					/* Don't use DHD_ERROR macro since we print
+					 * a lot of information quickly. The macro
+					 * will truncate a lot of the printfs
+					 */
+
+					if (dhd_msg_level & DHD_ERROR_VAL)
+						printf("CONSOLE: %s\n", line);
+				}
+			}
+		}
+	}
+
+printbuf:
+	if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
+		DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+	}
+
+
+done:
+	if (mbuffer)
+		MFREE(bus->dhd->osh, mbuffer, msize);
+	if (str)
+		MFREE(bus->dhd->osh, str, maxstrlen);
+	if (console_buffer)
+		MFREE(bus->dhd->osh, console_buffer, console_size);
+
+	return bcmerror;
+}
+#endif /* #ifdef DHD_DEBUG */
+
+
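+/* Stash the NVRAM variable blob passed down from above; it is written to dongle
+ * RAM later by dhdsdio_write_vars() when leaving download state.
+ */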
+int
+dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+	int bcmerror = BCME_OK;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Basic sanity checks */
+	if (bus->dhd->up) {
+		bcmerror = BCME_NOTDOWN;
+		goto err;
+	}
+	if (!len) {
+		bcmerror = BCME_BUFTOOSHORT;
+		goto err;
+	}
+
+	/* Free the old ones and replace with passed variables */
+	if (bus->vars)
+		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+	bus->vars = MALLOC(bus->dhd->osh, len);
+	bus->varsz = bus->vars ? len : 0;
+	if (bus->vars == NULL) {
+		bcmerror = BCME_NOMEM;
+		goto err;
+	}
+
+	/* Copy the passed variables, which should include the terminating double-null */
+	bcopy(arg, bus->vars, bus->varsz);
+err:
+	return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB		(1  << 24)
+#define CC_CHIPCTRL_JTAG_SEL			(1  << 3)
+#define CC_CHIPCTRL_GPIO_SEL				(0x3)
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB_4334	(1  << 28)
+
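+/* Query or set the chip-specific chipcontrol bits that route the dongle UART to
+ * the serial console; returns the resulting enable state, or -1 on SDIO error.
+ */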
+static int
+dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror)
+{
+	int int_val;
+	uint32 addr, data, uart_enab = 0;
+	uint32 jtag_sel = CC_CHIPCTRL_JTAG_SEL;
+	uint32 gpio_sel = CC_CHIPCTRL_GPIO_SEL;
+
+	addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+	*bcmerror = 0;
+
+	bcmsdh_reg_write(bus->sdh, addr, 4, 1);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	int_val = bcmsdh_reg_read(bus->sdh, data, 4);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	if (bus->sih->chip == BCM4330_CHIP_ID) {
+		uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB;
+	}
+	else if (bus->sih->chip == BCM4334_CHIP_ID ||
+		bus->sih->chip == BCM43340_CHIP_ID ||
+		bus->sih->chip == BCM43341_CHIP_ID ||
+		bus->sih->chip == BCM43342_CHIP_ID ||
+		0) {
+		if (enable) {
+			/* Moved to PMU chipcontrol 1 from 4330 */
+			int_val &= ~gpio_sel;
+			int_val |= jtag_sel;
+		} else {
+			int_val |= gpio_sel;
+			int_val &= ~jtag_sel;
+		}
+		uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB_4334;
+	}
+
+	if (!set)
+		return (int_val & uart_enab);
+	if (enable)
+		int_val |= uart_enab;
+	else
+		int_val &= ~uart_enab;
+	bcmsdh_reg_write(bus->sdh, data, 4, int_val);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	if (bus->sih->chip == BCM4330_CHIP_ID) {
+		uint32 chipcontrol;
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol);
+		chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4);
+		chipcontrol &= ~jtag_sel;
+		if (enable) {
+			chipcontrol |=  jtag_sel;
+			chipcontrol &= ~gpio_sel;
+		}
+		bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol);
+	}
+
+	return (int_val & uart_enab);
+}
+#endif /* DHD_DEBUG */
+
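+/* Central handler for the bus-level iovars in dhdsdio_iovars[]: validates the
+ * request, wakes the bus and requests the clock when needed, then dispatches on
+ * the get/set action id.
+ */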
+static int
+dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+                void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	bool bool_val = 0;
+
+	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+
+	/* Some ioctls use the bus */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
+		bcmerror = BCME_NOTREADY;
+		goto exit;
+	}
+
+	/*
+	 * Special handling for keepSdioOn: New SDIO Wake-up Mechanism
+	 */
+	if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) {
+		dhdsdio_clk_kso_iovar(bus, bool_val);
+		goto exit;
+	} else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) {
+		{
+			dhdsdio_clk_devsleep_iovar(bus, bool_val);
+			if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) {
+				DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n",
+					bus->dpc_sched));
+				if (!bus->dpc_sched) {
+					bus->dpc_sched = TRUE;
+					dhd_sched_dpc(bus->dhd);
+				}
+			}
+		}
+		goto exit;
+	}
+
+	/* Handle sleep stuff before any clock mucking */
+	if (vi->varid == IOV_SLEEP) {
+		if (IOV_ISSET(actionid)) {
+			bcmerror = dhdsdio_bussleep(bus, bool_val);
+		} else {
+			int_val = (int32)bus->sleeping;
+			bcopy(&int_val, arg, val_size);
+		}
+		goto exit;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	if (!bus->dhd->dongle_reset) {
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	}
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_INTR):
+		int_val = (int32)bus->intr;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_INTR):
+		bus->intr = bool_val;
+		bus->intdis = FALSE;
+		if (bus->dhd->up) {
+			if (bus->intr) {
+				DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+				bcmsdh_intr_enable(bus->sdh);
+			} else {
+				DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+				bcmsdh_intr_disable(bus->sdh);
+			}
+		}
+		break;
+
+	case IOV_GVAL(IOV_POLLRATE):
+		int_val = (int32)bus->pollrate;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POLLRATE):
+		bus->pollrate = (uint)int_val;
+		bus->poll = (bus->pollrate != 0);
+		break;
+
+	case IOV_GVAL(IOV_IDLETIME):
+		int_val = bus->idletime;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_IDLETIME):
+		if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
+			bcmerror = BCME_BADARG;
+		} else {
+			bus->idletime = int_val;
+		}
+		break;
+
+	case IOV_GVAL(IOV_IDLECLOCK):
+		int_val = (int32)bus->idleclock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_IDLECLOCK):
+		bus->idleclock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SD1IDLE):
+		int_val = (int32)sd1idle;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SD1IDLE):
+		sd1idle = bool_val;
+		break;
+
+
+	case IOV_SVAL(IOV_MEMBYTES):
+	case IOV_GVAL(IOV_MEMBYTES):
+	{
+		uint32 address;
+		uint size, dsize;
+		uint8 *data;
+
+		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+		ASSERT(plen >= 2*sizeof(int));
+
+		address = (uint32)int_val;
+		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+		size = (uint)int_val;
+
+		/* Do some validation */
+		dsize = set ? plen - (2 * sizeof(int)) : len;
+		if (dsize < size) {
+			DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+			           __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+		          (set ? "write" : "read"), size, address));
+
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/*
+			 * If address is start of RAM (i.e. a downloaded image),
+			 * store the reset instruction to be written in 0
+			 */
+			if (set && address == bus->dongle_ram_base) {
+				bus->resetinstr = *(((uint32*)params) + 2);
+			}
+		} else {
+		/* If we know about SOCRAM, check for a fit */
+		if ((bus->orig_ramsize) &&
+		    ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
+		{
+			uint8 enable, protect, remap;
+			si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+			if (!enable || protect) {
+				DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+					__FUNCTION__, bus->orig_ramsize, size, address));
+				DHD_ERROR(("%s: socram enable %d, protect %d\n",
+					__FUNCTION__, enable, protect));
+				bcmerror = BCME_BADARG;
+				break;
+			}
+
+			if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
+				uint32 devramsize = si_socdevram_size(bus->sih);
+				if ((address < SOCDEVRAM_ARM_ADDR) ||
+					(address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
+					DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
+						__FUNCTION__, address, size));
+					DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
+						__FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
+					bcmerror = BCME_BADARG;
+					break;
+				}
+				/* adjust so that the address is the real backplane address now */
+				address -= SOCDEVRAM_ARM_ADDR;
+				address += SOCDEVRAM_BP_ADDR;
+				DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
+					__FUNCTION__, (set ? "write" : "read"), size, address));
+			} else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
+				/* Cannot access the remap region while the devram remap bit is set;
+				 * ROM content would be returned in this case
+				 */
+				DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
+					__FUNCTION__, address));
+				bcmerror = BCME_ERROR;
+				break;
+			}
+		}
+		}
+
+		/* Generate the actual data pointer */
+		data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+		/* Call to do the transfer */
+		bcmerror = dhdsdio_membytes(bus, set, address, data, size);
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_RAMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_RAMSTART):
+		int_val = (int32)bus->dongle_ram_base;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_SDIOD_DRIVE):
+		int_val = (int32)dhd_sdiod_drive_strength;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDIOD_DRIVE):
+		dhd_sdiod_drive_strength = int_val;
+		si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
+		break;
+
+	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+		bcmerror = dhdsdio_download_state(bus, bool_val);
+		break;
+
+	case IOV_SVAL(IOV_SOCRAM_STATE):
+		bcmerror = dhdsdio_download_state(bus, bool_val);
+		break;
+
+	case IOV_SVAL(IOV_VARS):
+		bcmerror = dhdsdio_downloadvars(bus, arg, len);
+		break;
+
+	case IOV_GVAL(IOV_READAHEAD):
+		int_val = (int32)dhd_readahead;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_READAHEAD):
+		if (bool_val && !dhd_readahead)
+			bus->nextlen = 0;
+		dhd_readahead = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDRXCHAIN):
+		int_val = (int32)bus->use_rxchain;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDRXCHAIN):
+		if (bool_val && !bus->sd_rxchain)
+			bcmerror = BCME_UNSUPPORTED;
+		else
+			bus->use_rxchain = bool_val;
+		break;
+	case IOV_GVAL(IOV_ALIGNCTL):
+		int_val = (int32)dhd_alignctl;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_ALIGNCTL):
+		dhd_alignctl = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDALIGN):
+		int_val = DHD_SDALIGN;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_VARS):
+		if (bus->varsz < (uint)len)
+			bcopy(bus->vars, arg, bus->varsz);
+		else
+			bcmerror = BCME_BUFTOOSHORT;
+		break;
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uint32 addr, size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = (uint32)((ulong)bus->regs + sd_ptr->offset);
+		size = sd_ptr->func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uint32 addr, size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = (uint32)((ulong)bus->regs + sd_ptr->offset);
+		size = sd_ptr->func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	/* Same as above, but offset is not backplane (not SDIO core) */
+	case IOV_GVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	case IOV_GVAL(IOV_SDCIS):
+	{
+		*(char *)arg = 0;
+
+		bcmstrcat(arg, "\nFunc 0\n");
+		bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 1\n");
+		bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 2\n");
+		bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		break;
+	}
+
+	case IOV_GVAL(IOV_FORCEEVEN):
+		int_val = (int32)forcealign;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_FORCEEVEN):
+		forcealign = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_TXBOUND):
+		int_val = (int32)dhd_txbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXBOUND):
+		dhd_txbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_RXBOUND):
+		int_val = (int32)dhd_rxbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RXBOUND):
+		dhd_rxbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_TXMINMAX):
+		int_val = (int32)dhd_txminmax;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXMINMAX):
+		dhd_txminmax = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_SERIALCONS):
+		int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
+		if (bcmerror != 0)
+			break;
+
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SERIALCONS):
+		dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
+		break;
+
+
+#endif /* DHD_DEBUG */
+
+
+#ifdef SDTEST
+	case IOV_GVAL(IOV_EXTLOOP):
+		int_val = (int32)bus->ext_loop;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_EXTLOOP):
+		bus->ext_loop = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_get(bus, arg);
+		break;
+
+	case IOV_SVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_set(bus, arg);
+		break;
+#endif /* SDTEST */
+
+#if defined(USE_SDIOFIFO_IOVAR)
+	case IOV_GVAL(IOV_WATERMARK):
+		int_val = (int32)watermark;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WATERMARK):
+		watermark = (uint)int_val;
+		watermark = (watermark > SBSDIO_WATERMARK_MASK) ? SBSDIO_WATERMARK_MASK : watermark;
+		DHD_ERROR(("Setting watermark as 0x%x.\n", watermark));
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, NULL);
+		break;
+
+	case IOV_GVAL(IOV_MESBUSYCTRL):
+		int_val = (int32)mesbusyctrl;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MESBUSYCTRL):
+		mesbusyctrl = (uint)int_val;
+		mesbusyctrl = (mesbusyctrl > SBSDIO_MESBUSYCTRL_MASK)
+			? SBSDIO_MESBUSYCTRL_MASK : mesbusyctrl;
+		DHD_ERROR(("Setting mesbusyctrl as 0x%x.\n", mesbusyctrl));
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+			((uint8)mesbusyctrl | 0x80), NULL);
+		break;
+#endif /* USE_SDIOFIFO_IOVAR */
+
+
+	case IOV_GVAL(IOV_DONGLEISOLATION):
+		int_val = bus->dhd->dongle_isolation;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DONGLEISOLATION):
+		bus->dhd->dongle_isolation = bool_val;
+		break;
+
+	case IOV_SVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+		           __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+		           bus->dhd->busstate));
+
+		ASSERT(bus->dhd->osh);
+		/* ASSERT(bus->cl_devid); */
+
+		dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+
+		break;
+	/*
+	 * SoftAP firmware is updated through a module parameter or an Android private command
+	 */
+
+	case IOV_GVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+		/* Get its status */
+		int_val = (bool) bus->dhd->dongle_reset;
+		bcopy(&int_val, arg, val_size);
+
+		break;
+
+	case IOV_GVAL(IOV_KSO):
+		int_val = dhdsdio_sleepcsr_get(bus);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DEVCAP):
+		int_val = dhdsdio_devcap_get(bus);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DEVCAP):
+		dhdsdio_devcap_set(bus, (uint8) int_val);
+		break;
+	case IOV_GVAL(IOV_TXGLOMSIZE):
+		int_val = (int32)bus->txglomsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXGLOMSIZE):
+		if (int_val > SDPCM_MAXGLOM_SIZE) {
+			bcmerror = BCME_ERROR;
+		} else {
+			bus->txglomsize = (uint)int_val;
+		}
+		break;
+	case IOV_SVAL(IOV_HANGREPORT):
+		bus->dhd->hang_report = bool_val;
+		DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__, bus->dhd->hang_report));
+		break;
+
+	case IOV_GVAL(IOV_HANGREPORT):
+		int_val = (int32)bus->dhd->hang_report;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_TXINRX_THRES):
+		int_val = bus->txinrx_thres;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_TXINRX_THRES):
+		if (int_val < 0) {
+			bcmerror = BCME_BADARG;
+		} else {
+			bus->txinrx_thres = int_val;
+		}
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return bcmerror;
+}
+
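+/* Copy the NVRAM variable block to the top of dongle RAM and write the length
+ * token (word count in the lower 16 bits, its complement in the upper 16 bits)
+ * into the last word so the firmware can locate the vars at boot.
+ */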
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+	uint32 varsize, phys_size;
+	uint32 varaddr;
+	uint8 *vbuffer;
+	uint32 varsizew;
+#ifdef DHD_DEBUG
+	uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+	varaddr = (bus->ramsize - 4) - varsize;
+
+	varaddr += bus->dongle_ram_base;
+
+	if (bus->vars) {
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) {
+			if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) {
+				DHD_ERROR(("PR85623WAR in place\n"));
+				varsize += 4;
+				varaddr -= 4;
+			}
+		}
+
+		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+		if (!vbuffer)
+			return BCME_NOMEM;
+
+		bzero(vbuffer, varsize);
+		bcopy(bus->vars, vbuffer, bus->varsz);
+
+		/* Write the vars list */
+		bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+#ifdef DHD_DEBUG
+		/* Verify NVRAM bytes */
+		DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+		if (!nvram_ularray)
+			return BCME_NOMEM;
+
+		/* Upload image to verify downloaded contents. */
+		memset(nvram_ularray, 0xaa, varsize);
+
+		/* Read the vars list to temp buffer for comparison */
+		bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+		if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, varsize, varaddr));
+		}
+		/* Compare the org NVRAM with the one read from RAM */
+		if (memcmp(vbuffer, nvram_ularray, varsize)) {
+			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+			__FUNCTION__));
+
+		MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+		MFREE(bus->dhd->osh, vbuffer, varsize);
+	}
+
+	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+
+	phys_size += bus->dongle_ram_base;
+
+	/* adjust to the user specified RAM */
+	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+		phys_size, bus->ramsize));
+	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+		varaddr, varsize));
+	varsize = ((phys_size - 4) - varaddr);
+
+	/*
+	 * Determine the length token:
+	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+	 */
+	if (bcmerror) {
+		varsizew = 0;
+	} else {
+		varsizew = varsize / 4;
+		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+		varsizew = htol32(varsizew);
+	}
+
+	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+	/* Write the length token to the last word */
+	bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4),
+		(uint8*)&varsizew, 4);
+
+	return bcmerror;
+}
+
+static int
+dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+{
+	uint retries;
+	int bcmerror = 0;
+	int foundcr4 = 0;
+
+	if (!bus->sih)
+		return BCME_ERROR;
+	/* To enter download state, disable ARM and reset SOCRAM.
+	 * To exit download state, simply reset ARM (default is RAM boot).
+	 */
+	if (enter) {
+		bus->alp_only = TRUE;
+
+		if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+				foundcr4 = 1;
+			} else {
+				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+		}
+
+		if (!foundcr4) {
+			si_core_disable(bus->sih, 0);
+			if (bcmsdh_regfail(bus->sdh)) {
+				bcmerror = BCME_SDIO_ERROR;
+				goto fail;
+			}
+
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			si_core_reset(bus->sih, 0, 0);
+			if (bcmsdh_regfail(bus->sdh)) {
+				DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n",
+				           __FUNCTION__));
+				bcmerror = BCME_SDIO_ERROR;
+				goto fail;
+			}
+
+			/* Disable remap for download */
+			if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih))
+				dhdsdio_devram_remap(bus, FALSE);
+
+			if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID) {
+				/* Disabling Remap for SRAM_3 */
+				si_socram_set_bankpda(bus->sih, 0x3, 0x0);
+			}
+
+			/* Clear the top bit of memory */
+			if (bus->ramsize) {
+				uint32 zeros = 0;
+				if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4,
+				                     (uint8*)&zeros, 4) < 0) {
+					bcmerror = BCME_SDIO_ERROR;
+					goto fail;
+				}
+			}
+		} else {
+			/* For CR4,
+			 * Halt ARM
+			 * Remove ARM reset
+			 * Read RAM base address [0x18_0000]
+			 * [next] Download firmware
+			 * [done at else] Populate the reset vector
+			 * [done at else] Remove ARM halt
+			*/
+			/* Halt ARM & remove reset */
+			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+		}
+	} else {
+		if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			if (!si_iscoreup(bus->sih)) {
+				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			if ((bcmerror = dhdsdio_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+
+			/* Enable remap before ARM reset but after vars.
+			 * No backplane access in remap mode
+			 */
+			if (REMAP_ENAB(bus) && !si_socdevram_remap_isenb(bus->sih))
+				dhdsdio_devram_remap(bus, TRUE);
+
+			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+		} else {
+			/* CR4 has no SOCRAM, but has TCMs */
+			/* write vars */
+			if ((bcmerror = dhdsdio_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+
+			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+			/* switch back to arm core again */
+			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			/* write address 0 with reset instruction */
+			bcmerror = dhdsdio_membytes(bus, TRUE, 0,
+				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+
+			/* now remove reset and halt and continue to run CR4 */
+		}
+
+		si_core_reset(bus->sih, 0, 0);
+		if (bcmsdh_regfail(bus->sdh)) {
+			DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			goto fail;
+		}
+
+		/* Allow HT Clock now that the ARM is running. */
+		bus->alp_only = FALSE;
+
+		bus->dhd->busstate = DHD_BUS_LOAD;
+	}
+
+fail:
+	/* Always return to SDIOD core */
+	if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+		si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+	return bcmerror;
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                 void *params, int plen, void *arg, int len, bool set)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
+		dhd_os_sdlock(bus->dhd);
+
+		BUS_WAKE(bus);
+
+		/* Turn on clock in case SD command needs backplane */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
+
+		/* Check for bus configuration changes of interest */
+
+		/* If it was a divisor change, read the new one */
+		if (set && strcmp(name, "sd_divisor") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_divisor = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_divisor));
+			}
+		}
+		/* If it was a mode change, read the new one */
+		if (set && strcmp(name, "sd_mode") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_mode = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_mode));
+			}
+		}
+		/* Similar check for blocksize change */
+		if (set && strcmp(name, "sd_blocksize") == 0) {
+			int32 fnum = 2;
+			if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+			                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+				bus->blocksize = 0;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, "sd_blocksize", bus->blocksize));
+
+				dhdsdio_tune_fifoparam(bus);
+			}
+		}
+		bus->roundup = MIN(max_roundup, bus->blocksize);
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+			bus->activity = FALSE;
+			dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+	osl_t *osh;
+	uint32 local_hostintmask;
+	uint8 saveclk;
+	uint retries;
+	int err;
+	bool wlfc_enabled = FALSE;
+
+	if (!bus->dhd)
+		return;
+
+	osh = bus->dhd->osh;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_waitlockfree(bus->sdh);
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) {
+		/* If the firmware has already hung, disable any interrupts */
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		bus->hostintmask = 0;
+		bcmsdh_intr_disable(bus->sdh);
+	} else {
+
+		BUS_WAKE(bus);
+
+		/* Change our idea of bus state */
+		bus->dhd->busstate = DHD_BUS_DOWN;
+
+		if (KSO_ENAB(bus)) {
+			/* Enable clock for device interrupts */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Disable and clear interrupts at the chip level also */
+			W_SDREG(0, &bus->regs->hostintmask, retries);
+			local_hostintmask = bus->hostintmask;
+			bus->hostintmask = 0;
+
+			/* Force clocks on backplane to be sure F2 interrupt propagates */
+			saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			                          SBSDIO_FUNC1_CHIPCLKCSR, &err);
+			if (!err) {
+				bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+				                 (saveclk | SBSDIO_FORCE_HT), &err);
+			}
+			if (err) {
+				DHD_ERROR(("%s: Failed to force clock for F2: err %d\n",
+				            __FUNCTION__, err));
+			}
+
+			/* Turn off the bus (F2), free any pending packets */
+			DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+#if !defined(NDISVER) || (NDISVER < 0x0630)
+			bcmsdh_intr_disable(bus->sdh);
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN,
+			                 SDIO_FUNC_ENABLE_1, NULL);
+
+			/* Clear any pending interrupts now that F2 is disabled */
+			W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+		}
+
+		/* Turn off the backplane clock (only) */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	}
+
+#ifdef PROP_TXSTATUS
+	wlfc_enabled = (dhd_wlfc_cleanup_txq(bus->dhd, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+	if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+		 * when there is a newly coming packet from network stack.
+		 */
+		dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+		/* Clear the data packet queues */
+		pktq_flush(osh, &bus->txq, TRUE, NULL, 0);
+	}
+
+	/* Clear any held glomming stuff */
+	if (bus->glomd)
+		PKTFREE(osh, bus->glomd, FALSE);
+
+	if (bus->glom)
+		PKTFREE(osh, bus->glom, FALSE);
+
+	bus->glom = bus->glomd = NULL;
+
+	/* Clear rx control and wake any waiters */
+	bus->rxlen = 0;
+	dhd_os_ioctl_resp_wake(bus->dhd);
+
+	/* Reset some F2 state stuff */
+	bus->rxskip = FALSE;
+	bus->tx_seq = bus->rx_seq = 0;
+
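+	/* Reset the tx window to a small default until the dongle advertises a new one */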
+	bus->tx_max = 4;
+
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+}
+
+#if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STD)
+extern uint sd_txglom;
+#endif
+void
+dhd_txglom_enable(dhd_pub_t *dhdp, bool enable)
+{
+	/* Host txglom can't be enabled by default: some platforms have no
+	 * (or poor) ADMA support, and txglom would trigger kernel assertions
+	 * there (e.g. the Panda board).
+	 */
+	dhd_bus_t *bus = dhdp->bus;
+#ifdef BCMSDIOH_TXGLOM
+	char buf[256];
+	uint32 rxglom;
+	int32 ret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BCMSDIOH_STD
+	if (enable)
+		enable = sd_txglom;
+#endif /* BCMSDIOH_STD */
+
+	if (enable) {
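+		/* The dongle sees host tx glom as rx glom, hence the "bus:rxglom" iovar */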
+		rxglom = 1;
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("bus:rxglom", (void *)&rxglom, 4, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret >= 0)
+			bus->txglom_enable = TRUE;
+		else {
+#ifdef BCMSDIOH_STD
+			sd_txglom = 0;
+#endif /* BCMSDIOH_STD */
+			bus->txglom_enable = FALSE;
+		}
+	} else
+#endif /* BCMSDIOH_TXGLOM */
+		bus->txglom_enable = FALSE;
+}
+
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	dhd_timeout_t tmo;
+	uint retries = 0;
+	uint8 ready, enable;
+	int err, ret = 0;
+	uint8 saveclk;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(bus->dhd);
+	if (!bus->dhd)
+		return 0;
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	/* Make sure backplane clock is on, needed to generate F2 interrupt */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (bus->clkstate != CLK_AVAIL) {
+		DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate));
+		ret = -1;
+		goto exit;
+	}
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	if (!err) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 (saveclk | SBSDIO_FORCE_HT), &err);
+	}
+	if (err) {
+		DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+		ret = -1;
+		goto exit;
+	}
+
+	/* Enable function 2 (frame transfers) */
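+	/* First advertise the host's SDPCM protocol version via the tosbmailboxdata register */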
+	W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
+	        &bus->regs->tosbmailboxdata, retries);
+	enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+
+	/* Give the dongle some time to do its thing and set IOR2 */
+	dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+
+	ready = 0;
+	while (ready != enable && !dhd_timeout_expired(&tmo))
+	        ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
+
+	DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+	          __FUNCTION__, enable, ready, tmo.elapsed));
+
+	/* If F2 successfully enabled, set core and enable interrupts */
+	if (ready == enable) {
+		/* Make sure we're talking to the core. */
+		if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
+			bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+		ASSERT(bus->regs != NULL);
+
+		/* Set up the interrupt mask and enable interrupts */
+		bus->hostintmask = HOSTINTMASK;
+		/* corerev 4 could use the newer interrupt logic to detect the frames */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) &&
+			(bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) {
+			bus->hostintmask &= ~I_HMB_FRAME_IND;
+			bus->hostintmask |= I_XMTDATA_AVAIL;
+		}
+		W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+
+		if (bus->sih->buscorerev < 15) {
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK,
+				(uint8)watermark, &err);
+		}
+
+		/* Set bus state according to enable result */
+		dhdp->busstate = DHD_BUS_DATA;
+
+		/* bcmsdh_intr_unmask(bus->sdh); */
+
+		bus->intdis = FALSE;
+		if (bus->intr) {
+			DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+			bcmsdh_intr_enable(bus->sdh);
+		} else {
+			DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+			bcmsdh_intr_disable(bus->sdh);
+		}
+
+	} else {
+		/* Disable F2 again */
+		enable = SDIO_FUNC_ENABLE_1;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+	}
+
+	if (dhdsdio_sr_cap(bus)) {
+		dhdsdio_sr_init(bus);
+		/* Mask the chip-active interrupt permanently */
+		bus->hostintmask &= ~I_CHIPACTIVE;
+		W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+		DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n",
+		__FUNCTION__, bus->hostintmask));
+	}
+	else
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+
+	/* If we didn't come up, turn off backplane clock */
+	if (dhdp->busstate != DHD_BUS_DATA)
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+exit:
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+
+	return ret;
+}
+
+static void
+dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+	uint16 lastrbc;
+	uint8 hi, lo;
+	int err;
+
+	DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
+	           (abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return;
+	}
+
+	if (abort) {
+		bcmsdh_abort(sdh, SDIO_FUNC_2);
+	}
+
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
+	if (err) {
+		DHD_ERROR(("%s: SBSDIO_FUNC1_FRAMECTRL cmd err\n", __FUNCTION__));
+		goto fail;
+	}
+	bus->f1regdata++;
+
+	/* Wait until the packet has been flushed (device/FIFO stable) */
+	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+		hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+		lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, &err);
+		if (err) {
+			DHD_ERROR(("%s: SBSDIO_FUNC1_RFAMEBCLO cmd err\n", __FUNCTION__));
+			goto fail;
+		}
+
+		bus->f1regdata += 2;
+
+		if ((hi == 0) && (lo == 0))
+			break;
+
+		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+			DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
+			           __FUNCTION__, lastrbc, ((hi << 8) + lo)));
+		}
+		lastrbc = (hi << 8) + lo;
+	}
+
+	if (!retries) {
+		DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
+	} else {
+		DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
+	}
+
+	if (rtx) {
+		bus->rxrtx++;
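+		/* NAK the dongle so it retransmits; rxskip holds off further reads until NAKHANDLED */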
+		W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+		bus->f1regdata++;
+		if (retries <= retry_limit) {
+			bus->rxskip = TRUE;
+		}
+	}
+
+	/* Clear partial in any case */
+	bus->nextlen = 0;
+
+fail:
+	/* If we can't reach the device, signal failure */
+	if (err || bcmsdh_regfail(sdh))
+		bus->dhd->busstate = DHD_BUS_DOWN;
+}
+
+static void
+dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint rdlen, pad;
+
+	int sdret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Control data already received in aligned rxctl */
+	if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
+		goto gotpkt;
+
+	ASSERT(bus->rxbuf);
+	/* Set rxctl for frame (w/optional alignment) */
+	bus->rxctl = bus->rxbuf;
+	if (dhd_alignctl) {
+		bus->rxctl += firstread;
+		if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+			bus->rxctl += (DHD_SDALIGN - pad);
+		bus->rxctl -= firstread;
+	}
+	ASSERT(bus->rxctl >= bus->rxbuf);
+
+	/* Copy the already-read portion over */
+	bcopy(hdr, bus->rxctl, firstread);
+	if (len <= firstread)
+		goto gotpkt;
+
+	/* Copy the full data pkt in gSPI case and process ioctl. */
+	if (bus->bus == SPI_BUS) {
+		bcopy(hdr, bus->rxctl, len);
+		goto gotpkt;
+	}
+
+	/* Raise rdlen to next SDIO block to avoid tail command */
+	rdlen = len - firstread;
+	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+		pad = bus->blocksize - (rdlen % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+		    ((len + pad) < bus->dhd->maxctl))
+			rdlen += pad;
+	} else if (rdlen % DHD_SDALIGN) {
+		rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (rdlen & (ALIGNMENT - 1)))
+		rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+	/* Drop if the read is too big or it exceeds our maximum */
+	if ((rdlen + firstread) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
+		           __FUNCTION__, rdlen, bus->dhd->maxctl));
+		bus->dhd->rx_errors++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+	if ((len - doff) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+		           __FUNCTION__, len, (len - doff), bus->dhd->maxctl));
+		bus->dhd->rx_errors++; bus->rx_toolong++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+	/* Read remainder of frame body into the rxctl buffer */
+	sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+	                            (bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
+	bus->f2rxdata++;
+	ASSERT(sdret != BCME_PENDING);
+
+	/* Control frame failures need retransmission */
+	if (sdret < 0) {
+		DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
+		bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
+		dhdsdio_rxfail(bus, TRUE, TRUE);
+		goto done;
+	}
+
+gotpkt:
+
+#ifdef DHD_DEBUG
+	if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+		prhex("RxCtrl", bus->rxctl, len);
+	}
+#endif
+
+	/* Point to valid data and indicate its length */
+	bus->rxctl += doff;
+	bus->rxlen = len - doff;
+
+done:
+	/* Awake any waiters */
+	dhd_os_ioctl_resp_wake(bus->dhd);
+}
+
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+	void **pkt, uint32 *pkt_count);
+
+static uint8
+dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
+{
+	uint16 dlen, totlen;
+	uint8 *dptr, num = 0;
+
+	uint16 sublen, check;
+	void *pfirst, *plast, *pnext;
+	void * list_tail[DHD_MAX_IFS] = { NULL };
+	void * list_head[DHD_MAX_IFS] = { NULL };
+	uint8 idx;
+	osl_t *osh = bus->dhd->osh;
+
+	int errcode;
+	uint8 chan, seq, doff, sfdoff;
+	uint8 txmax;
+	uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+	uint reorder_info_len;
+
+	int ifidx = 0;
+	bool usechain = bus->use_rxchain;
+
+	/* If packets, issue read(s) and send up packet chain */
+	/* Return sequence numbers consumed? */
+
+	DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
+
+	/* If there's a descriptor, generate the packet chain */
+	if (bus->glomd) {
+		dhd_os_sdlock_rxq(bus->dhd);
+
+		pfirst = plast = pnext = NULL;
+		dlen = (uint16)PKTLEN(osh, bus->glomd);
+		dptr = PKTDATA(osh, bus->glomd);
+		if (!dlen || (dlen & 1)) {
+			DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
+			           __FUNCTION__, dlen));
+			dlen = 0;
+		}
+
+		for (totlen = num = 0; dlen; num++) {
+			/* Get (and move past) next length */
+			sublen = ltoh16_ua(dptr);
+			dlen -= sizeof(uint16);
+			dptr += sizeof(uint16);
+			if ((sublen < SDPCM_HDRLEN) ||
+			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+				DHD_ERROR(("%s: descriptor len %d bad: %d\n",
+				           __FUNCTION__, num, sublen));
+				pnext = NULL;
+				break;
+			}
+			if (sublen % DHD_SDALIGN) {
+				DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
+				           __FUNCTION__, sublen, DHD_SDALIGN));
+				usechain = FALSE;
+			}
+			totlen += sublen;
+
+			/* For last frame, adjust read len so total is a block multiple */
+			if (!dlen) {
+				sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
+				totlen = ROUNDUP(totlen, bus->blocksize);
+			}
+
+			/* Allocate/chain packet for next subframe */
+			if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
+				DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+				           __FUNCTION__, num, sublen));
+				break;
+			}
+			ASSERT(!PKTLINK(pnext));
+			if (!pfirst) {
+				ASSERT(!plast);
+				pfirst = plast = pnext;
+			} else {
+				ASSERT(plast);
+				PKTSETNEXT(osh, plast, pnext);
+				plast = pnext;
+			}
+
+			/* Adhere to start alignment requirements */
+			PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
+		}
+
+		/* If all allocations succeeded, save packet chain in bus structure */
+		if (pnext) {
+			DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
+			          __FUNCTION__, totlen, num));
+			if (DHD_GLOM_ON() && bus->nextlen) {
+				if (totlen != bus->nextlen) {
+					DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
+					          "rxseq %d\n", __FUNCTION__, bus->nextlen,
+					          totlen, rxseq));
+				}
+			}
+			bus->glom = pfirst;
+			pfirst = pnext = NULL;
+		} else {
+			if (pfirst)
+				PKTFREE(osh, pfirst, FALSE);
+			bus->glom = NULL;
+			num = 0;
+		}
+
+		/* Done with descriptor packet */
+		PKTFREE(osh, bus->glomd, FALSE);
+		bus->glomd = NULL;
+		bus->nextlen = 0;
+
+		dhd_os_sdunlock_rxq(bus->dhd);
+	}
+
+	/* Ok -- either we just generated a packet chain, or had one from before */
+	if (bus->glom) {
+		if (DHD_GLOM_ON()) {
+			DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
+			for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
+				DHD_GLOM(("    %p: %p len 0x%04x (%d)\n",
+				          pnext, (uint8*)PKTDATA(osh, pnext),
+				          PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
+			}
+		}
+
+		pfirst = bus->glom;
+		dlen = (uint16)pkttotlen(osh, pfirst);
+
+		/* Do an SDIO read for the superframe.  A configurable iovar selects
+		 * whether to read directly into the chained packets or to read into a
+		 * large staging buffer and copy into the chain.
+		 */
+		if (usechain) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, (uint8*)PKTDATA(osh, pfirst),
+			                              dlen, pfirst, NULL, NULL);
+		} else if (bus->dataptr) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, bus->dataptr,
+			                              dlen, NULL, NULL, NULL);
+			sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
+			if (sublen != dlen) {
+				DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
+				           __FUNCTION__, dlen, sublen));
+				errcode = -1;
+			}
+			pnext = NULL;
+		} else {
+			DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
+			errcode = -1;
+		}
+		bus->f2rxdata++;
+		ASSERT(errcode != BCME_PENDING);
+
+		/* On failure, kill the superframe, allow a couple retries */
+		if (errcode < 0) {
+			DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
+			           __FUNCTION__, dlen, errcode));
+			bus->dhd->rx_errors++;
+
+			if (bus->glomerr++ < 3) {
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			return 0;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_GLOM_ON()) {
+			prhex("SUPERFRAME", PKTDATA(osh, pfirst),
+			      MIN(PKTLEN(osh, pfirst), 48));
+		}
+#endif
+
+		/* Validate the superframe header */
+		dptr = (uint8 *)PKTDATA(osh, pfirst);
+		sublen = ltoh16_ua(dptr);
+		check = ltoh16_ua(dptr + sizeof(uint16));
+
+		chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
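+		/* The read-ahead length in the SW header is in 16-byte units (hence the << 4 below) */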
+		bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+		doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+		errcode = 0;
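+		/* The HW tag's check word is the one's complement of the length, so len ^ check must be all ones */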
+		if ((uint16)~(sublen^check)) {
+			DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, sublen, check));
+			errcode = -1;
+		} else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
+			DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+			           __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
+			errcode = -1;
+		} else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
+			DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
+			           SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
+			errcode = -1;
+		} else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+			DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
+			errcode = -1;
+		} else if ((doff < SDPCM_HDRLEN) ||
+		           (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
+			DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+				__FUNCTION__, doff, sublen, PKTLEN(osh, pfirst),
+				SDPCM_HDRLEN));
+			errcode = -1;
+		}
+
+		/* Check sequence number of superframe SW header */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+			          __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
+		if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_max;
+		}
+		bus->tx_max = txmax;
+
+		/* Remove superframe header, remember offset */
+		PKTPULL(osh, pfirst, doff);
+		sfdoff = doff;
+
+		/* Validate all the subframe headers */
+		for (num = 0, pnext = pfirst; pnext && !errcode;
+		     num++, pnext = PKTNEXT(osh, pnext)) {
+			dptr = (uint8 *)PKTDATA(osh, pnext);
+			dlen = (uint16)PKTLEN(osh, pnext);
+			sublen = ltoh16_ua(dptr);
+			check = ltoh16_ua(dptr + sizeof(uint16));
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				prhex("subframe", dptr, 32);
+			}
+#endif
+
+			if ((uint16)~(sublen^check)) {
+				DHD_ERROR(("%s (subframe %d): HW hdr error: "
+				           "len/check 0x%04x/0x%04x\n",
+				           __FUNCTION__, num, sublen, check));
+				errcode = -1;
+			} else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+				DHD_ERROR(("%s (subframe %d): length mismatch: "
+				           "len 0x%04x, expect 0x%04x\n",
+				           __FUNCTION__, num, sublen, dlen));
+				errcode = -1;
+			} else if ((chan != SDPCM_DATA_CHANNEL) &&
+			           (chan != SDPCM_EVENT_CHANNEL)) {
+				DHD_ERROR(("%s (subframe %d): bad channel %d\n",
+				           __FUNCTION__, num, chan));
+				errcode = -1;
+			} else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+				DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
+				           __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
+				errcode = -1;
+			}
+		}
+
+		if (errcode) {
+			/* Terminate frame on error, request a couple retries */
+			if (bus->glomerr++ < 3) {
+				/* Restore superframe header space */
+				PKTPUSH(osh, pfirst, sfdoff);
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			bus->nextlen = 0;
+			return 0;
+		}
+
+		/* Basic SD framing looks ok - process each packet (header) */
+		bus->glom = NULL;
+		plast = NULL;
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+			pnext = PKTNEXT(osh, pfirst);
+			PKTSETNEXT(osh, pfirst, NULL);
+
+			dptr = (uint8 *)PKTDATA(osh, pfirst);
+			sublen = ltoh16_ua(dptr);
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+			DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+			          __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
+			          PKTLEN(osh, pfirst), sublen, chan, seq));
+
+			ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
+
+			if (rxseq != seq) {
+				DHD_GLOM(("%s: rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Subframe Data", dptr, dlen);
+			}
+#endif
+
+			PKTSETLEN(osh, pfirst, sublen);
+			PKTPULL(osh, pfirst, doff);
+
+			reorder_info_len = sizeof(reorder_info_buf);
+
+			if (PKTLEN(osh, pfirst) == 0) {
+				PKTFREE(bus->dhd->osh, pfirst, FALSE);
+				continue;
+			} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf,
+				&reorder_info_len) != 0) {
+				DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+				bus->dhd->rx_errors++;
+				PKTFREE(osh, pfirst, FALSE);
+				continue;
+			}
+			if (reorder_info_len) {
+				uint32 free_buf_count;
+				void *ppfirst;
+
+				ppfirst = pfirst;
+				/* Reordering info from the firmware */
+				dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf,
+					reorder_info_len, &ppfirst, &free_buf_count);
+
+				if (free_buf_count == 0) {
+					continue;
+				}
+				else {
+					void *temp;
+
+					/*  go to the end of the chain and attach the pnext there */
+					temp = ppfirst;
+					while (PKTNEXT(osh, temp) != NULL) {
+						temp = PKTNEXT(osh, temp);
+					}
+					pfirst = temp;
+					if (list_tail[ifidx] == NULL)
+						list_head[ifidx] = ppfirst;
+					else
+						PKTSETNEXT(osh, list_tail[ifidx], ppfirst);
+					list_tail[ifidx] = pfirst;
+				}
+
+				num += (uint8)free_buf_count;
+			}
+			else {
+				/* this packet will go up, link back into chain and count it */
+
+				if (list_tail[ifidx] == NULL) {
+					list_head[ifidx] = list_tail[ifidx] = pfirst;
+				}
+				else {
+					PKTSETNEXT(osh, list_tail[ifidx], pfirst);
+					list_tail[ifidx] = pfirst;
+				}
+				num++;
+			}
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
+				          __FUNCTION__, num, pfirst,
+				          PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
+				          PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
+				prhex("", (uint8 *)PKTDATA(osh, pfirst),
+				      MIN(PKTLEN(osh, pfirst), 32));
+			}
+#endif /* DHD_DEBUG */
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+
+		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+			if (list_head[idx]) {
+				void *temp;
+				uint8 cnt = 0;
+				temp = list_head[idx];
+				do {
+					temp = PKTNEXT(osh, temp);
+					cnt++;
+				} while (temp);
+				if (cnt) {
+					dhd_os_sdunlock(bus->dhd);
+					dhd_rx_frame(bus->dhd, idx, list_head[idx], cnt, 0);
+					dhd_os_sdlock(bus->dhd);
+				}
+			}
+		}
+		bus->rxglomframes++;
+		bus->rxglompkts += num;
+	}
+	return num;
+}
+
+
+/* Read up to maxframes frames; return the number read and set *finished when no more are pending */
+static uint
+dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+{
+	osl_t *osh = bus->dhd->osh;
+	bcmsdh_info_t *sdh = bus->sdh;
+
+	uint16 len, check;	/* Extracted hardware header fields */
+	uint8 chan, seq, doff;	/* Extracted software header fields */
+	uint8 fcbits;		/* Extracted fcbits from software header */
+	uint8 delta;
+
+	void *pkt;	/* Packet for event or data frames */
+	uint16 pad;	/* Number of pad bytes to read */
+	uint16 rdlen;	/* Total number of bytes to read */
+	uint8 rxseq;	/* Next sequence number to expect */
+	uint rxleft = 0;	/* Remaining number of frames allowed */
+	int sdret;	/* Return code from bcmsdh calls */
+	uint8 txmax;	/* Maximum tx sequence offered */
+	bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
+	uint8 *rxbuf;
+	int ifidx = 0;
+	uint rxcount = 0; /* Total frames read */
+	uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+	uint reorder_info_len;
+	uint pkt_count;
+
+#if defined(DHD_DEBUG) || defined(SDTEST)
+	bool sdtest = FALSE;	/* To limit message spew from test mode */
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bus->readframes = TRUE;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: KSO off\n", __FUNCTION__));
+		bus->readframes = FALSE;
+		return 0;
+	}
+
+	ASSERT(maxframes);
+
+#ifdef SDTEST
+	/* Allow pktgen to override maxframes */
+	if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+		maxframes = bus->pktgen_count;
+		sdtest = TRUE;
+	}
+#endif
+
+	/* Not finished unless we encounter no more frames indication */
+	*finished = FALSE;
+
+	for (rxseq = bus->rx_seq, rxleft = maxframes;
+	     !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
+	     rxseq++, rxleft--) {
+#ifdef DHDTCPACK_SUP_DBG
+		if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_DELAYTX) {
+			if (bus->dotxinrx == FALSE)
+				DHD_ERROR(("%s %d: dotxinrx FALSE with tcpack_sub_mode %d\n",
+					__FUNCTION__, __LINE__, bus->dhd->tcpack_sup_mode));
+		}
+#ifdef DEBUG_COUNTER
+		else if (pktq_mlen(&bus->txq, ~bus->flowcontrol) > 0) {
+			tack_tbl.cnt[bus->dotxinrx ? 6 : 7]++;
+		}
+#endif /* DEBUG_COUNTER */
+#endif /* DHDTCPACK_SUP_DBG */
+		/* tx more to improve rx performance */
+		if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
+			dhdsdio_sendpendctl(bus);
+		} else if (bus->dotxinrx && (bus->clkstate == CLK_AVAIL) &&
+			!bus->fcstate && DATAOK(bus) &&
+			(pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres)) {
+			dhdsdio_sendfromq(bus, dhd_txbound);
+#ifdef DHDTCPACK_SUPPRESS
+			/* In TCPACK_SUP_DELAYTX mode, do txinrx only if the bus txq holds
+			 * 1. any DATA packet to TX, or
+			 * 2. a TCPACK to a TCP DATA(PSH) packet.
+			 */
+			bus->dotxinrx = (bus->dhd->tcpack_sup_mode == TCPACK_SUP_DELAYTX) ?
+				FALSE : TRUE;
+#endif
+		}
+
+		/* Handle glomming separately */
+		if (bus->glom || bus->glomd) {
+			uint8 cnt;
+			DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+			          __FUNCTION__, bus->glomd, bus->glom));
+			cnt = dhdsdio_rxglom(bus, rxseq);
+			DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
+			rxseq += cnt - 1;
+			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+			continue;
+		}
+
+		/* Try doing single read if we can */
+		if (dhd_readahead && bus->nextlen) {
+			uint16 nextlen = bus->nextlen;
+			bus->nextlen = 0;
+
+			if (bus->bus == SPI_BUS) {
+				rdlen = len = nextlen;
+			}
+			else {
+				rdlen = len = nextlen << 4;
+
+				/* Pad read to blocksize for efficiency */
+				if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+					pad = bus->blocksize - (rdlen % bus->blocksize);
+					if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+						((rdlen + pad + firstread) < MAX_RX_DATASZ))
+						rdlen += pad;
+				} else if (rdlen % DHD_SDALIGN) {
+					rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+				}
+			}
+
+			/* On WinXP the bus->rxctl buffer is used for the initial control packet
+			 * receives; later the buffer pool is used for data as well as control
+			 * packets.  This is required because DHD receives the full frame on gSPI,
+			 * unlike SDIO, so only after reception can we tell whether the frame
+			 * carries data or not.
+			 */
+			/* Allocate a packet buffer */
+			dhd_os_sdlock_rxq(bus->dhd);
+			if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
+				if (bus->bus == SPI_BUS) {
+					bus->usebufpool = FALSE;
+					bus->rxctl = bus->rxbuf;
+					if (dhd_alignctl) {
+						bus->rxctl += firstread;
+						if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+							bus->rxctl += (DHD_SDALIGN - pad);
+						bus->rxctl -= firstread;
+					}
+					ASSERT(bus->rxctl >= bus->rxbuf);
+					rxbuf = bus->rxctl;
+					/* Read the entire frame */
+					sdret = dhd_bcmsdh_recv_buf(bus,
+					                            bcmsdh_cur_sbwad(sdh),
+					                            SDIO_FUNC_2,
+					                            F2SYNC, rxbuf, rdlen,
+					                            NULL, NULL, NULL);
+					bus->f2rxdata++;
+					ASSERT(sdret != BCME_PENDING);
+
+					/* Control frame failures need retransmission */
+					if (sdret < 0) {
+						DHD_ERROR(("%s: read %d control bytes failed: %d\n",
+						   __FUNCTION__, rdlen, sdret));
+						/* dhd.rx_ctlerrs is higher level */
+						bus->rxc_errors++;
+						dhd_os_sdunlock_rxq(bus->dhd);
+						dhdsdio_rxfail(bus, TRUE,
+						    (bus->bus == SPI_BUS) ? FALSE : TRUE);
+						continue;
+					}
+				} else {
+					/* Give up on data, request rtx of events */
+					DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
+					           "expected rxseq %d\n",
+					           __FUNCTION__, len, rdlen, rxseq));
+					/* Just go try again w/normal header read */
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			} else {
+				if (bus->bus == SPI_BUS)
+					bus->usebufpool = TRUE;
+
+				ASSERT(!PKTLINK(pkt));
+				PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+				rxbuf = (uint8 *)PKTDATA(osh, pkt);
+				/* Read the entire frame */
+				sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+				                            SDIO_FUNC_2,
+				                            F2SYNC, rxbuf, rdlen,
+				                            pkt, NULL, NULL);
+				bus->f2rxdata++;
+				ASSERT(sdret != BCME_PENDING);
+
+				if (sdret < 0) {
+					DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
+					   __FUNCTION__, rdlen, sdret));
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+					bus->dhd->rx_errors++;
+					dhd_os_sdunlock_rxq(bus->dhd);
+					/* Force retry w/normal header read.  Don't attempt NAK for
+					 * gSPI
+					 */
+					dhdsdio_rxfail(bus, TRUE,
+					      (bus->bus == SPI_BUS) ? FALSE : TRUE);
+					continue;
+				}
+			}
+			dhd_os_sdunlock_rxq(bus->dhd);
+
+			/* Now check the header */
+			bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
+
+			/* Extract hardware header fields */
+			len = ltoh16_ua(bus->rxhdr);
+			check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+			/* All zeros means readahead info was bad */
+			if (!(len|check)) {
+				DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
+				           __FUNCTION__));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate check bytes */
+			if ((uint16)~(len^check)) {
+				DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
+				           " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
+				           len, check));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rx_badhdr++;
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate frame length */
+			if (len < SDPCM_HDRLEN) {
+				DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
+				           __FUNCTION__, len));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Check for consistency with readahead info */
+			len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
+			if (len_consistent) {
+				/* Mismatch, force retry w/normal header (may be >4K) */
+				DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
+				           "expected rxseq %d\n",
+				           __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Extract software header fields */
+			chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+			bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+			if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+				DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
+				          " (%d), seq %d\n", __FUNCTION__, bus->nextlen,
+				          seq));
+				bus->nextlen = 0;
+			}
+
+			bus->dhd->rx_readahead_cnt++;
+			/* Handle Flow Control */
+			fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+			delta = 0;
+			if (~bus->flowcontrol & fcbits) {
+				bus->fc_xoff++;
+				delta = 1;
+			}
+			if (bus->flowcontrol & ~fcbits) {
+				bus->fc_xon++;
+				delta = 1;
+			}
+
+			if (delta) {
+				bus->fc_rcvd++;
+				bus->flowcontrol = fcbits;
+			}
+
+			/* Check and update sequence number */
+			if (rxseq != seq) {
+				DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+			/* Check window for sanity */
+			if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+				DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+				           __FUNCTION__, txmax, bus->tx_seq));
+				txmax = bus->tx_max;
+			}
+			bus->tx_max = txmax;
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Data", rxbuf, len);
+			} else if (DHD_HDRS_ON()) {
+				prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+			}
+#endif
+
+			if (chan == SDPCM_CONTROL_CHANNEL) {
+				if (bus->bus == SPI_BUS) {
+					dhdsdio_read_control(bus, rxbuf, len, doff);
+					if (bus->usebufpool) {
+						dhd_os_sdlock_rxq(bus->dhd);
+						PKTFREE(bus->dhd->osh, pkt, FALSE);
+						dhd_os_sdunlock_rxq(bus->dhd);
+					}
+					continue;
+				} else {
+					DHD_ERROR(("%s (nextlen): readahead on control"
+					           " packet %d?\n", __FUNCTION__, seq));
+					/* Force retry w/normal header read */
+					bus->nextlen = 0;
+					dhdsdio_rxfail(bus, FALSE, TRUE);
+					dhd_os_sdlock_rxq(bus->dhd);
+					PKTFREE2();
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			}
+
+			if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
+				DHD_ERROR(("Received %d bytes on %d channel. Running out of "
+				           "rx pktbuf's or not yet malloced.\n", len, chan));
+				continue;
+			}
+
+			/* Validate data offset */
+			if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+				DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
+				           __FUNCTION__, doff, len, SDPCM_HDRLEN));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				ASSERT(0);
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				continue;
+			}
+
+			/* All done with this one -- now deliver the packet */
+			goto deliver;
+		}
+		/* gSPI frames should not be handled in fractions */
+		if (bus->bus == SPI_BUS) {
+			break;
+		}
+
+		/* Read frame header (hardware and software) */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            bus->rxhdr, firstread, NULL, NULL, NULL);
+		bus->f2rxhdrs++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
+			bus->rx_hdrfail++;
+			dhdsdio_rxfail(bus, TRUE, TRUE);
+			continue;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+			prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Extract hardware header fields */
+		len = ltoh16_ua(bus->rxhdr);
+		check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+		/* All zeros means no more frames */
+		if (!(len|check)) {
+			*finished = TRUE;
+			break;
+		}
+
+		/* Validate check bytes */
+		if ((uint16)~(len^check)) {
+			DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, len, check));
+			bus->rx_badhdr++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Validate frame length */
+		if (len < SDPCM_HDRLEN) {
+			DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
+			continue;
+		}
+
+		/* Extract software header fields */
+		chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+		/* Validate data offset */
+		if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+			DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
+			           __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
+			bus->rx_badhdr++;
+			ASSERT(0);
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Save the readahead length if there is one */
+		bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+
+		/* Handle Flow Control */
+		fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+		delta = 0;
+		if (~bus->flowcontrol & fcbits) {
+			bus->fc_xoff++;
+			delta = 1;
+		}
+		if (bus->flowcontrol & ~fcbits) {
+			bus->fc_xon++;
+			delta = 1;
+		}
+
+		if (delta) {
+			bus->fc_rcvd++;
+			bus->flowcontrol = fcbits;
+		}
+
+		/* Check and update sequence number */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
+		if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_max;
+		}
+		bus->tx_max = txmax;
+
+		/* Call a separate function for control frames */
+		if (chan == SDPCM_CONTROL_CHANNEL) {
+			dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+			continue;
+		}
+
+		ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
+		       (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
+
+		/* Length to read */
+		rdlen = (len > firstread) ? (len - firstread) : 0;
+
+		/* May pad read to blocksize for efficiency */
+		if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+			pad = bus->blocksize - (rdlen % bus->blocksize);
+			if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+			    ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+				rdlen += pad;
+		} else if (rdlen % DHD_SDALIGN) {
+			rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+		}
+
+		/* Satisfy length-alignment requirements */
+		if (forcealign && (rdlen & (ALIGNMENT - 1)))
+			rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+		if ((rdlen + firstread) > MAX_RX_DATASZ) {
+			/* Too long -- skip this frame */
+			DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
+			bus->dhd->rx_errors++; bus->rx_toolong++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
+			/* Give up on data, request rtx of events */
+			DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+			           __FUNCTION__, rdlen, chan));
+			bus->dhd->rx_dropped++;
+			dhd_os_sdunlock_rxq(bus->dhd);
+			dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
+			continue;
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+
+		ASSERT(!PKTLINK(pkt));
+
+		/* Leave room for what we already read, and align remainder */
+		ASSERT(firstread < (PKTLEN(osh, pkt)));
+		PKTPULL(osh, pkt, firstread);
+		PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+
+		/* Read the remaining frame data */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
+		bus->f2rxdata++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
+			           ((chan == SDPCM_EVENT_CHANNEL) ? "event" :
+			            ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
+			continue;
+		}
+
+		/* Copy the already-read portion */
+		PKTPUSH(osh, pkt, firstread);
+		bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			prhex("Rx Data", PKTDATA(osh, pkt), len);
+		}
+#endif
+
+deliver:
+		/* Save superframe descriptor and allocate packet frame */
+		if (chan == SDPCM_GLOM_CHANNEL) {
+			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+				DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
+				          __FUNCTION__, len));
+#ifdef DHD_DEBUG
+				if (DHD_GLOM_ON()) {
+					prhex("Glom Data", PKTDATA(osh, pkt), len);
+				}
+#endif
+				PKTSETLEN(osh, pkt, len);
+				ASSERT(doff == SDPCM_HDRLEN);
+				PKTPULL(osh, pkt, SDPCM_HDRLEN);
+				bus->glomd = pkt;
+			} else {
+				DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+			}
+			continue;
+		}
+
+		/* Fill in the packet length, strip the SDPCM header, and deliver upward */
+		PKTSETLEN(osh, pkt, len);
+		PKTPULL(osh, pkt, doff);
+
+#ifdef SDTEST
+		/* Test channel packets are processed separately */
+		if (chan == SDPCM_TEST_CHANNEL) {
+			dhdsdio_testrcv(bus, pkt, seq);
+			continue;
+		}
+#endif /* SDTEST */
+
+		if (PKTLEN(osh, pkt) == 0) {
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			continue;
+		} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf,
+			&reorder_info_len) != 0) {
+			DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			continue;
+		}
+		if (reorder_info_len) {
+			/* Reordering info from the firmware */
+			dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len,
+				&pkt, &pkt_count);
+			if (pkt_count == 0)
+				continue;
+		}
+		else
+			pkt_count = 1;
+
+		/* Unlock during rx call */
+		dhd_os_sdunlock(bus->dhd);
+		dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan);
+		dhd_os_sdlock(bus->dhd);
+	}
+	rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+	/* Message if we hit the limit */
+	if (!rxleft && !sdtest)
+		DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+	else
+#endif /* DHD_DEBUG */
+	DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+	/* Back off rxseq if awaiting rtx, update rx_seq */
+	if (bus->rxskip)
+		rxseq--;
+	bus->rx_seq = rxseq;
+
+	if (bus->reqbussleep) {
+		dhdsdio_bussleep(bus, TRUE);
+		bus->reqbussleep = FALSE;
+	}
+	bus->readframes = FALSE;
+
+	return rxcount;
+}
+
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus)
+{
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus = 0;
+	uint32 hmb_data;
+	uint8 fcbits;
+	uint retries = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Read mailbox data and ack that we did so */
+	R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+	if (retries <= retry_limit)
+		W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+	bus->f1regdata += 2;
+
+	/* Dongle recomposed rx frames, accept them again */
+	if (hmb_data & HMB_DATA_NAKHANDLED) {
+		DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+		if (!bus->rxskip) {
+			DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+		}
+		bus->rxskip = FALSE;
+		intstatus |= FRAME_AVAIL_MASK(bus);
+	}
+
+	/*
+	 * DEVREADY does not occur with gSPI.
+	 */
+	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+		bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+			DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+			           bus->sdpcm_ver, SDPCM_PROT_VERSION));
+		else
+			DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+		/* make sure corecontrol is set up properly for SDIO_DEVICE_RXDATAINT_MODE_1 */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+		    (bus->rxint_mode  == SDIO_DEVICE_RXDATAINT_MODE_1)) {
+			uint32 val;
+
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+			val &= ~CC_XMTDATAAVAIL_MODE;
+			val |= CC_XMTDATAAVAIL_CTRL;
+			W_REG(bus->dhd->osh, &bus->regs->corecontrol, val);
+
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+		}
+
+#ifdef DHD_DEBUG
+		/* Retrieve console state address now that firmware should have updated it */
+		{
+			sdpcm_shared_t shared;
+			if (dhdsdio_readshared(bus, &shared) == 0)
+				bus->console_addr = shared.console_addr;
+		}
+#endif /* DHD_DEBUG */
+	}
+
+	/*
+	 * Flow control has been moved into the RX headers, so this out-of-band
+	 * method is no longer used.  Leave it here for possible backward
+	 * compatibility with older dongles.
+	 */
+	if (hmb_data & HMB_DATA_FC) {
+		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+		if (fcbits & ~bus->flowcontrol)
+			bus->fc_xoff++;
+		if (bus->flowcontrol & ~fcbits)
+			bus->fc_xon++;
+
+		bus->fc_rcvd++;
+		bus->flowcontrol = fcbits;
+	}
+
+#ifdef DHD_DEBUG
+	/* At least print a message if FW halted */
+	if (hmb_data & HMB_DATA_FWHALT) {
+		DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n"));
+		dhdsdio_checkdied(bus, NULL, 0);
+		bus->dhd->busstate = DHD_BUS_DOWN;
+	}
+#endif /* DHD_DEBUG */
+
+	/* Shouldn't be any others */
+	if (hmb_data & ~(HMB_DATA_DEVREADY |
+	                 HMB_DATA_FWHALT |
+	                 HMB_DATA_NAKHANDLED |
+	                 HMB_DATA_FC |
+	                 HMB_DATA_FWREADY |
+	                 HMB_DATA_FCDATA_MASK |
+	                 HMB_DATA_VERSION_MASK)) {
+		DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+	}
+
+	return intstatus;
+}
+
+static bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus, newstatus = 0;
+	uint retries = 0;
+	uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+	uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+	uint framecnt = 0;		  /* Temporary counter of tx/rx frames */
+	bool rxdone = TRUE;		  /* Flag for no more read data */
+	bool resched = FALSE;	  /* Flag indicating resched wanted */
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_os_sdlock(bus->dhd);
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+		bus->intstatus = 0;
+		dhd_os_sdunlock(bus->dhd);
+		return FALSE;
+	}
+
+	/* Start with leftover status bits */
+	intstatus = bus->intstatus;
+
+	if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		goto exit;
+	}
+
+	/* If waiting for HTAVAIL, check status */
+	if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) {
+		int err;
+		uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+		/* Check for inconsistent device control */
+		devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		} else {
+			ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+		}
+#endif /* DHD_DEBUG */
+
+		/* Read CSR, if clock on switch to AVAIL, else ignore */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		}
+
+		DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+		if (SBSDIO_HTAV(clkctl)) {
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			if (err) {
+				DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			bus->clkstate = CLK_AVAIL;
+		} else {
+			goto clkwait;
+		}
+	}
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+	if (bus->clkstate != CLK_AVAIL)
+		goto clkwait;
+
+	/* Pending interrupt indicates new device status */
+	if (bus->ipend) {
+		bus->ipend = FALSE;
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata++;
+		if (bcmsdh_regfail(bus->sdh))
+			newstatus = 0;
+		newstatus &= bus->hostintmask;
+		bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+		if (newstatus) {
+			bus->f1regdata++;
+			if (!((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) &&
+			      (newstatus == I_XMTDATA_AVAIL)))
+				W_SDREG(newstatus, &regs->intstatus, retries);
+		}
+	}
+
+	/* Merge new bits with previous */
+	intstatus |= newstatus;
+	bus->intstatus = 0;
+
+	/* Handle flow-control change: read new state in case our ack
+	 * crossed another change interrupt.  If change still set, assume
+	 * FC ON for safety, let next loop through do the debounce.
+	 */
+	if (intstatus & I_HMB_FC_CHANGE) {
+		intstatus &= ~I_HMB_FC_CHANGE;
+		W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata += 2;
+		bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+		intstatus |= (newstatus & bus->hostintmask);
+	}
+
+	/* Just being here means nothing more to do for chipactive */
+	if (intstatus & I_CHIPACTIVE) {
+		/* ASSERT(bus->clkstate == CLK_AVAIL); */
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	/* Handle host mailbox indication */
+	if (intstatus & I_HMB_HOST_INT) {
+		intstatus &= ~I_HMB_HOST_INT;
+		intstatus |= dhdsdio_hostmail(bus);
+	}
+
+	/* Generally don't ask for these, can get CRC errors... */
+	if (intstatus & I_WR_OOSYNC) {
+		DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+		intstatus &= ~I_WR_OOSYNC;
+	}
+
+	if (intstatus & I_RD_OOSYNC) {
+		DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+		intstatus &= ~I_RD_OOSYNC;
+	}
+
+	if (intstatus & I_SBINT) {
+		DHD_ERROR(("Dongle reports SBINT\n"));
+		intstatus &= ~I_SBINT;
+	}
+
+	/* Would be active due to wake-wlan in gSPI */
+	if (intstatus & I_CHIPACTIVE) {
+		DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	if (intstatus & I_HMB_FC_STATE) {
+		DHD_INFO(("Dongle reports HMB_FC_STATE\n"));
+		intstatus &= ~I_HMB_FC_STATE;
+	}
+
+	/* Ignore frame indications if rxskip is set */
+	if (bus->rxskip) {
+		intstatus &= ~FRAME_AVAIL_MASK(bus);
+	}
+
+	/* On frame indication, read available frames */
+	if (PKT_AVAILABLE(bus, intstatus)) {
+		framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+		if (rxdone || bus->rxskip)
+			intstatus  &= ~FRAME_AVAIL_MASK(bus);
+		rxlimit -= MIN(framecnt, rxlimit);
+	}
+
+	/* Keep still-pending events for next scheduling */
+	bus->intstatus = intstatus;
+
+clkwait:
+	/* Re-enable interrupts to detect new device events (mailbox, rx frame)
+	 * or clock availability.  (Allows tx loop to check ipend if desired.)
+	 * (Unless register access seems hosed, as we may not be able to ACK...)
+	 */
+	if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) {
+		DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+		          __FUNCTION__, rxdone, framecnt));
+		bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+		bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+#if !defined(NDISVER) || (NDISVER < 0x0630)
+		bcmsdh_intr_enable(sdh);
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
+	}
+
+#if defined(OOB_INTR_ONLY) && !defined(HW_OOB)
+	/* In case of SW-OOB (edge triggered), check the interrupt status in the
+	 * dongle again after enabling the irq on the host, and reschedule the DPC
+	 * if an interrupt is still pending in the dongle.  There is a chance to
+	 * miss an OOB interrupt while the irq is disabled on the host.
+	 * No need to do this with HW-OOB (level triggered).
+	 */
+	R_SDREG(newstatus, &regs->intstatus, retries);
+	if (bcmsdh_regfail(bus->sdh))
+		newstatus = 0;
+	if (newstatus & bus->hostintmask) {
+		bus->ipend = TRUE;
+		resched = TRUE;
+	}
+#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */
+
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE);
+#endif
+
+	if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL))
+		dhdsdio_sendpendctl(bus);
+
+	/* Send queued frames (limit 1 if rx may still be pending) */
+	else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+	    pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
+		framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax);
+		framecnt = dhdsdio_sendfromq(bus, framecnt);
+		txlimit -= framecnt;
+	}
+	/* Resched the DPC if ctrl cmd is pending on bus credit */
+	if (bus->ctrl_frame_stat)
+		resched = TRUE;
+
+	/* Resched if events or tx frames are pending, else await next interrupt */
+	/* On failed register access, all bets are off: no resched or interrupts */
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
+		if ((bus->sih && bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) &
+			SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+			/* Bus failed because of KSO */
+			DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__));
+			bus->kso = FALSE;
+		} else {
+			DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n",
+				__FUNCTION__));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+			bus->intstatus = 0;
+		}
+	} else if (bus->clkstate == CLK_PENDING) {
+		/* Awaiting I_CHIPACTIVE; don't resched */
+	} else if (bus->intstatus || bus->ipend ||
+	           (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+			PKT_AVAILABLE(bus, bus->intstatus)) {  /* Read multiple frames */
+		resched = TRUE;
+	}
+
+	bus->dpc_sched = resched;
+
+	/* If we're done for now, turn off clock request. */
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+	}
+
+exit:
+
+	if (!resched && dhd_dpcpoll) {
+		if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0)
+			resched = TRUE;
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+	return resched;
+}
+
+bool
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+	bool resched;
+
+	/* Call the DPC directly. */
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	resched = dhdsdio_dpc(bus);
+
+	return resched;
+}
+
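+/*
+ * SDIO interrupt handler: count the interrupt, mark it pending, mask further
+ * SDIO interrupts, then either run the DPC inline (SDIO_ISR_THREAD) or schedule it.
+ */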
+void
+dhdsdio_isr(void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)arg;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!bus) {
+		DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
+		return;
+	}
+	sdh = bus->sdh;
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Count the interrupt call */
+	bus->intrcount++;
+	bus->ipend = TRUE;
+
+	/* Shouldn't get this interrupt if we're sleeping? */
+	if (!SLPAUTO_ENAB(bus)) {
+		if (bus->sleeping) {
+			DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+			return;
+		} else if (!KSO_ENAB(bus)) {
+			DHD_ERROR(("ISR in devsleep 1\n"));
+		}
+	}
+
+	/* Disable additional interrupts (is this needed now)? */
+	if (bus->intr) {
+		DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+	} else {
+		DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+	}
+
+#if !defined(NDISVER) || (NDISVER < 0x0630)
+	bcmsdh_intr_disable(sdh);
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
+	bus->intdis = TRUE;
+
+#if defined(SDIO_ISR_THREAD)
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	DHD_OS_WAKE_LOCK(bus->dhd);
+	dhdsdio_dpc(bus);
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+
+#if !defined(NDISVER) || (NDISVER < 0x0630)
+	bus->dpc_sched = TRUE;
+	dhd_sched_dpc(bus->dhd);
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
+
+#endif /* defined(SDIO_ISR_THREAD) */
+
+}
+
+#ifdef SDTEST
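+/*
+ * Built-in packet generator used for SDPCM loopback/throughput testing over the
+ * test channel; driven from the watchdog whenever pktgen_count is non-zero.
+ */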
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+	/* Default to specified length, or full range */
+	if (dhd_pktgen_len) {
+		bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+		bus->pktgen_minlen = bus->pktgen_maxlen;
+	} else {
+		bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+		bus->pktgen_minlen = 0;
+	}
+	bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+	/* Default to per-watchdog burst with 10s print time */
+	bus->pktgen_freq = 1;
+	bus->pktgen_print = dhd_watchdog_ms ? (10000 / dhd_watchdog_ms) : 0;
+	bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+	/* Default to echo mode */
+	bus->pktgen_mode = DHD_PKTGEN_ECHO;
+	bus->pktgen_stop = 1;
+}
+
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+	void *pkt;
+	uint8 *data;
+	uint pktcount;
+	uint fillbyte;
+	osl_t *osh = bus->dhd->osh;
+	uint16 len;
+	ulong time_lapse;
+	uint sent_pkts;
+	uint rcvd_pkts;
+
+	/* Display current count if appropriate */
+	if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+		bus->pktgen_ptick = 0;
+		printf("%s: send attempts %d, rcvd %d, errors %d\n",
+		       __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+
+		/* Print throughput stats only for constant length packet runs */
+		if (bus->pktgen_minlen == bus->pktgen_maxlen) {
+			time_lapse = jiffies - bus->pktgen_prev_time;
+			bus->pktgen_prev_time = jiffies;
+			sent_pkts = bus->pktgen_sent - bus->pktgen_prev_sent;
+			bus->pktgen_prev_sent = bus->pktgen_sent;
+			rcvd_pkts = bus->pktgen_rcvd - bus->pktgen_prev_rcvd;
+			bus->pktgen_prev_rcvd = bus->pktgen_rcvd;
+
+			printf("%s: Tx Throughput %d kbps, Rx Throughput %d kbps\n",
+			  __FUNCTION__,
+			  (sent_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8,
+			  (rcvd_pkts * bus->pktgen_len  / jiffies_to_msecs(time_lapse)) * 8);
+		}
+	}
+
+	/* For recv mode, just make sure dongle has started sending */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
+			dhdsdio_sdtest_set(bus, bus->pktgen_total);
+		}
+		return;
+	}
+
+	/* Otherwise, generate or request the specified number of packets */
+	for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+		/* Stop if total has been reached */
+		if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+			bus->pktgen_count = 0;
+			break;
+		}
+
+		/* Allocate an appropriate-sized packet */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+			len = SDPCM_TEST_PKT_CNT_FLD_LEN;
+		} else {
+			len = bus->pktgen_len;
+		}
+		if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+		                   TRUE))) {
+			DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+			break;
+		}
+		PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+		data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+		/* Write test header cmd and extra based on mode */
+		switch (bus->pktgen_mode) {
+		case DHD_PKTGEN_ECHO:
+			*data++ = SDPCM_TEST_ECHOREQ;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_SEND:
+			*data++ = SDPCM_TEST_DISCARD;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_RXBURST:
+			*data++ = SDPCM_TEST_BURST;
+			*data++ = (uint8)bus->pktgen_count; /* Just for backward compatibility */
+			break;
+
+		default:
+			DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+			PKTFREE(osh, pkt, TRUE);
+			bus->pktgen_count = 0;
+			return;
+		}
+
+		/* Write test header length field */
+		*data++ = (bus->pktgen_len >> 0);
+		*data++ = (bus->pktgen_len >> 8);
+
+		/* Write the frame count in a 4-byte field adjacent to the SDPCM test
+		 * header for burst mode
+		 */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+			*data++ = (uint8)(bus->pktgen_count >> 0);
+			*data++ = (uint8)(bus->pktgen_count >> 8);
+			*data++ = (uint8)(bus->pktgen_count >> 16);
+			*data++ = (uint8)(bus->pktgen_count >> 24);
+		} else {
+
+			/* Then fill in the remainder -- N/A for burst */
+			for (fillbyte = 0; fillbyte < len; fillbyte++)
+				*data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+			prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Send it */
+		if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) {
+			bus->pktgen_fail++;
+			if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+				bus->pktgen_count = 0;
+		}
+		bus->pktgen_sent++;
+
+		/* Bump length if not fixed, wrap at max */
+		if (++bus->pktgen_len > bus->pktgen_maxlen)
+			bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+		/* Special case for burst mode: just send one request! */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+			break;
+	}
+}
+
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, uint count)
+{
+	void *pkt;
+	uint8 *data;
+	osl_t *osh = bus->dhd->osh;
+
+	/* Allocate the packet */
+	if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+		SDPCM_TEST_PKT_CNT_FLD_LEN + DHD_SDALIGN, TRUE))) {
+		DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+		return;
+	}
+	PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+		SDPCM_TEST_PKT_CNT_FLD_LEN), DHD_SDALIGN);
+	data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+	/* Fill in the test header */
+	*data++ = SDPCM_TEST_SEND;
+	*data++ = (count > 0)?TRUE:FALSE;
+	*data++ = (bus->pktgen_maxlen >> 0);
+	*data++ = (bus->pktgen_maxlen >> 8);
+	*data++ = (uint8)(count >> 0);
+	*data++ = (uint8)(count >> 8);
+	*data++ = (uint8)(count >> 16);
+	*data++ = (uint8)(count >> 24);
+
+	/* Send it */
+	if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK)
+		bus->pktgen_fail++;
+}
+
+
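+/*
+ * Handle a frame received on the SDPCM test channel: turn echo requests around,
+ * verify echo responses and discard frames against the expected fill pattern,
+ * and update the receive-mode counters.
+ */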
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+	osl_t *osh = bus->dhd->osh;
+	uint8 *data;
+	uint pktlen;
+
+	uint8 cmd;
+	uint8 extra;
+	uint16 len;
+	uint16 offset;
+
+	/* Check for min length */
+	if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+		DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n", pktlen));
+		PKTFREE(osh, pkt, FALSE);
+		return;
+	}
+
+	/* Extract header fields */
+	data = PKTDATA(osh, pkt);
+	cmd = *data++;
+	extra = *data++;
+	len = *data++; len += *data++ << 8;
+	DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len));
+	/* Check length for relevant commands */
+	if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+		if (pktlen != len + SDPCM_TEST_HDRLEN) {
+			DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+			           " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+			PKTFREE(osh, pkt, FALSE);
+			return;
+		}
+	}
+
+	/* Process as per command */
+	switch (cmd) {
+	case SDPCM_TEST_ECHOREQ:
+		/* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+		*(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+		if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) == BCME_OK) {
+			bus->pktgen_sent++;
+		} else {
+			bus->pktgen_fail++;
+			PKTFREE(osh, pkt, FALSE);
+		}
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_ECHORSP:
+		if (bus->ext_loop) {
+			PKTFREE(osh, pkt, FALSE);
+			bus->pktgen_rcvd++;
+			break;
+		}
+
+		for (offset = 0; offset < len; offset++, data++) {
+			if (*data != SDPCM_TEST_FILL(offset, extra)) {
+				DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+				           "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+				           offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+				break;
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_DISCARD:
+		{
+			int i = 0;
+			uint8 *prn = data;
+			uint8 testval = extra;
+			for (i = 0; i < len; i++) {
+				if (*prn != testval) {
+					DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n",
+						bus->pktgen_rcvd_rcvsession, i, testval, *prn));
+				}
+				prn++;
+				testval++;
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_BURST:
+	case SDPCM_TEST_SEND:
+	default:
+		DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+		          " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+		PKTFREE(osh, pkt, FALSE);
+		break;
+	}
+
+	/* For recv mode, stop at limit (and tell dongle to stop sending) */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcvd_rcvsession++;
+
+			if (bus->pktgen_total &&
+				(bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) {
+				bus->pktgen_count = 0;
+				DHD_ERROR(("Pktgen:rcv test complete!\n"));
+				bus->pktgen_rcv_state = PKTGEN_RCV_IDLE;
+				dhdsdio_sdtest_set(bus, FALSE);
+				bus->pktgen_rcvd_rcvsession = 0;
+			}
+		}
+	}
+}
+#endif /* SDTEST */
+
+int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
+{
+	int err = 0;
+
+#if defined(OOB_INTR_ONLY)
+	err = bcmsdh_oob_intr_register(dhdp->bus->sdh, dhdsdio_isr, dhdp->bus);
+#endif
+	return err;
+}
+
+void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+{
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_unregister(dhdp->bus->sdh);
+#endif
+}
+
+void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+{
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(dhdp->bus->sdh, enable);
+#endif
+}
+
+void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub)
+{
+	bcmsdh_dev_pm_stay_awake(dhdpub->bus->sdh);
+}
+
+void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub)
+{
+	bcmsdh_dev_relax(dhdpub->bus->sdh);
+}
+
+bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub)
+{
+	bool enabled = FALSE;
+
+	enabled = bcmsdh_dev_pm_enabled(dhdpub->bus->sdh);
+	return enabled;
+}
+
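+/*
+ * Periodic watchdog: poll the device for pending interrupts when interrupts are
+ * not in use (or have stalled), drive console polling and pktgen, and apply the
+ * bus idle timeout (clock off or bus sleep).
+ */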
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus;
+
+	DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+	bus = dhdp->bus;
+
+	if (bus->dhd->dongle_reset)
+		return FALSE;
+
+	if (bus->dhd->hang_was_sent) {
+		dhd_os_wd_timer(bus->dhd, 0);
+		return FALSE;
+	}
+
+	/* Ignore the timer if simulating bus down */
+	if (!SLPAUTO_ENAB(bus) && bus->sleeping)
+		return FALSE;
+
+	if (dhdp->busstate == DHD_BUS_DOWN)
+		return FALSE;
+
+	dhd_os_sdlock(bus->dhd);
+
+	/* Poll period: check device if appropriate. */
+	if (!SLPAUTO_ENAB(bus) && (bus->poll && (++bus->polltick >= bus->pollrate))) {
+		uint32 intstatus = 0;
+
+		/* Reset poll tick */
+		bus->polltick = 0;
+
+		/* Check device if no interrupts */
+		if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+			if (!bus->dpc_sched) {
+				uint8 devpend;
+				devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
+				                          SDIOD_CCCR_INTPEND, NULL);
+				intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
+			}
+
+			/* If there is something, make like the ISR and schedule the DPC */
+			if (intstatus) {
+				bus->pollcnt++;
+				bus->ipend = TRUE;
+				if (bus->intr) {
+					bcmsdh_intr_disable(bus->sdh);
+				}
+				bus->dpc_sched = TRUE;
+				dhd_sched_dpc(bus->dhd);
+			}
+		}
+
+		/* Update interrupt tracking */
+		bus->lastintrs = bus->intrcount;
+	}
+
+#ifdef DHD_DEBUG
+	/* Poll for console output periodically */
+	if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+		bus->console.count += dhd_watchdog_ms;
+		if (bus->console.count >= dhd_console_ms) {
+			bus->console.count -= dhd_console_ms;
+			/* Make sure backplane clock is on */
+			if (SLPAUTO_ENAB(bus))
+				dhdsdio_bussleep(bus, FALSE);
+			else
+				dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+			if (dhdsdio_readconsole(bus) < 0)
+				dhd_console_ms = 0;	/* On error, stop trying */
+		}
+	}
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+	/* Generate packets if configured */
+	if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
+		/* Make sure backplane clock is on */
+		if (SLPAUTO_ENAB(bus))
+			dhdsdio_bussleep(bus, FALSE);
+		else
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		bus->pktgen_tick = 0;
+		dhdsdio_pktgen(bus);
+	}
+#endif
+
+	/* On idle timeout clear activity flag and/or turn off clock */
+#ifdef DHD_USE_IDLECOUNT
+	if (bus->activity)
+		bus->activity = FALSE;
+	else {
+		bus->idlecount++;
+
+		if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
+			DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__));
+			if (SLPAUTO_ENAB(bus)) {
+				if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY)
+					dhd_os_wd_timer(bus->dhd, 0);
+			} else
+				dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+			bus->idlecount = 0;
+		}
+	}
+#else
+	if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
+		if (++bus->idlecount >= bus->idletime) {
+			bus->idlecount = 0;
+			if (bus->activity) {
+				bus->activity = FALSE;
+				if (SLPAUTO_ENAB(bus)) {
+					if (!bus->readframes)
+						dhdsdio_bussleep(bus, TRUE);
+					else
+						bus->reqbussleep = TRUE;
+				}
+				else
+					dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+			}
+		}
+	}
+#endif /* DHD_USE_IDLECOUNT */
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return bus->ipend;
+}
+
+#ifdef DHD_DEBUG
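+/*
+ * Feed a command string into the dongle's virtual console: write it into cbuf,
+ * set vcons_in to its length, then nudge the dongle with an empty event-channel
+ * packet so it notices the input.
+ */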
+extern int
+dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	uint32 addr, val;
+	int rv;
+	void *pkt;
+
+	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
+	if (bus->console_addr == 0)
+		return BCME_UNSUPPORTED;
+
+	/* Exclusive bus access */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Don't allow input if dongle is in reset */
+	if (bus->dhd->dongle_reset) {
+		dhd_os_sdunlock(bus->dhd);
+		return BCME_NOTREADY;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	BUS_WAKE(bus);
+	/* No clock-pending state allowed since txpkt is called later; the HT clock has to be on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Zero cbuf_index */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
+	val = htol32(0);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Write message into cbuf */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+		goto done;
+
+	/* Write length into vcons_in */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
+	val = htol32(msglen);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Bump dongle by sending an empty packet on the event channel.
+	 * sdpcm_sendup (RX) checks for virtual console input.
+	 */
+	if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL)
+		rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE);
+
+done:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return rv;
+}
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+static void
+dhd_dump_cis(uint fn, uint8 *cis)
+{
+	uint byte, tag, tdata;
+	DHD_INFO(("Function %d CIS:\n", fn));
+
+	for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
+		if ((byte % 16) == 0)
+			DHD_INFO(("    "));
+		DHD_INFO(("%02x ", cis[byte]));
+		if ((byte % 16) == 15)
+			DHD_INFO(("\n"));
+		if (!tdata--) {
+			tag = cis[byte];
+			if (tag == 0xff)
+				break;
+			else if (!tag)
+				tdata = 0;
+			else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
+				tdata = cis[byte + 1] + 1;
+			else
+				DHD_INFO(("]"));
+		}
+	}
+	if ((byte % 16) != 15)
+		DHD_INFO(("\n"));
+}
+#endif /* DHD_DEBUG */
+
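+/* Return TRUE if this chip id is one the driver supports. */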
+static bool
+dhdsdio_chipmatch(uint16 chipid)
+{
+	if (chipid == BCM4325_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4329_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4315_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4319_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4336_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4330_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43237_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43362_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4314_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43242_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43340_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43341_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43143_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43342_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4334_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43239_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4324_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4335_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4339_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43349_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4345_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4350_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4354_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4356_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4358_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43430_CHIP_ID)
+		return TRUE;
+	if (BCM4349_CHIP(chipid))
+		return TRUE;
+	return FALSE;
+}
+
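+/*
+ * SDIO probe entry point: validate the vendor/device ids, allocate the bus
+ * structure, attach to the dongle and the DHD/network layer, register the ISR,
+ * optionally download firmware, and register the network interface.
+ */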
+static void *
+dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
+	uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
+{
+	int ret;
+	dhd_bus_t *bus;
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) == 0) {
+		DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__));
+	}
+	else {
+		DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__));
+	}
+	mutex_lock(&_dhd_sdio_mutex_lock_);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif 
+
+	/* Init global variables at run-time, not as part of the declaration.
+	 * This is required to support init/de-init of the driver. Initialization
+	 * of globals as part of the declaration results in non-deterministic
+	 * behavior since the value of the globals may be different on the
+	 * first time that the driver is initialized vs subsequent initializations.
+	 */
+	dhd_txbound = DHD_TXBOUND;
+	dhd_rxbound = DHD_RXBOUND;
+	dhd_alignctl = TRUE;
+	sd1idle = TRUE;
+	dhd_readahead = TRUE;
+	retrydata = FALSE;
+#if !defined(PLATFORM_MPS)
+	dhd_doflow = FALSE;
+#else
+	dhd_doflow = TRUE;
+#endif /* !defined(PLATFORM_MPS) */
+	dhd_dongle_ramsize = 0;
+	dhd_txminmax = DHD_TXMINMAX;
+
+	forcealign = TRUE;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
+
+	/* We make assumptions about address window mappings */
+	ASSERT((uintptr)regsva == SI_ENUM_BASE);
+
+	/* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
+	 * means early parse could fail, so here we should get either an ID
+	 * we recognize OR (-1) indicating we must request power first.
+	 */
+	/* Check the Vendor ID */
+	switch (venid) {
+		case 0x0000:
+		case VENDOR_BROADCOM:
+			break;
+		default:
+			DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
+			           __FUNCTION__, venid));
+			goto forcereturn;
+	}
+
+	/* Check the Device ID and make sure it's one that we support */
+	switch (devid) {
+		case BCM4325_D11DUAL_ID:		/* 4325 802.11a/g id */
+		case BCM4325_D11G_ID:			/* 4325 802.11g 2.4Ghz band id */
+		case BCM4325_D11A_ID:			/* 4325 802.11a 5Ghz band id */
+			DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4329_D11N_ID:		/* 4329 802.11n dualband device */
+		case BCM4329_D11N2G_ID:		/* 4329 802.11n 2.4G device */
+		case BCM4329_D11N5G_ID:		/* 4329 802.11n 5G device */
+		case 0x4329:
+			DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4315_D11DUAL_ID:		/* 4315 802.11a/g id */
+		case BCM4315_D11G_ID:			/* 4315 802.11g id */
+		case BCM4315_D11A_ID:			/* 4315 802.11a id */
+			DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4319_D11N_ID:			/* 4319 802.11n id */
+		case BCM4319_D11N2G_ID:			/* 4319 802.11n2g id */
+		case BCM4319_D11N5G_ID:			/* 4319 802.11n5g id */
+			DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__));
+			break;
+		case 0:
+			DHD_INFO(("%s: allow device id 0, will check chip internals\n",
+			          __FUNCTION__));
+			break;
+
+		default:
+			DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+			           __FUNCTION__, venid, devid));
+			goto forcereturn;
+	}
+
+	if (osh == NULL) {
+		DHD_ERROR(("%s: osh is NULL!\n", __FUNCTION__));
+		goto forcereturn;
+	}
+
+	/* Allocate private bus interface state */
+	if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+		DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+		goto fail;
+	}
+	bzero(bus, sizeof(dhd_bus_t));
+	bus->sdh = sdh;
+	bus->cl_devid = (uint16)devid;
+	bus->bus = DHD_BUS;
+	bus->bus_num = bus_no;
+	bus->slot_num = slot;
+	bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+	bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+
+	/* attempt to attach to the dongle */
+	if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
+		DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Attach to the dhd/OS/network interface */
+	if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
+		DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Allocate buffers */
+	if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (!(dhdsdio_probe_init(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (bus->intr) {
+		/* Register interrupt callback, but mask it (not operational yet). */
+		DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
+		bcmsdh_intr_disable(sdh);
+		if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
+			DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
+			           __FUNCTION__, ret));
+			goto fail;
+		}
+		DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
+	} else {
+		DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n",
+		           __FUNCTION__));
+	}
+
+	DHD_INFO(("%s: completed!!\n", __FUNCTION__));
+
+	/* if firmware path present try to download and bring up bus */
+	bus->dhd->hang_report  = TRUE;
+	if (dhd_download_fw_on_driverload) {
+		if ((ret = dhd_bus_start(bus->dhd)) != 0) {
+			DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+	/* Ok, have the per-port tell the stack we're open for business */
+	if (dhd_register_if(bus->dhd, 0, TRUE) != 0) {
+		DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
+		goto fail;
+	}
+
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_unlock(&_dhd_sdio_mutex_lock_);
+	DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif 
+
+	init_waitqueue_head(&bus->bus_sleep);
+
+	return bus;
+
+fail:
+	dhdsdio_release(bus, osh);
+
+forcereturn:
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_unlock(&_dhd_sdio_mutex_lock_);
+	DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif 
+
+	return NULL;
+}
+
+#ifdef REGON_BP_HANG_FIX
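+/*
+ * Reset the backplane through the SR (save/restore) engine by downloading a small
+ * SR binary and kicking the engine; the hard-coded addresses and values below are
+ * chip-specific register pokes for the 43241 workaround.
+ */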
+static int dhd_sdio_backplane_reset(struct dhd_bus *bus)
+{
+	uint32 temp = 0;
+	DHD_ERROR(("Resetting  the backplane to avoid failure in firmware download..\n"));
+
+	temp = bcmsdh_reg_read(bus->sdh, 0x180021e0, 4);
+	DHD_INFO(("SDIO Clk Control Reg = %x\n", temp));
+
+	/* Force HT req from PMU */
+	bcmsdh_reg_write(bus->sdh, 0x18000644, 4, 0x6000005);
+
+	/* Increase the clock stretch duration. */
+	bcmsdh_reg_write(bus->sdh, 0x18000630, 4, 0xC8FFC8);
+
+	/* Setting ALP clock request in SDIOD clock control status register */
+	bcmsdh_reg_write(bus->sdh, 0x180021e0, 4, 0x41);
+
+	/* Allowing clock from SR engine to SR memory */
+	bcmsdh_reg_write(bus->sdh, 0x18004400, 4, 0xf92f1);
+	/* Disabling SR Engine before SR binary download. */
+	bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
+	bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x0);
+
+	/* Enabling clock from backplane to SR memory */
+	bcmsdh_reg_write(bus->sdh, 0x18004400, 4, 0xf9af1);
+
+	/* Initializing SR memory address register in SOCRAM */
+	bcmsdh_reg_write(bus->sdh, 0x18004408, 4, 0x0);
+
+	/* Downloading the SR binary */
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xc0002000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1051f080);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1050f080);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1050f080);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1050f080);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000004);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000604);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00001604);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00001404);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a08c80);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010001);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00011404);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x04a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xf8000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x04a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xf8000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00011604);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010604);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010004);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000004);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010001);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010004);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000008);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x04a00000);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000008);
+	bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xfc000000);
+	/* SR Binary Download complete */
+
+	/* Allowing clock from SR engine to SR memory */
+	bcmsdh_reg_write(bus->sdh, 0x18004400, 4, 0xf92f1);
+
+	/* Turn ON the SR engine to initiate the backplane reset */
+	bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
+	bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x0);
+	bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
+	bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x2);
+	bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
+	bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x3);
+	bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
+	bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x37);
+	bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
+	temp = bcmsdh_reg_read(bus->sdh, 0x18000654, 4);
+	DHD_INFO(("0x18000654 = %x\n", temp));
+	bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x800037);
+	OSL_DELAY(100000);
+	/* Rolling back the original values for clock stretch and PMU timers */
+	bcmsdh_reg_write(bus->sdh, 0x18000644, 4, 0x0);
+	bcmsdh_reg_write(bus->sdh, 0x18000630, 4, 0xC800C8);
+	/* Removing ALP clock request in SDIOD clock control status register */
+	bcmsdh_reg_write(bus->sdh, 0x180021e0, 4, 0x40);
+	OSL_DELAY(10000);
+	return TRUE;
+}
+
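+/*
+ * REGON_BP_HANG_FIX workaround: take the D11 core out of reset, wait for BT to
+ * become ready, check the backplane access registers for signs of corruption and
+ * reset the backplane via the SR engine until it looks healthy (or we give up),
+ * then flag AXI_OK so BT knows WL is done with the backplane reset.
+ */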
+static int dhdsdio_sdio_hang_war(struct dhd_bus *bus)
+{
+	uint32 temp = 0, temp2 = 0, counter = 0, BT_pwr_up = 0, BT_ready = 0;
+	/* Removing reset of D11 Core */
+	bcmsdh_reg_write(bus->sdh, 0x18101408, 4, 0x3);
+	bcmsdh_reg_write(bus->sdh, 0x18101800, 4, 0x0);
+	bcmsdh_reg_write(bus->sdh, 0x18101408, 4, 0x1);
+	/* Reading CLB XTAL BT cntrl register */
+	bcmsdh_reg_write(bus->sdh, 0x180013D8, 2, 0xD1);
+	bcmsdh_reg_write(bus->sdh, 0x180013DA, 2, 0x12);
+	bcmsdh_reg_write(bus->sdh, 0x180013D8, 2, 0x2D0);
+	/* Read if BT is powered up */
+	temp = bcmsdh_reg_read(bus->sdh, 0x180013DA, 2);
+	/* Read BT_ready from WLAN wireless register */
+	temp2 = bcmsdh_reg_read(bus->sdh, 0x1800002C, 4);
+	/*
+	 * Check if BT is powered up and ready.  The window between BT powering up and
+	 * BT becoming ready is the problematic one for WLAN: if we move ahead during
+	 * that window we may later encounter a corrupted backplane.  So wait for BT to
+	 * be ready and then proceed after checking the health of the backplane.  If
+	 * the backplane shows indications of failure, do a full backplane reset using
+	 * the SR engine and then proceed.
+	 */
+	BT_pwr_up = (temp & 0xF0) ? 1 : 0;
+	BT_ready = (temp2 & (1 << 17)) ? 1 : 0;
+	DHD_ERROR(("WARNING: Checking if BT is ready BT_pwr_up = %x"
+		"BT_ready = %x \n", BT_pwr_up, BT_ready));
+	while (BT_pwr_up && !BT_ready)
+	{
+		OSL_DELAY(1000);
+		bcmsdh_reg_write(bus->sdh, 0x180013D8, 2, 0x2D0);
+		temp = bcmsdh_reg_read(bus->sdh, 0x180013DA, 2);
+		temp2 = bcmsdh_reg_read(bus->sdh, 0x1800002C, 4);
+		BT_pwr_up = (temp & 0xF0) ? 1 : 0;
+		BT_ready = (temp2 & (1 << 17)) ? 1 : 0;
+		counter++;
+		if (counter == 5000)
+		{
+			DHD_ERROR(("WARNING: Going ahead after 5 secs with"
+					"risk of failure because BT ready is not yet set\n"));
+			break;
+		}
+	}
+	DHD_ERROR(("\nWARNING: WL Proceeding BT_pwr_up = %x BT_ready = %x"
+			"\n", BT_pwr_up, BT_ready));
+	counter = 0;
+	OSL_DELAY(10000);
+	/*
+	Get the information of who accessed the crucial backplane entities
+	by reading read and write access registers
+	*/
+	DHD_TRACE(("%d: Read Value @ 0x18104808 = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18104808, 4)));
+	DHD_TRACE(("%d: Read Value @ 0x1810480C = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810480C, 4)));
+	DHD_TRACE(("%d: Read Value @ 0x18106808 = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18106808, 4)));
+	DHD_TRACE(("%d: Read Value @ 0x1810680C = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810680C, 4)));
+	DHD_TRACE(("%d: Read Value @ 0x18107808 = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18107808, 4)));
+	DHD_TRACE(("%d: Read Value @ 0x1810780C = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810780C, 4)));
+	DHD_TRACE(("%d: Read Value @ 0x18108808 = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18108808, 4)));
+	DHD_TRACE(("%d: Read Value @ 0x1810880C = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810880C, 4)));
+	DHD_TRACE(("%d: Read Value @ 0x18109808 = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18109808, 4)));
+	DHD_TRACE(("%d: Read Value @ 0x1810980C = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810980C, 4)));
+	DHD_TRACE(("%d: Read Value @ 0x1810C808 = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c808, 4)));
+	DHD_TRACE(("%d: Read Value @ 0x1810C80C = %x."
+			"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c80C, 4)));
+	counter = 0;
+	while ((bcmsdh_reg_read(bus->sdh, 0x18104808, 4) == 5) ||
+		(bcmsdh_reg_read(bus->sdh, 0x1810480C, 4) == 5) ||
+		(bcmsdh_reg_read(bus->sdh, 0x18106808, 4) == 5) ||
+		(bcmsdh_reg_read(bus->sdh, 0x1810680C, 4) == 5) ||
+		(bcmsdh_reg_read(bus->sdh, 0x1810780C, 4) == 5) ||
+		(bcmsdh_reg_read(bus->sdh, 0x1810780C, 4) == 5) ||
+		(bcmsdh_reg_read(bus->sdh, 0x1810880C, 4) == 5) ||
+		(bcmsdh_reg_read(bus->sdh, 0x1810880C, 4) == 5) ||
+		(bcmsdh_reg_read(bus->sdh, 0x1810980C, 4) == 5) ||
+		(bcmsdh_reg_read(bus->sdh, 0x1810980C, 4) == 5) ||
+		(bcmsdh_reg_read(bus->sdh, 0x1810C80C, 4) == 5) ||
+		(bcmsdh_reg_read(bus->sdh, 0x1810C80C, 4) == 5))
+	{
+		if (++counter > 10)
+		{
+			DHD_ERROR(("Unable to recover the backkplane corruption"
+					"..Tried %d times.. Exiting\n", counter));
+			break;
+		}
+		OSL_DELAY(10000);
+		dhd_sdio_backplane_reset(bus);
+		/*
+		Get the information of who accessed the crucial backplane
+		entities by reading read and write access registers
+		*/
+		DHD_ERROR(("%d: Read Value @ 0x18104808 = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18104808, 4)));
+		DHD_ERROR(("%d: Read Value @ 0x1810480C = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810480C, 4)));
+		DHD_ERROR(("%d: Read Value @ 0x18106808 = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18106808, 4)));
+		DHD_ERROR(("%d: Read Value @ 0x1810680C = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810680C, 4)));
+		DHD_ERROR(("%d: Read Value @ 0x18107808 = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18107808, 4)));
+		DHD_ERROR(("%d: Read Value @ 0x1810780C = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810780C, 4)));
+		DHD_ERROR(("%d: Read Value @ 0x18108808 = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18108808, 4)));
+		DHD_ERROR(("%d: Read Value @ 0x1810880C = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810880C, 4)));
+		DHD_ERROR(("%d: Read Value @ 0x18109808 = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18109808, 4)));
+		DHD_ERROR(("%d: Read Value @ 0x1810980C = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810980C, 4)));
+		DHD_ERROR(("%d: Read Value @ 0x1810C808 = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c808, 4)));
+		DHD_ERROR(("%d: Read Value @ 0x1810C80C = %x."
+				"\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c80C, 4)));
+	}
+	/* Set the WL ready to indicate BT that we are done with backplane reset */
+	DHD_ERROR(("Setting up AXI_OK\n"));
+	bcmsdh_reg_write(bus->sdh, 0x18000658, 4, 0x3);
+	temp = bcmsdh_reg_read(bus->sdh, 0x1800065c, 4);
+	temp |= 0x80000000;
+	bcmsdh_reg_write(bus->sdh, 0x1800065c, 4, temp);
+	return TRUE;
+}
+#endif /* REGON_BP_HANG_FIX */
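+/*
+ * First stage of probe: map the backplane enumeration space, verify chip clock
+ * control access, attach the SI handle, identify the chip and its ARM/SOCRAM (or
+ * CR4 TCM) cores, select the SDPCM device core, and set up the tx queue, header
+ * alignment and interrupt/polling flags.
+ */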
+static bool
+dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
+                     uint16 devid)
+{
+	int err = 0;
+	uint8 clkctl = 0;
+
+	bus->alp_only = TRUE;
+	bus->sih = NULL;
+
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
+		DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
+	}
+
+#if defined(DHD_DEBUG)
+	DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n",
+		bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4)));
+#endif 
+
+
+	/* Force PLL off until si_attach() programs PLL control regs */
+
+
+
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
+	if (!err)
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+	if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
+		DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+		           err, DHD_INIT_CLKCTL1, clkctl));
+		goto fail;
+	}
+
+#ifdef DHD_DEBUG
+	if (DHD_INFO_ON()) {
+		uint fn, numfn;
+		uint8 *cis[SDIOD_MAX_IOFUNCS];
+		int err = 0;
+
+		numfn = bcmsdh_query_iofnum(sdh);
+		ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+		/* Make sure ALP is available before trying to read CIS */
+		SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+		                                    SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+		          !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
+
+		/* Now request ALP be put on the bus */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 DHD_INIT_CLKCTL2, &err);
+		OSL_DELAY(65);
+
+		for (fn = 0; fn <= numfn; fn++) {
+			if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn));
+				break;
+			}
+			bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+			if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err));
+				MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+				break;
+			}
+			dhd_dump_cis(fn, cis[fn]);
+		}
+
+		while (fn-- > 0) {
+			ASSERT(cis[fn]);
+			MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+		}
+
+		if (err) {
+			DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
+			goto fail;
+		}
+	}
+#endif /* DHD_DEBUG */
+
+	/* si_attach() will provide an SI handle and scan the backplane */
+	if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
+	                           &bus->vars, &bus->varsz))) {
+		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+		goto fail;
+	}
+
+#ifdef DHD_DEBUG
+	DHD_ERROR(("F1 signature OK, socitype:0x%x chip:0x%4x rev:0x%x pkg:0x%x\n",
+		bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg));
+#endif /* DHD_DEBUG */
+
+#ifdef REGON_BP_HANG_FIX
+	/* WAR - for 43241 B0-B1-B2. B3 onwards do not need this */
+	if (((uint16)bus->sih->chip == BCM4324_CHIP_ID) && (bus->sih->chiprev < 3))
+			dhdsdio_sdio_hang_war(bus);
+#endif /* REGON_BP_HANG_FIX */
+
+	bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
+
+	if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
+		DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
+		           __FUNCTION__, bus->sih->chip));
+		goto fail;
+	}
+
+	if (bus->sih->buscorerev >= 12)
+		dhdsdio_clk_kso_init(bus);
+	else
+		bus->kso = TRUE;
+
+	if (CST4330_CHIPMODE_SDIOD(bus->sih->chipst)) {
+	}
+
+	si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
+
+	/* Get info on the ARM and SOCRAM cores... */
+	if (!DHD_NOPMU(bus)) {
+		if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+		    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+		    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+			bus->armrev = si_corerev(bus->sih);
+		} else {
+			DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+			goto fail;
+		}
+
+		if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+				DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+				goto fail;
+			}
+		} else {
+			/* cr4 has a different way to find the RAM size from TCM's */
+			if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
+				DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
+				goto fail;
+			}
+			/* also populate base address */
+			switch ((uint16)bus->sih->chip) {
+			case BCM4335_CHIP_ID:
+			case BCM4339_CHIP_ID:
+			case BCM43349_CHIP_ID:
+				bus->dongle_ram_base = CR4_4335_RAM_BASE;
+				break;
+			case BCM4350_CHIP_ID:
+			case BCM4354_CHIP_ID:
+			case BCM4356_CHIP_ID:
+			case BCM4358_CHIP_ID:
+				bus->dongle_ram_base = CR4_4350_RAM_BASE;
+				break;
+			case BCM4360_CHIP_ID:
+				bus->dongle_ram_base = CR4_4360_RAM_BASE;
+				break;
+			case BCM4345_CHIP_ID:
+				bus->dongle_ram_base = CR4_4345_RAM_BASE;
+				break;
+			case BCM4349_CHIP_GRPID:
+				bus->dongle_ram_base = CR4_4349_RAM_BASE;
+				break;
+			default:
+				bus->dongle_ram_base = 0;
+				DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
+				           __FUNCTION__, bus->dongle_ram_base));
+			}
+		}
+		bus->ramsize = bus->orig_ramsize;
+		if (dhd_dongle_ramsize)
+			dhd_dongle_setramsize(bus, dhd_dongle_ramsize);
+
+		DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
+		           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
+
+		bus->srmemsize = si_socram_srmem_size(bus->sih);
+	}
+
+	/* ...but normally deal with the SDPCMDEV core */
+	if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
+	    !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
+		DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
+		goto fail;
+	}
+	bus->sdpcmrev = si_corerev(bus->sih);
+
+	/* Set core control so an SDIO reset does a backplane reset */
+	OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
+	bus->rxint_mode = SDIO_DEVICE_HMB_RXINT;
+
+	if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+		(bus->rxint_mode  == SDIO_DEVICE_RXDATAINT_MODE_1))
+	{
+		uint32 val;
+
+		val = R_REG(osh, &bus->regs->corecontrol);
+		val &= ~CC_XMTDATAAVAIL_MODE;
+		val |= CC_XMTDATAAVAIL_CTRL;
+		W_REG(osh, &bus->regs->corecontrol, val);
+	}
+
+
+	pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
+
+	/* Locate an appropriately-aligned portion of hdrbuf */
+	bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
+
+	/* Set the poll and/or interrupt flags */
+	bus->intr = (bool)dhd_intr;
+	if ((bus->poll = (bool)dhd_poll))
+		bus->pollrate = 1;
+
+	/* Setting default Glom size */
+	bus->txglomsize = SDPCM_DEFGLOM_SIZE;
+
+	return TRUE;
+
+fail:
+	if (bus->sih != NULL) {
+		si_detach(bus->sih);
+		bus->sih = NULL;
+	}
+	return FALSE;
+}
+
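+/*
+ * Second stage of probe: allocate the control-frame receive buffer (sized from
+ * maxctl) and the aligned data buffer used to receive glommed packets.
+ */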
+static bool
+dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->maxctl) {
+		bus->rxblen = ROUNDUP((bus->dhd->maxctl+SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+		if (!(bus->rxbuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_RXBUF, bus->rxblen))) {
+			DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+			           __FUNCTION__, bus->rxblen));
+			goto fail;
+		}
+	}
+	/* Allocate buffer to receive glomed packet */
+	if (!(bus->databuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+		DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+			__FUNCTION__, MAX_DATA_BUF));
+		/* release rxbuf which was already allocated above */
+		if (!bus->rxblen)
+			DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen);
+		goto fail;
+	}
+
+	/* Align the buffer */
+	if ((uintptr)bus->databuf % DHD_SDALIGN)
+		bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
+	else
+		bus->dataptr = bus->databuf;
+
+	return TRUE;
+
+fail:
+	return FALSE;
+}
+
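+/*
+ * Third stage of probe: put the dongle into a known state (F2 disabled, clock
+ * dropped), then query sd_divisor, sd_mode, the F2 block size and rx-chaining
+ * support from the bcmsdh layer and set the derived defaults.
+ */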
+static bool
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	int32 fnum;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bus->_srenab = FALSE;
+
+#ifdef SDTEST
+	dhdsdio_pktgen_init(bus);
+#endif /* SDTEST */
+
+	/* Disable F2 to clear any intermediate frame state on the dongle */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	bus->sleeping = FALSE;
+	bus->rxflow = FALSE;
+	bus->prev_rxlim_hit = 0;
+
+	/* Done with backplane-dependent accesses, can drop clock... */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+	/* ...and initialize clock/power states */
+	bus->clkstate = CLK_SDONLY;
+	bus->idletime = (int32)dhd_idletime;
+	bus->idleclock = DHD_IDLE_ACTIVE;
+
+	/* Query the SD clock speed */
+	if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
+	                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
+		bus->sd_divisor = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_divisor", bus->sd_divisor));
+	}
+
+	/* Query the SD bus mode */
+	if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
+	                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
+		bus->sd_mode = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_mode", bus->sd_mode));
+	}
+
+	/* Query the F2 block size, set roundup accordingly */
+	fnum = 2;
+	if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
+	                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+		bus->blocksize = 0;
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_blocksize", bus->blocksize));
+
+		dhdsdio_tune_fifoparam(bus);
+	}
+	bus->roundup = MIN(max_roundup, bus->blocksize);
+
+#ifdef DHDENABLE_TAILPAD
+	if (bus->pad_pkt)
+		PKTFREE(osh, bus->pad_pkt, FALSE);
+	bus->pad_pkt = PKTGET(osh, SDIO_MAX_BLOCK_SIZE, FALSE);
+	if (bus->pad_pkt == NULL)
+		DHD_ERROR(("failed to allocate padding packet\n"));
+	else {
+		int alignment_offset = 0;
+		uintptr pktprt = (uintptr)PKTDATA(osh, bus->pad_pkt);
+		if (!(pktprt&1) && (pktprt = (pktprt % DHD_SDALIGN)))
+			PKTPUSH(osh, bus->pad_pkt, alignment_offset);
+		PKTSETNEXT(osh, bus->pad_pkt, NULL);
+	}
+#endif /* DHDENABLE_TAILPAD */
+
+	/* Query if bus module supports packet chaining, default to use if supported */
+	if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+	                    &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
+		bus->sd_rxchain = FALSE;
+	} else {
+		DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
+		          __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
+	}
+	bus->use_rxchain = (bool)bus->sd_rxchain;
+	bus->txinrx_thres = CUSTOM_TXINRX_THRES;
+	/* TX first in dhdsdio_readframes() */
+	bus->dotxinrx = TRUE;
+
+	return TRUE;
+}
+
+int
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+                          char *pfw_path, char *pnv_path)
+{
+	int ret;
+
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+
+	ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
+
+
+	return ret;
+}
+
+static int
+dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+	int ret;
+
+
+	DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
+		__FUNCTION__, bus->fw_path, bus->nv_path));
+	DHD_OS_WAKE_LOCK(bus->dhd);
+
+	/* Download the firmware */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	ret = _dhdsdio_download_firmware(bus);
+
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+	return ret;
+}
+
+/* Detach and free everything */
+static void
+dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+{
+	bool dongle_isolation = FALSE;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			dongle_isolation = bus->dhd->dongle_isolation;
+			dhd_detach(bus->dhd);
+		}
+
+		/* De-register interrupt handler */
+		bcmsdh_intr_disable(bus->sdh);
+		bcmsdh_intr_dereg(bus->sdh);
+
+		if (bus->dhd) {
+			dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE);
+			dhd_free(bus->dhd);
+			bus->dhd = NULL;
+		}
+
+		dhdsdio_release_malloc(bus, osh);
+
+#ifdef DHD_DEBUG
+		if (bus->console.buf != NULL)
+			MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+#ifdef DHDENABLE_TAILPAD
+		if (bus->pad_pkt)
+			PKTFREE(osh, bus->pad_pkt, FALSE);
+#endif /* DHDENABLE_TAILPAD */
+
+		MFREE(osh, bus, sizeof(dhd_bus_t));
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd && bus->dhd->dongle_reset)
+		return;
+
+	if (bus->rxbuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(osh, bus->rxbuf, bus->rxblen);
+#endif
+		bus->rxctl = bus->rxbuf = NULL;
+		bus->rxlen = 0;
+	}
+
+	if (bus->databuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+		bus->databuf = NULL;
+	}
+
+	if (bus->vars && bus->varsz) {
+		MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+}
+
+
+static void
+dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+		bus->dhd, bus->dhd->dongle_reset));
+
+	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
+		return;
+
+	if (bus->sih) {
+#if !defined(BCMLXSDMMC)
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		}
+		if (KSO_ENAB(bus) && (dongle_isolation == FALSE))
+			si_watchdog(bus->sih, 4);
+#endif /* !defined(BCMLXSDMMC) */
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+		}
+		si_detach(bus->sih);
+		bus->sih = NULL;
+		if (bus->vars && bus->varsz)
+			MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_disconnect(void *ptr)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)ptr;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) == 0) {
+		DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__));
+	}
+	else {
+		DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__));
+	}
+	mutex_lock(&_dhd_sdio_mutex_lock_);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif /* MULTIPLE_SUPPLICANT */
+
+
+	if (bus) {
+		ASSERT(bus->dhd);
+		dhdsdio_release(bus, bus->dhd->osh);
+	}
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_unlock(&_dhd_sdio_mutex_lock_);
+	DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif /* MULTIPLE_SUPPLICANT */
+
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static int
+dhdsdio_suspend(void *context)
+{
+	int ret = 0;
+
+	dhd_bus_t *bus = (dhd_bus_t*)context;
+	int wait_time = 0;
+	if (bus->idletime > 0) {
+		wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms);
+	}
+
+	ret = dhd_os_check_wakelock(bus->dhd);
+	if ((!ret) && (bus->dhd->up)) {
+		if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) {
+			if (!bus->sleeping) {
+				return 1;
+			}
+		}
+	}
+	return ret;
+}
+
+static int
+dhdsdio_resume(void *context)
+{
+#if defined(OOB_INTR_ONLY)
+	dhd_bus_t *bus = (dhd_bus_t*)context;
+
+	if (dhd_os_check_if_up(bus->dhd))
+		bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif 
+	return 0;
+}
+
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
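+/*
+ * Illustrative sketch only (hypothetical function names): a module entry
+ * point would typically pair these calls, e.g.
+ *
+ *	static int __init hypothetical_dhd_module_init(void)
+ *	{
+ *		return dhd_bus_register();
+ *	}
+ *
+ *	static void __exit hypothetical_dhd_module_exit(void)
+ *	{
+ *		dhd_bus_unregister();
+ *	}
+ */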
+
+static bcmsdh_driver_t dhd_sdio = {
+	dhdsdio_probe,
+	dhdsdio_disconnect,
+	dhdsdio_suspend,
+	dhdsdio_resume
+};
+
+int
+dhd_bus_register(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	return bcmsdh_register(&dhd_sdio);
+}
+
+void
+dhd_bus_unregister(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_unregister();
+}
+
+#if defined(BCMLXSDMMC)
+/* Register a dummy SDIO client driver in order to be notified of a new SDIO device */
+int dhd_bus_reg_sdio_notify(void* semaphore)
+{
+	return bcmsdh_reg_sdio_notify(semaphore);
+}
+
+void dhd_bus_unreg_sdio_notify(void)
+{
+	bcmsdh_unreg_sdio_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+#ifdef BCMEMBEDIMAGE
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	unsigned char *ularray = NULL;
+
+	DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+	/* Download image */
+	while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)dlarray));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), MEMBLOCK);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+	if (offset < sizeof(dlarray)) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), sizeof(dlarray) - offset);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+			goto err;
+		}
+	}
+
+#ifdef DHD_DEBUG
+	/* Upload and compare the downloaded code */
+	{
+		ularray = MALLOC(bus->dhd->osh, bus->ramsize);
+		if (ularray == NULL) {
+			bcmerror = BCME_NOMEM;
+			goto err;
+		}
+		/* Upload image to verify downloaded contents. */
+		offset = 0;
+		memset(ularray, 0xaa, bus->ramsize);
+		while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto err;
+			}
+
+			offset += MEMBLOCK;
+		}
+
+		if (offset < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+				ularray + offset, sizeof(dlarray) - offset);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+				goto err;
+			}
+		}
+
+		if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+			goto err;
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+
+	}
+#endif /* DHD_DEBUG */
+
+err:
+	if (ularray)
+		MFREE(bus->dhd->osh, ularray, bus->ramsize);
+	return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	int len;
+	void *image = NULL;
+	uint8 *memblock = NULL, *memptr;
+
+	DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+	image = dhd_os_open_image(pfw_path);
+	if (image == NULL)
+		goto err;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	/* Download image */
+	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+		if (len < 0) {
+			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)memptr));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+/*
+	EXAMPLE: nvram_array
+	nvram_array format:
+	name=value
+	Terminate each assignment with a newline character ('\n'), and end the
+	array with an empty string followed by a newline.
+
+	For example:
+	unsigned char nvram_array[] = "name1=value1\n" "name2=value2\n" "\n";
+	Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx.
+
+	Search "EXAMPLE: nvram_array" to see how the array is activated.
+*/
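+/*
+ * Illustrative sketch only: with an nvram_array like the one above, the
+ * variables could be handed to the bus before firmware download, e.g.
+ *
+ *	dhd_bus_set_nvram_params(bus, (char *)nvram_array);
+ *
+ * dhdsdio_download_nvram() then uses bus->nvram_params whenever no nvram
+ * file path has been configured.
+ */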
+
+void
+dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params)
+{
+	bus->nvram_params = nvram_params;
+}
+
+static int
+dhdsdio_download_nvram(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	uint len;
+	void * image = NULL;
+	char * memblock = NULL;
+	char *bufp;
+	char *pnv_path;
+	bool nvram_file_exists;
+
+	pnv_path = bus->nv_path;
+
+	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+	if (!nvram_file_exists && (bus->nvram_params == NULL))
+		return (0);
+
+	if (nvram_file_exists) {
+		image = dhd_os_open_image(pnv_path);
+		if (image == NULL)
+			goto err;
+	}
+
+	memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+		           __FUNCTION__, MAX_NVRAMBUF_SIZE));
+		goto err;
+	}
+
+	/* Download variables */
+	if (nvram_file_exists) {
+		len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+	}
+	else {
+		len = strlen(bus->nvram_params);
+		ASSERT(len <= MAX_NVRAMBUF_SIZE);
+		memcpy(memblock, bus->nvram_params, len);
+	}
+	if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+		bufp = (char *)memblock;
+		bufp[len] = 0;
+		len = process_nvram_vars(bufp, len);
+		if (len % 4) {
+			len += 4 - (len % 4);
+		}
+		bufp += len;
+		*bufp++ = 0;
+		if (len)
+			bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error downloading vars: %d\n",
+			           __FUNCTION__, bcmerror));
+		}
+	}
+	else {
+		DHD_ERROR(("%s: error reading nvram file: %d\n",
+		           __FUNCTION__, len));
+		bcmerror = BCME_SDIO_ERROR;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+static int
+_dhdsdio_download_firmware(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+
+	bool embed = FALSE;	/* download embedded firmware */
+	bool dlok = FALSE;	/* download firmware succeeded */
+
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+		embed = TRUE;
+#else
+		return 0;
+#endif
+	}
+
+	/* Keep arm in reset */
+	if (dhdsdio_download_state(bus, TRUE)) {
+		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		if (dhdsdio_download_code_file(bus, bus->fw_path)) {
+			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+			embed = TRUE;
+#else
+			goto err;
+#endif
+		}
+		else {
+			embed = FALSE;
+			dlok = TRUE;
+		}
+	}
+
+#ifdef BCMEMBEDIMAGE
+	if (embed) {
+		if (dhdsdio_download_code_array(bus)) {
+			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+			goto err;
+		}
+		else {
+			dlok = TRUE;
+		}
+	}
+#else
+	BCM_REFERENCE(embed);
+#endif
+	if (!dlok) {
+		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* EXAMPLE: nvram_array */
+	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
+	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+	/* External nvram takes precedence if specified */
+	if (dhdsdio_download_nvram(bus)) {
+		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* Take arm out of reset */
+	if (dhdsdio_download_state(bus, FALSE)) {
+		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	bcmerror = 0;
+
+err:
+	return bcmerror;
+}
+
+static int
+dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	int status;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle);
+
+	return status;
+}
+
+static int
+dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry)
+{
+	int ret;
+	int i = 0;
+	int retries = 0;
+	bcmsdh_info_t *sdh;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	sdh = bus->sdh;
+	do {
+		ret = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes,
+			pkt, complete, handle);
+
+		bus->f2txdata++;
+		ASSERT(ret != BCME_PENDING);
+
+		if (ret == BCME_NODEVICE) {
+			DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+		} else if (ret < 0) {
+			/* On failure, abort the command and terminate the frame */
+			DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n",
+				__FUNCTION__, ret));
+			bus->tx_sderrs++;
+			bus->f1regdata++;
+			bus->dhd->tx_errors++;
+			bcmsdh_abort(sdh, SDIO_FUNC_2);
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+				SFC_WF_TERM, NULL);
+			for (i = 0; i < READ_FRM_CNT_RETRIES; i++) {
+				uint8 hi, lo;
+				hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI,
+					NULL);
+				lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO,
+					NULL);
+				bus->f1regdata += 2;
+				if ((hi == 0) && (lo == 0))
+					break;
+			}
+		}
+	} while ((ret < 0) && retrydata && ++retries < max_retry);
+
+	return ret;
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chip;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+	ASSERT(bus);
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chiprev;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+	return bus->dhd;
+}
+
+void *
+dhd_bus_sih(struct dhd_bus *bus)
+{
+	return (void *)bus->sih;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+	return &bus->txq;
+}
+
+uint
+dhd_bus_hdrlen(struct dhd_bus *bus)
+{
+	return (bus->txglom_enable) ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+}
+
+void
+dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val)
+{
+	bus->dotxinrx = val;
+}
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	int bcmerror = 0;
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+
+	if (flag == TRUE) {
+		if (!bus->dhd->dongle_reset) {
+			dhd_os_sdlock(dhdp);
+			dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+			/* Force flow control as protection when stop come before ifconfig_down */
+			dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+			/* Expect app to have torn down any connection before calling */
+			/* Stop the bus, disable F2 */
+			dhd_bus_stop(bus, FALSE);
+
+#if defined(OOB_INTR_ONLY)
+			/* Clean up any pending IRQ */
+			dhd_enable_oob_intr(bus, FALSE);
+			bcmsdh_oob_intr_set(bus->sdh, FALSE);
+			bcmsdh_oob_intr_unregister(bus->sdh);
+#endif 
+
+			/* Clean tx/rx buffer pointers, detach from the dongle */
+			dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
+
+			bus->dhd->dongle_reset = TRUE;
+			bus->dhd->up = FALSE;
+			dhd_txglom_enable(dhdp, FALSE);
+			dhd_os_sdunlock(dhdp);
+
+			DHD_TRACE(("%s:  WLAN OFF DONE\n", __FUNCTION__));
+			/* App can now remove power from device */
+		} else
+			bcmerror = BCME_SDIO_ERROR;
+	} else {
+		/* App must have restored power to device before calling */
+
+		DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
+
+		if (bus->dhd->dongle_reset) {
+			/* Turn on WLAN */
+			dhd_os_sdlock(dhdp);
+			/* Reset SD client */
+			bcmsdh_reset(bus->sdh);
+
+			/* Attempt to re-attach & download */
+			if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+				(uint32 *)SI_ENUM_BASE,
+				bus->cl_devid)) {
+				/* Attempt to download binary to the dongle */
+				if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
+				    dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) {
+
+					/* Re-init bus, enable F2 transfer */
+					bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+					if (bcmerror == BCME_OK) {
+#if defined(OOB_INTR_ONLY)
+						dhd_enable_oob_intr(bus, TRUE);
+						bcmsdh_oob_intr_register(bus->sdh,
+							dhdsdio_isr, bus);
+						bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif 
+
+						bus->dhd->dongle_reset = FALSE;
+						bus->dhd->up = TRUE;
+
+#if !defined(IGNORE_ETH0_DOWN)
+						/* Restore flow control  */
+						dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+#endif 
+						dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+						DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+					} else {
+						dhd_bus_stop(bus, FALSE);
+						dhdsdio_release_dongle(bus, bus->dhd->osh,
+							TRUE, FALSE);
+					}
+				} else
+					bcmerror = BCME_SDIO_ERROR;
+			} else
+				bcmerror = BCME_SDIO_ERROR;
+
+			dhd_os_sdunlock(dhdp);
+		} else {
+			bcmerror = BCME_SDIO_ERROR;
+			DHD_INFO(("%s called when dongle is not in reset\n",
+				__FUNCTION__));
+			DHD_INFO(("Will call dhd_bus_start instead\n"));
+			dhd_bus_resume(dhdp, 1);
+			if ((bcmerror = dhd_bus_start(dhdp)) != 0)
+				DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
+					__FUNCTION__, bcmerror));
+		}
+	}
+	return bcmerror;
+}
+
+int dhd_bus_suspend(dhd_pub_t *dhdpub)
+{
+	return bcmsdh_stop(dhdpub->bus->sdh);
+}
+
+int dhd_bus_resume(dhd_pub_t *dhdpub, int stage)
+{
+	return bcmsdh_start(dhdpub->bus->sdh, stage);
+}
+
+/* Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	return  bus->sih->chip;
+}
+
+/* Get Chip Rev ID version */
+uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	return bus->sih->chiprev;
+}
+
+/* Get Chip Pkg ID version */
+uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	return bus->sih->chippkg;
+}
+
+int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num)
+{
+	*bus_type = bus->bus;
+	*bus_num = bus->bus_num;
+	*slot_num = bus->slot_num;
+	return 0;
+}
+
+int
+dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
+{
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+	return dhdsdio_membytes(bus, set, address, data, size);
+}
+
+#if defined(NDISVER) && (NDISVER >= 0x0630)
+void
+dhd_bus_reject_ioreqs(dhd_pub_t *dhdp, bool reject)
+{
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_reject_ioreqs(dhdp->bus->sdh, reject);
+}
+
+void
+dhd_bus_waitfor_iodrain(dhd_pub_t *dhdp)
+{
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_waitfor_iodrain(dhdp->bus->sdh);
+}
+#endif /* (NDISVER) && (NDISVER >= 0x0630) */
+
+void
+dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path)
+{
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+}
+
+int
+dhd_enableOOB(dhd_pub_t *dhd, bool sleep)
+{
+	dhd_bus_t *bus = dhd->bus;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	if (sleep) {
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit) {
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+			return BCME_BUSY;
+		}
+		/* Turn off our contribution to the HT clock request */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	} else {
+		/* Make sure the controller has the bus up */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+		/* Make sure we have SD bus access */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	}
+	return BCME_OK;
+}
+
+void
+dhd_bus_pktq_flush(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	bool wlfc_enabled = FALSE;
+
+#ifdef PROP_TXSTATUS
+	wlfc_enabled = (dhd_wlfc_cleanup_txq(dhdp, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+	if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+		 * when there is a newly coming packet from network stack.
+		 */
+		dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+		/* Clear the data packet queues */
+		pktq_flush(dhdp->osh, &bus->txq, TRUE, NULL, 0);
+	}
+}
+
+#ifdef BCMSDIO
+int
+dhd_sr_config(dhd_pub_t *dhd, bool on)
+{
+	dhd_bus_t *bus = dhd->bus;
+
+	if (!bus->_srenab)
+		return -1;
+
+	return dhdsdio_clk_devsleep_iovar(bus, on);
+}
+
+uint16
+dhd_get_chipid(dhd_pub_t *dhd)
+{
+	dhd_bus_t *bus = dhd->bus;
+
+	if (bus && bus->sih)
+		return (uint16)bus->sih->chip;
+	else
+		return 0;
+}
+#endif /* BCMSDIO */
+
+#ifdef DEBUGGER
+uint32 dhd_sdio_reg_read(void *h, uint32 addr)
+{
+	uint32 rval;
+	struct dhd_bus *bus = (struct dhd_bus *) h;
+
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	rval = bcmsdh_reg_read(bus->sdh, addr, 4);
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return rval;
+}
+
+void dhd_sdio_reg_write(void *h, uint32 addr, uint32 val)
+{
+	struct dhd_bus *bus = (struct dhd_bus *) h;
+
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	bcmsdh_reg_write(bus->sdh, addr, 4, val);
+
+	dhd_os_sdunlock(bus->dhd);
+}
+#endif /* DEBUGGER */
+
+#if defined(SOFTAP_TPUT_ENHANCE)
+void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time)
+{
+	if (!dhdp || !dhdp->bus) {
+		DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
+		return;
+	}
+	dhdp->bus->idletime = idle_time;
+}
+
+void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time)
+{
+	if (!dhdp || !dhdp->bus) {
+		DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
+		return;
+	}
+
+	if (!idle_time) {
+		DHD_ERROR(("%s:Arg idle_time is NULL\n", __FUNCTION__));
+		return;
+	}
+	*idle_time = dhdp->bus->idletime;
+}
+#endif /* SOFTAP_TPUT_ENHANCE */
diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.c b/drivers/net/wireless/bcmdhd/dhd_wlfc.c
new file mode 100644
index 0000000000000000000000000000000000000000..15844fbc67343ace9eb72a612e0cef692c962dc7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.c
@@ -0,0 +1,4074 @@
+/*
+ * DHD PROP_TXSTATUS Module.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_wlfc.c 490028 2014-07-09 05:58:25Z $
+ *
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+
+/*
+ * wlfc naming and lock rules:
+ *
+ * 1. Private functions are named _dhd_wlfc_XXX, are declared static, and do not take the wlfc lock.
+ * 2. Public functions are named dhd_wlfc_XXX and take the wlfc lock where needed.
+ * 3. Modules outside proptxstatus call only the public functions and do not take the wlfc lock themselves.
+ *
+ */
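+/*
+ * Illustrative sketch of the convention above (hypothetical function names;
+ * dhd_os_wlfc_block()/dhd_os_wlfc_unblock() are assumed here as the wlfc
+ * lock helpers):
+ *
+ *	static int _dhd_wlfc_do_work(dhd_pub_t *dhd)
+ *	{
+ *		// operate on dhd->wlfc_state; caller already holds the wlfc lock
+ *		return BCME_OK;
+ *	}
+ *
+ *	int dhd_wlfc_do_work(dhd_pub_t *dhd)
+ *	{
+ *		int rc;
+ *		dhd_os_wlfc_block(dhd);
+ *		rc = _dhd_wlfc_do_work(dhd);
+ *		dhd_os_wlfc_unblock(dhd);
+ *		return rc;
+ *	}
+ */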
+
+
+#ifdef PROP_TXSTATUS
+
+#ifdef QMONITOR
+#define DHD_WLFC_QMON_COMPLETE(entry) dhd_qmon_txcomplete(&entry->qmon)
+#else
+#define DHD_WLFC_QMON_COMPLETE(entry)
+#endif /* QMONITOR */
+
+#define LIMIT_BORROW
+
+
+static uint16
+_dhd_wlfc_adjusted_seq(void* p, uint8 current_seq)
+{
+	uint16 seq;
+
+	if (!p) {
+		return 0xffff;
+	}
+
+	seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+	if (seq < current_seq) {
+		/* wrap around */
+		seq += 256;
+	}
+
+	return seq;
+}
+
+static void
+_dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead,
+	uint8 current_seq, bool reOrder)
+{
+	struct pktq_prec *q;
+	uint16 seq, seq2;
+	void *p2, *p2_prev;
+
+	if (!p)
+		return;
+
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	PKTSETLINK(p, NULL);
+	if (q->head == NULL) {
+		/* empty queue */
+		q->head = p;
+		q->tail = p;
+	} else {
+		if (reOrder && (prec & 1)) {
+			seq = _dhd_wlfc_adjusted_seq(p, current_seq);
+			p2 = qHead ? q->head : q->tail;
+			seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+			if ((qHead &&((seq+1) > seq2)) || (!qHead && ((seq2+1) > seq))) {
+				/* need reorder */
+				p2 = q->head;
+				p2_prev = NULL;
+				seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+				while (seq > seq2) {
+					p2_prev = p2;
+					p2 = PKTLINK(p2);
+					if (!p2) {
+						break;
+					}
+					seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+				}
+
+				if (p2_prev == NULL) {
+					/* insert head */
+					PKTSETLINK(p, q->head);
+					q->head = p;
+				} else if (p2 == NULL) {
+					/* insert tail */
+					PKTSETLINK(p2_prev, p);
+					q->tail = p;
+				} else {
+					/* insert after p2_prev */
+					PKTSETLINK(p, PKTLINK(p2_prev));
+					PKTSETLINK(p2_prev, p);
+				}
+				goto exit;
+			}
+		}
+
+		if (qHead) {
+			PKTSETLINK(p, q->head);
+			q->head = p;
+		} else {
+			PKTSETLINK(q->tail, p);
+			q->tail = p;
+		}
+	}
+
+exit:
+
+	q->len++;
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+}
+
+/* Create a place to store all packet pointers submitted to the firmware until
+	a status comes back, suppress or otherwise.
+
+	hang-er: noun, a contrivance on which things are hung, as a hook.
+*/
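+/*
+ * Typical hanger lifecycle, as used further below (illustrative sketch only):
+ *
+ *	void *h = _dhd_wlfc_hanger_create(osh, WLFC_HANGER_MAXITEMS);
+ *	uint16 slot = _dhd_wlfc_hanger_get_free_slot(h);
+ *	if (slot != WLFC_HANGER_MAXITEMS)
+ *		_dhd_wlfc_hanger_pushpkt(h, pkt, slot);	// packet handed to the dongle
+ *	...
+ *	_dhd_wlfc_hanger_poppkt(h, slot, &pkt, TRUE);	// txstatus came back
+ *	_dhd_wlfc_hanger_delete(osh, h);
+ */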
+static void*
+_dhd_wlfc_hanger_create(osl_t *osh, int max_items)
+{
+	int i;
+	wlfc_hanger_t* hanger;
+
+	/* allow only up to a specific size for now */
+	ASSERT(max_items == WLFC_HANGER_MAXITEMS);
+
+	if ((hanger = (wlfc_hanger_t*)MALLOC(osh, WLFC_HANGER_SIZE(max_items))) == NULL)
+		return NULL;
+
+	memset(hanger, 0, WLFC_HANGER_SIZE(max_items));
+	hanger->max_items = max_items;
+
+	for (i = 0; i < hanger->max_items; i++) {
+		hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+	}
+	return hanger;
+}
+
+static int
+_dhd_wlfc_hanger_delete(osl_t *osh, void* hanger)
+{
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		MFREE(osh, h, WLFC_HANGER_SIZE(h->max_items));
+		return BCME_OK;
+	}
+	return BCME_BADARG;
+}
+
+static uint16
+_dhd_wlfc_hanger_get_free_slot(void* hanger)
+{
+	uint32 i;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		i = h->slot_pos + 1;
+		if (i == h->max_items) {
+			i = 0;
+		}
+		while (i != h->slot_pos) {
+			if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE) {
+				h->slot_pos = i;
+				return (uint16)i;
+			}
+			i++;
+			if (i == h->max_items)
+				i = 0;
+		}
+		h->failed_slotfind++;
+	}
+	return WLFC_HANGER_MAXITEMS;
+}
+
+static int
+_dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	*gen = 0xff;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+
+	if (h) {
+		if ((h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+			(h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+			*gen = h->items[slot_id].gen;
+		}
+		else {
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+static int
+_dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h && (slot_id < WLFC_HANGER_MAXITEMS)) {
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) {
+			h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE;
+			h->items[slot_id].pkt = pkt;
+			h->items[slot_id].pkt_state = 0;
+			h->items[slot_id].pkt_txstatus = 0;
+			h->pushed++;
+		}
+		else {
+			h->failed_to_push++;
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+static int
+_dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove_from_hanger)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+
+	if (h) {
+		if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) {
+			*pktout = h->items[slot_id].pkt;
+			if (remove_from_hanger) {
+				h->items[slot_id].state =
+					WLFC_HANGER_ITEM_STATE_FREE;
+				h->items[slot_id].pkt = NULL;
+				h->items[slot_id].gen = 0xff;
+				h->items[slot_id].identifier = 0;
+				h->popped++;
+			}
+		}
+		else {
+			h->failed_to_pop++;
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+static int
+_dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+	if (h) {
+		h->items[slot_id].gen = gen;
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+			h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
+		}
+		else
+			rc = BCME_BADARG;
+	}
+	else
+		rc = BCME_BADARG;
+
+	return rc;
+}
+
+/* remove reference of specific packet in hanger */
+static bool
+_dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt)
+{
+	int i;
+
+	if (!h || !pkt) {
+		return FALSE;
+	}
+
+	for (i = 0; i < h->max_items; i++) {
+		if (pkt == h->items[i].pkt) {
+			if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+				(h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+				h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+				h->items[i].pkt = NULL;
+				h->items[i].gen = 0xff;
+				h->items[i].identifier = 0;
+			}
+			return TRUE;
+		}
+	}
+
+	return FALSE;
+}
+
+
+static int
+_dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p)
+{
+	wlfc_mac_descriptor_t* entry;
+	uint16 entry_idx = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+	uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+
+	if (entry_idx < WLFC_MAC_DESC_TABLE_SIZE)
+		entry  = &ctx->destination_entries.nodes[entry_idx];
+	else if (entry_idx < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+		entry = &ctx->destination_entries.interfaces[entry_idx - WLFC_MAC_DESC_TABLE_SIZE];
+	else
+		entry = &ctx->destination_entries.other;
+
+	pktq_penq(&entry->afq, prec, p);
+
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec,
+	void **pktout)
+{
+	wlfc_mac_descriptor_t *entry;
+	struct pktq *pq;
+	struct pktq_prec *q;
+	void *p, *b;
+
+	if (!ctx) {
+		DHD_ERROR(("%s: ctx(%p), pktout(%p)\n", __FUNCTION__, ctx, pktout));
+		return BCME_BADARG;
+	}
+
+	if (pktout) {
+		*pktout = NULL;
+	}
+
+	ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1));
+
+	if (hslot < WLFC_MAC_DESC_TABLE_SIZE)
+		entry  = &ctx->destination_entries.nodes[hslot];
+	else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+		entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE];
+	else
+		entry = &ctx->destination_entries.other;
+
+	pq = &entry->afq;
+
+	ASSERT(prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	b = NULL;
+	p = q->head;
+
+	while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)))))
+	{
+		b = p;
+		p = PKTLINK(p);
+	}
+
+	if (p == NULL) {
+		/* none is matched */
+		if (b) {
+			DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt));
+		} else {
+			DHD_ERROR(("%s: queue is empty\n", __FUNCTION__));
+		}
+
+		return BCME_ERROR;
+	}
+
+	if (!b) {
+		/* head packet is matched */
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		/* middle packet is matched */
+		DHD_INFO(("%s: out of order, seq(%d), head_seq(%d)\n", __FUNCTION__, hcnt,
+			WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(q->head)))));
+		ctx->stats.ooo_pkts[prec]++;
+		PKTSETLINK(b, PKTLINK(p));
+		if (PKTLINK(p) == NULL) {
+			q->tail = b;
+		}
+	}
+
+	q->len--;
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	if (pktout) {
+		*pktout = p;
+	}
+
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void* p, bool tim_signal,
+	uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr)
+{
+	uint32 wl_pktinfo = 0;
+	uint8* wlh;
+	uint8 dataOffset = 0;
+	uint8 fillers;
+	uint8 tim_signal_len = 0;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	struct bdc_header *h;
+
+	if (skip_wlfc_hdr)
+		goto push_bdc_hdr;
+
+	if (tim_signal) {
+		tim_signal_len = TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+	}
+
+	/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+	dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + TLV_HDR_LEN + tim_signal_len;
+	if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+		dataOffset += WLFC_CTL_VALUE_LEN_SEQ;
+	}
+
+	fillers = ROUNDUP(dataOffset, 4) - dataOffset;
+	dataOffset += fillers;
+
+	PKTPUSH(ctx->osh, p, dataOffset);
+	wlh = (uint8*) PKTDATA(ctx->osh, p);
+
+	wl_pktinfo = htol32(htodtag);
+
+	wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG;
+	wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG;
+	memcpy(&wlh[TLV_HDR_LEN], &wl_pktinfo, sizeof(uint32));
+
+	if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+		uint16 wl_seqinfo = htol16(htodseq);
+		wlh[TLV_LEN_OFF] += WLFC_CTL_VALUE_LEN_SEQ;
+		memcpy(&wlh[TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PKTTAG], &wl_seqinfo,
+			WLFC_CTL_VALUE_LEN_SEQ);
+	}
+
+	if (tim_signal_len) {
+		wlh[dataOffset - fillers - tim_signal_len ] =
+			WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 1] =
+			WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle;
+		wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp;
+	}
+	if (fillers)
+		memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
+
+push_bdc_hdr:
+
+	PKTPUSH(ctx->osh, p, BDC_HEADER_LEN);
+	h = (struct bdc_header *)PKTDATA(ctx->osh, p);
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(p))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->dataOffset = dataOffset >> 2;
+	BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p)));
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf)
+{
+	struct bdc_header *h;
+
+	if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+	h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf);
+
+	/* pull BDC header */
+	PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN);
+
+	if (PKTLEN(ctx->osh, pktbuf) < (uint)(h->dataOffset << 2)) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(ctx->osh, pktbuf), (h->dataOffset << 2)));
+		return BCME_ERROR;
+	}
+
+	/* pull wl-header */
+	PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2));
+	return BCME_OK;
+}
+
+static wlfc_mac_descriptor_t*
+_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p)
+{
+	int i;
+	wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes;
+	uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p));
+	uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p));
+	wlfc_mac_descriptor_t* entry = DHD_PKTTAG_ENTRY(PKTTAG(p));
+	int iftype = ctx->destination_entries.interfaces[ifid].iftype;
+
+	/* saved one exists, return it */
+	if (entry)
+		return entry;
+
+	/* Multicast destinations, STAs and P2P clients get the interface entry.
+	 * For TDLS destinations a STA/GC gets the MAC entry instead, since TDLS
+	 * destinations have their own entry.
+	 */
+	if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) ||
+		iftype == WLC_E_IF_ROLE_P2P_CLIENT) &&
+		(ctx->destination_entries.interfaces[ifid].occupied)) {
+			entry = &ctx->destination_entries.interfaces[ifid];
+	}
+
+	if (entry && ETHER_ISMULTI(dstn)) {
+		DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
+		return entry;
+	}
+
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (table[i].occupied) {
+			if (table[i].interface_id == ifid) {
+				if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN)) {
+					entry = &table[i];
+					break;
+				}
+			}
+		}
+	}
+
+	if (entry == NULL)
+		entry = &ctx->destination_entries.other;
+
+	DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
+
+	return entry;
+}
+
+static int
+_dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ)
+{
+	athost_wl_status_info_t* ctx;
+	void *pout = NULL;
+
+	ASSERT(dhdp && p);
+	ASSERT(prec >= 0 && prec <= WLFC_PSQ_PREC_COUNT);
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+		/* suppressed queue, need pop from hanger */
+		_dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG
+					(PKTTAG(p))), &pout, TRUE);
+		ASSERT(p == pout);
+	}
+
+	if (!(prec & 1)) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* pkt in delayed q, so fake push BDC header for
+		 * dhd_tcpack_check_xmit() and dhd_txcomplete().
+		 */
+		_dhd_wlfc_pushheader(ctx, p, FALSE, 0, 0, 0, 0, TRUE);
+
+		/* This packet is about to be freed, so remove it from tcp_ack_info_tbl
+		 * This must be one of...
+		 * 1. A pkt already in delayQ is evicted by another pkt with higher precedence
+		 * in _dhd_wlfc_prec_enq_with_drop()
+		 * 2. A pkt could not be enqueued to delayQ because it is full,
+		 * in _dhd_wlfc_enque_delayq().
+		 * 3. A pkt could not be enqueued to delayQ because it is full,
+		 * in _dhd_wlfc_rollback_packet_toq().
+		 */
+		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+				" Stop using it\n",
+				__FUNCTION__, __LINE__));
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+		}
+#endif /* DHDTCPACK_SUPPRESS */
+	}
+
+	if (bPktInQ) {
+		ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+		ctx->pkt_cnt_per_ac[prec>>1]--;
+	}
+
+	ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][DHD_PKTTAG_FIFO(PKTTAG(p))]--;
+	ctx->stats.pktout++;
+	ctx->stats.drop_pkts[prec]++;
+
+	dhd_txcomplete(dhdp, p, FALSE);
+	PKTFREE(ctx->osh, p, TRUE);
+
+	return 0;
+}
+
+static bool
+_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead,
+	uint8 current_seq)
+{
+	void *p = NULL;
+	int eprec = -1;		/* precedence to evict from */
+	athost_wl_status_info_t* ctx;
+
+	ASSERT(dhdp && pq && pkt);
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	/* Fast case, precedence queue is not full and we are also not
+	 * exceeding total queue length
+	 */
+	if (!pktq_pfull(pq, prec) && !pktq_full(pq)) {
+		goto exit;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktq_pfull(pq, prec))
+		eprec = prec;
+	else if (pktq_full(pq)) {
+		p = pktq_peek_tail(pq, &eprec);
+		if (!p) {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+			return FALSE;
+		}
+		if ((eprec > prec) || (eprec < 0)) {
+			if (!pktq_pempty(pq, prec)) {
+				eprec = prec;
+			} else {
+				return FALSE;
+			}
+		}
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktq_pempty(pq, eprec));
+		/* Evict all fragmented frames */
+		dhd_prec_drop_pkts(dhdp, pq, eprec, _dhd_wlfc_prec_drop);
+	}
+
+exit:
+	/* Enqueue */
+	_dhd_wlfc_prec_enque(pq, prec, pkt, qHead, current_seq,
+		WLFC_GET_REORDERSUPP(dhdp->wlfc_mode));
+	ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++;
+	ctx->pkt_cnt_per_ac[prec>>1]++;
+
+	return TRUE;
+}
+
+
+static int
+_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx,
+	void* p, ewlfc_packet_state_t pkt_type, uint32 hslot)
+{
+	/*
+	Put the packet back at the head of the queue:
+
+	- a suppressed packet goes back to the suppress sub-queue
+	- the header is pulled out first if it is a new or delayed packet
+
+	Note: hslot is used only when header removal is done.
+	*/
+	wlfc_mac_descriptor_t* entry;
+	int rc = BCME_OK;
+	int prec, fifo_id;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+	fifo_id = prec << 1;
+	if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED)
+		fifo_id += 1;
+	if (entry != NULL) {
+		/*
+		if this packet did not count against FIFO credit, it must have
+		taken a requested_credit from the firmware (for pspoll etc.)
+		*/
+		if ((prec != AC_COUNT) && !DHD_PKTTAG_CREDITCHECK(PKTTAG(p)))
+			entry->requested_credit++;
+
+		if (pkt_type == eWLFC_PKTTYPE_DELAYED) {
+			/* decrement sequence count */
+			WLFC_DECR_SEQCOUNT(entry, prec);
+			/* remove header first */
+			rc = _dhd_wlfc_pullheader(ctx, p);
+			if (rc != BCME_OK) {
+				DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+				goto exit;
+			}
+		}
+
+		if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, fifo_id, TRUE,
+			WLFC_SEQCOUNT(entry, fifo_id>>1))
+			== FALSE) {
+			/* enque failed */
+			DHD_ERROR(("Error: %s():%d, fifo_id(%d)\n",
+				__FUNCTION__, __LINE__, fifo_id));
+			rc = BCME_ERROR;
+		}
+	} else {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		rc = BCME_ERROR;
+	}
+exit:
+	if (rc != BCME_OK) {
+		ctx->stats.rollback_failed++;
+		_dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE);
+	}
+	else
+		ctx->stats.rollback++;
+
+	return rc;
+}
+
+static bool
+_dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid)
+{
+	int prec, ac_traffic = WLFC_NO_TRAFFIC;
+
+	for (prec = 0; prec < AC_COUNT; prec++) {
+		if (ctx->pkt_cnt_in_drv[ifid][prec] > 0) {
+			if (ac_traffic == WLFC_NO_TRAFFIC)
+				ac_traffic = prec + 1;
+			else if (ac_traffic != (prec + 1))
+				ac_traffic = WLFC_MULTI_TRAFFIC;
+		}
+	}
+
+	if (ac_traffic >= 1 && ac_traffic <= AC_COUNT) {
+		/* single AC (BE/BK/VI/VO) in queue */
+		if (ctx->allow_fc) {
+			return TRUE;
+		} else {
+			uint32 delta;
+			uint32 curr_t = OSL_SYSUPTIME();
+
+			if (ctx->fc_defer_timestamp == 0) {
+				/* first single AC scenario */
+				ctx->fc_defer_timestamp = curr_t;
+				return FALSE;
+			}
+
+			/* single AC duration, this handles wrap around, e.g. 1 - ~0 = 2. */
+			delta = curr_t - ctx->fc_defer_timestamp;
+			if (delta >= WLFC_FC_DEFER_PERIOD_MS) {
+				ctx->allow_fc = TRUE;
+			}
+		}
+	} else {
+		/* multiple ACs or BCMC in queue */
+		ctx->allow_fc = FALSE;
+		ctx->fc_defer_timestamp = 0;
+	}
+
+	return ctx->allow_fc;
+}
+
+static void
+_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id)
+{
+	dhd_pub_t *dhdp;
+
+	ASSERT(ctx);
+
+	dhdp = (dhd_pub_t *)ctx->dhdp;
+	ASSERT(dhdp);
+
+	if (dhdp->skip_fc && dhdp->skip_fc())
+		return;
+
+	if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id))
+		return;
+
+	if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
+		/* start traffic */
+		ctx->hostif_flow_state[if_id] = OFF;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("F"));
+
+		dhd_txflowcontrol(dhdp, if_id, OFF);
+
+		ctx->toggle_host_if = 0;
+	}
+
+	if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) {
+		/* stop traffic */
+		ctx->hostif_flow_state[if_id] = ON;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic   %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("N"));
+
+		dhd_txflowcontrol(dhdp, if_id, ON);
+
+		ctx->host_ifidx = if_id;
+		ctx->toggle_host_if = 1;
+	}
+
+	return;
+}
+
+static int
+_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	uint8 ta_bmp)
+{
+	int rc = BCME_OK;
+	void* p = NULL;
+	int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 16;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	if (dhdp->proptxstatus_txoff) {
+		rc = BCME_NORESOURCE;
+		return rc;
+	}
+
+	/* allocate a dummy packet */
+	p = PKTGET(ctx->osh, dummylen, TRUE);
+	if (p) {
+		PKTPULL(ctx->osh, p, dummylen);
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0);
+		_dhd_wlfc_pushheader(ctx, p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE);
+		DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1);
+		DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1);
+#ifdef PROP_TXSTATUS_DEBUG
+		ctx->stats.signal_only_pkts_sent++;
+#endif
+
+#if defined(BCMPCIE)
+		rc = dhd_bus_txdata(dhdp->bus, p, ctx->host_ifidx);
+#else
+		rc = dhd_bus_txdata(dhdp->bus, p);
+#endif
+		if (rc != BCME_OK) {
+			_dhd_wlfc_pullheader(ctx, p);
+			PKTFREE(ctx->osh, p, TRUE);
+		}
+	}
+	else {
+		DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+		           __FUNCTION__, dummylen));
+		rc = BCME_NOMEM;
+	}
+	return rc;
+}
+
+/* Return TRUE if traffic availability changed */
+static bool
+_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	int prec)
+{
+	bool rc = FALSE;
+
+	if (entry->state == WLFC_STATE_CLOSE) {
+		if ((pktq_plen(&entry->psq, (prec << 1)) == 0) &&
+			(pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) {
+
+			if (entry->traffic_pending_bmp & NBITVAL(prec)) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp & ~ NBITVAL(prec);
+			}
+		}
+		else {
+			if (!(entry->traffic_pending_bmp & NBITVAL(prec))) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp | NBITVAL(prec);
+			}
+		}
+	}
+	if (rc) {
+		/* request a TIM update to firmware at the next piggyback opportunity */
+		if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) {
+			entry->send_tim_signal = 1;
+			_dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp);
+			entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+			entry->send_tim_signal = 0;
+		}
+		else {
+			rc = FALSE;
+		}
+	}
+	return rc;
+}
+
+static int
+_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p)
+{
+	wlfc_mac_descriptor_t* entry;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	if (entry == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_NOTFOUND;
+	}
+	/*
+	- suppressed packets go to sub_queue[2*prec + 1] AND
+	- delayed packets go to sub_queue[2*prec + 0] to ensure
+	order of delivery.
+	*/
+	if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, ((prec << 1) + 1), FALSE,
+		WLFC_SEQCOUNT(entry, prec))
+		== FALSE) {
+		ctx->stats.delayq_full_error++;
+		/* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */
+		WLFC_DBGMESG(("s"));
+		return BCME_ERROR;
+	}
+
+	/* A packet has been pushed, update traffic availability bitmap, if applicable */
+	_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+	_dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, void* p, int header_needed, uint32* slot)
+{
+	int rc = BCME_OK;
+	int hslot = WLFC_HANGER_MAXITEMS;
+	bool send_tim_update = FALSE;
+	uint32 htod = 0;
+	uint16 htodseq = 0;
+	uint8 free_ctr;
+	int gen = 0xff;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	*slot = hslot;
+
+	if (entry == NULL) {
+		entry = _dhd_wlfc_find_table_entry(ctx, p);
+	}
+
+	if (entry == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_ERROR;
+	}
+
+	if (entry->send_tim_signal) {
+		send_tim_update = TRUE;
+		entry->send_tim_signal = 0;
+		entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+	}
+
+	if (header_needed) {
+		if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+			hslot = (uint)(entry - &ctx->destination_entries.nodes[0]);
+		} else {
+			hslot = _dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+		}
+		gen = entry->generation;
+		free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+	} else {
+		if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+			htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(p));
+		}
+
+		hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+
+		if (WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)) {
+			gen = entry->generation;
+		} else if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+			gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		} else {
+			_dhd_wlfc_hanger_get_genbit(ctx->hanger, p, hslot, &gen);
+		}
+
+		free_ctr = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		/* remove old header */
+		_dhd_wlfc_pullheader(ctx, p);
+	}
+
+	if (hslot >= WLFC_HANGER_MAXITEMS) {
+		DHD_ERROR(("Error: %s():no hanger slot available\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr);
+	WL_TXSTATUS_SET_HSLOT(htod, hslot);
+	WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+	WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+	WL_TXSTATUS_SET_GENERATION(htod, gen);
+	DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
+
+	if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+		/*
+		Indicate that this packet is being sent in response to an
+		explicit request from the firmware side.
+		*/
+		WLFC_PKTFLAG_SET_PKTREQUESTED(htod);
+	} else {
+		WLFC_PKTFLAG_CLR_PKTREQUESTED(htod);
+	}
+
+	rc = _dhd_wlfc_pushheader(ctx, p, send_tim_update,
+		entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE);
+	if (rc == BCME_OK) {
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+
+		if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && header_needed) {
+			/*
+			a new header was created for this packet.
+			push to hanger slot and scrub q. Since bus
+			send succeeded, increment seq number as well.
+			*/
+			rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+			if (rc == BCME_OK) {
+#ifdef PROP_TXSTATUS_DEBUG
+				((wlfc_hanger_t*)(ctx->hanger))->items[hslot].push_time =
+					OSL_SYSUPTIME();
+#endif
+			} else {
+				DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n",
+					__FUNCTION__, rc));
+			}
+		}
+
+		if ((rc == BCME_OK) && header_needed) {
+			/* increment free running sequence count */
+			WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+		}
+	}
+	*slot = hslot;
+	return rc;
+}
+
+static int
+_dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, int prec)
+{
+	if (entry->interface_id >= WLFC_MAX_IFNUM) {
+		ASSERT(&ctx->destination_entries.other == entry);
+		return 1;
+	}
+	if (ctx->destination_entries.interfaces[entry->interface_id].iftype ==
+		WLC_E_IF_ROLE_P2P_GO) {
+		/* - destination interface is of type p2p GO.
+		For a p2p GO interface, if the destination is OPEN but the interface is
+		CLOSEd, do not send traffic. But if the destination is CLOSEd while there is
+		destination-specific credit left, send packets, because the firmware keeps
+		the destination-specific-requested packets queued.
+		*/
+		if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+			(entry->requested_packet == 0)) {
+			return 0;
+		}
+	}
+	/* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */
+	if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+		(entry->requested_packet == 0)) ||
+		(!(entry->ac_bitmap & (1 << prec)))) {
+		return 0;
+	}
+
+	return 1;
+}
+
+static void*
+_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec,
+	uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out,
+	bool only_no_credit)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+	wlfc_mac_descriptor_t* entry;
+	int total_entries;
+	void* p = NULL;
+	int i;
+
+	*entry_out = NULL;
+	/* most cases a packet will count against FIFO credit */
+	*ac_credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 0 : 1;
+
+	/* search all entries, include nodes as well as interfaces */
+	if (only_no_credit) {
+		total_entries = ctx->requested_entry_count;
+	} else {
+		total_entries = ctx->active_entry_count;
+	}
+
+	for (i = 0; i < total_entries; i++) {
+		if (only_no_credit) {
+			entry = ctx->requested_entry[i];
+		} else {
+			entry = ctx->active_entry_head;
+			/* move head to ensure fair round-robin */
+			ctx->active_entry_head = ctx->active_entry_head->next;
+		}
+		ASSERT(entry);
+
+		if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) &&
+			(entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) &&
+			!(WLFC_GET_REORDERSUPP(dhdp->wlfc_mode) && entry->suppressed)) {
+			if (entry->state == WLFC_STATE_CLOSE) {
+				*ac_credit_spent = 0;
+			}
+
+			/* higher precedence will be picked up first,
+			 * i.e. suppressed packets before delayed ones
+			 */
+			p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec));
+			*needs_hdr = 0;
+			if (p == NULL) {
+				if (entry->suppressed == TRUE) {
+					/* skip this entry */
+					continue;
+				}
+				/* De-Q from delay Q */
+				p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec));
+				*needs_hdr = 1;
+			}
+
+			if (p != NULL) {
+				/* did the packet come from suppress sub-queue? */
+				if (entry->requested_credit > 0) {
+					entry->requested_credit--;
+#ifdef PROP_TXSTATUS_DEBUG
+					entry->dstncredit_sent_packets++;
+#endif
+				} else if (entry->requested_packet > 0) {
+					entry->requested_packet--;
+					DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+				}
+
+				*entry_out = entry;
+				ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--;
+				ctx->pkt_cnt_per_ac[prec]--;
+				_dhd_wlfc_flow_control_check(ctx, &entry->psq,
+					DHD_PKTTAG_IF(PKTTAG(p)));
+				/*
+				A packet has been picked up, update traffic
+				availability bitmap, if applicable
+				*/
+				_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+				return p;
+			}
+		}
+	}
+	return NULL;
+}
+
+static int
+_dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec)
+{
+	wlfc_mac_descriptor_t* entry;
+
+	if (pktbuf != NULL) {
+		entry = _dhd_wlfc_find_table_entry(ctx, pktbuf);
+		if (entry == NULL) {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+			return BCME_ERROR;
+		}
+
+		/*
+		- suppressed packets go to sub_queue[2*prec + 1] AND
+		- delayed packets go to sub_queue[2*prec + 0] to ensure
+		order of delivery.
+		*/
+		if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, pktbuf, (prec << 1),
+			FALSE, WLFC_SEQCOUNT(entry, prec))
+			== FALSE) {
+			WLFC_DBGMESG(("D"));
+			ctx->stats.delayq_full_error++;
+			return BCME_ERROR;
+		}
+
+#ifdef QMONITOR
+		dhd_qmon_tx(&entry->qmon);
+#endif
+
+		/*
+		A packet has been pushed, update traffic availability bitmap,
+		if applicable
+		*/
+		_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+	}
+
+	return BCME_OK;
+}
+
+static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid)
+{
+	if (!p || !p_ifid)
+		return FALSE;
+
+	return (DHD_PKTTAG_WLFCPKT(PKTTAG(p)) && (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p))));
+}
+
+static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry)
+{
+	if (!p || !entry)
+		return FALSE;
+
+	return (DHD_PKTTAG_WLFCPKT(PKTTAG(p)) && (entry == DHD_PKTTAG_ENTRY(PKTTAG(p))));
+}
+
+static void
+_dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt)
+{
+	dhd_pub_t *dhdp;
+
+	if (!wlfc || !pkt) {
+		return;
+	}
+
+	dhdp = (dhd_pub_t *)(wlfc->dhdp);
+	if (dhdp && (dhdp->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) &&
+		DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+		int lender, credit_returned = 0;
+		uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt));
+
+		/* Note that borrower is fifo_id */
+		/* Return credits to highest priority lender first */
+		for (lender = AC_COUNT; lender >= 0; lender--) {
+			if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+				wlfc->FIFO_credit[lender]++;
+				wlfc->credits_borrowed[fifo_id][lender]--;
+				credit_returned = 1;
+				break;
+			}
+		}
+
+		if (!credit_returned) {
+			wlfc->FIFO_credit[fifo_id]++;
+		}
+	}
+}
+
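+/*
+ * Record pkt_state for a hanger slot. Once both the bus TX completion and a
+ * txstatus (or cleanup) have been seen for the slot, the packet is completed
+ * and freed; a cleanup without txstatus also rolls back transit/credit counters.
+ */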
+static void
+_dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 pkt_state,
+	int pkt_txstatus)
+{
+	wlfc_hanger_t* hanger;
+	wlfc_hanger_item_t* item;
+
+	if (!wlfc)
+		return;
+
+	hanger = (wlfc_hanger_t*)wlfc->hanger;
+	if (!hanger)
+		return;
+
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return;
+
+	item = &hanger->items[slot_id];
+	item->pkt_state |= pkt_state;
+	if (pkt_txstatus != -1) {
+		item->pkt_txstatus = pkt_txstatus;
+	}
+
+	if (item->pkt) {
+		if ((item->pkt_state & WLFC_HANGER_PKT_STATE_TXCOMPLETE) &&
+			(item->pkt_state & (WLFC_HANGER_PKT_STATE_TXSTATUS |
+			WLFC_HANGER_PKT_STATE_CLEANUP))) {
+			void *p = NULL;
+			void *pkt = item->pkt;
+			uint8 old_state = item->state;
+			int ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, slot_id, &p, TRUE);
+			BCM_REFERENCE(ret);
+			BCM_REFERENCE(pkt);
+			ASSERT((ret == BCME_OK) && p && (pkt == p));
+
+			/* free packet */
+			if (!(item->pkt_state & WLFC_HANGER_PKT_STATE_TXSTATUS)) {
+				/* cleanup case */
+				wlfc_mac_descriptor_t *entry = _dhd_wlfc_find_table_entry(wlfc, p);
+
+				ASSERT(entry);
+				entry->transit_count--;
+				if (entry->suppressed &&
+					(--entry->suppr_transit_count == 0)) {
+					entry->suppressed = FALSE;
+				}
+				_dhd_wlfc_return_implied_credit(wlfc, p);
+				wlfc->stats.cleanup_fw_cnt++;
+				/* slot not freeable yet */
+				item->state = old_state;
+			}
+
+			wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))]
+				[DHD_PKTTAG_FIFO(PKTTAG(p))]--;
+			wlfc->stats.pktout++;
+			dhd_txcomplete((dhd_pub_t *)wlfc->dhdp, p, item->pkt_txstatus);
+			PKTFREE(wlfc->osh, p, TRUE);
+		}
+	} else {
+		if (item->pkt_state & WLFC_HANGER_PKT_STATE_TXSTATUS) {
+			/* free slot */
+			ASSERT(item->state != WLFC_HANGER_ITEM_STATE_FREE);
+			item->state = WLFC_HANGER_ITEM_STATE_FREE;
+		}
+	}
+}
+
+static void
+_dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq,
+	bool dir, f_processpkt_t fn, void *arg, q_type_t q_type)
+{
+	int prec;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	ASSERT(dhdp);
+
+	/* Optimize flush, if pktq len = 0, just return.
+	 * pktq len of 0 means pktq's prec q's are all empty.
+	 */
+	if (pq->len == 0) {
+		return;
+	}
+
+
+	for (prec = 0; prec < pq->num_prec; prec++) {
+		struct pktq_prec *q;
+		void *p, *prev = NULL;
+
+		q = &pq->q[prec];
+		p = q->head;
+		while (p) {
+			if (fn == NULL || (*fn)(p, arg)) {
+				bool head = (p == q->head);
+				if (head)
+					q->head = PKTLINK(p);
+				else
+					PKTSETLINK(prev, PKTLINK(p));
+				if (q_type == Q_TYPE_PSQ) {
+					if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+						_dhd_wlfc_hanger_remove_reference(ctx->hanger, p);
+					}
+					ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+					ctx->pkt_cnt_per_ac[prec>>1]--;
+					ctx->stats.cleanup_psq_cnt++;
+					if (!(prec & 1)) {
+						/* pkt in delayed q, so fake push BDC header for
+						 * dhd_tcpack_check_xmit() and dhd_txcomplete().
+						 */
+						_dhd_wlfc_pushheader(ctx, p, FALSE, 0, 0,
+							0, 0, TRUE);
+#ifdef DHDTCPACK_SUPPRESS
+						if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+							DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+								" Stop using it\n",
+								__FUNCTION__, __LINE__));
+							dhd_tcpack_suppress_set(dhdp,
+								TCPACK_SUP_OFF);
+						}
+#endif /* DHDTCPACK_SUPPRESS */
+					}
+				} else if (q_type == Q_TYPE_AFQ) {
+					wlfc_mac_descriptor_t* entry =
+						_dhd_wlfc_find_table_entry(ctx, p);
+					entry->transit_count--;
+					if (entry->suppressed &&
+						(--entry->suppr_transit_count == 0)) {
+						entry->suppressed = FALSE;
+					}
+					_dhd_wlfc_return_implied_credit(ctx, p);
+					ctx->stats.cleanup_fw_cnt++;
+				}
+				PKTSETLINK(p, NULL);
+				if (dir) {
+					ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+					ctx->stats.pktout++;
+					dhd_txcomplete(dhdp, p, FALSE);
+				}
+				PKTFREE(ctx->osh, p, dir);
+
+				q->len--;
+				pq->len--;
+				p = (head ? q->head : PKTLINK(prev));
+			} else {
+				prev = p;
+				p = PKTLINK(p);
+			}
+		}
+
+		if (q->head == NULL) {
+			ASSERT(q->len == 0);
+			q->tail = NULL;
+		}
+
+	}
+
+	if (fn == NULL)
+		ASSERT(pq->len == 0);
+}
+
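+/*
+ * Dequeue and return the first packet in precedence queue 'prec' that matches
+ * the filter 'fn' (or simply the head when fn is NULL); returns NULL if none.
+ */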
+static void*
+_dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			break;
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+	if (p == NULL)
+		return NULL;
+
+	if (prev == NULL) {
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		PKTSETLINK(prev, PKTLINK(p));
+		if (q->tail == p) {
+			q->tail = prev;
+		}
+	}
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
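+/*
+ * Pull all matching packets out of the bus tx queue, drop their hanger
+ * references (non-AFQ mode), fix up transit/suppression counters and implied
+ * credits, then complete and free them.
+ */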
+static void
+_dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	int prec;
+	void *pkt = NULL, *head = NULL, *tail = NULL;
+	struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+	wlfc_mac_descriptor_t* entry;
+
+	dhd_os_sdlock_txq(dhd);
+	for (prec = 0; prec < txq->num_prec; prec++) {
+		while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+#ifdef DHDTCPACK_SUPPRESS
+			if (dhd_tcpack_check_xmit(dhd, pkt) == BCME_ERROR) {
+				DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+					__FUNCTION__, __LINE__));
+				dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF);
+			}
+#endif /* DHDTCPACK_SUPPRESS */
+			if (!head) {
+				head = pkt;
+			}
+			if (tail) {
+				PKTSETLINK(tail, pkt);
+			}
+			tail = pkt;
+		}
+	}
+	dhd_os_sdunlock_txq(dhd);
+
+
+	while ((pkt = head)) {
+		head = PKTLINK(pkt);
+		PKTSETLINK(pkt, NULL);
+		entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode) &&
+			!_dhd_wlfc_hanger_remove_reference(h, pkt)) {
+			DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n",
+				__FUNCTION__, pkt));
+		}
+		entry->transit_count--;
+		if (entry->suppressed &&
+			(--entry->suppr_transit_count == 0)) {
+			entry->suppressed = FALSE;
+		}
+		_dhd_wlfc_return_implied_credit(wlfc, pkt);
+		wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pkt))][DHD_PKTTAG_FIFO(PKTTAG(pkt))]--;
+		wlfc->stats.pktout++;
+		wlfc->stats.cleanup_txq_cnt++;
+		dhd_txcomplete(dhd, pkt, FALSE);
+		PKTFREE(wlfc->osh, pkt, TRUE);
+	}
+}
+
+void
+_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	int i;
+	int total_entries;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+
+	wlfc->stats.cleanup_txq_cnt = 0;
+	wlfc->stats.cleanup_psq_cnt = 0;
+	wlfc->stats.cleanup_fw_cnt = 0;
+	/*
+	*  flush sequence should be txq -> psq -> hanger/afq; the hanger has to be flushed last
+	*/
+	/* flush bus->txq */
+	_dhd_wlfc_cleanup_txq(dhd, fn, arg);
+
+
+	/* flush psq, search all entries, include nodes as well as interfaces */
+	total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+	table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
+
+	for (i = 0; i < total_entries; i++) {
+		if (table[i].occupied) {
+			/* release packets held in PSQ (both delayed and suppressed) */
+			if (table[i].psq.len) {
+				WLFC_DBGMESG(("%s(): PSQ[%d].len = %d\n",
+					__FUNCTION__, i, table[i].psq.len));
+				_dhd_wlfc_pktq_flush(wlfc, &table[i].psq, TRUE,
+					fn, arg, Q_TYPE_PSQ);
+			}
+
+			/* free packets held in AFQ */
+			if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.len)) {
+				_dhd_wlfc_pktq_flush(wlfc, &table[i].afq, TRUE,
+					fn, arg, Q_TYPE_AFQ);
+			}
+
+			if ((fn == NULL) && (&table[i] != &wlfc->destination_entries.other)) {
+				table[i].occupied = 0;
+				if (table[i].transit_count || table[i].suppr_transit_count) {
+					DHD_ERROR(("%s: table[%d] transit(%d), suppr_transit(%d)\n",
+						__FUNCTION__, i,
+						table[i].transit_count,
+						table[i].suppr_transit_count));
+				}
+			}
+		}
+	}
+
+	/*
+		. flush the remaining packets in the hanger queue, i.e. those in neither bus->txq nor the psq.
+		. these packets were already successfully downloaded to the dongle.
+		. a hanger slot cannot be marked free until its txstatus update is received.
+	*/
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		for (i = 0; i < h->max_items; i++) {
+			if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+				(h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+				if (fn == NULL || (*fn)(h->items[i].pkt, arg)) {
+					_dhd_wlfc_hanger_free_pkt(wlfc, i,
+						WLFC_HANGER_PKT_STATE_CLEANUP, FALSE);
+				}
+			}
+		}
+	}
+
+	return;
+}
+
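+/*
+ * Add, update or delete a destination (MAC/interface) descriptor. ADD
+ * initializes its queues and links it into the circular active-entry list used
+ * for round-robin dequeue; DEL flushes its queued packets first, then unlinks it.
+ */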
+static int
+_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	uint8 action, uint8 ifid, uint8 iftype, uint8* ea,
+	f_processpkt_t fn, void *arg)
+{
+	int rc = BCME_OK;
+
+#ifdef QMONITOR
+	dhd_qmon_reset(&entry->qmon);
+#endif
+
+	if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) {
+		entry->occupied = 1;
+		entry->state = WLFC_STATE_OPEN;
+		entry->requested_credit = 0;
+		entry->interface_id = ifid;
+		entry->iftype = iftype;
+		entry->ac_bitmap = 0xff; /* update this when handling APSD */
+		/* for an interface entry we may not care about the MAC address */
+		if (ea != NULL)
+			memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
+
+		if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+			dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+			pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
+			if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+				pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN);
+			}
+
+			if (entry->next == NULL) {
+				/* not linked to anywhere, add to tail */
+				if (ctx->active_entry_head) {
+					entry->prev = ctx->active_entry_head->prev;
+					ctx->active_entry_head->prev->next = entry;
+					ctx->active_entry_head->prev = entry;
+					entry->next = ctx->active_entry_head;
+
+				} else {
+					ASSERT(ctx->active_entry_count == 0);
+					entry->prev = entry->next = entry;
+					ctx->active_entry_head = entry;
+				}
+				ctx->active_entry_count++;
+			} else {
+				DHD_ERROR(("%s():%d, entry(%d)\n", __FUNCTION__, __LINE__,
+					(int)(entry - &ctx->destination_entries.nodes[0])));
+			}
+		}
+	} else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) {
+		/* When the entry is deleted, the packets queued in the entry must be
+		   cleaned up. The cleanup must happen before occupied is set to 0.
+		*/
+		_dhd_wlfc_cleanup(ctx->dhdp, fn, arg);
+		_dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid);
+
+		entry->occupied = 0;
+		entry->suppressed = 0;
+		entry->state = WLFC_STATE_CLOSE;
+		entry->requested_credit = 0;
+		entry->transit_count = 0;
+		entry->suppr_transit_count = 0;
+		memset(&entry->ea[0], 0, ETHER_ADDR_LEN);
+
+		if (entry->next) {
+			/* not floating, remove from Q */
+			if (ctx->active_entry_count <= 1) {
+				/* last item */
+				ctx->active_entry_head = NULL;
+				ctx->active_entry_count = 0;
+			} else {
+				entry->prev->next = entry->next;
+				entry->next->prev = entry->prev;
+				if (entry == ctx->active_entry_head) {
+					ctx->active_entry_head = entry->next;
+				}
+				ctx->active_entry_count--;
+			}
+			entry->next = entry->prev = NULL;
+		} else {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		}
+	}
+	return rc;
+}
+
+#ifdef LIMIT_BORROW
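+/*
+ * Borrow a single credit for borrower_ac from the lowest-priority AC (up to
+ * highest_lender_ac) that still has credit above its reserve limit (no limit
+ * when bBorrowAll is set); returns the lender AC, or -1 if nothing was borrowed.
+ */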
+static int
+_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac,
+	bool bBorrowAll)
+{
+	int lender_ac, borrow_limit = 0;
+	int rc = -1;
+
+	if (ctx == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return -1;
+	}
+
+	/* Borrow from lowest priority available AC (including BC/MC credits) */
+	for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) {
+		if (!bBorrowAll) {
+			borrow_limit = ctx->Init_FIFO_credit[lender_ac]/WLFC_BORROW_LIMIT_RATIO;
+		} else {
+			borrow_limit = 0;
+		}
+
+		if (ctx->FIFO_credit[lender_ac] > borrow_limit) {
+			ctx->credits_borrowed[borrower_ac][lender_ac]++;
+			ctx->FIFO_credit[lender_ac]--;
+			rc = lender_ac;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac)
+{
+	if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) ||
+		(borrower_ac < 0) || (borrower_ac > AC_COUNT)) {
+		DHD_ERROR(("Error: %s():%d, ctx(%p), lender_ac(%d), borrower_ac(%d)\n",
+			__FUNCTION__, __LINE__, ctx, lender_ac, borrower_ac));
+
+		return BCME_BADARG;
+	}
+
+	ctx->credits_borrowed[borrower_ac][lender_ac]--;
+	ctx->FIFO_credit[lender_ac]++;
+
+	return BCME_OK;
+}
+#endif /* LIMIT_BORROW */
+
+static int
+_dhd_wlfc_interface_entry_update(void* state,
+	uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	wlfc_mac_descriptor_t* entry;
+
+	if (ifid >= WLFC_MAX_IFNUM)
+		return BCME_BADARG;
+
+	entry = &ctx->destination_entries.interfaces[ifid];
+
+	return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea,
+		_dhd_wlfc_ifpkt_fn, &ifid);
+}
+
+static int
+_dhd_wlfc_BCMCCredit_support_update(void* state)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+	ctx->bcmc_credit_supported = TRUE;
+	return BCME_OK;
+}
+
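+/*
+ * Apply a new per-AC FIFO credit map from firmware: adjust the running credit
+ * counts by the delta from the previous init values and zero the unused ATIM slot.
+ */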
+static int
+_dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	int i;
+
+	for (i = 0; i <= 4; i++) {
+		if (ctx->Init_FIFO_credit[i] != ctx->FIFO_credit[i]) {
+			DHD_ERROR(("%s: credit[%d] is not returned, (%d %d)\n",
+				__FUNCTION__, i, ctx->Init_FIFO_credit[i], ctx->FIFO_credit[i]));
+		}
+	}
+
+	/* update the AC FIFO credit map */
+	ctx->FIFO_credit[0] += (credits[0] - ctx->Init_FIFO_credit[0]);
+	ctx->FIFO_credit[1] += (credits[1] - ctx->Init_FIFO_credit[1]);
+	ctx->FIFO_credit[2] += (credits[2] - ctx->Init_FIFO_credit[2]);
+	ctx->FIFO_credit[3] += (credits[3] - ctx->Init_FIFO_credit[3]);
+	ctx->FIFO_credit[4] += (credits[4] - ctx->Init_FIFO_credit[4]);
+
+	ctx->Init_FIFO_credit[0] = credits[0];
+	ctx->Init_FIFO_credit[1] = credits[1];
+	ctx->Init_FIFO_credit[2] = credits[2];
+	ctx->Init_FIFO_credit[3] = credits[3];
+	ctx->Init_FIFO_credit[4] = credits[4];
+
+	/* credit for ATIM FIFO is not used yet. */
+	ctx->Init_FIFO_credit[5] = ctx->FIFO_credit[5] = 0;
+
+	return BCME_OK;
+}
+
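+/*
+ * Run pre-TX processing (header push / hanger bookkeeping), hand the packet to
+ * the bus, and update the destination's transit/suppression accounting; on
+ * failure the packet is rolled back to the appropriate queue and its
+ * credit-check flag is cleared.
+ */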
+static int
+_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac,
+    dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx)
+{
+	uint32 hslot;
+	int	rc;
+	dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+
+	/*
+		if ac_fifo_credit_spent = 0
+
+		This packet will not count against the FIFO credit.
+		To ensure the txstatus corresponding to this packet
+		does not provide an implied credit (the default behavior),
+		mark the packet accordingly.
+
+		if ac_fifo_credit_spent = 1
+
+		This is a normal packet and it counts against the FIFO
+		credit count.
+	*/
+	DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent);
+	rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, commit_info->p,
+	     commit_info->needs_hdr, &hslot);
+
+	if (rc == BCME_OK) {
+		rc = fcommit(commit_ctx, commit_info->p);
+		if (rc == BCME_OK) {
+			uint8 gen = WL_TXSTATUS_GET_GENERATION(
+				DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p)));
+			ctx->stats.pkt2bus++;
+			if (commit_info->ac_fifo_credit_spent || (ac == AC_COUNT)) {
+				ctx->stats.send_pkts[ac]++;
+				WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
+			}
+
+			if (gen != commit_info->mac_entry->generation) {
+				/* will be suppressed back by design */
+				if (!commit_info->mac_entry->suppressed) {
+					commit_info->mac_entry->suppressed = TRUE;
+				}
+				commit_info->mac_entry->suppr_transit_count++;
+			}
+			commit_info->mac_entry->transit_count++;
+		} else if (commit_info->needs_hdr) {
+			if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+				void *pout = NULL;
+				/* pop hanger for delayed packet */
+				_dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(
+					DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))), &pout, TRUE);
+				ASSERT(commit_info->p == pout);
+			}
+		}
+	} else {
+		ctx->stats.generic_error++;
+	}
+
+	if (rc != BCME_OK) {
+		/*
+		   pretx pkt process or bus commit has failed, rollback.
+		   - remove wl-header for a delayed packet
+		   - save wl-header header for suppressed packets
+		   - reset credit check flag
+		*/
+		_dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, commit_info->pkt_type, hslot);
+		DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), 0);
+	}
+
+	return rc;
+}
+
+static uint8
+_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea)
+{
+	wlfc_mac_descriptor_t* table =
+		((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes;
+	uint8 table_index;
+
+	if (ea != NULL) {
+		for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) {
+			if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) &&
+				table[table_index].occupied)
+				return table_index;
+		}
+	}
+	return WLFC_MAC_DESC_ID_INVALID;
+}
+
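+/*
+ * Process 'len' consecutive txstatus records starting at the given hanger slot
+ * (or AFQ position). Suppressed packets are re-queued on the host side; all
+ * others are completed and freed, returning implied or requested credits.
+ */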
+static int
+_dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, void** p_mac)
+{
+	uint8 status_flag;
+	uint32 status;
+	int ret = BCME_OK;
+	int remove_from_hanger = 1;
+	void* pktbuf = NULL;
+	uint8 fifo_id = 0, gen = 0, count = 0, hcnt;
+	uint16 hslot;
+	wlfc_mac_descriptor_t* entry = NULL;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	uint16 seq = 0, seq_fromfw = 0, seq_num = 0;
+
+	memcpy(&status, pkt_info, sizeof(uint32));
+	status_flag = WL_TXSTATUS_GET_FLAGS(status);
+	hcnt = WL_TXSTATUS_GET_FREERUNCTR(status);
+	hslot = WL_TXSTATUS_GET_HSLOT(status);
+	fifo_id = WL_TXSTATUS_GET_FIFO(status);
+	gen = WL_TXSTATUS_GET_GENERATION(status);
+
+	if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+		memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ);
+		seq_fromfw = WL_SEQ_GET_FROMFW(seq);
+		seq_num = WL_SEQ_GET_NUM(seq);
+	}
+
+	wlfc->stats.txstatus_in += len;
+
+	if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
+		wlfc->stats.pkt_freed += len;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) {
+		wlfc->stats.pkt_freed += len;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
+		wlfc->stats.d11_suppress += len;
+		remove_from_hanger = 0;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
+		wlfc->stats.wl_suppress += len;
+		remove_from_hanger = 0;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
+		wlfc->stats.wlc_tossed_pkts += len;
+	}
+
+	if (dhd->proptxstatus_txstatus_ignore) {
+		if (!remove_from_hanger) {
+			DHD_ERROR(("suppress txstatus: %d\n", status_flag));
+		}
+		return BCME_OK;
+	}
+
+	while (count < len) {
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf);
+		} else {
+			ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, FALSE);
+			if (!pktbuf) {
+				_dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+					WLFC_HANGER_PKT_STATE_TXSTATUS, -1);
+				goto cont;
+			}
+		}
+
+		if ((ret != BCME_OK) || !pktbuf) {
+			goto cont;
+		}
+
+		/* set fifo_id to the correct value because not all firmware fills it in */
+		fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+
+		entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+
+		if (!remove_from_hanger) {
+			/* this packet was suppressed */
+			if (!entry->suppressed || (entry->generation != gen)) {
+				if (!entry->suppressed) {
+					entry->suppr_transit_count = entry->transit_count;
+					if (p_mac) {
+						*p_mac = entry;
+					}
+				} else {
+					DHD_ERROR(("gen(%d), entry->generation(%d)\n",
+						gen, entry->generation));
+				}
+				entry->suppressed = TRUE;
+
+			}
+			entry->generation = gen;
+		}
+
+#ifdef PROP_TXSTATUS_DEBUG
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode))
+		{
+			uint32 new_t = OSL_SYSUPTIME();
+			uint32 old_t;
+			uint32 delta;
+			old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[hslot].push_time;
+
+
+			wlfc->stats.latency_sample_count++;
+			if (new_t > old_t)
+				delta = new_t - old_t;
+			else
+				delta = 0xffffffff + new_t - old_t;
+			wlfc->stats.total_status_latency += delta;
+			wlfc->stats.latency_most_recent = delta;
+
+			wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta;
+			if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32))
+				wlfc->stats.idx_delta = 0;
+		}
+#endif /* PROP_TXSTATUS_DEBUG */
+
+		/* pick up the implicit credit from this packet */
+		if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) {
+			_dhd_wlfc_return_implied_credit(wlfc, pktbuf);
+		} else {
+			/*
+			if this packet did not count against FIFO credit, it must have
+			taken a requested_credit from the destination entry (for pspoll etc.)
+			*/
+			if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf)))
+				entry->requested_credit++;
+#ifdef PROP_TXSTATUS_DEBUG
+			entry->dstncredit_acks++;
+#endif
+		}
+
+		if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
+			(status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) {
+			/* save generation bit inside packet */
+			WL_TXSTATUS_SET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)), gen);
+
+			if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+				WL_SEQ_SET_FROMDRV(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw);
+				WL_SEQ_SET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_num);
+			}
+
+			ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf);
+			if (ret != BCME_OK) {
+				/* delay q is full, drop this packet */
+				DHD_WLFC_QMON_COMPLETE(entry);
+				_dhd_wlfc_prec_drop(dhd, (fifo_id << 1) + 1, pktbuf, FALSE);
+			} else {
+				if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+					/* Mark suppressed to avoid a double free
+					during wlfc cleanup
+					*/
+					_dhd_wlfc_hanger_mark_suppressed(wlfc->hanger, hslot, gen);
+				}
+			}
+		} else {
+
+			DHD_WLFC_QMON_COMPLETE(entry);
+
+			if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+				_dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+					WLFC_HANGER_PKT_STATE_TXSTATUS, TRUE);
+			} else {
+				dhd_txcomplete(dhd, pktbuf, TRUE);
+				wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))]
+					[DHD_PKTTAG_FIFO(PKTTAG(pktbuf))]--;
+				wlfc->stats.pktout++;
+				/* free the packet */
+				PKTFREE(wlfc->osh, pktbuf, TRUE);
+			}
+		}
+		/* pkt back from firmware side */
+		entry->transit_count--;
+		if (entry->suppressed && (--entry->suppr_transit_count == 0)) {
+			entry->suppressed = FALSE;
+		}
+
+cont:
+		hcnt = (hcnt + 1) & WL_TXSTATUS_FREERUNCTR_MASK;
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			hslot = (hslot + 1) & WL_TXSTATUS_HSLOT_MASK;
+		}
+
+		if (WLFC_GET_REUSESEQ(dhd->wlfc_mode) && seq_fromfw) {
+			seq_num = (seq_num + 1) & WL_SEQ_NUM_MASK;
+		}
+
+		count++;
+	}
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
+{
+	int i;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
+#ifdef PROP_TXSTATUS_DEBUG
+		wlfc->stats.fifo_credits_back[i] += credits[i];
+#endif
+
+		/* update FIFO credits */
+		if (dhd->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
+		{
+			int lender; /* Note that borrower is i */
+
+			/* Return credits to highest priority lender first */
+			for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) {
+				if (wlfc->credits_borrowed[i][lender] > 0) {
+					if (credits[i] >= wlfc->credits_borrowed[i][lender]) {
+						credits[i] -=
+							(uint8)wlfc->credits_borrowed[i][lender];
+						wlfc->FIFO_credit[lender] +=
+						    wlfc->credits_borrowed[i][lender];
+						wlfc->credits_borrowed[i][lender] = 0;
+					}
+					else {
+						wlfc->credits_borrowed[i][lender] -= credits[i];
+						wlfc->FIFO_credit[lender] += credits[i];
+						credits[i] = 0;
+					}
+				}
+			}
+
+			/* If we have more credits left over, these must belong to the AC */
+			if (credits[i] > 0) {
+				wlfc->FIFO_credit[i] += credits[i];
+			}
+
+			if (wlfc->FIFO_credit[i] > wlfc->Init_FIFO_credit[i]) {
+				wlfc->FIFO_credit[i] = wlfc->Init_FIFO_credit[i];
+			}
+		}
+	}
+
+	return BCME_OK;
+}
+
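+/*
+ * Pull matching packets back out of the bus txq and fake a WL-suppress
+ * txstatus (plus a credit-back where a credit was spent) for each, so they
+ * land back in the host delay/suppress queues.
+ */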
+static void
+_dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* entry;
+	int prec;
+	void *pkt = NULL, *head = NULL, *tail = NULL;
+	struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+	uint8	results[WLFC_CTL_VALUE_LEN_TXSTATUS+WLFC_CTL_VALUE_LEN_SEQ];
+	uint8 credits[WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK] = {0};
+	uint32 htod = 0;
+	uint16 htodseq = 0;
+	bool bCreditUpdate = FALSE;
+
+	dhd_os_sdlock_txq(dhd);
+	for (prec = 0; prec < txq->num_prec; prec++) {
+		while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+			if (!head) {
+				head = pkt;
+			}
+			if (tail) {
+				PKTSETLINK(tail, pkt);
+			}
+			tail = pkt;
+		}
+	}
+	dhd_os_sdunlock_txq(dhd);
+
+	while ((pkt = head)) {
+		head = PKTLINK(pkt);
+		PKTSETLINK(pkt, NULL);
+
+		entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+
+		/* fake a suppression txstatus */
+		htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt));
+		WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS);
+		WL_TXSTATUS_SET_GENERATION(htod, entry->generation);
+		memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS);
+		if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+			htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt));
+			if (WL_SEQ_GET_FROMDRV(htodseq)) {
+				WL_SEQ_SET_FROMFW(htodseq, 1);
+				WL_SEQ_SET_FROMDRV(htodseq, 0);
+			}
+			memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq,
+				WLFC_CTL_VALUE_LEN_SEQ);
+		}
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_enque_afq(wlfc, pkt);
+		}
+		_dhd_wlfc_compressed_txstatus_update(dhd, results, 1, NULL);
+
+		/* fake a fifo credit back */
+		if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+			credits[DHD_PKTTAG_FIFO(PKTTAG(pkt))]++;
+			bCreditUpdate = TRUE;
+		}
+	}
+
+	if (bCreditUpdate) {
+		_dhd_wlfc_fifocreditback_indicate(dhd, credits);
+	}
+}
+
+
+static int
+_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value)
+{
+	uint32 timestamp;
+
+	(void)dhd;
+
+	bcopy(&value[2], &timestamp, sizeof(uint32));
+	DHD_INFO(("RXPKT: SEQ: %d, timestamp %d\n", value[1], timestamp));
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi)
+{
+	(void)dhd;
+	(void)rssi;
+	return BCME_OK;
+}
+
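+/*
+ * Track destinations that have outstanding credit/packet requests; the dequeue
+ * path scans only this list when an AC has no FIFO credit left.
+ */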
+static void
+_dhd_wlfc_add_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+	int i;
+
+	if (!wlfc || !entry) {
+		return;
+	}
+
+	for (i = 0; i < wlfc->requested_entry_count; i++) {
+		if (entry == wlfc->requested_entry[i]) {
+			break;
+		}
+	}
+
+	if (i == wlfc->requested_entry_count) {
+		/* no matching entry found */
+		ASSERT(wlfc->requested_entry_count <= (WLFC_MAC_DESC_TABLE_SIZE-1));
+		wlfc->requested_entry[wlfc->requested_entry_count++] = entry;
+	}
+}
+
+static void
+_dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+	int i;
+
+	if (!wlfc || !entry) {
+		return;
+	}
+
+	for (i = 0; i < wlfc->requested_entry_count; i++) {
+		if (entry == wlfc->requested_entry[i]) {
+			break;
+		}
+	}
+
+	if (i < wlfc->requested_entry_count) {
+		/* found */
+		ASSERT(wlfc->requested_entry_count > 0);
+		wlfc->requested_entry_count--;
+		if (i != wlfc->requested_entry_count) {
+			wlfc->requested_entry[i] =
+				wlfc->requested_entry[wlfc->requested_entry_count];
+		}
+		wlfc->requested_entry[wlfc->requested_entry_count] = NULL;
+	}
+}
+
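+/*
+ * Handle a MACDESC_ADD/DEL signal from firmware. On ADD, any stale entry
+ * holding the same MAC at a different index is deleted before the new entry is
+ * created; on DEL, the entry and its queued packets are released.
+ */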
+static int
+_dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	int rc;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	uint8 existing_index;
+	uint8 table_index;
+	uint8 ifid;
+	uint8* ea;
+
+	WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n",
+		__FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7],
+		((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"),
+		WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0]));
+
+	table = wlfc->destination_entries.nodes;
+	table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]);
+	ifid = value[1];
+	ea = &value[2];
+
+	_dhd_wlfc_remove_requested_entry(wlfc, &table[table_index]);
+	if (type == WLFC_CTL_TYPE_MACDESC_ADD) {
+		existing_index = _dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]);
+		if ((existing_index != WLFC_MAC_DESC_ID_INVALID) &&
+			(existing_index != table_index) && table[existing_index].occupied) {
+			/*
+			a different entry already holds this MAC address; delete the
+			old one so the entry can be re-created at the new index.
+			*/
+			rc = _dhd_wlfc_mac_entry_update(wlfc, &table[existing_index],
+				eWLFC_MAC_ENTRY_ACTION_DEL, table[existing_index].interface_id,
+				table[existing_index].iftype, NULL, _dhd_wlfc_entrypkt_fn,
+				&table[existing_index]);
+		}
+
+		if (!table[table_index].occupied) {
+			/* this new MAC entry does not exist, create one */
+			table[table_index].mac_handle = value[0];
+			rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+				eWLFC_MAC_ENTRY_ACTION_ADD, ifid,
+				wlfc->destination_entries.interfaces[ifid].iftype,
+				ea, NULL, NULL);
+		} else {
+			/* the space should have been empty, but it's not */
+			wlfc->stats.mac_update_failed++;
+		}
+	}
+
+	if (type == WLFC_CTL_TYPE_MACDESC_DEL) {
+		if (table[table_index].occupied) {
+				rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+					eWLFC_MAC_ENTRY_ACTION_DEL, ifid,
+					wlfc->destination_entries.interfaces[ifid].iftype,
+					ea, _dhd_wlfc_entrypkt_fn, &table[table_index]);
+		} else {
+			/* the space should have been occupied, but it's not */
+			wlfc->stats.mac_update_failed++;
+		}
+	}
+	BCM_REFERENCE(rc);
+	return BCME_OK;
+}
+
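+/*
+ * Open or close a destination's power-save state. On close, report per-AC
+ * pending traffic back to firmware so it can schedule delivery.
+ */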
+static int
+_dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	/* Handle PS on/off indication */
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle = value[0];
+	int i;
+
+	table = wlfc->destination_entries.nodes;
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		if (type == WLFC_CTL_TYPE_MAC_OPEN) {
+			desc->state = WLFC_STATE_OPEN;
+			desc->ac_bitmap = 0xff;
+			DHD_WLFC_CTRINC_MAC_OPEN(desc);
+			desc->requested_credit = 0;
+			desc->requested_packet = 0;
+			_dhd_wlfc_remove_requested_entry(wlfc, desc);
+		}
+		else {
+			desc->state = WLFC_STATE_CLOSE;
+			DHD_WLFC_CTRINC_MAC_CLOSE(desc);
+			/*
+			Indicate to firmware if there is any traffic pending.
+			*/
+			for (i = 0; i < AC_COUNT; i++) {
+				_dhd_wlfc_traffic_pending_check(wlfc, desc, i);
+			}
+		}
+	}
+	else {
+		wlfc->stats.psmode_update_failed++;
+	}
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	/* Handle PS on/off indication */
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	uint8 if_id = value[0];
+
+	if (if_id < WLFC_MAX_IFNUM) {
+		table = wlfc->destination_entries.interfaces;
+		if (table[if_id].occupied) {
+			if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) {
+				table[if_id].state = WLFC_STATE_OPEN;
+				/* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */
+			}
+			else {
+				table[if_id].state = WLFC_STATE_CLOSE;
+				/* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */
+			}
+			return BCME_OK;
+		}
+	}
+	wlfc->stats.interface_update_failed++;
+
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle;
+	uint8 credit;
+
+	table = wlfc->destination_entries.nodes;
+	mac_handle = value[1];
+	credit = value[0];
+
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_credit = credit;
+
+		desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+		_dhd_wlfc_add_requested_entry(wlfc, desc);
+	}
+	else {
+		wlfc->stats.credit_request_failed++;
+	}
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle;
+	uint8 packet_count;
+
+	table = wlfc->destination_entries.nodes;
+	mac_handle = value[1];
+	packet_count = value[0];
+
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_packet = packet_count;
+
+		desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+		_dhd_wlfc_add_requested_entry(wlfc, desc);
+	}
+	else {
+		wlfc->stats.packet_request_failed++;
+	}
+	return BCME_OK;
+}
+
+static void
+_dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len)
+{
+	if (info_len) {
+		if (info_buf) {
+			bcopy(val, info_buf, len);
+			*info_len = len;
+		}
+		else
+			*info_len = 0;
+	}
+}
+
+/*
+ * public functions
+ */
+
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd)
+{
+	bool rc = TRUE;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return FALSE;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		rc = FALSE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+}
+
+int dhd_wlfc_enable(dhd_pub_t *dhd)
+{
+	int i, rc = BCME_OK;
+	athost_wl_status_info_t* wlfc;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_enabled || dhd->wlfc_state) {
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	/* allocate space to track txstatus propagated from firmware */
+	dhd->wlfc_state = MALLOC(dhd->osh, sizeof(athost_wl_status_info_t));
+	if (dhd->wlfc_state == NULL) {
+		rc = BCME_NOMEM;
+		goto exit;
+	}
+
+	/* initialize state space */
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	memset(wlfc, 0, sizeof(athost_wl_status_info_t));
+
+	/* remember osh & dhdp */
+	wlfc->osh = dhd->osh;
+	wlfc->dhdp = dhd;
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		wlfc->hanger = _dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS);
+		if (wlfc->hanger == NULL) {
+			MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+			dhd->wlfc_state = NULL;
+			rc = BCME_NOMEM;
+			goto exit;
+		}
+	}
+
+	dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
+	/* check rx packets by default; disabled in IBSS mode */
+	if (dhd->op_mode & DHD_FLAG_IBSS_MODE) {
+		dhd->wlfc_rxpkt_chk = FALSE;
+	} else {
+		dhd->wlfc_rxpkt_chk = TRUE;
+	}
+
+
+	/* initialize all interfaces to accept traffic */
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		wlfc->hostif_flow_state[i] = OFF;
+	}
+
+	_dhd_wlfc_mac_entry_update(wlfc, &wlfc->destination_entries.other,
+		eWLFC_MAC_ENTRY_ACTION_ADD, 0xff, 0, NULL, NULL, NULL);
+
+	wlfc->allow_credit_borrow = 0;
+	wlfc->single_ac = 0;
+	wlfc->single_ac_timestamp = 0;
+
+
+exit:
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+}
+#ifdef SUPPORT_P2P_GO_PS
+int
+dhd_wlfc_suspend(dhd_pub_t *dhd)
+{
+
+	uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */
+	uint32 tlv = 0;
+
+	DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__));
+	if (!dhd->wlfc_enabled)
+		return -1;
+
+	bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+		DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__));
+		return -1;
+	}
+	tlv = iovbuf[0];
+	if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0)
+		return 0;
+	tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+	bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+			__FUNCTION__, tlv));
+		return -1;
+	}
+
+	return 0;
+}
+
+int
+dhd_wlfc_resume(dhd_pub_t *dhd)
+{
+	uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */
+	uint32 tlv = 0;
+
+	DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__));
+	if (!dhd->wlfc_enabled)
+		return -1;
+
+	bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+		DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__));
+		return -1;
+	}
+	tlv = iovbuf[0];
+	if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) ==
+		(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS))
+		return 0;
+	tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+	bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, (char*)iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+			__FUNCTION__, tlv));
+		return -1;
+	}
+
+	return 0;
+}
+#endif /* SUPPORT_P2P_GO_PS */
+
+int
+dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf,
+	uint *reorder_info_len)
+{
+	uint8 type, len;
+	uint8* value;
+	uint8* tmpbuf;
+	uint16 remainder = (uint16)tlv_hdr_len;
+	uint16 processed = 0;
+	athost_wl_status_info_t* wlfc = NULL;
+	void* entry;
+
+	if ((dhd == NULL) || (pktbuf == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (dhd->proptxstatus_mode != WLFC_ONLY_AMPDU_HOSTREORDER) {
+		if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+			dhd_os_wlfc_unblock(dhd);
+			return WLFC_UNSUPPORTED;
+		}
+		wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	}
+
+	tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf);
+
+	if (remainder) {
+		while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) {
+			type = tmpbuf[processed];
+			if (type == WLFC_CTL_TYPE_FILLER) {
+				remainder -= 1;
+				processed += 1;
+				continue;
+			}
+
+			len  = tmpbuf[processed + 1];
+			value = &tmpbuf[processed + 2];
+
+			if (remainder < (2 + len))
+				break;
+
+			remainder -= 2 + len;
+			processed += 2 + len;
+			entry = NULL;
+
+			DHD_INFO(("%s():%d type %d remainder %d processed %d\n",
+				__FUNCTION__, __LINE__, type, remainder, processed));
+
+			if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS)
+				_dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf,
+					reorder_info_len);
+
+			if (wlfc == NULL) {
+				ASSERT(dhd->proptxstatus_mode == WLFC_ONLY_AMPDU_HOSTREORDER);
+
+				if (type != WLFC_CTL_TYPE_HOST_REORDER_RXPKTS &&
+					type != WLFC_CTL_TYPE_TRANS_ID)
+					DHD_INFO(("%s():%d dhd->wlfc_state is NULL yet!"
+					" type %d remainder %d processed %d\n",
+					__FUNCTION__, __LINE__, type, remainder, processed));
+				continue;
+			}
+
+			if (type == WLFC_CTL_TYPE_TXSTATUS) {
+				_dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry);
+			}
+			else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) {
+				uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS;
+
+				if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+					compcnt_offset += WLFC_CTL_VALUE_LEN_SEQ;
+				}
+				_dhd_wlfc_compressed_txstatus_update(dhd, value,
+					value[compcnt_offset], &entry);
+			}
+			else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK)
+				_dhd_wlfc_fifocreditback_indicate(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_RSSI)
+				_dhd_wlfc_rssi_indicate(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT)
+				_dhd_wlfc_credit_request(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET)
+				_dhd_wlfc_packet_request(dhd, value);
+
+			else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
+				(type == WLFC_CTL_TYPE_MAC_CLOSE))
+				_dhd_wlfc_psmode_update(dhd, value, type);
+
+			else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
+				(type == WLFC_CTL_TYPE_MACDESC_DEL))
+				_dhd_wlfc_mac_table_update(dhd, value, type);
+
+			else if (type == WLFC_CTL_TYPE_TRANS_ID)
+				_dhd_wlfc_dbg_senum_check(dhd, value);
+
+			else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
+				(type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) {
+				_dhd_wlfc_interface_update(dhd, value, type);
+			}
+
+			if (entry && WLFC_GET_REORDERSUPP(dhd->wlfc_mode)) {
+				/* suppress all packets for this mac entry from bus->txq */
+				_dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry);
+			}
+		}
+		if (remainder != 0 && wlfc) {
+			/* trouble..., something is not right */
+			wlfc->stats.tlv_parse_failed++;
+		}
+	}
+
+	if (wlfc)
+		wlfc->stats.dhd_hdrpulls++;
+
+	dhd_os_wlfc_unblock(dhd);
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf,
+	bool need_toggle_host_if)
+{
+	int ac, single_ac = 0, rc = BCME_OK;
+	dhd_wlfc_commit_info_t  commit_info;
+	athost_wl_status_info_t* ctx;
+	int bus_retry_count = 0;
+
+	uint8 tx_map = 0; /* packets (sent + in queue), bitmask for 4 ACs + BC/MC */
+	uint8 rx_map = 0; /* received packets, bitmask for 4 ACs + BC/MC */
+	uint8 packets_map = 0; /* packets in queue, bitmask for 4 ACs + BC/MC */
+	bool no_credit = FALSE;
+
+	int lender;
+
+	if ((dhdp == NULL) || (fcommit == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		if (pktbuf) {
+			DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0);
+		}
+		rc = WLFC_UNSUPPORTED;
+		goto exit2;
+	}
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+
+	if (dhdp->proptxstatus_module_ignore) {
+		if (pktbuf) {
+			uint32 htod = 0;
+			WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+			_dhd_wlfc_pushheader(ctx, pktbuf, FALSE, 0, 0, htod, 0, FALSE);
+			if (fcommit(commit_ctx, pktbuf))
+				PKTFREE(ctx->osh, pktbuf, TRUE);
+			rc = BCME_OK;
+		}
+		goto exit;
+	}
+
+	memset(&commit_info, 0, sizeof(commit_info));
+
+	/*
+	Commit packets for regular AC traffic, higher priority first.
+	First, use up the FIFO credits available to each AC. Based on the
+	distribution and the credits left, borrow from other ACs as applicable.
+
+	-NOTE:
+	If the bus between the host and firmware is overwhelmed by the
+	traffic from the host, it is possible that higher priority traffic
+	starves the lower priority queue. If that occurs often, we may
+	have to employ a weighted round-robin or ucode-based scheme to avoid
+	low priority packet starvation.
+	*/
+
+	if (pktbuf) {
+		DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1);
+		ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+		/* en-queue the packets to respective queue. */
+		rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac);
+		if (rc) {
+			_dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE);
+		} else {
+			ctx->stats.pktin++;
+			ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++;
+		}
+	}
+
+	for (ac = AC_COUNT; ac >= 0; ac--) {
+		if (dhdp->wlfc_rxpkt_chk) {
+			/* check rx packet */
+			uint32 curr_t = OSL_SYSUPTIME(), delta;
+
+			delta = curr_t - ctx->rx_timestamp[ac];
+			if (delta < WLFC_RX_DETECTION_THRESHOLD_MS) {
+				rx_map |= (1 << ac);
+			}
+		}
+
+		if (ctx->pkt_cnt_per_ac[ac] == 0) {
+			continue;
+		}
+		tx_map |= (1 << ac);
+		single_ac = ac + 1;
+		while (FALSE == dhdp->proptxstatus_txoff) {
+			/* packets from the delay queue (lower precedence than
+			 * suppressed ones) are fresh: they still need a header
+			 * and have no MAC entry assigned yet
+			 */
+			no_credit = (ctx->FIFO_credit[ac] < 1);
+			if (dhdp->proptxstatus_credit_ignore ||
+				((ac == AC_COUNT) && !ctx->bcmc_credit_supported)) {
+				no_credit = FALSE;
+			}
+
+			lender = -1;
+#ifdef LIMIT_BORROW
+			if (no_credit && (ac < AC_COUNT) && (tx_map >= rx_map)) {
+				/* try borrow from lower priority */
+				lender = _dhd_wlfc_borrow_credit(ctx, ac - 1, ac, FALSE);
+				if (lender != -1) {
+					no_credit = FALSE;
+				}
+			}
+#endif
+			commit_info.needs_hdr = 1;
+			commit_info.mac_entry = NULL;
+			commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+				&(commit_info.ac_fifo_credit_spent),
+				&(commit_info.needs_hdr),
+				&(commit_info.mac_entry),
+				no_credit);
+			commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+				eWLFC_PKTTYPE_SUPPRESSED;
+
+			if (commit_info.p == NULL) {
+#ifdef LIMIT_BORROW
+				if (lender != -1) {
+					_dhd_wlfc_return_credit(ctx, lender, ac);
+				}
+#endif
+				break;
+			}
+
+			if (!dhdp->proptxstatus_credit_ignore && (lender == -1)) {
+				ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent);
+			}
+			/* at this point we either have credit or no credit is needed */
+			rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, fcommit,
+				commit_ctx);
+
+			/* Bus commits may fail (e.g. flow control); abort after retries */
+			if (rc == BCME_OK) {
+				if (commit_info.ac_fifo_credit_spent && (lender == -1)) {
+					ctx->FIFO_credit[ac]--;
+				}
+#ifdef LIMIT_BORROW
+				else if (!commit_info.ac_fifo_credit_spent && (lender != -1)) {
+					_dhd_wlfc_return_credit(ctx, lender, ac);
+				}
+#endif
+			} else {
+#ifdef LIMIT_BORROW
+				if (lender != -1) {
+					_dhd_wlfc_return_credit(ctx, lender, ac);
+				}
+#endif
+				bus_retry_count++;
+				if (bus_retry_count >= BUS_RETRIES) {
+					DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+					goto exit;
+				}
+			}
+		}
+
+		if (ctx->pkt_cnt_per_ac[ac]) {
+			packets_map |= (1 << ac);
+		}
+	}
+
+	if ((tx_map == 0) || dhdp->proptxstatus_credit_ignore) {
+		/* nothing was sent and nothing remains queued */
+		rc = BCME_OK;
+		goto exit;
+	}
+
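+	/* (tx_map & (tx_map - 1)) == 0 means at most one AC bit is set,
+	 * i.e. only a single AC has had traffic in this pass
+	 */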
+	if (((tx_map & (tx_map - 1)) == 0) && (tx_map >= rx_map)) {
+		/* only one tx AC exists and there is no higher-priority rx AC */
+		if ((single_ac == ctx->single_ac) && ctx->allow_credit_borrow) {
+			ac = single_ac - 1;
+		} else {
+			uint32 delta;
+			uint32 curr_t = OSL_SYSUPTIME();
+
+			if (single_ac != ctx->single_ac) {
+				/* new single ac traffic (first single ac or different single ac) */
+				ctx->allow_credit_borrow = 0;
+				ctx->single_ac_timestamp = curr_t;
+				ctx->single_ac = (uint8)single_ac;
+				rc = BCME_OK;
+				goto exit;
+			}
+			/* same AC traffic, check whether it has lasted long enough */
+			delta = curr_t - ctx->single_ac_timestamp;
+
+			if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) {
+				/* waited long enough, borrowing is now allowed */
+				ctx->allow_credit_borrow = 1;
+				ac = single_ac - 1;
+			} else {
+				rc = BCME_OK;
+				goto exit;
+			}
+		}
+	} else {
+		/* If we have multiple AC traffic, turn off borrowing, mark time and bail out */
+		ctx->allow_credit_borrow = 0;
+		ctx->single_ac_timestamp = 0;
+		ctx->single_ac = 0;
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	if (packets_map == 0) {
+		/* nothing to send, skip borrow */
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	/* At this point, borrow all remaining credits for this single AC */
+	while (FALSE == dhdp->proptxstatus_txoff) {
+#ifdef LIMIT_BORROW
+		if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac, TRUE)) == -1) {
+			break;
+		}
+#endif
+		commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+			&(commit_info.ac_fifo_credit_spent),
+			&(commit_info.needs_hdr),
+			&(commit_info.mac_entry),
+			FALSE);
+		if (commit_info.p == NULL) {
+			/* before borrowing, only one AC had traffic and that AC's queue is now empty */
+#ifdef LIMIT_BORROW
+			_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			break;
+		}
+
+		commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+			eWLFC_PKTTYPE_SUPPRESSED;
+
+		rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+		     fcommit, commit_ctx);
+
+		/* Bus commits may fail (e.g. flow control); abort after retries */
+		if (rc == BCME_OK) {
+
+			if (commit_info.ac_fifo_credit_spent) {
+#ifndef LIMIT_BORROW
+				ctx->FIFO_credit[ac]--;
+#endif
+			} else {
+#ifdef LIMIT_BORROW
+				_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			}
+		} else {
+#ifdef LIMIT_BORROW
+			_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			bus_retry_count++;
+			if (bus_retry_count >= BUS_RETRIES) {
+				DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+				goto exit;
+			}
+		}
+	}
+
+exit:
+	if (need_toggle_host_if && ctx->toggle_host_if) {
+		ctx->toggle_host_if = 0;
+	}
+
+exit2:
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
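+/*
+ * Called from the bus layer when a transmit to the dongle finishes. Signal-only
+ * packets are freed immediately; failed commits are rolled back and completed
+ * to the network stack; successful commits move the packet to the AFQ or mark
+ * its hanger slot as TX-complete pending a txstatus from firmware.
+ */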
+int
+dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
+{
+	athost_wl_status_info_t* wlfc;
+	void* pout = NULL;
+	int rtn = BCME_OK;
+	if ((dhd == NULL) || (txp == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		rtn = WLFC_UNSUPPORTED;
+		goto EXIT;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
+#ifdef PROP_TXSTATUS_DEBUG
+		wlfc->stats.signal_only_pkts_freed++;
+#endif
+		/* signal-only packet: strip the wl header and free it */
+		_dhd_wlfc_pullheader(wlfc, txp);
+		PKTFREE(wlfc->osh, txp, TRUE);
+		goto EXIT;
+	}
+
+	if (!success || dhd->proptxstatus_txstatus_ignore) {
+		wlfc_mac_descriptor_t *entry = _dhd_wlfc_find_table_entry(wlfc, txp);
+
+		WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
+			__FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_hanger_poppkt(wlfc->hanger, WL_TXSTATUS_GET_HSLOT(
+				DHD_PKTTAG_H2DTAG(PKTTAG(txp))), &pout, TRUE);
+			ASSERT(txp == pout);
+		}
+
+		/* indicate failure and free the packet */
+		dhd_txcomplete(dhd, txp, success);
+
+		/* return the credit, if necessary */
+		_dhd_wlfc_return_implied_credit(wlfc, txp);
+
+		entry->transit_count--;
+		if (entry->suppressed && (--entry->suppr_transit_count == 0)) {
+			entry->suppressed = FALSE;
+		}
+		wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(txp))][DHD_PKTTAG_FIFO(PKTTAG(txp))]--;
+		wlfc->stats.pktout++;
+		PKTFREE(wlfc->osh, txp, TRUE);
+	} else {
+		/* bus confirmed pkt went to firmware side */
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_enque_afq(wlfc, txp);
+		} else {
+			int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp)));
+			_dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+				WLFC_HANGER_PKT_STATE_TXCOMPLETE, -1);
+		}
+	}
+
+EXIT:
+	dhd_os_wlfc_unblock(dhd);
+	return rtn;
+}
+
+int
+dhd_wlfc_init(dhd_pub_t *dhd)
+{
+	char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+	/* enable all signals & indicate host proptxstatus logic is active */
+	uint32 tlv, mode, fw_caps;
+	int ret = 0;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+	if (dhd->wlfc_enabled) {
+		DHD_ERROR(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
+		dhd_os_wlfc_unblock(dhd);
+		return BCME_OK;
+	}
+	dhd->wlfc_enabled = TRUE;
+	dhd_os_wlfc_unblock(dhd);
+
+	tlv = WLFC_FLAGS_RSSI_SIGNALS |
+		WLFC_FLAGS_XONXOFF_SIGNALS |
+		WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+		WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
+		WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+
+	/*
+	try to enable/disable signaling by sending the "tlv" iovar. If that fails,
+	fall back to no flow control? Print a message for now.
+	*/
+
+	/* enable proptxstatus signaling by default */
+	bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n"));
+	}
+	else {
+		/*
+		Leaving the message for now; it should be removed once the tlv
+		situation is stable.
+		*/
+		DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+			dhd->wlfc_enabled?"enabled":"disabled", tlv));
+	}
+
+	/* query caps */
+	ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf));
+	if (ret > 0) {
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+	}
+
+	if (ret >= 0) {
+		fw_caps = *((uint32 *)iovbuf);
+		mode = 0;
+		DHD_ERROR(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
+
+		if (WLFC_IS_OLD_DEF(fw_caps)) {
+			/* enable proptxstatus v2 by default */
+			mode = WLFC_MODE_AFQ;
+		} else {
+			WLFC_SET_AFQ(mode, WLFC_GET_AFQ(fw_caps));
+			WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps));
+			WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps));
+		}
+		ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf));
+		if (ret > 0) {
+			ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+		}
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->wlfc_mode = 0;
+	if (ret >= 0) {
+		if (WLFC_IS_OLD_DEF(mode)) {
+			WLFC_SET_AFQ(dhd->wlfc_mode, (mode == WLFC_MODE_AFQ));
+		} else {
+			dhd->wlfc_mode = mode;
+		}
+	}
+	DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (dhd->plat_init)
+		dhd->plat_init((void *)dhd);
+
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_hostreorder_init(dhd_pub_t *dhd)
+{
+	char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+	/* enable only ampdu hostreorder here */
+	uint32 tlv;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	DHD_TRACE(("%s():%d Enter\n", __FUNCTION__, __LINE__));
+
+	tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+	/* enable proptxstatus signaling by default */
+	bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv signaling\n",
+			__FUNCTION__));
+	}
+	else {
+		/*
+		Leaving the message for now; it should be removed once the tlv
+		situation is stable.
+		*/
+		DHD_ERROR(("%s(): successful bdcv2 tlv signaling, %d\n",
+			__FUNCTION__, tlv));
+	}
+
+	dhd_os_wlfc_block(dhd);
+	dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER;
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	_dhd_wlfc_cleanup_txq(dhd, fn, arg);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/* release all packet resources */
+int
+dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	_dhd_wlfc_cleanup(dhd, fn, arg);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_deinit(dhd_pub_t *dhd)
+{
+	char iovbuf[32]; /* Room for "ampdu_hostreorder" or "tlv" + '\0' + parameter */
+	/* cleanup all psq related resources */
+	athost_wl_status_info_t* wlfc;
+	uint32 tlv = 0;
+	uint32 hostreorder = 0;
+	int ret = BCME_OK;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+	if (!dhd->wlfc_enabled) {
+		DHD_ERROR(("%s():%d, Already disabled!\n", __FUNCTION__, __LINE__));
+		dhd_os_wlfc_unblock(dhd);
+		return BCME_OK;
+	}
+	dhd->wlfc_enabled = FALSE;
+	dhd_os_wlfc_unblock(dhd);
+
+	/* query ampdu hostreorder */
+	bcm_mkiovar("ampdu_hostreorder", NULL, 0, iovbuf, sizeof(iovbuf));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+	if (ret == BCME_OK)
+		hostreorder = *((uint32 *)iovbuf);
+	else {
+		hostreorder = 0;
+		DHD_ERROR(("%s():%d, ampdu_hostreorder get failed Err = %d\n",
+			__FUNCTION__, __LINE__, ret));
+	}
+
+	if (hostreorder) {
+		tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+		DHD_ERROR(("%s():%d, maintain HOST RXRERODER flag in tlv\n",
+			__FUNCTION__, __LINE__));
+	}
+
+	/* Disable proptxstatus signaling for deinit */
+	bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+	if (ret == BCME_OK) {
+		/*
+		Leaving the message for now; it should be removed once the tlv
+		situation is stable.
+		*/
+		DHD_ERROR(("%s():%d successfully %s bdcv2 tlv signaling, %d\n",
+			__FUNCTION__, __LINE__,
+			dhd->wlfc_enabled?"enabled":"disabled", tlv));
+	} else
+		DHD_ERROR(("%s():%d failed to enable/disable bdcv2 tlv signaling Err = %d\n",
+			__FUNCTION__, __LINE__, ret));
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+#ifdef PROP_TXSTATUS_DEBUG
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode))
+	{
+		int i;
+		wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+		for (i = 0; i < h->max_items; i++) {
+			if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) {
+				WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n",
+					__FUNCTION__, i, h->items[i].pkt,
+					DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt))));
+			}
+		}
+	}
+#endif
+
+	_dhd_wlfc_cleanup(dhd, NULL, NULL);
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		/* delete hanger */
+		_dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger);
+	}
+
+
+	/* free top structure */
+	MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+	dhd->wlfc_state = NULL;
+	dhd->proptxstatus_mode = hostreorder ?
+		WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (dhd->plat_deinit)
+		dhd->plat_deinit((void *)dhd);
+	return BCME_OK;
+}
+
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_interface_entry_update(dhdp->wlfc_state, action, ifid, iftype, ea);
+
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_FIFOcreditmap_update(dhdp->wlfc_state, event_data);
+
+	dhd_os_wlfc_unblock(dhdp);
+
+	return rc;
+}
+
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_BCMCCredit_support_update(dhdp->wlfc_state);
+
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+int
+dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	int i;
+	uint8* ea;
+	athost_wl_status_info_t* wlfc;
+	wlfc_hanger_t* h;
+	wlfc_mac_descriptor_t* mac_table;
+	wlfc_mac_descriptor_t* interfaces;
+	char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
+
+	if (!dhdp || !strbuf) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	h = (wlfc_hanger_t*)wlfc->hanger;
+	if (h == NULL) {
+		bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+	}
+
+	mac_table = wlfc->destination_entries.nodes;
+	interfaces = wlfc->destination_entries.interfaces;
+	bcm_bprintf(strbuf, "---- wlfc stats ----\n");
+
+	if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+		h = (wlfc_hanger_t*)wlfc->hanger;
+		if (h == NULL) {
+			bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+		} else {
+			bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
+				"f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
+				h->pushed,
+				h->popped,
+				h->failed_to_push,
+				h->failed_to_pop,
+				h->failed_slotfind,
+				(h->pushed - h->popped));
+		}
+	}
+
+	bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
+		"(dq_full,rollback_fail) = (%d,%d,%d,%d), (%d,%d)\n",
+		wlfc->stats.tlv_parse_failed,
+		wlfc->stats.credit_request_failed,
+		wlfc->stats.mac_update_failed,
+		wlfc->stats.psmode_update_failed,
+		wlfc->stats.delayq_full_error,
+		wlfc->stats.rollback_failed);
+
+	bcm_bprintf(strbuf, "PKTS (init_credit,credit,sent,drop_d,drop_s,outoforder) "
+		"(AC0[%d,%d,%d,%d,%d,%d],AC1[%d,%d,%d,%d,%d,%d],AC2[%d,%d,%d,%d,%d,%d],"
+		"AC3[%d,%d,%d,%d,%d,%d],BC_MC[%d,%d,%d,%d,%d,%d])\n",
+		wlfc->Init_FIFO_credit[0], wlfc->FIFO_credit[0], wlfc->stats.send_pkts[0],
+		wlfc->stats.drop_pkts[0], wlfc->stats.drop_pkts[1], wlfc->stats.ooo_pkts[0],
+		wlfc->Init_FIFO_credit[1], wlfc->FIFO_credit[1], wlfc->stats.send_pkts[1],
+		wlfc->stats.drop_pkts[2], wlfc->stats.drop_pkts[3], wlfc->stats.ooo_pkts[1],
+		wlfc->Init_FIFO_credit[2], wlfc->FIFO_credit[2], wlfc->stats.send_pkts[2],
+		wlfc->stats.drop_pkts[4], wlfc->stats.drop_pkts[5], wlfc->stats.ooo_pkts[2],
+		wlfc->Init_FIFO_credit[3], wlfc->FIFO_credit[3], wlfc->stats.send_pkts[3],
+		wlfc->stats.drop_pkts[6], wlfc->stats.drop_pkts[7], wlfc->stats.ooo_pkts[3],
+		wlfc->Init_FIFO_credit[4], wlfc->FIFO_credit[4], wlfc->stats.send_pkts[4],
+		wlfc->stats.drop_pkts[8], wlfc->stats.drop_pkts[9], wlfc->stats.ooo_pkts[4]);
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		if (interfaces[i].occupied) {
+			char* iftype_desc;
+
+			if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
+				iftype_desc = "<Unknown";
+			else
+				iftype_desc = iftypes[interfaces[i].iftype];
+
+			ea = interfaces[i].ea;
+			bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s "
+				"netif_flow_control:%s\n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				interfaces[i].interface_id,
+				iftype_desc, ((wlfc->hostif_flow_state[i] == OFF)
+				? " OFF":" ON"));
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit),(trans,supp_trans)"
+				"= (%d,%s,%d),(%d,%d)\n",
+				i,
+				interfaces[i].psq.len,
+				((interfaces[i].state ==
+				WLFC_STATE_OPEN) ? "OPEN":"CLOSE"),
+				interfaces[i].requested_credit,
+				interfaces[i].transit_count, interfaces[i].suppr_transit_count);
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].PSQ"
+				"(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+				"(delay3,sup3,afq3),(delay4,sup4,afq4) = (%d,%d,%d),"
+				"(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+				i,
+				interfaces[i].psq.q[0].len,
+				interfaces[i].psq.q[1].len,
+				interfaces[i].afq.q[0].len,
+				interfaces[i].psq.q[2].len,
+				interfaces[i].psq.q[3].len,
+				interfaces[i].afq.q[1].len,
+				interfaces[i].psq.q[4].len,
+				interfaces[i].psq.q[5].len,
+				interfaces[i].afq.q[2].len,
+				interfaces[i].psq.q[6].len,
+				interfaces[i].psq.q[7].len,
+				interfaces[i].afq.q[3].len,
+				interfaces[i].psq.q[8].len,
+				interfaces[i].psq.q[9].len,
+				interfaces[i].afq.q[4].len);
+		}
+	}
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (mac_table[i].occupied) {
+			ea = mac_table[i].ea;
+			bcm_bprintf(strbuf, "MAC_table[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d \n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				mac_table[i].interface_id);
+
+			bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit),(trans,supp_trans)"
+				"= (%d,%s,%d),(%d,%d)\n",
+				i,
+				mac_table[i].psq.len,
+				((mac_table[i].state ==
+				WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+				mac_table[i].requested_credit,
+				mac_table[i].transit_count, mac_table[i].suppr_transit_count);
+#ifdef PROP_TXSTATUS_DEBUG
+			bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
+				i, mac_table[i].opened_ct, mac_table[i].closed_ct);
+#endif
+			bcm_bprintf(strbuf, "MAC_table[%d].PSQ"
+				"(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+				"(delay3,sup3,afq3),(delay4,sup4,afq4) =(%d,%d,%d),"
+				"(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+				i,
+				mac_table[i].psq.q[0].len,
+				mac_table[i].psq.q[1].len,
+				mac_table[i].afq.q[0].len,
+				mac_table[i].psq.q[2].len,
+				mac_table[i].psq.q[3].len,
+				mac_table[i].afq.q[1].len,
+				mac_table[i].psq.q[4].len,
+				mac_table[i].psq.q[5].len,
+				mac_table[i].afq.q[2].len,
+				mac_table[i].psq.q[6].len,
+				mac_table[i].psq.q[7].len,
+				mac_table[i].afq.q[3].len,
+				mac_table[i].psq.q[8].len,
+				mac_table[i].psq.q[9].len,
+				mac_table[i].afq.q[4].len);
+
+		}
+	}
+
+#ifdef PROP_TXSTATUS_DEBUG
+	{
+		int avg;
+		int moving_avg = 0;
+		int moving_samples;
+
+		if (wlfc->stats.latency_sample_count) {
+			moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32);
+
+			for (i = 0; i < moving_samples; i++)
+				moving_avg += wlfc->stats.deltas[i];
+			moving_avg /= moving_samples;
+
+			avg = (100 * wlfc->stats.total_status_latency) /
+				wlfc->stats.latency_sample_count;
+			bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = "
+				"(%d.%d, %03d, %03d)\n",
+				moving_samples, avg/100, (avg - (avg/100)*100),
+				wlfc->stats.latency_most_recent,
+				moving_avg);
+		}
+	}
+
+	bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), "
+		"back = (%d,%d,%d,%d,%d,%d)\n",
+		wlfc->stats.fifo_credits_sent[0],
+		wlfc->stats.fifo_credits_sent[1],
+		wlfc->stats.fifo_credits_sent[2],
+		wlfc->stats.fifo_credits_sent[3],
+		wlfc->stats.fifo_credits_sent[4],
+		wlfc->stats.fifo_credits_sent[5],
+
+		wlfc->stats.fifo_credits_back[0],
+		wlfc->stats.fifo_credits_back[1],
+		wlfc->stats.fifo_credits_back[2],
+		wlfc->stats.fifo_credits_back[3],
+		wlfc->stats.fifo_credits_back[4],
+		wlfc->stats.fifo_credits_back[5]);
+	{
+		uint32 fifo_cr_sent = 0;
+		uint32 fifo_cr_acked = 0;
+		uint32 request_cr_sent = 0;
+		uint32 request_cr_ack = 0;
+		uint32 bc_mc_cr_ack = 0;
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) {
+			fifo_cr_sent += wlfc->stats.fifo_credits_sent[i];
+		}
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) {
+			fifo_cr_acked += wlfc->stats.fifo_credits_back[i];
+		}
+
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_sent +=
+					wlfc->destination_entries.nodes[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_sent +=
+				wlfc->destination_entries.interfaces[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.nodes[i].dstncredit_acks;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.interfaces[i].dstncredit_acks;
+			}
+		}
+		bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d),"
+			"other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)",
+			fifo_cr_sent, fifo_cr_acked,
+			request_cr_sent, request_cr_ack,
+			wlfc->destination_entries.other.dstncredit_acks,
+			bc_mc_cr_ack,
+			wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed);
+	}
+#endif /* PROP_TXSTATUS_DEBUG */
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out),(dropped,hdr_only,wlc_tossed)"
+		"(freed,free_err,rollback)) = "
+		"((%d,%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n",
+		wlfc->stats.pktin,
+		wlfc->stats.pkt2bus,
+		wlfc->stats.txstatus_in,
+		wlfc->stats.dhd_hdrpulls,
+		wlfc->stats.pktout,
+
+		wlfc->stats.pktdropped,
+		wlfc->stats.wlfc_header_only_pkt,
+		wlfc->stats.wlc_tossed_pkts,
+
+		wlfc->stats.pkt_freed,
+		wlfc->stats.pkt_free_err, wlfc->stats.rollback);
+
+	bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = "
+		"((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n",
+		wlfc->stats.d11_suppress,
+		wlfc->stats.wl_suppress,
+		wlfc->stats.bad_suppress,
+
+		wlfc->stats.psq_d11sup_enq,
+		wlfc->stats.psq_wlsup_enq,
+		wlfc->stats.psq_hostq_enq,
+		wlfc->stats.mac_handle_notfound,
+
+		wlfc->stats.psq_d11sup_retx,
+		wlfc->stats.psq_wlsup_retx,
+		wlfc->stats.psq_hostq_retx);
+
+	bcm_bprintf(strbuf, "wlfc- cleanup(txq,psq,fw) = (%d,%d,%d)\n",
+		wlfc->stats.cleanup_txq_cnt,
+		wlfc->stats.cleanup_psq_cnt,
+		wlfc->stats.cleanup_fw_cnt);
+
+	bcm_bprintf(strbuf, "wlfc- generic error: %d\n", wlfc->stats.generic_error);
+
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		bcm_bprintf(strbuf, "wlfc- if[%d], pkt_cnt_in_q/AC[0-4] = (%d,%d,%d,%d,%d)\n", i,
+			wlfc->pkt_cnt_in_q[i][0],
+			wlfc->pkt_cnt_in_q[i][1],
+			wlfc->pkt_cnt_in_q[i][2],
+			wlfc->pkt_cnt_in_q[i][3],
+			wlfc->pkt_cnt_in_q[i][4]);
+	}
+	bcm_bprintf(strbuf, "\n");
+
+	dhd_os_wlfc_unblock(dhdp);
+	return BCME_OK;
+}
+
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd)
+{
+	athost_wl_status_info_t* wlfc;
+	wlfc_hanger_t* hanger;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		hanger = (wlfc_hanger_t*)wlfc->hanger;
+
+		hanger->pushed = 0;
+		hanger->popped = 0;
+		hanger->failed_slotfind = 0;
+		hanger->failed_to_pop = 0;
+		hanger->failed_to_push = 0;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_enabled;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_state ? dhd->proptxstatus_mode : 0;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (dhd->wlfc_state) {
+		dhd->proptxstatus_mode = val & 0xff;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf)
+{
+	athost_wl_status_info_t* wlfc;
+	bool rc = FALSE;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return FALSE;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return FALSE;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	if (PKTLEN(wlfc->osh, pktbuf) == 0) {
+		wlfc->stats.wlfc_header_only_pkt++;
+		rc = TRUE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+}
+
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock)
+{
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	if (bAcquireLock) {
+		dhd_os_wlfc_block(dhdp);
+	}
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE) ||
+		dhdp->proptxstatus_module_ignore) {
+		if (bAcquireLock) {
+			dhd_os_wlfc_unblock(dhdp);
+		}
+		return WLFC_UNSUPPORTED;
+	}
+
+	if (state != dhdp->proptxstatus_txoff) {
+		dhdp->proptxstatus_txoff = state;
+	}
+
+	if (bAcquireLock) {
+		dhd_os_wlfc_unblock(dhdp);
+	}
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio)
+{
+	athost_wl_status_info_t* wlfc;
+	int rx_path_ac = -1;
+
+	if ((dhd == NULL) || (prio >= NUMPRIO)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_rxpkt_chk) {
+		dhd_os_wlfc_unblock(dhd);
+		return BCME_OK;
+	}
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	rx_path_ac = prio2fifo[prio];
+	wlfc->rx_timestamp[rx_path_ac] = OSL_SYSUPTIME();
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_module_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val)
+{
+	char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+	uint32 tlv = 0;
+	bool bChanged = FALSE;
+
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if ((bool)val != dhd->proptxstatus_module_ignore) {
+		dhd->proptxstatus_module_ignore = (val != 0);
+		/* force txstatus_ignore sync with proptxstatus_module_ignore */
+		dhd->proptxstatus_txstatus_ignore = dhd->proptxstatus_module_ignore;
+		if (FALSE == dhd->proptxstatus_module_ignore) {
+			tlv = WLFC_FLAGS_RSSI_SIGNALS |
+				WLFC_FLAGS_XONXOFF_SIGNALS |
+				WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+				WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE;
+		}
+		/* always enable host reorder */
+		tlv |= WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+		bChanged = TRUE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (bChanged) {
+		/* apply the selected proptxstatus signaling flags */
+		bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+		if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+			DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+				__FUNCTION__, tlv));
+		}
+		else {
+			DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n",
+				__FUNCTION__, tlv));
+		}
+	}
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_credit_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->proptxstatus_credit_ignore = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_txstatus_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->proptxstatus_txstatus_ignore = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_rxpkt_chk;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->wlfc_rxpkt_chk = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+#endif /* PROP_TXSTATUS */
diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
new file mode 100644
index 0000000000000000000000000000000000000000..e1f748a23321b0b34ee11ac3b3c7a4b3e6e11bd3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
@@ -0,0 +1,505 @@
+/*
+* $Copyright Open 2009 Broadcom Corporation$
+* $Id: dhd_wlfc.h 490028 2014-07-09 05:58:25Z $
+*
+*/
+#ifndef __wlfc_host_driver_definitions_h__
+#define __wlfc_host_driver_definitions_h__
+
+#ifdef QMONITOR
+#include <dhd_qmon.h>
+#endif
+
+/* #define OOO_DEBUG */
+
+#define WLFC_UNSUPPORTED -9999
+
+#define WLFC_NO_TRAFFIC	-1
+#define WLFC_MULTI_TRAFFIC 0
+
+#define BUS_RETRIES 1	/* # of retries before aborting a bus tx operation */
+
+/* 16 bits will provide an absolute max of 65536 slots */
+#define WLFC_HANGER_MAXITEMS 3072
+
+#define WLFC_HANGER_ITEM_STATE_FREE			1
+#define WLFC_HANGER_ITEM_STATE_INUSE			2
+#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED		3
+
+#define WLFC_HANGER_PKT_STATE_TXSTATUS			1
+#define WLFC_HANGER_PKT_STATE_TXCOMPLETE		2
+#define WLFC_HANGER_PKT_STATE_CLEANUP			4
+
+typedef enum {
+	Q_TYPE_PSQ,
+	Q_TYPE_AFQ
+} q_type_t;
+
+typedef enum ewlfc_packet_state {
+	eWLFC_PKTTYPE_NEW,
+	eWLFC_PKTTYPE_DELAYED,
+	eWLFC_PKTTYPE_SUPPRESSED,
+	eWLFC_PKTTYPE_MAX
+} ewlfc_packet_state_t;
+
+typedef enum ewlfc_mac_entry_action {
+	eWLFC_MAC_ENTRY_ACTION_ADD,
+	eWLFC_MAC_ENTRY_ACTION_DEL,
+	eWLFC_MAC_ENTRY_ACTION_UPDATE,
+	eWLFC_MAC_ENTRY_ACTION_MAX
+} ewlfc_mac_entry_action_t;
+
+typedef struct wlfc_hanger_item {
+	uint8	state;
+	uint8   gen;
+	uint8	pkt_state;
+	uint8	pkt_txstatus;
+	uint32	identifier;
+	void*	pkt;
+#ifdef PROP_TXSTATUS_DEBUG
+	uint32	push_time;
+#endif
+	struct wlfc_hanger_item *next;
+} wlfc_hanger_item_t;
+
+typedef struct wlfc_hanger {
+	int max_items;
+	uint32 pushed;
+	uint32 popped;
+	uint32 failed_to_push;
+	uint32 failed_to_pop;
+	uint32 failed_slotfind;
+	uint32 slot_pos;
+	wlfc_hanger_item_t items[1];
+} wlfc_hanger_t;
+
+#define WLFC_HANGER_SIZE(n)	((sizeof(wlfc_hanger_t) - \
+	sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t)))
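+/*
+ * The trailing items[1] member makes wlfc_hanger_t a variable-length
+ * structure; WLFC_HANGER_SIZE(n) gives the allocation size for a hanger
+ * holding n items. Illustrative sketch only (the actual allocation is
+ * presumably done by the hanger create routine in dhd_wlfc.c, not shown
+ * here):
+ *
+ *	wlfc_hanger_t *h = (wlfc_hanger_t *)MALLOC(osh,
+ *		WLFC_HANGER_SIZE(WLFC_HANGER_MAXITEMS));
+ *	if (h != NULL)
+ *		h->max_items = WLFC_HANGER_MAXITEMS;
+ */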
+
+#define WLFC_STATE_OPEN		1
+#define WLFC_STATE_CLOSE	2
+
+#define WLFC_PSQ_PREC_COUNT		((AC_COUNT + 1) * 2) /* delay + suppress queue for each AC and bc/mc */
+#define WLFC_AFQ_PREC_COUNT		(AC_COUNT + 1)
+
+#define WLFC_PSQ_LEN			2048
+
+#define WLFC_FLOWCONTROL_HIWATER	(2048 - 256)
+#define WLFC_FLOWCONTROL_LOWATER	256
+
+#define WLFC_LOG_BUF_SIZE		(1024*1024)
+
+typedef struct wlfc_mac_descriptor {
+	uint8 occupied;
+	uint8 interface_id;
+	uint8 iftype;
+	uint8 state;
+	uint8 ac_bitmap; /* for APSD */
+	uint8 requested_credit;
+	uint8 requested_packet;
+	uint8 ea[ETHER_ADDR_LEN];
+	/*
+	Maintain a per-(MAC,AC) sequence count for packets going to the
+	device, with an extra entry for bc/mc.
+	*/
+	uint8 seq[AC_COUNT + 1];
+	uint8 generation;
+	struct pktq	psq;
+	/* packets at firmware */
+	struct pktq	afq;
+	/* The AC pending bitmap that was reported to the fw at last change */
+	uint8 traffic_lastreported_bmp;
+	/* The new AC pending bitmap */
+	uint8 traffic_pending_bmp;
+	/* 1= send on next opportunity */
+	uint8 send_tim_signal;
+	uint8 mac_handle;
+	/* Number of packets at dongle for this entry. */
+	uint transit_count;
+	/* Number of suppressions to wait for before evicting from delayQ */
+	uint suppr_transit_count;
+	/* flag. TRUE when in suppress state */
+	uint8 suppressed;
+
+#ifdef QMONITOR
+	dhd_qmon_t qmon;
+#endif /* QMONITOR */
+
+#ifdef PROP_TXSTATUS_DEBUG
+	uint32 dstncredit_sent_packets;
+	uint32 dstncredit_acks;
+	uint32 opened_ct;
+	uint32 closed_ct;
+#endif
+	struct wlfc_mac_descriptor* prev;
+	struct wlfc_mac_descriptor* next;
+} wlfc_mac_descriptor_t;
+
+typedef struct dhd_wlfc_commit_info {
+	uint8					needs_hdr;
+	uint8					ac_fifo_credit_spent;
+	ewlfc_packet_state_t	pkt_type;
+	wlfc_mac_descriptor_t*	mac_entry;
+	void*					p;
+} dhd_wlfc_commit_info_t;
+
+#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\
+	entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0)
+
+#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++
+#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)]
+
+typedef struct athost_wl_stat_counters {
+	uint32	pktin;
+	uint32	pktout;
+	uint32	pkt2bus;
+	uint32	pktdropped;
+	uint32	tlv_parse_failed;
+	uint32	rollback;
+	uint32	rollback_failed;
+	uint32	delayq_full_error;
+	uint32	credit_request_failed;
+	uint32	packet_request_failed;
+	uint32	mac_update_failed;
+	uint32	psmode_update_failed;
+	uint32	interface_update_failed;
+	uint32	wlfc_header_only_pkt;
+	uint32	txstatus_in;
+	uint32	d11_suppress;
+	uint32	wl_suppress;
+	uint32	bad_suppress;
+	uint32	pkt_freed;
+	uint32	pkt_free_err;
+	uint32	psq_wlsup_retx;
+	uint32	psq_wlsup_enq;
+	uint32	psq_d11sup_retx;
+	uint32	psq_d11sup_enq;
+	uint32	psq_hostq_retx;
+	uint32	psq_hostq_enq;
+	uint32	mac_handle_notfound;
+	uint32	wlc_tossed_pkts;
+	uint32	dhd_hdrpulls;
+	uint32	generic_error;
+	/* an extra one for bc/mc traffic */
+	uint32	send_pkts[AC_COUNT + 1];
+	uint32	drop_pkts[WLFC_PSQ_PREC_COUNT];
+	uint32	ooo_pkts[AC_COUNT + 1];
+#ifdef PROP_TXSTATUS_DEBUG
+	/* all pkt2bus -> txstatus latency accumulated */
+	uint32	latency_sample_count;
+	uint32	total_status_latency;
+	uint32	latency_most_recent;
+	int	idx_delta;
+	uint32	deltas[10];
+	uint32	fifo_credits_sent[6];
+	uint32	fifo_credits_back[6];
+	uint32	dropped_qfull[6];
+	uint32	signal_only_pkts_sent;
+	uint32	signal_only_pkts_freed;
+#endif
+	uint32	cleanup_txq_cnt;
+	uint32	cleanup_psq_cnt;
+	uint32	cleanup_fw_cnt;
+} athost_wl_stat_counters_t;
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_sent[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_back[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \
+	(ctx)->stats.dropped_qfull[(ac)]++;} while (0)
+#else
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0)
+#endif
+
+#define WLFC_FCMODE_NONE				0
+#define WLFC_FCMODE_IMPLIED_CREDIT		1
+#define WLFC_FCMODE_EXPLICIT_CREDIT		2
+#define WLFC_ONLY_AMPDU_HOSTREORDER		3
+
+/* Reserved credits ratio when borrowed by higher priority */
+#define WLFC_BORROW_LIMIT_RATIO		4
+
+/* How long to defer borrowing in milliseconds */
+#define WLFC_BORROW_DEFER_PERIOD_MS 100
+
+/* How long to defer flow control in milliseconds */
+#define WLFC_FC_DEFER_PERIOD_MS 200
+
+/* Window for detecting traffic occurrence per AC, in milliseconds */
+#define WLFC_RX_DETECTION_THRESHOLD_MS	100
+
+/* Mask to represent available ACs (note: BC/MC is ignored) */
+#define WLFC_AC_MASK 0xF
+
+typedef struct athost_wl_status_info {
+	uint8	last_seqid_to_wlc;
+
+	/* OSL handle */
+	osl_t*	osh;
+	/* dhd pub */
+	void*	dhdp;
+
+	/* stats */
+	athost_wl_stat_counters_t stats;
+
+	int		Init_FIFO_credit[AC_COUNT + 2];
+
+	/* the additional ones are for bc/mc and ATIM FIFO */
+	int		FIFO_credit[AC_COUNT + 2];
+
+	/* Credit borrow counts for each FIFO from each of the other FIFOs */
+	int		credits_borrowed[AC_COUNT + 2][AC_COUNT + 2];
+
+	/* packet hanger and MAC->handle lookup table */
+	void*	hanger;
+	struct {
+		/* table for individual nodes */
+		wlfc_mac_descriptor_t	nodes[WLFC_MAC_DESC_TABLE_SIZE];
+		/* table for interfaces */
+		wlfc_mac_descriptor_t	interfaces[WLFC_MAX_IFNUM];
+		/* OS may send packets to unknown (unassociated) destinations */
+		/* A place holder for bc/mc and packets to unknown destinations */
+		wlfc_mac_descriptor_t	other;
+	} destination_entries;
+
+	wlfc_mac_descriptor_t *active_entry_head;
+	int active_entry_count;
+
+	wlfc_mac_descriptor_t* requested_entry[WLFC_MAC_DESC_TABLE_SIZE];
+	int requested_entry_count;
+
+	/* pkt counts for each interface and ac */
+	int	pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1];
+	int	pkt_cnt_per_ac[AC_COUNT+1];
+	int	pkt_cnt_in_drv[WLFC_MAX_IFNUM][AC_COUNT+1];
+	uint8	allow_fc;
+	uint32  fc_defer_timestamp;
+	uint32	rx_timestamp[AC_COUNT+1];
+	/* ON/OFF state for flow control to the host network interface */
+	uint8	hostif_flow_state[WLFC_MAX_IFNUM];
+	uint8	host_ifidx;
+	/* to flow control an OS interface */
+	uint8	toggle_host_if;
+
+	/* To borrow credits */
+	uint8   allow_credit_borrow;
+
+	/* ac number for the first single ac traffic */
+	uint8	single_ac;
+
+	/* Timestamp for the first single ac traffic */
+	uint32  single_ac_timestamp;
+
+	bool	bcmc_credit_supported;
+
+} athost_wl_status_info_t;
+
+/* Please be mindful that total pkttag space is 32 octets only */
+typedef struct dhd_pkttag {
+	/*
+	b[15]  - 1 = wlfc packet
+	b[14:13]  - encryption exemption
+	b[12 ] - 1 = event channel
+	b[11 ] - 1 = this packet was sent in response to one time packet request,
+	do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET].
+	b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on]
+	b[9  ] - 1 = packet is host->firmware (transmit direction)
+	       - 0 = packet received from firmware (firmware->host)
+	b[8  ] - 1 = packet was sent due to credit_request (pspoll),
+	             packet does not count against FIFO credit.
+	       - 0 = normal transaction, packet counts against FIFO credit
+	b[7  ] - 1 = AP, 0 = STA
+	b[6:4] - AC FIFO number
+	b[3:0] - interface index
+	*/
+	uint16	if_flags;
+	/* destination MAC address for this packet so that not every
+	module needs to open the packet to find it
+	*/
+	uint8	dstn_ether[ETHER_ADDR_LEN];
+	/*
+	This 32-bit tag goes from host to device with every packet.
+	*/
+	uint32	htod_tag;
+
+	/*
+	This 16-bit field holds the original sequence number of a suppressed packet.
+	*/
+	uint16	htod_seq;
+
+	/*
+	This pointer holds the MAC entry associated with the packet.
+	*/
+	void*	entry;
+	/* bus specific stuff */
+	union {
+		struct {
+			void* stuff;
+			uint32 thing1;
+			uint32 thing2;
+		} sd;
+		struct {
+			void* bus;
+			void* urb;
+		} usb;
+	} bus_specific;
+} dhd_pkttag_t;
+
+#define DHD_PKTTAG_WLFCPKT_MASK			0x1
+#define DHD_PKTTAG_WLFCPKT_SHIFT		15
+#define DHD_PKTTAG_WLFCPKT_SET(tag, value)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_WLFCPKT_MASK << DHD_PKTTAG_WLFCPKT_SHIFT)) | \
+	(((value) & DHD_PKTTAG_WLFCPKT_MASK) << DHD_PKTTAG_WLFCPKT_SHIFT)
+#define DHD_PKTTAG_WLFCPKT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_WLFCPKT_SHIFT) & DHD_PKTTAG_WLFCPKT_MASK)
+
+#define DHD_PKTTAG_EXEMPT_MASK			0x3
+#define DHD_PKTTAG_EXEMPT_SHIFT			13
+#define DHD_PKTTAG_EXEMPT_SET(tag, value)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_EXEMPT_MASK << DHD_PKTTAG_EXEMPT_SHIFT)) | \
+	(((value) & DHD_PKTTAG_EXEMPT_MASK) << DHD_PKTTAG_EXEMPT_SHIFT)
+#define DHD_PKTTAG_EXEMPT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_EXEMPT_SHIFT) & DHD_PKTTAG_EXEMPT_MASK)
+
+#define DHD_PKTTAG_EVENT_MASK			0x1
+#define DHD_PKTTAG_EVENT_SHIFT			12
+#define DHD_PKTTAG_SETEVENT(tag, event)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_EVENT_MASK << DHD_PKTTAG_EVENT_SHIFT)) | \
+	(((event) & DHD_PKTTAG_EVENT_MASK) << DHD_PKTTAG_EVENT_SHIFT)
+#define DHD_PKTTAG_EVENT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_EVENT_SHIFT) & DHD_PKTTAG_EVENT_MASK)
+
+#define DHD_PKTTAG_ONETIMEPKTRQST_MASK		0x1
+#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT		11
+#define DHD_PKTTAG_SETONETIMEPKTRQST(tag)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \
+	(1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)
+#define DHD_PKTTAG_ONETIMEPKTRQST(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK)
+
+#define DHD_PKTTAG_SIGNALONLY_MASK		0x1
+#define DHD_PKTTAG_SIGNALONLY_SHIFT		10
+#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \
+	(((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT)
+#define DHD_PKTTAG_SIGNALONLY(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK)
+
+#define DHD_PKTTAG_PKTDIR_MASK			0x1
+#define DHD_PKTTAG_PKTDIR_SHIFT			9
+#define DHD_PKTTAG_SETPKTDIR(tag, dir)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \
+	(((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT)
+#define DHD_PKTTAG_PKTDIR(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK)
+
+#define DHD_PKTTAG_CREDITCHECK_MASK		0x1
+#define DHD_PKTTAG_CREDITCHECK_SHIFT		8
+#define DHD_PKTTAG_SETCREDITCHECK(tag, check)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \
+	(((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT)
+#define DHD_PKTTAG_CREDITCHECK(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK)
+
+#define DHD_PKTTAG_IFTYPE_MASK			0x1
+#define DHD_PKTTAG_IFTYPE_SHIFT			7
+#define DHD_PKTTAG_SETIFTYPE(tag, isAP)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \
+	(((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT)
+#define DHD_PKTTAG_IFTYPE(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK)
+
+#define DHD_PKTTAG_FIFO_MASK			0x7
+#define DHD_PKTTAG_FIFO_SHIFT			4
+#define DHD_PKTTAG_SETFIFO(tag, fifo)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \
+	(((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT)
+#define DHD_PKTTAG_FIFO(tag)		((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK)
+
+#define DHD_PKTTAG_IF_MASK			0xf
+#define DHD_PKTTAG_IF_SHIFT			0
+#define DHD_PKTTAG_SETIF(tag, if)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_IF_MASK << DHD_PKTTAG_IF_SHIFT)) | \
+	(((if) & DHD_PKTTAG_IF_MASK) << DHD_PKTTAG_IF_SHIFT)
+#define DHD_PKTTAG_IF(tag)		((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_IF_SHIFT) & DHD_PKTTAG_IF_MASK)
+
+#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea)	memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \
+	(dstn_MAC_ea), ETHER_ADDR_LEN)
+#define DHD_PKTTAG_DSTN(tag)	((dhd_pkttag_t*)(tag))->dstn_ether
+
+#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue)	((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue)
+#define DHD_PKTTAG_H2DTAG(tag)			(((dhd_pkttag_t*)(tag))->htod_tag)
+
+#define DHD_PKTTAG_SET_H2DSEQ(tag, seq)		((dhd_pkttag_t*)(tag))->htod_seq = (seq)
+#define DHD_PKTTAG_H2DSEQ(tag)			(((dhd_pkttag_t*)(tag))->htod_seq)
+
+#define DHD_PKTTAG_SET_ENTRY(tag, entry)	((dhd_pkttag_t*)(tag))->entry = (entry)
+#define DHD_PKTTAG_ENTRY(tag)			(((dhd_pkttag_t*)(tag))->entry)
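+/*
+ * Illustrative sketch of how the accessor macros above pack a transmit
+ * packet's tag. The field values, AC_BE, and dst_ea are assumptions for the
+ * example, not taken from this file:
+ *
+ *	void *tag = PKTTAG(pkt);
+ *	DHD_PKTTAG_WLFCPKT_SET(tag, 1);		// wlfc-managed packet, b[15]
+ *	DHD_PKTTAG_SETPKTDIR(tag, 1);		// host -> firmware, b[9]
+ *	DHD_PKTTAG_SETFIFO(tag, AC_BE);		// AC FIFO number, b[6:4]
+ *	DHD_PKTTAG_SETIF(tag, 0);		// interface index, b[3:0]
+ *	DHD_PKTTAG_SETDSTN(tag, dst_ea);	// destination MAC for later lookup
+ */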
+
+#define PSQ_SUP_IDX(x) (x * 2 + 1)
+#define PSQ_DLY_IDX(x) (x * 2)
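+/*
+ * Each AC (and bc/mc) therefore owns a pair of precedences in the psq:
+ * e.g. AC 1 uses delay queue PSQ_DLY_IDX(1) == 2 and suppress queue
+ * PSQ_SUP_IDX(1) == 3, matching WLFC_PSQ_PREC_COUNT above.
+ */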
+
+typedef int (*f_commitpkt_t)(void* ctx, void* p);
+typedef bool (*f_processpkt_t)(void* p, void* arg);
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry)	do { (entry)->closed_ct++; } while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry)		do { (entry)->opened_ct++; } while (0)
+#else
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry)	do {} while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry)		do {} while (0)
+#endif
+
+/* public functions */
+int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len,
+	uchar *reorder_info_buf, uint *reorder_info_len);
+int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit,
+	void* commit_ctx, void *pktbuf, bool need_toggle_host_if);
+int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
+int dhd_wlfc_init(dhd_pub_t *dhd);
+#ifdef SUPPORT_P2P_GO_PS
+int dhd_wlfc_suspend(dhd_pub_t *dhd);
+int dhd_wlfc_resume(dhd_pub_t *dhd);
+#endif /* SUPPORT_P2P_GO_PS */
+int dhd_wlfc_hostreorder_init(dhd_pub_t *dhd);
+int dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg);
+int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void* arg);
+int dhd_wlfc_deinit(dhd_pub_t *dhd);
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea);
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data);
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp);
+int dhd_wlfc_enable(dhd_pub_t *dhdp);
+int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd);
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val);
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val);
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd);
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf);
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock);
+int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio);
+
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val);
+
+int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val);
+#endif /* __wlfc_host_driver_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_stats.h b/drivers/net/wireless/bcmdhd/dngl_stats.h
new file mode 100644
index 0000000000000000000000000000000000000000..ec22f7c383ce7212b77bf170588658d6c116480d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_stats.h
@@ -0,0 +1,25 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dngl_stats.h 464743 2014-03-25 21:04:32Z $
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+typedef struct {
+	unsigned long	rx_packets;		/* total packets received */
+	unsigned long	tx_packets;		/* total packets transmitted */
+	unsigned long	rx_bytes;		/* total bytes received */
+	unsigned long	tx_bytes;		/* total bytes transmitted */
+	unsigned long	rx_errors;		/* bad packets received */
+	unsigned long	tx_errors;		/* packet transmit problems */
+	unsigned long	rx_dropped;		/* packets dropped by dongle */
+	unsigned long	tx_dropped;		/* packets dropped by dongle */
+	unsigned long   multicast;      /* multicast packets received */
+} dngl_stats_t;
+
+#endif /* _dngl_stats_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_wlhdr.h b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
new file mode 100644
index 0000000000000000000000000000000000000000..a3aa62f0078951c8dac0a3b834e932233cff6a76
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
@@ -0,0 +1,22 @@
+/*
+ * Dongle WL Header definitions
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dngl_wlhdr.h 464743 2014-03-25 21:04:32Z $
+ */
+
+#ifndef _dngl_wlhdr_h_
+#define _dngl_wlhdr_h_
+
+typedef struct wl_header {
+	uint8	type;			/* Header type */
+	uint8	version;		/* Header version */
+	int8	rssi;			/* RSSI */
+	uint8	pad;			/* Unused */
+} wl_header_t;
+
+#define WL_HEADER_LEN   sizeof(wl_header_t)
+#define WL_HEADER_TYPE  0
+#define WL_HEADER_VER   1
+#endif /* _dngl_wlhdr_h_ */
diff --git a/drivers/net/wireless/bcmdhd/hnd_pktpool.c b/drivers/net/wireless/bcmdhd/hnd_pktpool.c
new file mode 100644
index 0000000000000000000000000000000000000000..242f4322117a2efd86d7759c87cf323241f60696
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hnd_pktpool.c
@@ -0,0 +1,733 @@
+/*
+ * HND generic packet pool operation primitives
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <hnd_pktpool.h>
+
+/* Registry size is one larger than max pools, as slot #0 is reserved */
+#define PKTPOOLREG_RSVD_ID				(0U)
+#define PKTPOOLREG_RSVD_PTR				(POOLPTR(0xdeaddead))
+#define PKTPOOLREG_FREE_PTR				(POOLPTR(NULL))
+
+#define PKTPOOL_REGISTRY_SET(id, pp)	(pktpool_registry_set((id), (pp)))
+#define PKTPOOL_REGISTRY_CMP(id, pp)	(pktpool_registry_cmp((id), (pp)))
+
+/* Tag a registry entry as free for use */
+#define PKTPOOL_REGISTRY_CLR(id)		\
+		PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
+#define PKTPOOL_REGISTRY_ISCLR(id)		\
+		(PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))
+
+/* Tag registry entry 0 as reserved */
+#define PKTPOOL_REGISTRY_RSV()			\
+		PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
+#define PKTPOOL_REGISTRY_ISRSVD()		\
+		(PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))
+
+/* Walk all un-reserved entries in registry */
+#define PKTPOOL_REGISTRY_FOREACH(id)	\
+		for ((id) = 1U; (id) <= pktpools_max; (id)++)
+
+uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
+pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */
+
+/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
+static int pktpool_register(pktpool_t * poolptr);
+static int pktpool_deregister(pktpool_t * poolptr);
+
+/** accessor functions required when ROMming this file, forced into RAM */
+static void
+BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
+{
+	pktpools_registry[id] = pp;
+}
+
+static bool
+BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
+{
+	return pktpools_registry[id] == pp;
+}
+
+int /* Construct a pool registry to serve a maximum of total_pools */
+pktpool_attach(osl_t *osh, uint32 total_pools)
+{
+	uint32 poolid;
+
+	if (pktpools_max != 0U) {
+		return BCME_ERROR;
+	}
+
+	ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);
+
+	/* Initialize registry: reserve slot#0 and tag others as free */
+	PKTPOOL_REGISTRY_RSV();		/* reserve slot#0 */
+
+	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* tag all unreserved entries as free */
+		PKTPOOL_REGISTRY_CLR(poolid);
+	}
+
+	pktpools_max = total_pools;
+
+	return (int)pktpools_max;
+}
+
+int /* Destruct the pool registry. Ascertain all pools were first de-inited */
+pktpool_dettach(osl_t *osh)
+{
+	uint32 poolid;
+
+	if (pktpools_max == 0U) {
+		return BCME_OK;
+	}
+
+	/* Ascertain that no pools are still registered */
+	ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */
+
+	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* ascertain all others are free */
+		ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
+	}
+
+	pktpools_max = 0U; /* restore boot state */
+
+	return BCME_OK;
+}
+
+static int	/* Register a pool in a free slot; return the registry slot index */
+pktpool_register(pktpool_t * poolptr)
+{
+	uint32 poolid;
+
+	if (pktpools_max == 0U) {
+		return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
+	}
+
+	ASSERT(pktpools_max != 0U);
+
+	/* find an empty slot in pktpools_registry */
+	PKTPOOL_REGISTRY_FOREACH(poolid) {
+		if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
+			PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
+			return (int)poolid; /* return pool ID */
+		}
+	} /* FOREACH */
+
+	return PKTPOOL_INVALID_ID;	/* error: registry is full */
+}
+
+static int	/* Deregister a pktpool, given the pool pointer; tag slot as free */
+pktpool_deregister(pktpool_t * poolptr)
+{
+	uint32 poolid;
+
+	ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));
+
+	poolid = POOLID(poolptr);
+	ASSERT(poolid <= pktpools_max);
+
+	/* Ascertain that a previously registered poolptr is being de-registered */
+	if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
+		PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
+	} else {
+		ASSERT(0);
+		return BCME_ERROR; /* mismatch in registry */
+	}
+
+	return BCME_OK;
+}
+
+
+/*
+ * pktpool_init:
+ * The caller provides a pktpool_t structure and specifies the number of
+ * packets to be pre-filled into the pool (*pplen). All packets in a pool
+ * must be the same size, specified by plen.
+ * pktpool_init first attempts to register the pool and fetch a unique poolid.
+ * If registration fails, BCME_ERROR is returned; this happens either because
+ * the registry was not pre-created (pktpool_attach) or because it is full.
+ * If registration succeeds, the requested number of packets is filled into
+ * the pool as part of initialization. If there is not enough memory to
+ * service the request, BCME_NOMEM is returned along with the count of how
+ * many packets were successfully allocated.
+ * In dongle builds, prior to memory reclamation, the number of packets
+ * allocated during pktpool_init should be limited, and the pool filled up
+ * after the reclaim stage.
+ */
+int
+pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 type)
+{
+	int i, err = BCME_OK;
+	int pktplen;
+	uint8 pktp_id;
+
+	ASSERT(pktp != NULL);
+	ASSERT(osh != NULL);
+	ASSERT(pplen != NULL);
+
+	pktplen = *pplen;
+
+	bzero(pktp, sizeof(pktpool_t));
+
+	/* assign a unique pktpool id */
+	if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
+		return BCME_ERROR;
+	}
+	POOLSETID(pktp, pktp_id);
+
+	pktp->inited = TRUE;
+	pktp->istx = istx ? TRUE : FALSE;
+	pktp->plen = (uint16)plen;
+	pktp->type = type;
+
+	pktp->maxlen = PKTPOOL_LEN_MAX;
+	pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);
+
+	for (i = 0; i < pktplen; i++) {
+		void *p;
+		p = PKTGET(osh, plen, TRUE);
+
+		if (p == NULL) {
+			/* Not able to allocate all requested pkts
+			 * so just return what was actually allocated;
+			 * we can add to the pool later.
+			 */
+			if (pktp->freelist == NULL) /* pktpool free list is empty */
+				err = BCME_NOMEM;
+
+			goto exit;
+		}
+
+		PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */
+
+		PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
+		pktp->freelist = p;
+
+		pktp->avail++;
+
+#ifdef BCMDBG_POOL
+		pktp->dbg_q[pktp->dbg_qlen++].p = p;
+#endif
+	}
+
+exit:
+	pktp->len = pktp->avail;
+
+	*pplen = pktp->len;
+	return err;
+}
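+
+/*
+ * Minimal usage sketch for the pool API in this file (illustrative only;
+ * the pool count, packet size and 'type' value below are assumptions, not
+ * taken from any caller in this patch):
+ *
+ *	int n = 32;
+ *	pktpool_t pool;
+ *
+ *	pktpool_attach(osh, 1);				// construct the registry
+ *	if (pktpool_init(osh, &pool, &n, 2048, TRUE, 0) == BCME_OK) {
+ *		void *p = pktpool_get(&pool);		// n holds pkts actually allocated
+ *		if (p != NULL)
+ *			pktpool_free(&pool, p);		// return the packet to the pool
+ *		pktpool_deinit(osh, &pool);
+ *	}
+ *	pktpool_dettach(osh);
+ */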
+
+/*
+ * pktpool_deinit:
+ * Prior to freeing a pktpool, all packets must first be returned to the pool.
+ * Upon pktpool_deinit, all packets in the free pool are freed back to the heap.
+ * An assert is in place to ensure that there are no packets still lingering
+ * around. Packets freed to a pool after the deinit will cause memory
+ * corruption, as the pktpool_t structure no longer exists.
+ */
+int
+pktpool_deinit(osl_t *osh, pktpool_t *pktp)
+{
+	uint16 freed = 0;
+
+	ASSERT(osh != NULL);
+	ASSERT(pktp != NULL);
+
+#ifdef BCMDBG_POOL
+	{
+		int i;
+		for (i = 0; i <= pktp->len; i++) {
+			pktp->dbg_q[i].p = NULL;
+		}
+	}
+#endif
+
+	while (pktp->freelist != NULL) {
+		void * p = pktp->freelist;
+
+		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
+		PKTSETFREELIST(p, NULL);
+
+		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
+
+		PKTFREE(osh, p, pktp->istx); /* free the packet */
+
+		freed++;
+		ASSERT(freed <= pktp->len);
+	}
+
+	pktp->avail -= freed;
+	ASSERT(pktp->avail == 0);
+
+	pktp->len -= freed;
+
+	pktpool_deregister(pktp); /* release previously acquired unique pool id */
+	POOLSETID(pktp, PKTPOOL_INVALID_ID);
+
+	pktp->inited = FALSE;
+
+	/* Are there still pending pkts? */
+	ASSERT(pktp->len == 0);
+
+	return 0;
+}
+
+int
+pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
+{
+	void *p;
+	int err = 0;
+	int len, psize, maxlen;
+
+	ASSERT(pktp->plen != 0);
+
+	maxlen = pktp->maxlen;
+	psize = minimal ? (maxlen >> 2) : maxlen;
+	for (len = (int)pktp->len; len < psize; len++) {
+
+		p = PKTGET(osh, pktp->plen, TRUE); /* allocate at the pool's fixed packet size */
+
+		if (p == NULL) {
+			err = BCME_NOMEM;
+			break;
+		}
+
+		if (pktpool_add(pktp, p) != BCME_OK) {
+			PKTFREE(osh, p, FALSE);
+			err = BCME_ERROR;
+			break;
+		}
+	}
+
+	return err;
+}
+
+static void *
+pktpool_deq(pktpool_t *pktp)
+{
+	void *p;
+
+	if (pktp->avail == 0)
+		return NULL;
+
+	ASSERT(pktp->freelist != NULL);
+
+	p = pktp->freelist;  /* dequeue packet from head of pktpool free list */
+	pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
+	PKTSETFREELIST(p, NULL);
+
+	pktp->avail--;
+
+	return p;
+}
+
+static void
+pktpool_enq(pktpool_t *pktp, void *p)
+{
+	ASSERT(p != NULL);
+
+	PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
+	pktp->freelist = p; /* free list points to newly inserted packet */
+
+	pktp->avail++;
+	ASSERT(pktp->avail <= pktp->len);
+}
+
+/* utility for registering host addr fill function called from pciedev */
+int
+/* BCMATTACHFN */
+(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
+{
+
+	ASSERT(cb != NULL);
+
+	ASSERT(pktp->cbext.cb == NULL);
+	pktp->cbext.cb = cb;
+	pktp->cbext.arg = arg;
+	return 0;
+}
+
+int
+pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
+{
+
+	ASSERT(cb != NULL);
+
+	ASSERT(pktp->rxcplidfn.cb == NULL);
+	pktp->rxcplidfn.cb = cb;
+	pktp->rxcplidfn.arg = arg;
+	return 0;
+}
+/* Callback functions for split rx modes */
+/* whenever the host posts an rx buffer, invoke dma_rxfill from the pciedev layer */
+void
+pktpool_invoke_dmarxfill(pktpool_t *pktp)
+{
+	ASSERT(pktp->dmarxfill.cb);
+	ASSERT(pktp->dmarxfill.arg);
+
+	if (pktp->dmarxfill.cb)
+		pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
+}
+int
+pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+
+	ASSERT(cb != NULL);
+
+	pktp->dmarxfill.cb = cb;
+	pktp->dmarxfill.arg = arg;
+
+	return 0;
+}
+/* No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function */
+int
+pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+	int i;
+
+	ASSERT(cb != NULL);
+
+	i = pktp->cbcnt;
+	if (i == PKTPOOL_CB_MAX)
+		return BCME_ERROR;
+
+	ASSERT(pktp->cbs[i].cb == NULL);
+	pktp->cbs[i].cb = cb;
+	pktp->cbs[i].arg = arg;
+	pktp->cbcnt++;
+
+	return 0;
+}
+
+int
+pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+	int i;
+
+	ASSERT(cb != NULL);
+
+	i = pktp->ecbcnt;
+	if (i == PKTPOOL_CB_MAX)
+		return BCME_ERROR;
+
+	ASSERT(pktp->ecbs[i].cb == NULL);
+	pktp->ecbs[i].cb = cb;
+	pktp->ecbs[i].arg = arg;
+	pktp->ecbcnt++;
+
+	return 0;
+}
+
+static int
+pktpool_empty_notify(pktpool_t *pktp)
+{
+	int i;
+
+	pktp->empty = TRUE;
+	for (i = 0; i < pktp->ecbcnt; i++) {
+		ASSERT(pktp->ecbs[i].cb != NULL);
+		pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
+	}
+	pktp->empty = FALSE;
+
+	return 0;
+}
+
+#ifdef BCMDBG_POOL
+int
+pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+	int i;
+
+	ASSERT(cb);
+
+	i = pktp->dbg_cbcnt;
+	if (i == PKTPOOL_CB_MAX)
+		return BCME_ERROR;
+
+	ASSERT(pktp->dbg_cbs[i].cb == NULL);
+	pktp->dbg_cbs[i].cb = cb;
+	pktp->dbg_cbs[i].arg = arg;
+	pktp->dbg_cbcnt++;
+
+	return 0;
+}
+
+int pktpool_dbg_notify(pktpool_t *pktp);
+
+int
+pktpool_dbg_notify(pktpool_t *pktp)
+{
+	int i;
+
+	for (i = 0; i < pktp->dbg_cbcnt; i++) {
+		ASSERT(pktp->dbg_cbs[i].cb);
+		pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
+	}
+
+	return 0;
+}
+
+int
+pktpool_dbg_dump(pktpool_t *pktp)
+{
+	int i;
+
+	printf("pool len=%d maxlen=%d\n",  pktp->dbg_qlen, pktp->maxlen);
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p);
+		printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
+			pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
+	}
+
+	return 0;
+}
+
+int
+pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
+{
+	int i;
+	int state;
+
+	bzero(stats, sizeof(pktpool_stats_t));
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p != NULL);
+
+		state = PKTPOOLSTATE(pktp->dbg_q[i].p);
+		switch (state) {
+			case POOL_TXENQ:
+				stats->enq++; break;
+			case POOL_TXDH:
+				stats->txdh++; break;
+			case POOL_TXD11:
+				stats->txd11++; break;
+			case POOL_RXDH:
+				stats->rxdh++; break;
+			case POOL_RXD11:
+				stats->rxd11++; break;
+			case POOL_RXFILL:
+				stats->rxfill++; break;
+			case POOL_IDLE:
+				stats->idle++; break;
+		}
+	}
+
+	return 0;
+}
+
+int
+pktpool_start_trigger(pktpool_t *pktp, void *p)
+{
+	uint32 cycles, i;
+
+	if (!PKTPOOL(OSH_NULL, p))
+		return 0;
+
+	OSL_GETCYCLES(cycles);
+
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p != NULL);
+
+		if (pktp->dbg_q[i].p == p) {
+			pktp->dbg_q[i].cycles = cycles;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+int pktpool_stop_trigger(pktpool_t *pktp, void *p);
+int
+pktpool_stop_trigger(pktpool_t *pktp, void *p)
+{
+	uint32 cycles, i;
+
+	if (!PKTPOOL(OSH_NULL, p))
+		return 0;
+
+	OSL_GETCYCLES(cycles);
+
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p != NULL);
+
+		if (pktp->dbg_q[i].p == p) {
+			if (pktp->dbg_q[i].cycles == 0)
+				break;
+
+			if (cycles >= pktp->dbg_q[i].cycles)
+				pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
+			else
+				pktp->dbg_q[i].dur =
+					(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;
+
+			pktp->dbg_q[i].cycles = 0;
+			break;
+		}
+	}
+
+	return 0;
+}
+#endif /* BCMDBG_POOL */
+
+int
+pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
+{
+	ASSERT(pktp);
+	pktp->availcb_excl = NULL;
+	return 0;
+}
+
+int
+pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
+{
+	int i;
+
+	ASSERT(pktp);
+	ASSERT(pktp->availcb_excl == NULL);
+	for (i = 0; i < pktp->cbcnt; i++) {
+		if (cb == pktp->cbs[i].cb) {
+			pktp->availcb_excl = &pktp->cbs[i];
+			break;
+		}
+	}
+
+	if (pktp->availcb_excl == NULL)
+		return BCME_ERROR;
+	else
+		return 0;
+}
+
+static int
+pktpool_avail_notify(pktpool_t *pktp)
+{
+	int i, k, idx;
+	int avail;
+
+	ASSERT(pktp);
+	if (pktp->availcb_excl != NULL) {
+		pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
+		return 0;
+	}
+
+	k = pktp->cbcnt - 1;
+	for (i = 0; i < pktp->cbcnt; i++) {
+		avail = pktp->avail;
+
+		if (avail) {
+			if (pktp->cbtoggle)
+				idx = i;
+			else
+				idx = k--;
+
+			ASSERT(pktp->cbs[idx].cb != NULL);
+			pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
+		}
+	}
+
+	/* Alternate between filling from head or tail
+	 */
+	pktp->cbtoggle ^= 1;
+
+	return 0;
+}
+
+void *
+pktpool_get(pktpool_t *pktp)
+{
+	void *p;
+
+	p = pktpool_deq(pktp);
+
+	if (p == NULL) {
+		/* Notify and try to reclaim tx pkts */
+		if (pktp->ecbcnt)
+			pktpool_empty_notify(pktp);
+
+		p = pktpool_deq(pktp);
+		if (p == NULL)
+			return NULL;
+	}
+
+	return p;
+}
+
+void
+pktpool_free(pktpool_t *pktp, void *p)
+{
+	ASSERT(p != NULL);
+#ifdef BCMDBG_POOL
+	/* pktpool_stop_trigger(pktp, p); */
+#endif
+
+	pktpool_enq(pktp, p);
+
+	if (pktp->emptycb_disable)
+		return;
+
+	if (pktp->cbcnt) {
+		if (pktp->empty == FALSE)
+			pktpool_avail_notify(pktp);
+	}
+}
+
+int
+pktpool_add(pktpool_t *pktp, void *p)
+{
+	ASSERT(p != NULL);
+
+	if (pktp->len == pktp->maxlen)
+		return BCME_RANGE;
+
+	/* pkts in pool have same length */
+	ASSERT(pktp->plen == PKTLEN(OSH_NULL, p));
+	PKTSETPOOL(OSH_NULL, p, TRUE, pktp);
+
+	pktp->len++;
+	pktpool_enq(pktp, p);
+
+#ifdef BCMDBG_POOL
+	pktp->dbg_q[pktp->dbg_qlen++].p = p;
+#endif
+
+	return 0;
+}
+
+/* Force pktpool_setmaxlen () into RAM as it uses a constant
+ * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
+ */
+int
+BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
+{
+	if (maxlen > PKTPOOL_LEN_MAX)
+		maxlen = PKTPOOL_LEN_MAX;
+
+	/* if pool is already beyond maxlen, then just cap it
+	 * since we currently do not reduce the pool len
+	 * already allocated
+	 */
+	pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen;
+
+	return pktp->maxlen;
+}
+
+void
+pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
+{
+	ASSERT(pktp);
+
+	pktp->emptycb_disable = disable;
+}
+
+bool
+pktpool_emptycb_disabled(pktpool_t *pktp)
+{
+	ASSERT(pktp);
+	return pktp->emptycb_disable;
+}
diff --git a/drivers/net/wireless/bcmdhd/hnd_pktq.c b/drivers/net/wireless/bcmdhd/hnd_pktq.c
new file mode 100644
index 0000000000000000000000000000000000000000..d619113b1a5c084053cf3ea69a646fd1ae86d037
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hnd_pktq.c
@@ -0,0 +1,584 @@
+/*
+ * HND generic pktq operation primitives
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <hnd_pktq.h>
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void * BCMFASTPATH
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head)
+		PKTSETLINK(q->tail, p);
+	else
+		q->head = p;
+
+	q->tail = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head == NULL)
+		q->tail = p;
+
+	PKTSETLINK(p, q->head);
+	q->head = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	return p;
+}
+
+/*
+ * Append spktq 'list' to the tail of pktq 'pq'
+ */
+void BCMFASTPATH
+pktq_append(struct pktq *pq, int prec, struct spktq *list)
+{
+	struct pktq_prec *q;
+	struct pktq_prec *list_q;
+
+	list_q = &list->q[0];
+
+	/* empty list check */
+	if (list_q->head == NULL)
+		return;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(list_q->tail) == NULL);         /* terminated list */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head)
+		PKTSETLINK(q->tail, list_q->head);
+	else
+		q->head = list_q->head;
+
+	q->tail = list_q->tail;
+	q->len += list_q->len;
+	pq->len += list_q->len;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	list_q->head = NULL;
+	list_q->tail = NULL;
+	list_q->len = 0;
+	list->len = 0;
+}
+
+/*
+ * Prepend spktq 'list' to the head of pktq 'pq'
+ */
+void BCMFASTPATH
+pktq_prepend(struct pktq *pq, int prec, struct spktq *list)
+{
+	struct pktq_prec *q;
+	struct pktq_prec *list_q;
+
+	list_q = &list->q[0];
+
+	/* empty list check */
+	if (list_q->head == NULL)
+		return;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(list_q->tail) == NULL);         /* terminated list */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	/* set the tail packet of list to point at the former pq head */
+	PKTSETLINK(list_q->tail, q->head);
+	/* the new q head is the head of list */
+	q->head = list_q->head;
+
+	/* If the q tail was non-null, then it stays as is.
+	 * If the q tail was null, it is now the tail of list
+	 */
+	if (q->tail == NULL) {
+		q->tail = list_q->tail;
+	}
+
+	q->len += list_q->len;
+	pq->len += list_q->len;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	list_q->head = NULL;
+	list_q->tail = NULL;
+	list_q->len = 0;
+	list->len = 0;
+}
+
+void * BCMFASTPATH
+pktq_pdeq(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if (prev_p == NULL)
+		return NULL;
+
+	if ((p = PKTLINK(prev_p)) == NULL)
+		return NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(prev_p, PKTLINK(p));
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			break;
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+	if (p == NULL)
+		return NULL;
+
+	if (prev == NULL) {
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		PKTSETLINK(prev, PKTLINK(p));
+		if (q->tail == p) {
+			q->tail = prev;
+		}
+	}
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p, *prev;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+	return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	q = &pq->q[prec];
+	p = q->head;
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			bool head = (p == q->head);
+			if (head)
+				q->head = PKTLINK(p);
+			else
+				PKTSETLINK(prev, PKTLINK(p));
+			PKTSETLINK(p, NULL);
+			PKTFREE(osh, p, dir);
+			q->len--;
+			pq->len--;
+			p = (head ? q->head : PKTLINK(prev));
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+
+	if (q->head == NULL) {
+		ASSERT(q->len == 0);
+		q->tail = NULL;
+	}
+}
+
+bool BCMFASTPATH
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	/* Should this just assert pktbuf? */
+	if (!pktbuf)
+		return FALSE;
+
+	q = &pq->q[prec];
+
+	if (q->head == pktbuf) {
+		if ((q->head = PKTLINK(pktbuf)) == NULL)
+			q->tail = NULL;
+	} else {
+		for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+			;
+		if (p == NULL)
+			return FALSE;
+
+		PKTSETLINK(p, PKTLINK(pktbuf));
+		if (q->tail == pktbuf)
+			q->tail = p;
+	}
+
+	q->len--;
+	pq->len--;
+	PKTSETLINK(pktbuf, NULL);
+	return TRUE;
+}
+
+void
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+	int prec;
+
+	ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+	/* pq is variable size; only zero out what's requested */
+	bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+	pq->num_prec = (uint16)num_prec;
+
+	pq->max = (uint16)max_len;
+
+	for (prec = 0; prec < num_prec; prec++)
+		pq->q[prec].max = pq->max;
+}
+
+void
+pktq_set_max_plen(struct pktq *pq, int prec, int max_len)
+{
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	if (prec < pq->num_prec)
+		pq->q[prec].max = (uint16)max_len;
+}
+
+void * BCMFASTPATH
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p, *prev;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return (pq->q[prec].head);
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return (pq->q[prec].tail);
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
+{
+	int prec;
+
+	/* Optimize flush, if pktq len = 0, just return.
+	 * pktq len of 0 means pktq's prec q's are all empty.
+	 */
+	if (pq->len == 0) {
+		return;
+	}
+
+	for (prec = 0; prec < pq->num_prec; prec++)
+		pktq_pflush(osh, pq, prec, dir, fn, arg);
+	if (fn == NULL)
+		ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+	int prec, len;
+
+	len = 0;
+
+	for (prec = 0; prec <= pq->hi_prec; prec++)
+		if (prec_bmp & (1 << prec))
+			len += pq->q[prec].len;
+
+	return len;
+}
+
+/* Priority peek from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+	{
+		return NULL;
+	}
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+		if (prec-- == 0)
+			return NULL;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return p;
+}
+/* Priority dequeue from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0))
+		if (prec-- == 0)
+			return NULL;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
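
For orientation, the queue above is used roughly as follows: pktq_init() sizes the precedence array, pktq_penq() enqueues at a given precedence (updating the hi_prec hint), pktq_deq() drains the highest non-empty precedence first, and pktq_mdeq() restricts draining to a caller-supplied precedence bitmap. A hedged sketch follows; packet allocation and the rest of the data path are assumed from the caller and shown only as placeholders.

    /* Illustrative sketch only: 'pkt_lo' and 'pkt_hi' come from the caller. */
    static void pktq_demo(struct pktq *pq, void *pkt_lo, void *pkt_hi)
    {
    	void *p;
    	int prec_out;

    	pktq_init(pq, 4, 64);			/* 4 precedences, queue limit of 64 */
    	pktq_penq(pq, 0, pkt_lo);		/* low precedence */
    	pktq_penq(pq, 3, pkt_hi);		/* high precedence; bumps hi_prec to 3 */

    	p = pktq_deq(pq, &prec_out);		/* pkt_hi comes out first (prec_out == 3) */

    	/* drain whatever is left in precedences 0 and 1, highest eligible first */
    	while ((p = pktq_mdeq(pq, (1 << 0) | (1 << 1), &prec_out)) != NULL) {
    		/* ... hand p to the data path ... */
    	}
    }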
diff --git a/drivers/net/wireless/bcmdhd/hndpmu.c b/drivers/net/wireless/bcmdhd/hndpmu.c
new file mode 100644
index 0000000000000000000000000000000000000000..03f17244faf37de9190484014d67bffba426061e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hndpmu.c
@@ -0,0 +1,256 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: hndpmu.c 475037 2014-05-02 23:55:49Z $
+ */
+
+
+/*
+ * Note: this file contains PLL/FLL-related functions. A chip can contain multiple PLLs/FLLs.
+ * However, in the context of this file, the baseband ('BB') PLL/FLL is the one being referred to.
+ *
+ * Throughout this code, the prefixes 'pmu0_', 'pmu1_' and 'pmu2_' are used.
+ * They refer to different revisions of the PMU (which is at revision 18 as of Apr 25, 2012).
+ * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop). It supports
+ * fractional frequency generation; pmu2_ does not.
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndpmu.h>
+
+#define	PMU_ERROR(args)
+
+#define	PMU_MSG(args)
+
+/* Used for verbose debugging messages that are not intended
+ * to be enabled except in private builds.
+ */
+#define	PMU_NONE(args)
+
+/** contains resource bit positions for a specific chip */
+struct rsc_per_chip_s {
+	uint8 ht_avail;
+	uint8 macphy_clkavail;
+	uint8 ht_start;
+	uint8 otp_pu;
+};
+
+typedef struct rsc_per_chip_s rsc_per_chip_t;
+
+
+/* SDIO Pad drive strength to select value mappings.
+ * The last strength value in each table must be 0 (the tri-state value).
+ */
+typedef struct {
+	uint8 strength;			/* Pad Drive Strength in mA */
+	uint8 sel;			/* Chip-specific select value */
+} sdiod_drive_str_t;
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = {
+	{4, 0x2},
+	{2, 0x3},
+	{1, 0x0},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = {
+	{12, 0x7},
+	{10, 0x6},
+	{8, 0x5},
+	{6, 0x4},
+	{4, 0x2},
+	{2, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = {
+	{32, 0x7},
+	{26, 0x6},
+	{22, 0x5},
+	{16, 0x4},
+	{12, 0x3},
+	{8, 0x2},
+	{4, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = {
+	{32, 0x6},
+	{26, 0x7},
+	{22, 0x4},
+	{16, 0x5},
+	{12, 0x2},
+	{8, 0x3},
+	{4, 0x0},
+	{0, 0x1} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = {
+	{6, 0x7},
+	{5, 0x6},
+	{4, 0x5},
+	{3, 0x4},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */
+
+/** SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab6_1v8[] = {
+	{3, 0x3},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0} };
+
+
+/**
+ * SDIO Drive Strength to sel value table for 43143 PMU Rev 17, see Confluence 43143 Toplevel
+ * architecture page, section 'PMU Chip Control 1 Register definition', click link to picture
+ * BCM43143_sel_sdio_signals.jpg. Valid after PMU Chip Control 0 Register, bit31 (override) has
+ * been written '1'.
+ */
+#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33
+
+static const sdiod_drive_str_t sdiod_drive_strength_tab7_3v3[] = {
+	/* note: for 14, 10, 6 and 2mA hw timing is not met according to rtl team */
+	{16, 0x7},
+	{12, 0x5},
+	{8,  0x3},
+	{4,  0x1} }; /* note: 43143 does not support tristate */
+
+#else
+
+static const sdiod_drive_str_t sdiod_drive_strength_tab7_1v8[] = {
+	/* note: for 7, 5, 3 and 1mA hw timing is not met according to rtl team */
+	{8, 0x7},
+	{6, 0x5},
+	{4,  0x3},
+	{2,  0x1} }; /* note: 43143 does not support tristate */
+
+#endif /* BCM_SDIO_VDDIO */
+
+#define SDIOD_DRVSTR_KEY(chip, pmu)	(((chip) << 16) | (pmu))
+
+/**
+ * This function balances stable SDIO operation against power consumption. Note that each drive
+ * strength table is for a specific VDDIO of the SDIO pads; ideally this function would read the
+ * VDDIO itself to select the correct table. For now this is handled via the 'BCM_SDIO_VDDIO'
+ * preprocessor constant.
+ *
+ * 'drivestrength': desired pad drive strength in mA. A drive strength of 0 requests tri-state (if
+ *		    the hardware supports it); without hardware support, the drive strength is not programmed.
+ */
+void
+si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+	sdiod_drive_str_t *str_tab = NULL;
+	uint32 str_mask = 0;	/* only alter desired bits in PMU chipcontrol 1 register */
+	uint32 str_shift = 0;
+	uint32 str_ovr_pmuctl = PMU_CHIPCTL0; /* PMU chipcontrol register containing override bit */
+	uint32 str_ovr_pmuval = 0;            /* position of bit within this register */
+
+	if (!(sih->cccaps & CC_CAP_PMU)) {
+		return;
+	}
+
+	switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1;
+		str_mask = 0x30000000;
+		str_shift = 28;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+	case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11):
+		if (sih->pmurev == 8) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3;
+		}
+		else if (sih->pmurev == 11) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		}
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab6_1v8;
+		str_mask = 0x00001800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33
+		if (drivestrength >=  ARRAYLAST(sdiod_drive_strength_tab7_3v3)->strength) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_3v3;
+		}
+#else
+		if (drivestrength >=  ARRAYLAST(sdiod_drive_strength_tab7_1v8)->strength) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_1v8;
+		}
+#endif /* BCM_SDIO_VDDIO */
+		str_mask = 0x00000007;
+		str_ovr_pmuval = PMU43143_CC0_SDIO_DRSTR_OVR;
+		break;
+	default:
+		PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+		         bcm_chipname(sih->chip, chn, 8), sih->chiprev, sih->pmurev));
+		break;
+	}
+
+	if (str_tab != NULL) {
+		uint32 cc_data_temp;
+		int i;
+
+		/* Pick the lowest available drive strength equal or greater than the
+		 * requested strength.	Drive strength of 0 requests tri-state.
+		 */
+		for (i = 0; drivestrength < str_tab[i].strength; i++)
+			;
+
+		if (i > 0 && drivestrength > str_tab[i].strength)
+			i--;
+
+		W_REG(osh, PMUREG(sih, chipcontrol_addr), PMU_CHIPCTL1);
+		cc_data_temp = R_REG(osh, PMUREG(sih, chipcontrol_data));
+		cc_data_temp &= ~str_mask;
+		cc_data_temp |= str_tab[i].sel << str_shift;
+		W_REG(osh, PMUREG(sih, chipcontrol_data), cc_data_temp);
+		if (str_ovr_pmuval) { /* enables the selected drive strength */
+			W_REG(osh,  PMUREG(sih, chipcontrol_addr), str_ovr_pmuctl);
+			OR_REG(osh, PMUREG(sih, chipcontrol_data), str_ovr_pmuval);
+		}
+		PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
+		         drivestrength, str_tab[i].strength));
+	}
+} /* si_sdiod_drive_strength_init */
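
As an aid to reading the selection loop above: the tables are sorted in descending order, the loop walks down until the table value drops below the request, and the step-back then lands on the smallest entry that is still greater than or equal to the request (a request of 0 ends on the tri-state entry where present). The standalone sketch below reproduces only that lookup with a local copy of the PMU rev 8 table; the table values come from this file, everything else is illustrative.

    #include <stdio.h>

    /* local copy of the PMU rev 8 (1.8V) strength -> sel table from hndpmu.c */
    struct drive_str { unsigned char strength; unsigned char sel; };
    static const struct drive_str tab3[] = {
    	{32, 0x7}, {26, 0x6}, {22, 0x5}, {16, 0x4},
    	{12, 0x3}, {8, 0x2}, {4, 0x1}, {0, 0x0} };

    /* same walk-then-step-back selection used by si_sdiod_drive_strength_init() */
    static unsigned char pick_sel(unsigned int req_ma)
    {
    	int i;

    	for (i = 0; req_ma < tab3[i].strength; i++)
    		;
    	if (i > 0 && req_ma > tab3[i].strength)
    		i--;
    	return tab3[i].sel;
    }

    int main(void)
    {
    	/* a 10 mA request lands on the 12 mA entry -> sel 0x3 */
    	printf("10 mA request -> sel 0x%x\n", pick_sel(10));
    	return 0;
    }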
diff --git a/drivers/net/wireless/bcmdhd/include/Makefile b/drivers/net/wireless/bcmdhd/include/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..bc90f3cab2247855a656aa86653386eb0a278082
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/Makefile
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# This script serves the following purposes:
+#
+# 1. It generates native version information by querying the
+#    automerger-maintained database to see where src/include
+#    came from.
+# 2. For select components, as listed in compvers.sh,
+#    it generates component version files.
+#
+# Copyright 2005, Broadcom, Inc.
+#
+# $Id: Makefile 347587 2012-07-27 09:13:31Z $
+#
+
+export SRCBASE:=..
+
+TARGETS := epivers.h
+
+ifdef VERBOSE
+export VERBOSE
+endif
+
+all release: epivers compvers
+
+# Generate epivers.h for native branch url
+epivers:
+	bash epivers.sh
+
+# Generate component versions based on component url
+compvers:
+	@if [ -s "compvers.sh" ]; then \
+		echo "Generating component versions, if any"; \
+		bash compvers.sh; \
+	else \
+		echo "Skipping component version generation"; \
+	fi
+
+# Clean up generated component version files
+clean_compvers:
+	@if [ -s "compvers.sh" ]; then \
+		echo "bash compvers.sh clean"; \
+		bash compvers.sh clean; \
+	else \
+		echo "Skipping component version clean"; \
+	fi
+
+clean:
+	rm -f $(TARGETS) *.prev
+
+clean_all: clean clean_compvers
+
+.PHONY: all release clean epivers compvers clean_compvers
diff --git a/drivers/net/wireless/bcmdhd/include/aidmp.h b/drivers/net/wireless/bcmdhd/include/aidmp.h
new file mode 100644
index 0000000000000000000000000000000000000000..6a7b78de9b2473250b8b5619f8332f8c108a3733
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/aidmp.h
@@ -0,0 +1,368 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: aidmp.h 456346 2014-02-18 16:48:52Z $
+ */
+
+#ifndef	_AIDMP_H
+#define	_AIDMP_H
+
+/* Manufacturer Ids */
+#define	MFGID_ARM		0x43b
+#define	MFGID_BRCM		0x4bf
+#define	MFGID_MIPS		0x4a7
+
+/* Component Classes */
+#define	CC_SIM			0
+#define	CC_EROM			1
+#define	CC_CORESIGHT		9
+#define	CC_VERIF		0xb
+#define	CC_OPTIMO		0xd
+#define	CC_GEN			0xe
+#define	CC_PRIMECELL		0xf
+
+/* Enumeration ROM registers */
+#define	ER_EROMENTRY		0x000
+#define	ER_REMAPCONTROL		0xe00
+#define	ER_REMAPSELECT		0xe04
+#define	ER_MASTERSELECT		0xe10
+#define	ER_ITCR			0xf00
+#define	ER_ITIP			0xf04
+
+/* Erom entries */
+#define	ER_TAG			0xe
+#define	ER_TAG1			0x6
+#define	ER_VALID		1
+#define	ER_CI			0
+#define	ER_MP			2
+#define	ER_ADD			4
+#define	ER_END			0xe
+#define	ER_BAD			0xffffffff
+
+/* EROM CompIdentA */
+#define	CIA_MFG_MASK		0xfff00000
+#define	CIA_MFG_SHIFT		20
+#define	CIA_CID_MASK		0x000fff00
+#define	CIA_CID_SHIFT		8
+#define	CIA_CCL_MASK		0x000000f0
+#define	CIA_CCL_SHIFT		4
+
+/* EROM CompIdentB */
+#define	CIB_REV_MASK		0xff000000
+#define	CIB_REV_SHIFT		24
+#define	CIB_NSW_MASK		0x00f80000
+#define	CIB_NSW_SHIFT		19
+#define	CIB_NMW_MASK		0x0007c000
+#define	CIB_NMW_SHIFT		14
+#define	CIB_NSP_MASK		0x00003e00
+#define	CIB_NSP_SHIFT		9
+#define	CIB_NMP_MASK		0x000001f0
+#define	CIB_NMP_SHIFT		4
+
+/* EROM MasterPortDesc */
+#define	MPD_MUI_MASK		0x0000ff00
+#define	MPD_MUI_SHIFT		8
+#define	MPD_MP_MASK		0x000000f0
+#define	MPD_MP_SHIFT		4
+
+/* EROM AddrDesc */
+#define	AD_ADDR_MASK		0xfffff000
+#define	AD_SP_MASK		0x00000f00
+#define	AD_SP_SHIFT		8
+#define	AD_ST_MASK		0x000000c0
+#define	AD_ST_SHIFT		6
+#define	AD_ST_SLAVE		0x00000000
+#define	AD_ST_BRIDGE		0x00000040
+#define	AD_ST_SWRAP		0x00000080
+#define	AD_ST_MWRAP		0x000000c0
+#define	AD_SZ_MASK		0x00000030
+#define	AD_SZ_SHIFT		4
+#define	AD_SZ_4K		0x00000000
+#define	AD_SZ_8K		0x00000010
+#define	AD_SZ_16K		0x00000020
+#define	AD_SZ_SZD		0x00000030
+#define	AD_AG32			0x00000008
+#define	AD_ADDR_ALIGN		0x00000fff
+#define	AD_SZ_BASE		0x00001000	/* 4KB */
+
+/* EROM SizeDesc */
+#define	SD_SZ_MASK		0xfffff000
+#define	SD_SG32			0x00000008
+#define	SD_SZ_ALIGN		0x00000fff
+
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+typedef volatile struct _aidmp {
+	uint32	oobselina30;	/* 0x000 */
+	uint32	oobselina74;	/* 0x004 */
+	uint32	PAD[6];
+	uint32	oobselinb30;	/* 0x020 */
+	uint32	oobselinb74;	/* 0x024 */
+	uint32	PAD[6];
+	uint32	oobselinc30;	/* 0x040 */
+	uint32	oobselinc74;	/* 0x044 */
+	uint32	PAD[6];
+	uint32	oobselind30;	/* 0x060 */
+	uint32	oobselind74;	/* 0x064 */
+	uint32	PAD[38];
+	uint32	oobselouta30;	/* 0x100 */
+	uint32	oobselouta74;	/* 0x104 */
+	uint32	PAD[6];
+	uint32	oobseloutb30;	/* 0x120 */
+	uint32	oobseloutb74;	/* 0x124 */
+	uint32	PAD[6];
+	uint32	oobseloutc30;	/* 0x140 */
+	uint32	oobseloutc74;	/* 0x144 */
+	uint32	PAD[6];
+	uint32	oobseloutd30;	/* 0x160 */
+	uint32	oobseloutd74;	/* 0x164 */
+	uint32	PAD[38];
+	uint32	oobsynca;	/* 0x200 */
+	uint32	oobseloutaen;	/* 0x204 */
+	uint32	PAD[6];
+	uint32	oobsyncb;	/* 0x220 */
+	uint32	oobseloutben;	/* 0x224 */
+	uint32	PAD[6];
+	uint32	oobsyncc;	/* 0x240 */
+	uint32	oobseloutcen;	/* 0x244 */
+	uint32	PAD[6];
+	uint32	oobsyncd;	/* 0x260 */
+	uint32	oobseloutden;	/* 0x264 */
+	uint32	PAD[38];
+	uint32	oobaextwidth;	/* 0x300 */
+	uint32	oobainwidth;	/* 0x304 */
+	uint32	oobaoutwidth;	/* 0x308 */
+	uint32	PAD[5];
+	uint32	oobbextwidth;	/* 0x320 */
+	uint32	oobbinwidth;	/* 0x324 */
+	uint32	oobboutwidth;	/* 0x328 */
+	uint32	PAD[5];
+	uint32	oobcextwidth;	/* 0x340 */
+	uint32	oobcinwidth;	/* 0x344 */
+	uint32	oobcoutwidth;	/* 0x348 */
+	uint32	PAD[5];
+	uint32	oobdextwidth;	/* 0x360 */
+	uint32	oobdinwidth;	/* 0x364 */
+	uint32	oobdoutwidth;	/* 0x368 */
+	uint32	PAD[37];
+	uint32	ioctrlset;	/* 0x400 */
+	uint32	ioctrlclear;	/* 0x404 */
+	uint32	ioctrl;		/* 0x408 */
+	uint32	PAD[61];
+	uint32	iostatus;	/* 0x500 */
+	uint32	PAD[127];
+	uint32	ioctrlwidth;	/* 0x700 */
+	uint32	iostatuswidth;	/* 0x704 */
+	uint32	PAD[62];
+	uint32	resetctrl;	/* 0x800 */
+	uint32	resetstatus;	/* 0x804 */
+	uint32	resetreadid;	/* 0x808 */
+	uint32	resetwriteid;	/* 0x80c */
+	uint32	PAD[60];
+	uint32	errlogctrl;	/* 0x900 */
+	uint32	errlogdone;	/* 0x904 */
+	uint32	errlogstatus;	/* 0x908 */
+	uint32	errlogaddrlo;	/* 0x90c */
+	uint32	errlogaddrhi;	/* 0x910 */
+	uint32	errlogid;	/* 0x914 */
+	uint32	errloguser;	/* 0x918 */
+	uint32	errlogflags;	/* 0x91c */
+	uint32	PAD[56];
+	uint32	intstatus;	/* 0xa00 */
+	uint32	PAD[255];
+	uint32	config;		/* 0xe00 */
+	uint32	PAD[63];
+	uint32	itcr;		/* 0xf00 */
+	uint32	PAD[3];
+	uint32	itipooba;	/* 0xf10 */
+	uint32	itipoobb;	/* 0xf14 */
+	uint32	itipoobc;	/* 0xf18 */
+	uint32	itipoobd;	/* 0xf1c */
+	uint32	PAD[4];
+	uint32	itipoobaout;	/* 0xf30 */
+	uint32	itipoobbout;	/* 0xf34 */
+	uint32	itipoobcout;	/* 0xf38 */
+	uint32	itipoobdout;	/* 0xf3c */
+	uint32	PAD[4];
+	uint32	itopooba;	/* 0xf50 */
+	uint32	itopoobb;	/* 0xf54 */
+	uint32	itopoobc;	/* 0xf58 */
+	uint32	itopoobd;	/* 0xf5c */
+	uint32	PAD[4];
+	uint32	itopoobain;	/* 0xf70 */
+	uint32	itopoobbin;	/* 0xf74 */
+	uint32	itopoobcin;	/* 0xf78 */
+	uint32	itopoobdin;	/* 0xf7c */
+	uint32	PAD[4];
+	uint32	itopreset;	/* 0xf90 */
+	uint32	PAD[15];
+	uint32	peripherialid4;	/* 0xfd0 */
+	uint32	peripherialid5;	/* 0xfd4 */
+	uint32	peripherialid6;	/* 0xfd8 */
+	uint32	peripherialid7;	/* 0xfdc */
+	uint32	peripherialid0;	/* 0xfe0 */
+	uint32	peripherialid1;	/* 0xfe4 */
+	uint32	peripherialid2;	/* 0xfe8 */
+	uint32	peripherialid3;	/* 0xfec */
+	uint32	componentid0;	/* 0xff0 */
+	uint32	componentid1;	/* 0xff4 */
+	uint32	componentid2;	/* 0xff8 */
+	uint32	componentid3;	/* 0xffc */
+} aidmp_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+/* Out-of-band Router registers */
+#define	OOB_BUSCONFIG		0x020
+#define	OOB_STATUSA		0x100
+#define	OOB_STATUSB		0x104
+#define	OOB_STATUSC		0x108
+#define	OOB_STATUSD		0x10c
+#define	OOB_ENABLEA0		0x200
+#define	OOB_ENABLEA1		0x204
+#define	OOB_ENABLEA2		0x208
+#define	OOB_ENABLEA3		0x20c
+#define	OOB_ENABLEB0		0x280
+#define	OOB_ENABLEB1		0x284
+#define	OOB_ENABLEB2		0x288
+#define	OOB_ENABLEB3		0x28c
+#define	OOB_ENABLEC0		0x300
+#define	OOB_ENABLEC1		0x304
+#define	OOB_ENABLEC2		0x308
+#define	OOB_ENABLEC3		0x30c
+#define	OOB_ENABLED0		0x380
+#define	OOB_ENABLED1		0x384
+#define	OOB_ENABLED2		0x388
+#define	OOB_ENABLED3		0x38c
+#define	OOB_ITCR		0xf00
+#define	OOB_ITIPOOBA		0xf10
+#define	OOB_ITIPOOBB		0xf14
+#define	OOB_ITIPOOBC		0xf18
+#define	OOB_ITIPOOBD		0xf1c
+#define	OOB_ITOPOOBA		0xf30
+#define	OOB_ITOPOOBB		0xf34
+#define	OOB_ITOPOOBC		0xf38
+#define	OOB_ITOPOOBD		0xf3c
+
+/* DMP wrapper registers */
+#define	AI_OOBSELINA30		0x000
+#define	AI_OOBSELINA74		0x004
+#define	AI_OOBSELINB30		0x020
+#define	AI_OOBSELINB74		0x024
+#define	AI_OOBSELINC30		0x040
+#define	AI_OOBSELINC74		0x044
+#define	AI_OOBSELIND30		0x060
+#define	AI_OOBSELIND74		0x064
+#define	AI_OOBSELOUTA30		0x100
+#define	AI_OOBSELOUTA74		0x104
+#define	AI_OOBSELOUTB30		0x120
+#define	AI_OOBSELOUTB74		0x124
+#define	AI_OOBSELOUTC30		0x140
+#define	AI_OOBSELOUTC74		0x144
+#define	AI_OOBSELOUTD30		0x160
+#define	AI_OOBSELOUTD74		0x164
+#define	AI_OOBSYNCA		0x200
+#define	AI_OOBSELOUTAEN		0x204
+#define	AI_OOBSYNCB		0x220
+#define	AI_OOBSELOUTBEN		0x224
+#define	AI_OOBSYNCC		0x240
+#define	AI_OOBSELOUTCEN		0x244
+#define	AI_OOBSYNCD		0x260
+#define	AI_OOBSELOUTDEN		0x264
+#define	AI_OOBAEXTWIDTH		0x300
+#define	AI_OOBAINWIDTH		0x304
+#define	AI_OOBAOUTWIDTH		0x308
+#define	AI_OOBBEXTWIDTH		0x320
+#define	AI_OOBBINWIDTH		0x324
+#define	AI_OOBBOUTWIDTH		0x328
+#define	AI_OOBCEXTWIDTH		0x340
+#define	AI_OOBCINWIDTH		0x344
+#define	AI_OOBCOUTWIDTH		0x348
+#define	AI_OOBDEXTWIDTH		0x360
+#define	AI_OOBDINWIDTH		0x364
+#define	AI_OOBDOUTWIDTH		0x368
+
+
+#define	AI_IOCTRLSET		0x400
+#define	AI_IOCTRLCLEAR		0x404
+#define	AI_IOCTRL		0x408
+#define	AI_IOSTATUS		0x500
+#define	AI_RESETCTRL		0x800
+#define	AI_RESETSTATUS		0x804
+
+#define	AI_IOCTRLWIDTH		0x700
+#define	AI_IOSTATUSWIDTH	0x704
+
+#define	AI_RESETREADID		0x808
+#define	AI_RESETWRITEID		0x80c
+#define	AI_ERRLOGCTRL		0xa00
+#define	AI_ERRLOGDONE		0xa04
+#define	AI_ERRLOGSTATUS		0xa08
+#define	AI_ERRLOGADDRLO		0xa0c
+#define	AI_ERRLOGADDRHI		0xa10
+#define	AI_ERRLOGID		0xa14
+#define	AI_ERRLOGUSER		0xa18
+#define	AI_ERRLOGFLAGS		0xa1c
+#define	AI_INTSTATUS		0xa00
+#define	AI_CONFIG		0xe00
+#define	AI_ITCR			0xf00
+#define	AI_ITIPOOBA		0xf10
+#define	AI_ITIPOOBB		0xf14
+#define	AI_ITIPOOBC		0xf18
+#define	AI_ITIPOOBD		0xf1c
+#define	AI_ITIPOOBAOUT		0xf30
+#define	AI_ITIPOOBBOUT		0xf34
+#define	AI_ITIPOOBCOUT		0xf38
+#define	AI_ITIPOOBDOUT		0xf3c
+#define	AI_ITOPOOBA		0xf50
+#define	AI_ITOPOOBB		0xf54
+#define	AI_ITOPOOBC		0xf58
+#define	AI_ITOPOOBD		0xf5c
+#define	AI_ITOPOOBAIN		0xf70
+#define	AI_ITOPOOBBIN		0xf74
+#define	AI_ITOPOOBCIN		0xf78
+#define	AI_ITOPOOBDIN		0xf7c
+#define	AI_ITOPRESET		0xf90
+#define	AI_PERIPHERIALID4	0xfd0
+#define	AI_PERIPHERIALID5	0xfd4
+#define	AI_PERIPHERIALID6	0xfd8
+#define	AI_PERIPHERIALID7	0xfdc
+#define	AI_PERIPHERIALID0	0xfe0
+#define	AI_PERIPHERIALID1	0xfe4
+#define	AI_PERIPHERIALID2	0xfe8
+#define	AI_PERIPHERIALID3	0xfec
+#define	AI_COMPONENTID0		0xff0
+#define	AI_COMPONENTID1		0xff4
+#define	AI_COMPONENTID2		0xff8
+#define	AI_COMPONENTID3		0xffc
+
+/* resetctrl */
+#define	AIRC_RESET		1
+
+/* config */
+#define	AICFG_OOB		0x00000020
+#define	AICFG_IOS		0x00000010
+#define	AICFG_IOC		0x00000008
+#define	AICFG_TO		0x00000004
+#define	AICFG_ERRL		0x00000002
+#define	AICFG_RST		0x00000001
+
+/* bit defines for AI_OOBSELOUTB74 reg */
+#define OOB_SEL_OUTEN_B_5	15
+#define OOB_SEL_OUTEN_B_6	23
+
+/* AI_OOBSEL for A/B/C/D, 0-7 */
+#define AI_OOBSEL_MASK		0x1F
+#define AI_OOBSEL_0_SHIFT	0
+#define AI_OOBSEL_1_SHIFT	8
+#define AI_OOBSEL_2_SHIFT	16
+#define AI_OOBSEL_3_SHIFT	24
+#define AI_OOBSEL_4_SHIFT	0
+#define AI_OOBSEL_5_SHIFT	8
+#define AI_OOBSEL_6_SHIFT	16
+#define AI_OOBSEL_7_SHIFT	24
+
+#endif	/* _AIDMP_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_cfg.h b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
new file mode 100644
index 0000000000000000000000000000000000000000..fb6edc2f13950f183fa992952455b746d6d04b1c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
@@ -0,0 +1,11 @@
+/*
+ * BCM common config options
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcm_cfg.h 351867 2012-08-21 18:46:16Z $
+ */
+
+#ifndef _bcm_cfg_h_
+#define _bcm_cfg_h_
+#endif /* _bcm_cfg_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
new file mode 100644
index 0000000000000000000000000000000000000000..0375285ee1aa0911afdf2187973df628ddd8fdf7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
@@ -0,0 +1,343 @@
+/*
+ * Memory pools library, Public interface
+ *
+ * API Overview
+ *
+ * This package provides a memory allocation subsystem based on pools of
+ * homogeneous objects.
+ *
+ * Instrumentation is available for reporting memory utilization both
+ * on a per-data-structure basis and system wide.
+ *
+ * There are two main types defined in this API.
+ *
+ *    pool manager: A singleton object that acts as a factory for
+ *                  pool allocators. It also is used for global
+ *                  instrumentation, such as reporting all blocks
+ *                  in use across all data structures. The pool manager
+ *                  creates and provides individual memory pools
+ *                  upon request to application code.
+ *
+ *    memory pool:  An object for allocating homogeneous memory blocks.
+ *
+ * Global identifiers in this module use the following prefixes:
+ *    bcm_mpm_*     Memory pool manager
+ *    bcm_mp_*      Memory pool
+ *
+ * There are two main types of memory pools:
+ *
+ *    prealloc: The contiguous memory block of objects can either be supplied
+ *              by the client or malloc'ed by the memory manager. The objects are
+ *              allocated out of a block of memory and freed back to the block.
+ *
+ *    heap:     The memory pool allocator uses the heap (malloc/free) for memory.
+ *              In this case, the pool allocator is just providing statistics
+ *              and instrumentation on top of the heap, without modifying the heap
+ *              allocation implementation.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcm_mpool_pub.h 407097 2013-06-11 18:43:16Z $
+ */
+
+#ifndef _BCM_MPOOL_PUB_H
+#define _BCM_MPOOL_PUB_H 1
+
+#include <typedefs.h> /* needed for uint16 */
+
+
+/*
+**************************************************************************
+*
+* Type definitions, handles
+*
+**************************************************************************
+*/
+
+/* Forward declaration of OSL handle. */
+struct osl_info;
+
+/* Forward declaration of string buffer. */
+struct bcmstrbuf;
+
+/*
+ * Opaque type definition for the pool manager handle. This object is used for global
+ * memory pool operations such as obtaining a new pool, deleting a pool, iterating and
+ * instrumentation/debugging.
+ */
+struct bcm_mpm_mgr;
+typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h;
+
+/*
+ * Opaque type definition for an instance of a pool. This handle is used for allocating
+ * and freeing memory through the pool, as well as management/instrumentation on this
+ * specific pool.
+ */
+struct bcm_mp_pool;
+typedef struct bcm_mp_pool *bcm_mp_pool_h;
+
+
+/*
+ * To make instrumentation more readable, every memory
+ * pool must have a readable name. Pool names are up to
+ * 8 bytes including '\0' termination. (7 printable characters.)
+ */
+#define BCM_MP_NAMELEN 8
+
+
+/*
+ * Type definition for pool statistics.
+ */
+typedef struct bcm_mp_stats {
+	char name[BCM_MP_NAMELEN];  /* Name of this pool. */
+	unsigned int objsz;         /* Object size allocated in this pool */
+	uint16 nobj;                /* Total number of objects in this pool */
+	uint16 num_alloc;           /* Number of objects currently allocated */
+	uint16 high_water;          /* Max number of allocated objects. */
+	uint16 failed_alloc;        /* Failed allocations. */
+} bcm_mp_stats_t;
+
+
+/*
+**************************************************************************
+*
+* API Routines on the pool manager.
+*
+**************************************************************************
+*/
+
+/*
+ * bcm_mpm_init() - initialize the whole memory pool system.
+ *
+ * Parameters:
+ *    osh:       INPUT  Operating system handle. Needed for heap memory allocation.
+ *    max_pools: INPUT Maximum number of mempools supported.
+ *    mgr:       OUTPUT The handle is written with the new pools manager object/handle.
+ *
+ * Returns:
+ *    BCME_OK     Object initialized successfully. May be used.
+ *    BCME_NOMEM  Initialization failed due to no memory. Object must not be used.
+ */
+int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp);
+
+
+/*
+ * bcm_mpm_deinit() - de-initialize the whole memory pool system.
+ *
+ * Parameters:
+ *    mgr:     INPUT  Pointer to pool manager handle.
+ *
+ * Returns:
+ *    BCME_OK  Memory pool manager successfully de-initialized.
+ *    other    Indicated error occurred during de-initialization.
+ */
+int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp);
+
+/*
+ * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The
+ *                                  pool uses a contiguous block of pre-alloced
+ *                                  memory. The memory block may either be provided
+ *                                  by the client or dynamically allocated by the
+ *                                  pool manager.
+ *
+ * Parameters:
+ *    mgr:      INPUT  The handle to the pool manager
+ *    obj_sz:   INPUT  Size of objects that will be allocated by the new pool
+ *                     Must be >= sizeof(void *).
+ *    nobj:     INPUT  Maximum number of concurrently existing objects to support
+ *    memstart  INPUT  Pointer to the memory to use, or NULL to malloc()
+ *    memsize   INPUT  Number of bytes referenced from memstart (for error checking).
+ *                     Must be 0 if 'memstart' is NULL.
+ *    poolname  INPUT  For instrumentation, the name of the pool
+ *    newp:     OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ *    BCME_OK   Pool created ok.
+ *    other     Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr,
+                                 unsigned int obj_sz,
+                                 int nobj,
+                                 void *memstart,
+                                 unsigned int memsize,
+                                 const char poolname[BCM_MP_NAMELEN],
+                                 bcm_mp_pool_h *newp);
+
+
+/*
+ * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after
+ *                                  all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ *    mgr:     INPUT The handle to the pools manager
+ *    pool:    INPUT The handle of the  pool to delete
+ *
+ * Returns:
+ *    BCME_OK   Pool deleted ok.
+ *    other     Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+/*
+ * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory
+ *                              pool allocator uses the heap (malloc/free) for memory.
+ *                              In this case, the pool allocator is just providing
+ *                              statistics and instrumentation on top of the heap,
+ *                              without modifying the heap allocation implementation.
+ *
+ * Parameters:
+ *    mgr:      INPUT  The handle to the pool manager
+ *    obj_sz:   INPUT  Size of objects that will be allocated by the new pool
+ *    poolname  INPUT  For instrumentation, the name of the pool
+ *    newp:     OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ *    BCME_OK   Pool created ok.
+ *    other     Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz,
+                             const char poolname[BCM_MP_NAMELEN],
+                             bcm_mp_pool_h *newp);
+
+
+/*
+ * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after
+ *                              all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ *    mgr:     INPUT The handle to the pools manager
+ *    pool:    INPUT The handle of the  pool to delete
+ *
+ * Returns:
+ *    BCME_OK   Pool deleted ok.
+ *    other     Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+
+/*
+ * bcm_mpm_stats() - Return stats for all pools
+ *
+ * Parameters:
+ *    mgr:         INPUT   The handle to the pools manager
+ *    stats:       OUTPUT  Array of pool statistics.
+ *    nentries:    MOD     Max elements in 'stats' array on INPUT. Actual number
+ *                         of array elements copied to 'stats' on OUTPUT.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error getting stats.
+ *
+ */
+int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries);
+
+
+/*
+ * bcm_mpm_dump() - Display statistics on all pools
+ *
+ * Parameters:
+ *    mgr:     INPUT  The handle to the pools manager
+ *    b:       OUTPUT Output buffer.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during dump.
+ *
+ */
+int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b);
+
+
+/*
+ * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to
+ *                          compensate for alignment requirements of the objects.
+ *                          This function provides the padded object size. If clients
+ *                          pre-allocate a memory slab for a memory pool, the
+ *                          padded object size should be used by the client to allocate
+ *                          the memory slab (in order to provide sufficient space for
+ *                          the maximum number of objects).
+ *
+ * Parameters:
+ *    mgr:            INPUT   The handle to the pools manager.
+ *    obj_sz:         INPUT   Input object size.
+ *    padded_obj_sz:  OUTPUT  Padded object size.
+ *
+ * Returns:
+ *    BCME_OK      Ok
+ *    BCME_BADARG  Bad arguments.
+ *
+ */
+int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz);
+
+
+/*
+***************************************************************************
+*
+* API Routines on a specific pool.
+*
+***************************************************************************
+*/
+
+
+/*
+ * bcm_mp_alloc() - Allocate a memory pool object.
+ *
+ * Parameters:
+ *    pool:    INPUT    The handle to the pool.
+ *
+ * Returns:
+ *    A pointer to the new object. NULL on error.
+ *
+ */
+void* bcm_mp_alloc(bcm_mp_pool_h pool);
+
+/*
+ * bcm_mp_free() - Free a memory pool object.
+ *
+ * Parameters:
+ *    pool:  INPUT   The handle to the pool.
+ *    objp:  INPUT   A pointer to the object to free.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during free.
+ *
+ */
+int bcm_mp_free(bcm_mp_pool_h pool, void *objp);
+
+/*
+ * bcm_mp_stats() - Return stats for this pool
+ *
+ * Parameters:
+ *    pool:     INPUT    The handle to the pool
+ *    stats:    OUTPUT   Pool statistics
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error getting statistics.
+ *
+ */
+int bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats);
+
+
+/*
+ * bcm_mp_dump() - Dump a pool
+ *
+ * Parameters:
+ *    pool:    INPUT    The handle to the pool
+ *    b        OUTPUT   Output buffer
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during dump.
+ *
+ */
+int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b);
+
+
+#endif /* _BCM_MPOOL_PUB_H */
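
A minimal end-to-end use of this API, under the assumption that the OSL handle and the BCME_* error codes come from the usual osl/bcmutils headers, looks roughly like the sketch below (heap-pool variant, error handling abbreviated; the helper name is illustrative only).

    /* Illustrative sketch only; 'osh' is supplied by the OSL layer. */
    static int mpool_demo(struct osl_info *osh)
    {
    	bcm_mpm_mgr_h mgr = NULL;
    	bcm_mp_pool_h pool = NULL;
    	void *obj;

    	if (bcm_mpm_init(osh, 8, &mgr) != BCME_OK)		/* up to 8 pools */
    		return -1;
    	if (bcm_mpm_create_heap_pool(mgr, 64, "demo", &pool) != BCME_OK)
    		goto out;

    	obj = bcm_mp_alloc(pool);				/* one fixed-size (64-byte) object */
    	if (obj != NULL)
    		bcm_mp_free(pool, obj);

    	bcm_mpm_delete_heap_pool(mgr, &pool);
    out:
    	bcm_mpm_deinit(&mgr);
    	return 0;
    }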
diff --git a/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
new file mode 100644
index 0000000000000000000000000000000000000000..76788d48edadb832a78d0cac6def3bd4edcdfbab
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
@@ -0,0 +1,114 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmcdc.h 318308 2012-03-02 02:23:42Z $
+ */
+#ifndef _bcmcdc_h_
+#define	_bcmcdc_h_
+#include <proto/ethernet.h>
+
+typedef struct cdc_ioctl {
+	uint32 cmd;      /* ioctl command value */
+	uint32 len;      /* lower 16: output buflen; upper 16: input buflen (excludes header) */
+	uint32 flags;    /* flag defns given below */
+	uint32 status;   /* status code returned from the device */
+} cdc_ioctl_t;
+
+/* Max valid buffer size that can be sent to the dongle */
+#define CDC_MAX_MSG_SIZE   ETHER_MAX_LEN
+
+/* len field is divided into input and output buffer lengths */
+#define CDCL_IOC_OUTLEN_MASK   0x0000FFFF  /* maximum or expected response length, */
+					   /* excluding IOCTL header */
+#define CDCL_IOC_OUTLEN_SHIFT  0
+#define CDCL_IOC_INLEN_MASK    0xFFFF0000   /* input buffer length, excluding IOCTL header */
+#define CDCL_IOC_INLEN_SHIFT   16
+
+/* CDC flag definitions */
+#define CDCF_IOC_ERROR		0x01	/* 0=success, 1=ioctl cmd failed */
+#define CDCF_IOC_SET		0x02	/* 0=get, 1=set cmd */
+#define CDCF_IOC_OVL_IDX_MASK	0x3c	/* overlay region index mask */
+#define CDCF_IOC_OVL_RSV	0x40	/* 1=reserve this overlay region */
+#define CDCF_IOC_OVL		0x80	/* 1=this ioctl corresponds to an overlay */
+#define CDCF_IOC_ACTION_MASK	0xfe	/* SET/GET, OVL_IDX, OVL_RSV, OVL mask */
+#define CDCF_IOC_ACTION_SHIFT	1	/* SET/GET, OVL_IDX, OVL_RSV, OVL shift */
+#define CDCF_IOC_IF_MASK	0xF000	/* I/F index */
+#define CDCF_IOC_IF_SHIFT	12
+#define CDCF_IOC_ID_MASK	0xFFFF0000	/* used to uniquely id an ioctl req/resp pairing */
+#define CDCF_IOC_ID_SHIFT	16		/* # of bits of shift for ID Mask */
+
+#define CDC_IOC_IF_IDX(flags)	(((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags)	(((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
+
+/*
+ * BDC header
+ *
+ *   The BDC header is used on data packets to convey priority across USB.
+ */
+
+struct bdc_header {
+	uint8	flags;			/* Flags */
+	uint8	priority;		/* 802.1d Priority 0:2 bits, 4:7 USB flow control info */
+	uint8	flags2;
+	uint8	dataOffset;		/* Offset from end of BDC header to packet data, in
+					 * 4-byte words.  Leaves room for optional headers.
+					 */
+};
+
+#define	BDC_HEADER_LEN		4
+
+/* flags field bitmap */
+#define BDC_FLAG_80211_PKT	0x01	/* Packet is in 802.11 format (dongle -> host) */
+#define BDC_FLAG_SUM_GOOD	0x04	/* Dongle has verified good RX checksums */
+#define BDC_FLAG_SUM_NEEDED	0x08	/* Dongle needs to do TX checksums: host->device */
+#define BDC_FLAG_EVENT_MSG	0x08	/* Payload contains an event msg: device->host */
+#define BDC_FLAG_VER_MASK	0xf0	/* Protocol version mask */
+#define BDC_FLAG_VER_SHIFT	4	/* Protocol version shift */
+
+/* priority field bitmap */
+#define BDC_PRIORITY_MASK	0x07
+#define BDC_PRIORITY_FC_MASK	0xf0	/* flow control info mask */
+#define BDC_PRIORITY_FC_SHIFT	4	/* flow control info shift */
+
+/* flags2 field bitmap */
+#define BDC_FLAG2_IF_MASK	0x0f	/* interface index (host <-> dongle) */
+#define BDC_FLAG2_IF_SHIFT	0
+#define BDC_FLAG2_FC_FLAG	0x10	/* flag to indicate if pkt contains */
+					/* FLOW CONTROL info only */
+
+/* version numbers */
+#define BDC_PROTO_VER_1		1	/* Old Protocol version */
+#define BDC_PROTO_VER		2	/* Protocol version */
+
+/* flags2.if field access macros */
+#define BDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+#define BDC_FLAG2_PAD_MASK		0xf0
+#define BDC_FLAG_PAD_MASK		0x03
+#define BDC_FLAG2_PAD_SHIFT		2
+#define BDC_FLAG_PAD_SHIFT		0
+#define BDC_FLAG2_PAD_IDX		0x3c
+#define BDC_FLAG_PAD_IDX		0x03
+#define BDC_GET_PAD_LEN(hdr) \
+	((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \
+	((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT)))
+#define BDC_SET_PAD_LEN(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \
+	(((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \
+	((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \
+	(((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT)))
+
+#endif /* _bcmcdc_h_ */
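
To make the bit packing above concrete, the sketch below fills a CDC ioctl header for a "set" request: the request and response lengths share the 32-bit len field, while the SET flag, request id, and interface index all live in flags. Byte-order handling and the actual command ids are assumed from the rest of the protocol layer; the helper name is illustrative only.

    /* Illustrative sketch only; uint16/uint32 come from typedefs.h. */
    static void cdc_fill_set_hdr(cdc_ioctl_t *msg, uint32 cmd,
                                 uint16 inlen, uint16 outlen, uint16 reqid)
    {
    	msg->cmd = cmd;
    	/* lower 16 bits: expected response length, upper 16 bits: input length */
    	msg->len = ((uint32)outlen << CDCL_IOC_OUTLEN_SHIFT) |
    	           ((uint32)inlen << CDCL_IOC_INLEN_SHIFT);
    	msg->flags = CDCF_IOC_SET | ((uint32)reqid << CDCF_IOC_ID_SHIFT);
    	CDC_SET_IF_IDX(msg, 0);		/* interface index occupies bits 12..15 of flags */
    	msg->status = 0;
    }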
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
new file mode 100644
index 0000000000000000000000000000000000000000..8c720d0a87fd533e2deb7e5cff857872738a7160
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
@@ -0,0 +1,320 @@
+/*
+ * Misc system wide definitions
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmdefs.h 474209 2014-04-30 12:16:47Z $
+ */
+
+#ifndef	_bcmdefs_h_
+#define	_bcmdefs_h_
+
+/*
+ * This file does not need to be included explicitly; it is included automatically when
+ * typedefs.h is included.
+ */
+
+/* Use BCM_REFERENCE to suppress warnings about intentionally-unused function
+ * arguments or local variables.
+ */
+#define BCM_REFERENCE(data)	((void)(data))
+
+/* Allow for suppressing unused variable warnings. */
+#ifdef __GNUC__
+#define UNUSED_VAR     __attribute__ ((unused))
+#else
+#define UNUSED_VAR
+#endif
+
+/* Compile-time assert can be used in place of ASSERT if the expression evaluates
+ * to a constant at compile time.
+ */
+#define STATIC_ASSERT(expr) { \
+	/* Make sure the expression is constant. */ \
+	typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e UNUSED_VAR; \
+	/* Make sure the expression is true. */ \
+	typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1] UNUSED_VAR; \
+}
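/* For reference, a typical (illustrative) use of STATIC_ASSERT is pinning a wire-format size at
 * compile time. Because the macro expands to a brace block, it must appear inside a function body:
 *
 *	static void bdc_layout_check(void)
 *	{
 *		STATIC_ASSERT(sizeof(struct bdc_header) == BDC_HEADER_LEN);
 *	}
 *
 * (struct bdc_header and BDC_HEADER_LEN are from bcmcdc.h and are used here only as an example.)
 */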
+
+/* Reclaiming text and data :
+ * The following macros specify special linker sections that can be reclaimed
+ * after a system is considered 'up'.
+ * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN,
+ * as in most cases, the attach function calls the detach function to clean up on error).
+ */
+
+#define bcmreclaimed 		0
+#define _data	_data
+#define _fn	_fn
+#define BCMPREATTACHDATA(_data)	_data
+#define BCMPREATTACHFN(_fn)	_fn
+#define _data	_data
+#define _fn		_fn
+#define _fn	_fn
+#define	BCMNMIATTACHFN(_fn)	_fn
+#define	BCMNMIATTACHDATA(_data)	_data
+#define CONST	const
+
+#undef BCM47XX_CA9
+
+#ifndef BCMFASTPATH
+#if defined(BCM47XX_CA9)
+#define BCMFASTPATH		__attribute__ ((__section__ (".text.fastpath")))
+#define BCMFASTPATH_HOST	__attribute__ ((__section__ (".text.fastpath_host")))
+#else
+#define BCMFASTPATH
+#define BCMFASTPATH_HOST
+#endif
+#endif /* BCMFASTPATH */
+
+
+/* Use the BCMRAMFN() macro to tag functions in source that must be included in RAM (excluded from
+ * ROM). This should eliminate the need to manually specify these functions in the ROM config file.
+ * It should only be used in special cases where the function must be in RAM for *all* ROM-based
+ * chips.
+ */
+	#define BCMRAMFN(_fn)	_fn
+
+#define STATIC	static
+
+/* Bus types */
+#define	SI_BUS			0	/* SOC Interconnect */
+#define	PCI_BUS			1	/* PCI target */
+#define	PCMCIA_BUS		2	/* PCMCIA target */
+#define SDIO_BUS		3	/* SDIO target */
+#define JTAG_BUS		4	/* JTAG */
+#define USB_BUS			5	/* USB (does not support R/W REG) */
+#define SPI_BUS			6	/* gSPI target */
+#define RPC_BUS			7	/* RPC target */
+
+/* Allows size optimization for single-bus image */
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus) 	(BCMBUSTYPE)
+#else
+#define BUSTYPE(bus) 	(bus)
+#endif
+
+/* Allows size optimization for single-backplane image */
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus) 	(BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus) 	(bus)
+#endif
+
+
+/* Allows size optimization for SPROM support */
+#if defined(BCMSPROMBUS)
+#define SPROMBUS	(BCMSPROMBUS)
+#elif defined(SI_PCMCIA_SROM)
+#define SPROMBUS	(PCMCIA_BUS)
+#else
+#define SPROMBUS	(PCI_BUS)
+#endif
+
+/* Allows size optimization for single-chip image */
+#ifdef BCMCHIPID
+#define CHIPID(chip)	(BCMCHIPID)
+#else
+#define CHIPID(chip)	(chip)
+#endif
+
+#ifdef BCMCHIPREV
+#define CHIPREV(rev)	(BCMCHIPREV)
+#else
+#define CHIPREV(rev)	(rev)
+#endif
+
+/* Defines for DMA Address Width - Shared between OSL and HNDDMA */
+#define DMADDR_MASK_32 0x0		/* Address mask for 32-bits */
+#define DMADDR_MASK_30 0xc0000000	/* Address mask for 30-bits */
+#define DMADDR_MASK_26 0xFC000000	/* Address mask for 26-bits */
+#define DMADDR_MASK_0  0xffffffff	/* Address mask for 0-bits (hi-part) */
+
+#define	DMADDRWIDTH_26  26 /* 26-bit addressing capability */
+#define	DMADDRWIDTH_30  30 /* 30-bit addressing capability */
+#define	DMADDRWIDTH_32  32 /* 32-bit addressing capability */
+#define	DMADDRWIDTH_63  63 /* 64-bit addressing capability */
+#define	DMADDRWIDTH_64  64 /* 64-bit addressing capability */
+
+typedef struct {
+	uint32 loaddr;
+	uint32 hiaddr;
+} dma64addr_t;
+
+#define PHYSADDR64HI(_pa) ((_pa).hiaddr)
+#define PHYSADDR64HISET(_pa, _val) \
+	do { \
+		(_pa).hiaddr = (_val);		\
+	} while (0)
+#define PHYSADDR64LO(_pa) ((_pa).loaddr)
+#define PHYSADDR64LOSET(_pa, _val) \
+	do { \
+		(_pa).loaddr = (_val);		\
+	} while (0)
+
+#ifdef BCMDMA64OSL
+typedef dma64addr_t dmaaddr_t;
+#define PHYSADDRHI(_pa) PHYSADDR64HI(_pa)
+#define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val)
+#define PHYSADDRLO(_pa)  PHYSADDR64LO(_pa)
+#define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val)
+
+#else
+typedef unsigned long dmaaddr_t;
+#define PHYSADDRHI(_pa) (0)
+#define PHYSADDRHISET(_pa, _val)
+#define PHYSADDRLO(_pa) ((_pa))
+#define PHYSADDRLOSET(_pa, _val) \
+	do { \
+		(_pa) = (_val);			\
+	} while (0)
+#endif /* BCMDMA64OSL */
+#define PHYSADDRISZERO(_pa) (PHYSADDRLO(_pa) == 0 && PHYSADDRHI(_pa) == 0)
+
+/* One physical DMA segment */
+typedef struct  {
+	dmaaddr_t addr;
+	uint32	  length;
+} hnddma_seg_t;
+
+#define MAX_DMA_SEGS 8
+
+
+typedef struct {
+	void *oshdmah; /* Opaque handle for OSL to store its information */
+	uint origsize; /* Size of the virtual packet */
+	uint nsegs;
+	hnddma_seg_t segs[MAX_DMA_SEGS];
+} hnddma_seg_map_t;
+
+
+/* packet headroom necessary to accommodate the largest header in the system (i.e. TXOFF).
+ * By doing so, we avoid the need to allocate an extra buffer for the header when bridging to WL.
+ * There is a compile-time check in wlc.c which ensures that this value is at least as big
+ * as TXOFF. This value is used in dma_rxfill (hnddma.c).
+ */
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY)
+/* add 40 bytes to allow for extra RPC header and info  */
+#define BCMEXTRAHDROOM 260
+#else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+#if defined(BCM47XX_CA9)
+#define BCMEXTRAHDROOM 224
+#else
+#define BCMEXTRAHDROOM 204
+#endif /* BCM47XX_CA9 */
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef SDALIGN
+#define SDALIGN	32
+#endif
+
+/* Headroom required for dongle-to-host communication.  Packets allocated
+ * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should
+ * leave this much room in front for low-level message headers which may
+ * be needed to get across the dongle bus to the host.  (These messages
+ * don't go over the network, so room for the full WL header above would
+ * be a waste.)
+ */
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD	(BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+#if defined(NO_BCMDBG_ASSERT)
+# undef BCMDBG_ASSERT
+# undef BCMASSERT_LOG
+#endif
+
+#if defined(BCMASSERT_LOG)
+#define BCMASSERT_SUPPORT
+#endif 
+
+/* Macros for doing definition and get/set of bitfields
+ * Usage example, e.g. a three-bit field (bits 4-6):
+ *    #define <NAME>_M	BITFIELD_MASK(3)
+ *    #define <NAME>_S	4
+ * ...
+ *    regval = R_REG(osh, &regs->regfoo);
+ *    field = GFIELD(regval, <NAME>);
+ *    regval = SFIELD(regval, <NAME>, 1);
+ *    W_REG(osh, &regs->regfoo, regval);
+ */
+#define BITFIELD_MASK(width) \
+		(((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+		(((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+		(((val) & (~(field ## _M << field ## _S))) | \
+		 ((unsigned)(bits) << field ## _S))
+
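/* A worked numeric instance of the pattern above, for an illustrative 3-bit field at bit 4:
 *
 *	#define DEMO_M	BITFIELD_MASK(3)	(i.e. 0x7)
 *	#define DEMO_S	4
 *
 *	GFIELD(0x00A5, DEMO)      == (0xA5 >> 4) & 0x7            == 0x2
 *	SFIELD(0x00A5, DEMO, 0x5) == (0xA5 & ~0x70) | (0x5 << 4)  == 0xD5
 */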
+/* define BCMSMALL to remove misc features for memory-constrained environments */
+#ifdef BCMSMALL
+#undef	BCMSPACE
+#define bcmspace	FALSE	/* if (bcmspace) code is discarded */
+#else
+#define	BCMSPACE
+#define bcmspace	TRUE	/* if (bcmspace) code is retained */
+#endif
+
+/* Max. nvram variable table size */
+#ifndef MAXSZ_NVRAM_VARS
+#define	MAXSZ_NVRAM_VARS	4096
+#endif
+
+
+
+/* WL_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also
+ * be defined via makefiles (e.g. ROM auto abandon unoptimized compiles).
+ */
+
+
+#ifdef BCMLFRAG /* BCMLFRAG support enab macros  */
+	extern bool _bcmlfrag;
+	#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+		#define BCMLFRAG_ENAB() (_bcmlfrag)
+	#elif defined(BCMLFRAG_DISABLED)
+		#define BCMLFRAG_ENAB()	(0)
+	#else
+		#define BCMLFRAG_ENAB()	(1)
+	#endif
+#else
+	#define BCMLFRAG_ENAB()		(0)
+#endif /* BCMLFRAG */
+#ifdef BCMSPLITRX /* BCMSPLITRX support enab macros  */
+	extern bool _bcmsplitrx;
+	#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+		#define BCMSPLITRX_ENAB() (_bcmsplitrx)
+	#elif defined(BCMSPLITRX_DISABLED)
+		#define BCMSPLITRX_ENAB()	(0)
+	#else
+		#define BCMSPLITRX_ENAB()	(1)
+	#endif
+#else
+	#define BCMSPLITRX_ENAB()		(0)
+#endif /* BCMSPLITRX */
+#ifdef BCM_SPLITBUF
+	extern bool _bcmsplitbuf;
+	#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+		#define BCM_SPLITBUF_ENAB() (_bcmsplitbuf)
+	#elif defined(BCM_SPLITBUF_DISABLED)
+		#define BCM_SPLITBUF_ENAB()	(0)
+	#else
+		#define BCM_SPLITBUF_ENAB()	(1)
+	#endif
+#else
+	#define BCM_SPLITBUF_ENAB()		(0)
+#endif	/* BCM_SPLITBUF */
+/* Max size for reclaimable NVRAM array */
+#ifdef DL_NVRAM
+#define NVRAM_ARRAY_MAXSIZE	DL_NVRAM
+#else
+#define NVRAM_ARRAY_MAXSIZE	MAXSZ_NVRAM_VARS
+#endif /* DL_NVRAM */
+
+extern uint32 gFWID;
+
+
+#endif /* _bcmdefs_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
new file mode 100644
index 0000000000000000000000000000000000000000..b7c386ef0e84866799b4a7a023331690006b5ad9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
@@ -0,0 +1,1087 @@
+/*
+ * Broadcom device-specific manifest constants.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmdevs.h 484136 2014-06-12 04:36:10Z $
+ */
+
+#ifndef	_BCMDEVS_H
+#define	_BCMDEVS_H
+
+/* PCI vendor IDs */
+#define	VENDOR_EPIGRAM		0xfeda
+#define	VENDOR_BROADCOM		0x14e4
+#define	VENDOR_3COM		0x10b7
+#define	VENDOR_NETGEAR		0x1385
+#define	VENDOR_DIAMOND		0x1092
+#define	VENDOR_INTEL		0x8086
+#define	VENDOR_DELL		0x1028
+#define	VENDOR_HP		0x103c
+#define	VENDOR_HP_COMPAQ	0x0e11
+#define	VENDOR_APPLE		0x106b
+#define VENDOR_SI_IMAGE		0x1095		/* Silicon Image, used by Arasan SDIO Host */
+#define VENDOR_BUFFALO		0x1154		/* Buffalo vendor id */
+#define VENDOR_TI		0x104c		/* Texas Instruments */
+#define VENDOR_RICOH		0x1180		/* Ricoh */
+#define VENDOR_JMICRON		0x197b
+
+
+/* PCMCIA vendor IDs */
+#define	VENDOR_BROADCOM_PCMCIA	0x02d0
+
+/* SDIO vendor IDs */
+#define	VENDOR_BROADCOM_SDIO	0x00BF
+
+/* DONGLE VID/PIDs */
+#define BCM_DNGL_VID		0x0a5c
+#define BCM_DNGL_BL_PID_4328	0xbd12
+#define BCM_DNGL_BL_PID_4322	0xbd13
+#define BCM_DNGL_BL_PID_4319    0xbd16
+#define BCM_DNGL_BL_PID_43236   0xbd17
+#define BCM_DNGL_BL_PID_4332	0xbd18
+#define BCM_DNGL_BL_PID_4330	0xbd19
+#define BCM_DNGL_BL_PID_4334	0xbd1a
+#define BCM_DNGL_BL_PID_43239   0xbd1b
+#define BCM_DNGL_BL_PID_4324	0xbd1c
+#define BCM_DNGL_BL_PID_4360	0xbd1d
+#define BCM_DNGL_BL_PID_43143	0xbd1e
+#define BCM_DNGL_BL_PID_43242	0xbd1f
+#define BCM_DNGL_BL_PID_43342	0xbd21
+#define BCM_DNGL_BL_PID_4335	0xbd20
+#define BCM_DNGL_BL_PID_43341	0xbd22
+#define BCM_DNGL_BL_PID_4350    0xbd23
+#define BCM_DNGL_BL_PID_4345    0xbd24
+#define BCM_DNGL_BL_PID_4349	0xbd25
+#define BCM_DNGL_BL_PID_4354	0xbd26
+#define BCM_DNGL_BL_PID_43569   0xbd27
+#define BCM_DNGL_BL_PID_43909	0xbd28
+
+#define BCM_DNGL_BDC_PID	0x0bdc
+#define BCM_DNGL_JTAG_PID	0x4a44
+
+/* HW USB BLOCK [CPULESS USB] PIDs */
+#define BCM_HWUSB_PID_43239     43239
+
+/* PCI Device IDs */
+#define	BCM4210_DEVICE_ID	0x1072		/* never used */
+#define	BCM4230_DEVICE_ID	0x1086		/* never used */
+#define	BCM4401_ENET_ID		0x170c		/* 4401b0 production enet cards */
+#define	BCM3352_DEVICE_ID	0x3352		/* bcm3352 device id */
+#define	BCM3360_DEVICE_ID	0x3360		/* bcm3360 device id */
+#define	BCM4211_DEVICE_ID	0x4211
+#define	BCM4231_DEVICE_ID	0x4231
+#define	BCM4303_D11B_ID		0x4303		/* 4303 802.11b */
+#define	BCM4311_D11G_ID		0x4311		/* 4311 802.11b/g id */
+#define	BCM4311_D11DUAL_ID	0x4312		/* 4311 802.11a/b/g id */
+#define	BCM4311_D11A_ID		0x4313		/* 4311 802.11a id */
+#define	BCM4328_D11DUAL_ID	0x4314		/* 4328/4312 802.11a/g id */
+#define	BCM4328_D11G_ID		0x4315		/* 4328/4312 802.11g id */
+#define	BCM4328_D11A_ID		0x4316		/* 4328/4312 802.11a id */
+#define	BCM4318_D11G_ID		0x4318		/* 4318 802.11b/g id */
+#define	BCM4318_D11DUAL_ID	0x4319		/* 4318 802.11a/b/g id */
+#define	BCM4318_D11A_ID		0x431a		/* 4318 802.11a id */
+#define	BCM4325_D11DUAL_ID	0x431b		/* 4325 802.11a/g id */
+#define	BCM4325_D11G_ID		0x431c		/* 4325 802.11g id */
+#define	BCM4325_D11A_ID		0x431d		/* 4325 802.11a id */
+#define	BCM4306_D11G_ID		0x4320		/* 4306 802.11g */
+#define	BCM4306_D11A_ID		0x4321		/* 4306 802.11a */
+#define	BCM4306_UART_ID		0x4322		/* 4306 uart */
+#define	BCM4306_V90_ID		0x4323		/* 4306 v90 codec */
+#define	BCM4306_D11DUAL_ID	0x4324		/* 4306 dual A+B */
+#define	BCM4306_D11G_ID2	0x4325		/* BCM4306_D11G_ID; INF w/loose binding war */
+#define	BCM4321_D11N_ID		0x4328		/* 4321 802.11n dualband id */
+#define	BCM4321_D11N2G_ID	0x4329		/* 4321 802.11n 2.4Ghz band id */
+#define	BCM4321_D11N5G_ID	0x432a		/* 4321 802.11n 5Ghz band id */
+#define BCM4322_D11N_ID		0x432b		/* 4322 802.11n dualband device */
+#define BCM4322_D11N2G_ID	0x432c		/* 4322 802.11n 2.4GHz device */
+#define BCM4322_D11N5G_ID	0x432d		/* 4322 802.11n 5GHz device */
+#define BCM4329_D11N_ID		0x432e		/* 4329 802.11n dualband device */
+#define BCM4329_D11N2G_ID	0x432f		/* 4329 802.11n 2.4G device */
+#define BCM4329_D11N5G_ID	0x4330		/* 4329 802.11n 5G device */
+#define	BCM4315_D11DUAL_ID	0x4334		/* 4315 802.11a/g id */
+#define	BCM4315_D11G_ID		0x4335		/* 4315 802.11g id */
+#define	BCM4315_D11A_ID		0x4336		/* 4315 802.11a id */
+#define BCM4319_D11N_ID		0x4337		/* 4319 802.11n dualband device */
+#define BCM4319_D11N2G_ID	0x4338		/* 4319 802.11n 2.4G device */
+#define BCM4319_D11N5G_ID	0x4339		/* 4319 802.11n 5G device */
+#define BCM43231_D11N2G_ID	0x4340		/* 43231 802.11n 2.4GHz device */
+#define BCM43221_D11N2G_ID	0x4341		/* 43221 802.11n 2.4GHz device */
+#define BCM43222_D11N_ID	0x4350		/* 43222 802.11n dualband device */
+#define BCM43222_D11N2G_ID	0x4351		/* 43222 802.11n 2.4GHz device */
+#define BCM43222_D11N5G_ID	0x4352		/* 43222 802.11n 5GHz device */
+#define BCM43224_D11N_ID	0x4353		/* 43224 802.11n dualband device */
+#define BCM43224_D11N_ID_VEN1	0x0576		/* Vendor specific 43224 802.11n db device */
+#define BCM43226_D11N_ID	0x4354		/* 43226 802.11n dualband device */
+#define BCM43236_D11N_ID	0x4346		/* 43236 802.11n dualband device */
+#define BCM43236_D11N2G_ID	0x4347		/* 43236 802.11n 2.4GHz device */
+#define BCM43236_D11N5G_ID	0x4348		/* 43236 802.11n 5GHz device */
+#define BCM43225_D11N2G_ID	0x4357		/* 43225 802.11n 2.4GHz device */
+#define BCM43421_D11N_ID	0xA99D		/* 43421 802.11n dualband device */
+#define BCM4313_D11N2G_ID	0x4727		/* 4313 802.11n 2.4G device */
+#define BCM4330_D11N_ID         0x4360          /* 4330 802.11n dualband device */
+#define BCM4330_D11N2G_ID       0x4361          /* 4330 802.11n 2.4G device */
+#define BCM4330_D11N5G_ID       0x4362          /* 4330 802.11n 5G device */
+#define BCM4336_D11N_ID		0x4343		/* 4336 802.11n 2.4GHz device */
+#define BCM6362_D11N_ID		0x435f		/* 6362 802.11n dualband device */
+#define BCM6362_D11N2G_ID	0x433f		/* 6362 802.11n 2.4Ghz band id */
+#define BCM6362_D11N5G_ID	0x434f		/* 6362 802.11n 5Ghz band id */
+#define BCM4331_D11N_ID		0x4331		/* 4331 802.11n dualband id */
+#define BCM4331_D11N2G_ID	0x4332		/* 4331 802.11n 2.4Ghz band id */
+#define BCM4331_D11N5G_ID	0x4333		/* 4331 802.11n 5Ghz band id */
+#define BCM43237_D11N_ID	0x4355		/* 43237 802.11n dualband device */
+#define BCM43237_D11N5G_ID	0x4356		/* 43237 802.11n 5GHz device */
+#define BCM43227_D11N2G_ID	0x4358		/* 43228 802.11n 2.4GHz device */
+#define BCM43228_D11N_ID	0x4359		/* 43228 802.11n DualBand device */
+#define BCM43228_D11N5G_ID	0x435a		/* 43228 802.11n 5GHz device */
+#define BCM43362_D11N_ID	0x4363		/* 43362 802.11n 2.4GHz device */
+#define BCM43239_D11N_ID	0x4370		/* 43239 802.11n dualband device */
+#define BCM4324_D11N_ID		0x4374		/* 4324 802.11n dualband device */
+#define BCM43217_D11N2G_ID	0x43a9		/* 43217 802.11n 2.4GHz device */
+#define BCM43131_D11N2G_ID	0x43aa		/* 43131 802.11n 2.4GHz device */
+#define BCM4314_D11N2G_ID	0x4364		/* 4314 802.11n 2.4G device */
+#define BCM43142_D11N2G_ID	0x4365		/* 43142 802.11n 2.4G device */
+#define BCM43143_D11N2G_ID	0x4366		/* 43143 802.11n 2.4G device */
+#define BCM4334_D11N_ID		0x4380		/* 4334 802.11n dualband device */
+#define BCM4334_D11N2G_ID	0x4381		/* 4334 802.11n 2.4G device */
+#define BCM4334_D11N5G_ID	0x4382		/* 4334 802.11n 5G device */
+#define BCM43342_D11N_ID	0x4383		/* 43342 802.11n dualband device */
+#define BCM43342_D11N2G_ID	0x4384		/* 43342 802.11n 2.4G device */
+#define BCM43342_D11N5G_ID	0x4385		/* 43342 802.11n 5G device */
+#define BCM43341_D11N_ID	0x4386		/* 43341 802.11n dualband device */
+#define BCM43341_D11N2G_ID	0x4387		/* 43341 802.11n 2.4G device */
+#define BCM43341_D11N5G_ID	0x4388		/* 43341 802.11n 5G device */
+#define BCM4360_D11AC_ID	0x43a0
+#define BCM4360_D11AC2G_ID	0x43a1
+#define BCM4360_D11AC5G_ID	0x43a2
+#define BCM4345_D11AC_ID	0x43ab		/* 4345 802.11ac dualband device */
+#define BCM4345_D11AC2G_ID	0x43ac		/* 4345 802.11ac 2.4G device */
+#define BCM4345_D11AC5G_ID	0x43ad		/* 4345 802.11ac 5G device */
+#define BCM4335_D11AC_ID	0x43ae
+#define BCM4335_D11AC2G_ID	0x43af
+#define BCM4335_D11AC5G_ID	0x43b0
+#define BCM4352_D11AC_ID	0x43b1		/* 4352 802.11ac dualband device */
+#define BCM4352_D11AC2G_ID	0x43b2		/* 4352 802.11ac 2.4G device */
+#define BCM4352_D11AC5G_ID	0x43b3		/* 4352 802.11ac 5G device */
+#define BCM43602_D11AC_ID	0x43ba		/* ac dualband PCI devid SPROM programmed */
+#define BCM43602_D11AC2G_ID	0x43bb		/* 43602 802.11ac 2.4G device */
+#define BCM43602_D11AC5G_ID	0x43bc		/* 43602 802.11ac 5G device */
+#define BCM4349_D11AC_ID	0x4349		/* 4349 802.11ac dualband device */
+#define BCM4349_D11AC2G_ID	0x43dd		/* 4349 802.11ac 2.4G device */
+#define BCM4349_D11AC5G_ID	0x43de		/* 4349 802.11ac 5G device */
+#define BCM4355_D11AC_ID	0x43d3		/* 4355 802.11ac dualband device */
+#define BCM4355_D11AC2G_ID	0x43d4		/* 4355 802.11ac 2.4G device */
+#define BCM4355_D11AC5G_ID	0x43d5		/* 4355 802.11ac 5G device */
+#define BCM4359_D11AC_ID	0x43d6		/* 4359 802.11ac dualband device */
+#define BCM4359_D11AC2G_ID	0x43d7		/* 4359 802.11ac 2.4G device */
+#define BCM4359_D11AC5G_ID	0x43d8		/* 4359 802.11ac 5G device */
+
+/* PCI Subsystem ID */
+#define BCM943228HMB_SSID_VEN1	0x0607
+#define BCM94313HMGBL_SSID_VEN1	0x0608
+#define BCM94313HMG_SSID_VEN1	0x0609
+#define BCM943142HM_SSID_VEN1	0x0611
+
+#define BCM43143_D11N2G_ID	0x4366		/* 43143 802.11n 2.4G device */
+
+#define BCM43242_D11N_ID	0x4367		/* 43242 802.11n dualband device */
+#define BCM43242_D11N2G_ID	0x4368		/* 43242 802.11n 2.4G device */
+#define BCM43242_D11N5G_ID	0x4369		/* 43242 802.11n 5G device */
+
+#define BCM4350_D11AC_ID	0x43a3
+#define BCM4350_D11AC2G_ID	0x43a4
+#define BCM4350_D11AC5G_ID	0x43a5
+
+#define BCM43556_D11AC_ID	0x43b7
+#define BCM43556_D11AC2G_ID	0x43b8
+#define BCM43556_D11AC5G_ID	0x43b9
+
+#define BCM43558_D11AC_ID	0x43c0
+#define BCM43558_D11AC2G_ID	0x43c1
+#define BCM43558_D11AC5G_ID	0x43c2
+
+#define BCM43566_D11AC_ID	0x43d3
+#define BCM43566_D11AC2G_ID	0x43d4
+#define BCM43566_D11AC5G_ID	0x43d5
+
+#define BCM43568_D11AC_ID	0x43d6
+#define BCM43568_D11AC2G_ID	0x43d7
+#define BCM43568_D11AC5G_ID	0x43d8
+
+#define BCM43569_D11AC_ID	0x43d9
+#define BCM43569_D11AC2G_ID	0x43da
+#define BCM43569_D11AC5G_ID	0x43db
+
+#define BCM43570_D11AC_ID	0x43d9
+#define BCM43570_D11AC2G_ID	0x43da
+#define BCM43570_D11AC5G_ID	0x43db
+
+#define BCM4354_D11AC_ID	0x43df		/* 4354 802.11ac dualband device */
+#define BCM4354_D11AC2G_ID	0x43e0		/* 4354 802.11ac 2.4G device */
+#define BCM4354_D11AC5G_ID	0x43e1		/* 4354 802.11ac 5G device */
+#define BCM43430_D11N2G_ID	0x43e2		/* 43430 802.11n 2.4G device */
+
+
+#define BCM43349_D11N_ID	0x43e6		/* 43349 802.11n dualband id */
+#define BCM43349_D11N2G_ID	0x43e7		/* 43349 802.11n 2.4Ghz band id */
+#define BCM43349_D11N5G_ID	0x43e8		/* 43349 802.11n 5Ghz band id */
+
+#define BCM4358_D11AC_ID        0x43e9          /* 4358 802.11ac dualband device */
+#define BCM4358_D11AC2G_ID      0x43ea          /* 4358 802.11ac 2.4G device */
+#define BCM4358_D11AC5G_ID      0x43eb          /* 4358 802.11ac 5G device */
+
+#define BCM4356_D11AC_ID	0x43ec		/* 4356 802.11ac dualband device */
+#define BCM4356_D11AC2G_ID	0x43ed		/* 4356 802.11ac 2.4G device */
+#define BCM4356_D11AC5G_ID	0x43ee		/* 4356 802.11ac 5G device */
+
+#define	BCMGPRS_UART_ID		0x4333		/* Uart id used by 4306/gprs card */
+#define	BCMGPRS2_UART_ID	0x4344		/* Uart id used by 4306/gprs card */
+#define FPGA_JTAGM_ID		0x43f0		/* FPGA jtagm device id */
+#define BCM_JTAGM_ID		0x43f1		/* BCM jtagm device id */
+#define SDIOH_FPGA_ID		0x43f2		/* sdio host fpga */
+#define BCM_SDIOH_ID		0x43f3		/* BCM sdio host id */
+#define SDIOD_FPGA_ID		0x43f4		/* sdio device fpga */
+#define SPIH_FPGA_ID		0x43f5		/* PCI SPI Host Controller FPGA */
+#define BCM_SPIH_ID		0x43f6		/* Synopsys SPI Host Controller */
+#define MIMO_FPGA_ID		0x43f8		/* FPGA mimo minimacphy device id */
+#define BCM_JTAGM2_ID		0x43f9		/* BCM alternate jtagm device id */
+#define SDHCI_FPGA_ID		0x43fa		/* Standard SDIO Host Controller FPGA */
+#define	BCM4402_ENET_ID		0x4402		/* 4402 enet */
+#define	BCM4402_V90_ID		0x4403		/* 4402 v90 codec */
+#define	BCM4410_DEVICE_ID	0x4410		/* bcm44xx family pci iline */
+#define	BCM4412_DEVICE_ID	0x4412		/* bcm44xx family pci enet */
+#define	BCM4430_DEVICE_ID	0x4430		/* bcm44xx family cardbus iline */
+#define	BCM4432_DEVICE_ID	0x4432		/* bcm44xx family cardbus enet */
+#define	BCM4704_ENET_ID		0x4706		/* 4704 enet (Use 47XX_ENET_ID instead!) */
+#define	BCM4710_DEVICE_ID	0x4710		/* 4710 primary function 0 */
+#define	BCM47XX_AUDIO_ID	0x4711		/* 47xx audio codec */
+#define	BCM47XX_V90_ID		0x4712		/* 47xx v90 codec */
+#define	BCM47XX_ENET_ID		0x4713		/* 47xx enet */
+#define	BCM47XX_EXT_ID		0x4714		/* 47xx external i/f */
+#define	BCM47XX_GMAC_ID		0x4715		/* 47xx Unimac based GbE */
+#define	BCM47XX_USBH_ID		0x4716		/* 47xx usb host */
+#define	BCM47XX_USBD_ID		0x4717		/* 47xx usb device */
+#define	BCM47XX_IPSEC_ID	0x4718		/* 47xx ipsec */
+#define	BCM47XX_ROBO_ID		0x4719		/* 47xx/53xx roboswitch core */
+#define	BCM47XX_USB20H_ID	0x471a		/* 47xx usb 2.0 host */
+#define	BCM47XX_USB20D_ID	0x471b		/* 47xx usb 2.0 device */
+#define	BCM47XX_ATA100_ID	0x471d		/* 47xx parallel ATA */
+#define	BCM47XX_SATAXOR_ID	0x471e		/* 47xx serial ATA & XOR DMA */
+#define	BCM47XX_GIGETH_ID	0x471f		/* 47xx GbE (5700) */
+#define	BCM4712_MIPS_ID		0x4720		/* 4712 base devid */
+#define	BCM4716_DEVICE_ID	0x4722		/* 4716 base devid */
+#define	BCM47XX_USB30H_ID	0x472a		/* 47xx usb 3.0 host */
+#define	BCM47XX_USB30D_ID	0x472b		/* 47xx usb 3.0 device */
+#define BCM47XX_SMBUS_EMU_ID	0x47fe		/* 47xx emulated SMBus device */
+#define	BCM47XX_XOR_EMU_ID	0x47ff		/* 47xx emulated XOR engine */
+#define	EPI41210_DEVICE_ID	0xa0fa		/* bcm4210 */
+#define	EPI41230_DEVICE_ID	0xa10e		/* bcm4230 */
+#define JINVANI_SDIOH_ID	0x4743		/* Jinvani SDIO Gold Host */
+#define BCM27XX_SDIOH_ID	0x2702		/* BCM27xx Standard SDIO Host */
+#define PCIXX21_FLASHMEDIA_ID	0x803b		/* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH_ID	0x803c		/* TI PCI xx21 Standard Host Controller */
+#define R5C822_SDIOH_ID		0x0822		/* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */
+#define JMICRON_SDIOH_ID	0x2381		/* JMicron Standard SDIO Host Controller */
+
+/* Chip IDs */
+#define	BCM4306_CHIP_ID		0x4306		/* 4306 chipcommon chipid */
+#define	BCM4311_CHIP_ID		0x4311		/* 4311 PCIe 802.11a/b/g */
+#define	BCM43111_CHIP_ID	43111		/* 43111 chipcommon chipid (OTP chipid) */
+#define	BCM43112_CHIP_ID	43112		/* 43112 chipcommon chipid (OTP chipid) */
+#define	BCM4312_CHIP_ID		0x4312		/* 4312 chipcommon chipid */
+#define BCM4313_CHIP_ID		0x4313		/* 4313 chip id */
+#define	BCM43131_CHIP_ID	43131		/* 43131 chip id (OTP chipid) */
+#define	BCM4315_CHIP_ID		0x4315		/* 4315 chip id */
+#define	BCM4318_CHIP_ID		0x4318		/* 4318 chipcommon chipid */
+#define	BCM4319_CHIP_ID		0x4319		/* 4319 chip id */
+#define	BCM4320_CHIP_ID		0x4320		/* 4320 chipcommon chipid */
+#define	BCM4321_CHIP_ID		0x4321		/* 4321 chipcommon chipid */
+#define	BCM43217_CHIP_ID	43217		/* 43217 chip id (OTP chipid) */
+#define	BCM4322_CHIP_ID		0x4322		/* 4322 chipcommon chipid */
+#define	BCM43221_CHIP_ID	43221		/* 43221 chipcommon chipid (OTP chipid) */
+#define	BCM43222_CHIP_ID	43222		/* 43222 chipcommon chipid */
+#define	BCM43224_CHIP_ID	43224		/* 43224 chipcommon chipid */
+#define	BCM43225_CHIP_ID	43225		/* 43225 chipcommon chipid */
+#define	BCM43227_CHIP_ID	43227		/* 43227 chipcommon chipid */
+#define	BCM43228_CHIP_ID	43228		/* 43228 chipcommon chipid */
+#define	BCM43226_CHIP_ID	43226		/* 43226 chipcommon chipid */
+#define	BCM43231_CHIP_ID	43231		/* 43231 chipcommon chipid (OTP chipid) */
+#define	BCM43234_CHIP_ID	43234		/* 43234 chipcommon chipid */
+#define	BCM43235_CHIP_ID	43235		/* 43235 chipcommon chipid */
+#define	BCM43236_CHIP_ID	43236		/* 43236 chipcommon chipid */
+#define	BCM43237_CHIP_ID	43237		/* 43237 chipcommon chipid */
+#define	BCM43238_CHIP_ID	43238		/* 43238 chipcommon chipid */
+#define	BCM43239_CHIP_ID	43239		/* 43239 chipcommon chipid */
+#define	BCM43420_CHIP_ID	43420		/* 43222 chipcommon chipid (OTP, RBBU) */
+#define	BCM43421_CHIP_ID	43421		/* 43224 chipcommon chipid (OTP, RBBU) */
+#define	BCM43428_CHIP_ID	43428		/* 43228 chipcommon chipid (OTP, RBBU) */
+#define	BCM43431_CHIP_ID	43431		/* 4331  chipcommon chipid (OTP, RBBU) */
+#define	BCM43460_CHIP_ID	43460		/* 4360  chipcommon chipid (OTP, RBBU) */
+#define	BCM4325_CHIP_ID		0x4325		/* 4325 chip id */
+#define	BCM4328_CHIP_ID		0x4328		/* 4328 chip id */
+#define	BCM4329_CHIP_ID		0x4329		/* 4329 chipcommon chipid */
+#define	BCM4331_CHIP_ID		0x4331		/* 4331 chipcommon chipid */
+#define BCM4336_CHIP_ID		0x4336		/* 4336 chipcommon chipid */
+#define BCM43362_CHIP_ID	43362		/* 43362 chipcommon chipid */
+#define BCM4330_CHIP_ID		0x4330		/* 4330 chipcommon chipid */
+#define BCM6362_CHIP_ID		0x6362		/* 6362 chipcommon chipid */
+#define BCM4314_CHIP_ID		0x4314		/* 4314 chipcommon chipid */
+#define BCM43142_CHIP_ID	43142		/* 43142 chipcommon chipid */
+#define BCM43143_CHIP_ID	43143		/* 43143 chipcommon chipid */
+#define	BCM4324_CHIP_ID		0x4324		/* 4324 chipcommon chipid */
+#define	BCM43242_CHIP_ID	43242		/* 43242 chipcommon chipid */
+#define	BCM43243_CHIP_ID	43243		/* 43243 chipcommon chipid */
+#define BCM4334_CHIP_ID		0x4334		/* 4334 chipcommon chipid */
+#define BCM4335_CHIP_ID		0x4335		/* 4335 chipcommon chipid */
+#define BCM4339_CHIP_ID		0x4339		/* 4339 chipcommon chipid */
+#define BCM43349_CHIP_ID	43349			/* 43349(0xA955) chipcommon chipid */
+#define BCM4360_CHIP_ID		0x4360          /* 4360 chipcommon chipid */
+#define BCM4352_CHIP_ID		0x4352          /* 4352 chipcommon chipid */
+#define BCM43526_CHIP_ID	0xAA06
+#define BCM43340_CHIP_ID	43340		/* 43340 chipcommon chipid */
+#define BCM43341_CHIP_ID	43341		/* 43341 chipcommon chipid */
+#define BCM43342_CHIP_ID	43342		/* 43342 chipcommon chipid */
+#define BCM4350_CHIP_ID		0x4350          /* 4350 chipcommon chipid */
+#define BCM4354_CHIP_ID		0x4354          /* 4354 chipcommon chipid */
+#define BCM4356_CHIP_ID		0x4356          /* 4356 chipcommon chipid */
+#define BCM43556_CHIP_ID	0xAA24          /* 43556 chipcommon chipid */
+#define BCM43558_CHIP_ID	0xAA26          /* 43558 chipcommon chipid */
+#define BCM43566_CHIP_ID	0xAA2E          /* 43566 chipcommon chipid */
+#define BCM43567_CHIP_ID	0xAA2F          /* 43567 chipcommon chipid */
+#define BCM43568_CHIP_ID	0xAA30          /* 43568 chipcommon chipid */
+#define BCM43569_CHIP_ID	0xAA31          /* 43569 chipcommon chipid */
+#define BCM43570_CHIP_ID	0xAA32          /* 43570 chipcommon chipid */
+#define BCM4358_CHIP_ID         0x4358          /* 4358 chipcommon chipid */
+#define BCM4350_CHIP(chipid)	((CHIPID(chipid) == BCM4350_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4354_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4356_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43556_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43558_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43566_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43567_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43568_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43569_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43570_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */
+#define BCM4345_CHIP_ID		0x4345		/* 4345 chipcommon chipid */
+#define BCM43430_CHIP_ID	43430		/* 43430 chipcommon chipid */
+#define BCM4349_CHIP_ID		0x4349		/* 4349 chipcommon chipid */
+#define BCM4355_CHIP_ID		0x4355		/* 4355 chipcommon chipid */
+#define BCM4359_CHIP_ID		0x4359		/* 4359 chipcommon chipid */
+#define BCM4349_CHIP(chipid)	((CHIPID(chipid) == BCM4349_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4355_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4359_CHIP_ID))
+#define BCM4349_CHIP_GRPID		BCM4349_CHIP_ID: \
+					case BCM4355_CHIP_ID: \
+					case BCM4359_CHIP_ID
+
+#define BCM43602_CHIP_ID	0xaa52		/* 43602 chipcommon chipid */
+#define BCM43462_CHIP_ID	0xa9c6		/* 43462 chipcommon chipid */
+
+#define	BCM4342_CHIP_ID		4342		/* 4342 chipcommon chipid (OTP, RBBU) */
+#define	BCM4402_CHIP_ID		0x4402		/* 4402 chipid */
+#define	BCM4704_CHIP_ID		0x4704		/* 4704 chipcommon chipid */
+#define	BCM4706_CHIP_ID		0x5300		/* 4706 chipcommon chipid */
+#define BCM4707_CHIP_ID		53010		/* 4707 chipcommon chipid */
+#define BCM53018_CHIP_ID	53018		/* 53018 chipcommon chipid */
+#define BCM4707_CHIP(chipid)	(((chipid) == BCM4707_CHIP_ID) || ((chipid) == BCM53018_CHIP_ID))
+#define	BCM4710_CHIP_ID		0x4710		/* 4710 chipid */
+#define	BCM4712_CHIP_ID		0x4712		/* 4712 chipcommon chipid */
+#define	BCM4716_CHIP_ID		0x4716		/* 4716 chipcommon chipid */
+#define	BCM47162_CHIP_ID	47162		/* 47162 chipcommon chipid */
+#define	BCM4748_CHIP_ID		0x4748		/* 4716 chipcommon chipid (OTP, RBBU) */
+#define	BCM4749_CHIP_ID		0x4749		/* 5357 chipcommon chipid (OTP, RBBU) */
+#define BCM4785_CHIP_ID		0x4785		/* 4785 chipcommon chipid */
+#define	BCM5350_CHIP_ID		0x5350		/* 5350 chipcommon chipid */
+#define	BCM5352_CHIP_ID		0x5352		/* 5352 chipcommon chipid */
+#define	BCM5354_CHIP_ID		0x5354		/* 5354 chipcommon chipid */
+#define BCM5365_CHIP_ID		0x5365		/* 5365 chipcommon chipid */
+#define	BCM5356_CHIP_ID		0x5356		/* 5356 chipcommon chipid */
+#define	BCM5357_CHIP_ID		0x5357		/* 5357 chipcommon chipid */
+#define	BCM53572_CHIP_ID	53572		/* 53572 chipcommon chipid */
+
+/* Package IDs */
+#define	BCM4303_PKG_ID		2		/* 4303 package id */
+#define	BCM4309_PKG_ID		1		/* 4309 package id */
+#define	BCM4712LARGE_PKG_ID	0		/* 340pin 4712 package id */
+#define	BCM4712SMALL_PKG_ID	1		/* 200pin 4712 package id */
+#define	BCM4712MID_PKG_ID	2		/* 225pin 4712 package id */
+#define BCM4328USBD11G_PKG_ID	2		/* 4328 802.11g USB package id */
+#define BCM4328USBDUAL_PKG_ID	3		/* 4328 802.11a/g USB package id */
+#define BCM4328SDIOD11G_PKG_ID	4		/* 4328 802.11g SDIO package id */
+#define BCM4328SDIODUAL_PKG_ID	5		/* 4328 802.11a/g SDIO package id */
+#define BCM4329_289PIN_PKG_ID	0		/* 4329 289-pin package id */
+#define BCM4329_182PIN_PKG_ID	1		/* 4329N 182-pin package id */
+#define BCM5354E_PKG_ID		1		/* 5354E package id */
+#define	BCM4716_PKG_ID		8		/* 4716 package id */
+#define	BCM4717_PKG_ID		9		/* 4717 package id */
+#define	BCM4718_PKG_ID		10		/* 4718 package id */
+#define BCM5356_PKG_NONMODE	1		/* 5356 package without nmode support */
+#define BCM5358U_PKG_ID		8		/* 5358U package id */
+#define BCM5358_PKG_ID		9		/* 5358 package id */
+#define BCM47186_PKG_ID		10		/* 47186 package id */
+#define BCM5357_PKG_ID		11		/* 5357 package id */
+#define BCM5356U_PKG_ID		12		/* 5356U package id */
+#define BCM53572_PKG_ID		8		/* 53572 package id */
+#define BCM5357C0_PKG_ID	8		/* 5357c0 package id (the same as 53572) */
+#define BCM47188_PKG_ID		9		/* 47188 package id */
+#define BCM5358C0_PKG_ID	0xa		/* 5358c0 package id */
+#define BCM5356C0_PKG_ID	0xb		/* 5356c0 package id */
+#define BCM4331TT_PKG_ID        8		/* 4331 12x12 package id */
+#define BCM4331TN_PKG_ID        9		/* 4331 12x9 package id */
+#define BCM4331TNA0_PKG_ID     0xb		/* 4331 12x9 package id */
+#define	BCM4706L_PKG_ID		1		/* 4706L package id */
+
+#define HDLSIM5350_PKG_ID	1		/* HDL simulator package id for a 5350 */
+#define HDLSIM_PKG_ID		14		/* HDL simulator package id */
+#define HWSIM_PKG_ID		15		/* Hardware simulator package id */
+#define BCM43224_FAB_CSM	0x8		/* the chip is manufactured by CSM */
+#define BCM43224_FAB_SMIC	0xa		/* the chip is manufactured by SMIC */
+#define BCM4336_WLBGA_PKG_ID	0x8
+#define BCM4330_WLBGA_PKG_ID	0x0
+#define BCM4314PCIE_ARM_PKG_ID		(8 | 0)	/* 4314 QFN PCI package id, bit 3 tie high */
+#define BCM4314SDIO_PKG_ID		(8 | 1)	/* 4314 QFN SDIO package id */
+#define BCM4314PCIE_PKG_ID		(8 | 2)	/* 4314 QFN PCI (ARM-less) package id */
+#define BCM4314SDIO_ARM_PKG_ID		(8 | 3)	/* 4314 QFN SDIO (ARM-less) package id */
+#define BCM4314SDIO_FPBGA_PKG_ID	(8 | 4)	/* 4314 FpBGA SDIO package id */
+#define BCM4314DEV_PKG_ID		(8 | 6)	/* 4314 Development package id */
+
+#define BCM4707_PKG_ID		1		/* 4707 package id */
+#define BCM4708_PKG_ID		2		/* 4708 package id */
+#define BCM4709_PKG_ID		0		/* 4709 package id */
+
+#define PCIXX21_FLASHMEDIA0_ID	0x8033		/* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH0_ID	0x8034		/* TI PCI xx21 Standard Host Controller */
+
+#define BCM4335_WLCSP_PKG_ID	(0x0)	/* WLCSP Module/Mobile SDIO/HSIC. */
+#define BCM4335_FCBGA_PKG_ID	(0x1)	/* FCBGA PC/Embedded/Media PCIE/SDIO */
+#define BCM4335_WLBGA_PKG_ID	(0x2)	/* WLBGA COB/Mobile SDIO/HSIC. */
+#define BCM4335_FCBGAD_PKG_ID	(0x3)	/* FCBGA Debug/Dev, all i/f's. */
+#define BCM4335_PKG_MASK	(0x3)
+
+/* boardflags */
+#define	BFL_BTC2WIRE		0x00000001  /* old 2wire Bluetooth coexistence, OBSOLETE */
+#define BFL_BTCOEX      0x00000001      /* Board supports BTCOEX */
+#define	BFL_PACTRL		0x00000002  /* Board has gpio 9 controlling the PA */
+#define BFL_AIRLINEMODE	0x00000004  /* Board implements gpio 13 radio disable indication, UNUSED */
+#define	BFL_ADCDIV		0x00000008  /* Board has the rssi ADC divider */
+#define BFL_DIS_256QAM		0x00000008
+#define	BFL_ENETROBO		0x00000010  /* Board has robo switch or core */
+#define	BFL_TSSIAVG   		0x00000010  /* TSSI averaging for ACPHY chips */
+#define	BFL_NOPLLDOWN		0x00000020  /* Not ok to power down the chip pll and oscillator */
+#define	BFL_CCKHIPWR		0x00000040  /* Can do high-power CCK transmission */
+#define	BFL_ENETADM		0x00000080  /* Board has ADMtek switch */
+#define	BFL_ENETVLAN		0x00000100  /* Board has VLAN capability */
+#define	BFL_LTECOEX		0x00000200  /* LTE Coex enabled */
+#define BFL_NOPCI		0x00000400  /* Board leaves PCI floating */
+#define BFL_FEM			0x00000800  /* Board supports the Front End Module */
+#define BFL_EXTLNA		0x00001000  /* Board has an external LNA in 2.4GHz band */
+#define BFL_HGPA		0x00002000  /* Board has a high gain PA */
+#define	BFL_BTC2WIRE_ALTGPIO	0x00004000  /* Board's BTC 2wire is in the alternate gpios */
+#define	BFL_ALTIQ		0x00008000  /* Alternate I/Q settings */
+#define BFL_NOPA		0x00010000  /* Board has no PA */
+#define BFL_RSSIINV		0x00020000  /* Board's RSSI uses positive slope (not TSSI) */
+#define BFL_PAREF		0x00040000  /* Board uses the PARef LDO */
+#define BFL_3TSWITCH		0x00080000  /* Board uses a triple throw switch shared with BT */
+#define BFL_PHASESHIFT		0x00100000  /* Board can support phase shifter */
+#define BFL_BUCKBOOST		0x00200000  /* Power topology uses BUCKBOOST */
+#define BFL_FEM_BT		0x00400000  /* Board has FEM and switch to share antenna w/ BT */
+#define BFL_NOCBUCK		0x00800000  /* Power topology doesn't use CBUCK */
+#define BFL_CCKFAVOREVM		0x01000000  /* Favor CCK EVM over spectral mask */
+#define BFL_PALDO		0x02000000  /* Power topology uses PALDO */
+#define BFL_LNLDO2_2P5		0x04000000  /* Select 2.5V as LNLDO2 output voltage */
+#define BFL_FASTPWR		0x08000000
+#define BFL_UCPWRCTL_MININDX	0x08000000  /* Enforce min power index to avoid FEM damage */
+#define BFL_EXTLNA_5GHz		0x10000000  /* Board has an external LNA in 5GHz band */
+#define BFL_TRSW_1by2		0x20000000  /* Board has 2 TRSW's in 1by2 designs */
+#define BFL_GAINBOOSTA01        0x20000000  /* 5g Gainboost for core0 and core1 */
+#define BFL_LO_TRSW_R_5GHz	0x40000000  /* In 5G do not throw TRSW to T for clipLO gain */
+#define BFL_ELNA_GAINDEF	0x80000000  /* Backoff InitGain based on elna_2g/5g field
+					     * when this flag is set
+					     */
+#define BFL_EXTLNA_TX	0x20000000	/* Temp boardflag to indicate to */
+
+/* boardflags2 */
+#define BFL2_RXBB_INT_REG_DIS	0x00000001  /* Board has an external rxbb regulator */
+#define BFL2_APLL_WAR		0x00000002  /* Flag to implement alternative A-band PLL settings */
+#define BFL2_TXPWRCTRL_EN	0x00000004  /* Board permits enabling TX Power Control */
+#define BFL2_2X4_DIV		0x00000008  /* Board supports the 2X4 diversity switch */
+#define BFL2_5G_PWRGAIN		0x00000010  /* Board supports 5G band power gain */
+#define BFL2_PCIEWAR_OVR	0x00000020  /* Board overrides ASPM and Clkreq settings */
+#define BFL2_CAESERS_BRD	0x00000040  /* Board is Caesers brd (unused by sw) */
+#define BFL2_BTC3WIRE		0x00000080  /* Board support legacy 3 wire or 4 wire */
+#define BFL2_BTCLEGACY          0x00000080  /* Board support legacy 3/4 wire, to replace
+					     * BFL2_BTC3WIRE
+					     */
+#define BFL2_SKWRKFEM_BRD	0x00000100  /* 4321mcm93 board uses Skyworks FEM */
+#define BFL2_SPUR_WAR		0x00000200  /* Board has a WAR for clock-harmonic spurs */
+#define BFL2_GPLL_WAR		0x00000400  /* Flag to narrow G-band PLL loop b/w */
+#define BFL2_TRISTATE_LED	0x00000800  /* Tri-state the LED */
+#define BFL2_SINGLEANT_CCK	0x00001000  /* Tx CCK pkts on Ant 0 only */
+#define BFL2_2G_SPUR_WAR	0x00002000  /* WAR to reduce and avoid clock-harmonic spurs in 2G */
+#define BFL2_BPHY_ALL_TXCORES	0x00004000  /* Transmit bphy frames using all tx cores */
+#define BFL2_FCC_BANDEDGE_WAR	0x00008000  /* Activates WAR to improve FCC bandedge performance */
+#define BFL2_DAC_SPUR_IMPROVEMENT 0x00008000       /* Reducing DAC Spurs */
+#define BFL2_GPLL_WAR2	        0x00010000  /* Flag to widen G-band PLL loop b/w */
+#define BFL2_REDUCED_PA_TURNONTIME 0x00010000  /* Flag to reduce PA turn on Time */
+#define BFL2_IPALVLSHIFT_3P3    0x00020000
+#define BFL2_INTERNDET_TXIQCAL  0x00040000  /* Use internal envelope detector for TX IQCAL */
+#define BFL2_XTALBUFOUTEN       0x00080000  /* Keep the buffered Xtal output from radio on */
+				/* Most drivers will turn it off without this flag */
+				/* to save power. */
+
+#define BFL2_ANAPACTRL_2G	0x00100000  /* 2G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ANAPACTRL_5G	0x00200000  /* 5G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ELNACTRL_TRSW_2G	0x00400000  /* AZW4329: 2G gmode_elna_gain controls TR Switch */
+#define BFL2_BT_SHARE_ANT0	0x00800000  /* share core0 antenna with BT */
+#define BFL2_TEMPSENSE_HIGHER	0x01000000  /* The tempsense threshold can sustain a higher value
+					     * than programmed. The exact delta is decided by the
+					     * driver per chip/boardtype. This can be used
+					     * when tempsense qualification happens after shipment.
+					     */
+#define BFL2_BTC3WIREONLY       0x02000000  /* standard 3 wire btc only.  4 wire not supported */
+#define BFL2_PWR_NOMINAL	0x04000000  /* 0: power reduction on, 1: no power reduction */
+#define BFL2_EXTLNA_PWRSAVE	0x08000000  /* boardflag to enable ucode to apply power save */
+						/* ucode control of eLNA during Tx */
+#define BFL2_4313_RADIOREG	0x10000000  /* board rework */
+#define BFL2_DYNAMIC_VMID	0x10000000  /* enable dynamic Vmid in idle TSSI CAL for 4331 */
+
+#define BFL2_SDR_EN		0x20000000  /* SDR enabled or disabled */
+#define BFL2_DYNAMIC_VMID	0x10000000  /* boardflag to enable dynamic Vmid idle TSSI CAL */
+#define BFL2_LNA1BYPFORTR2G	0x40000000  /* acphy, enable lna1 bypass for clip gain, 2g */
+#define BFL2_LNA1BYPFORTR5G	0x80000000  /* acphy, enable lna1 bypass for clip gain, 5g */
+
+/* SROM 11 - 11ac boardflag definitions */
+#define BFL_SROM11_BTCOEX  0x00000001  /* Board supports BTCOEX */
+#define BFL_SROM11_WLAN_BT_SH_XTL  0x00000002  /* bluetooth and wlan share same crystal */
+#define BFL_SROM11_EXTLNA	0x00001000  /* Board has an external LNA in 2.4GHz band */
+#define BFL_SROM11_EPA_TURNON_TIME     0x00018000  /* 2 bits for different PA turn on times */
+#define BFL_SROM11_EPA_TURNON_TIME_SHIFT  15
+#define BFL_SROM11_EXTLNA_5GHz	0x10000000  /* Board has an external LNA in 5GHz band */
+#define BFL_SROM11_GAINBOOSTA01	0x20000000  /* 5g Gainboost for core0 and core1 */
+#define BFL2_SROM11_APLL_WAR	0x00000002  /* Flag to implement alternative A-band PLL settings */
+#define BFL2_SROM11_ANAPACTRL_2G  0x00100000  /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_ANAPACTRL_5G  0x00200000  /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_SINGLEANT_CCK	0x00001000  /* Tx CCK pkts on Ant 0 only */
+
+/* boardflags3 */
+#define BFL3_FEMCTRL_SUB	  0x00000007  /* acphy, subrevs of femctrl on top of srom_femctrl */
+#define BFL3_RCAL_WAR		  0x00000008  /* acphy, rcal war active on this board (4335a0) */
+#define BFL3_TXGAINTBLID	  0x00000070  /* acphy, txgain table id */
+#define BFL3_TXGAINTBLID_SHIFT	  0x4         /* acphy, txgain table id shift bit */
+#define BFL3_TSSI_DIV_WAR	  0x00000080  /* acphy, Separate paparam for 20/40/80 */
+#define BFL3_TSSI_DIV_WAR_SHIFT	  0x7         /* acphy, Separate paparam for 20/40/80 shift bit */
+#define BFL3_FEMTBL_FROM_NVRAM    0x00000100  /* acphy, femctrl table is read from nvram */
+#define BFL3_FEMTBL_FROM_NVRAM_SHIFT 0x8         /* acphy, femctrl table is read from nvram */
+#define BFL3_AGC_CFG_2G           0x00000200  /* acphy, gain control configuration for 2G */
+#define BFL3_AGC_CFG_5G           0x00000400  /* acphy, gain control configuration for 5G */
+#define BFL3_PPR_BIT_EXT          0x00000800  /* acphy, bit position for 1bit extension for ppr */
+#define BFL3_PPR_BIT_EXT_SHIFT    11          /* acphy, bit shift for 1bit extension for ppr */
+#define BFL3_BBPLL_SPR_MODE_DIS	  0x00001000  /* acphy, disables bbpll spur modes */
+#define BFL3_RCAL_OTP_VAL_EN      0x00002000  /* acphy, to read rcal_trim value from otp */
+#define BFL3_2GTXGAINTBL_BLANK	  0x00004000  /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_2GTXGAINTBL_BLANK_SHIFT 14       /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK	  0x00008000  /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK_SHIFT 15       /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_PHASETRACK_MAX_ALPHABETA	  0x00010000  /* acphy, to max out alpha,beta to 511 */
+#define BFL3_PHASETRACK_MAX_ALPHABETA_SHIFT 16       /* acphy, to max out alpha,beta to 511 */
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN           0x00060000
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN_SHIFT 17
+#define BFL3_5G_SPUR_WAR          0x00080000  /* acphy, enable spur WAR in 5G band */
+#define BFL3_1X1_RSDB_ANT	  0x01000000  /* to find if 2-ant RSDB board or 1-ant RSDB board */
+#define BFL3_1X1_RSDB_ANT_SHIFT           24
+
+/* acphy: lpmode2g and lpmode_5g related boardflags */
+#define BFL3_ACPHY_LPMODE_2G	  0x00300000  /* bits 20:21 for lpmode_2g choice */
+#define BFL3_ACPHY_LPMODE_2G_SHIFT	  20
+
+#define BFL3_ACPHY_LPMODE_5G	  0x00C00000  /* bits 22:23 for lpmode_5g choice */
+#define BFL3_ACPHY_LPMODE_5G_SHIFT	  22
+
+#define BFL3_EXT_LPO_ISCLOCK      0x02000000  /* External LPO is clock, not x-tal */
+#define BFL3_FORCE_INT_LPO_SEL    0x04000000  /* Force internal lpo */
+#define BFL3_FORCE_EXT_LPO_SEL    0x08000000  /* Force external lpo */
+
+#define BFL3_EN_BRCM_IMPBF        0x10000000  /* acphy, Allow BRCM Implicit TxBF */
+#define BFL3_AVVMID_FROM_NVRAM    0x40000000  /* Read Av Vmid from NVRAM  */
+#define BFL3_VLIN_EN_FROM_NVRAM    0x80000000  /* Read Vlin En from NVRAM  */
+
+#define BFL3_AVVMID_FROM_NVRAM_SHIFT   30   /* Read Av Vmid from NVRAM  */
+#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT   31   /* Enable Vlin  from NVRAM  */
+
+
+/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */
+#define	BOARD_GPIO_BTC3W_IN	0x850	/* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */
+#define	BOARD_GPIO_BTC3W_OUT	0x020	/* bit 5 is TX_CONF */
+#define	BOARD_GPIO_BTCMOD_IN	0x010	/* bit 4 is the alternate BT Coexistence Input */
+#define	BOARD_GPIO_BTCMOD_OUT	0x020	/* bit 5 is the alternate BT Coexistence Out */
+#define	BOARD_GPIO_BTC_IN	0x080	/* bit 7 is BT Coexistence Input */
+#define	BOARD_GPIO_BTC_OUT	0x100	/* bit 8 is BT Coexistence Out */
+#define	BOARD_GPIO_PACTRL	0x200	/* bit 9 controls the PA on new 4306 boards */
+#define BOARD_GPIO_12		0x1000	/* gpio 12 */
+#define BOARD_GPIO_13		0x2000	/* gpio 13 */
+#define BOARD_GPIO_BTC4_IN	0x0800	/* gpio 11, coex4, in */
+#define BOARD_GPIO_BTC4_BT	0x2000	/* gpio 12, coex4, bt active */
+#define BOARD_GPIO_BTC4_STAT	0x4000	/* gpio 14, coex4, status */
+#define BOARD_GPIO_BTC4_WLAN	0x8000	/* gpio 15, coex4, wlan active */
+#define	BOARD_GPIO_1_WLAN_PWR	0x02	/* throttle WLAN power on X21 board */
+#define	BOARD_GPIO_2_WLAN_PWR	0x04	/* throttle WLAN power on X29C board */
+#define	BOARD_GPIO_3_WLAN_PWR	0x08	/* throttle WLAN power on X28 board */
+#define	BOARD_GPIO_4_WLAN_PWR	0x10	/* throttle WLAN power on X19 board */
+
+#define GPIO_BTC4W_OUT_4312  0x010  /* bit 4 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224  0x020  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224_SHARED  0x0e0  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43225  0x0e0  /* bit 5 BT_IODISABLE, bit 6 SW_BT, bit 7 SW_WL */
+#define GPIO_BTC4W_OUT_43421  0x020  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_4313  0x060  /* bit 5 SW_BT, bit 6 SW_WL */
+#define GPIO_BTC4W_OUT_4331_SHARED  0x010  /* GPIO 4  */
+
+#define	PCI_CFG_GPIO_SCS	0x10	/* PCI config space bit 4 for 4306c0 slow clock source */
+#define PCI_CFG_GPIO_HWRAD	0x20	/* PCI config space GPIO 13 for hw radio disable */
+#define PCI_CFG_GPIO_XTAL	0x40	/* PCI config space GPIO 14 for Xtal power-up */
+#define PCI_CFG_GPIO_PLL	0x80	/* PCI config space GPIO 15 for PLL power-down */
+
+/* power control defines */
+#define PLL_DELAY		150		/* us pll on delay */
+#define FREF_DELAY		200		/* us fref change delay */
+#define MIN_SLOW_CLK		32		/* us Slow clock period */
+#define	XTAL_ON_DELAY		1000		/* us crystal power-on delay */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* Reference Board Types */
+#define	BU4710_BOARD		0x0400
+#define	VSIM4710_BOARD		0x0401
+#define	QT4710_BOARD		0x0402
+
+#define	BU4309_BOARD		0x040a
+#define	BCM94309CB_BOARD	0x040b
+#define	BCM94309MP_BOARD	0x040c
+#define	BCM4309AP_BOARD		0x040d
+
+#define	BCM94302MP_BOARD	0x040e
+
+#define	BU4306_BOARD		0x0416
+#define	BCM94306CB_BOARD	0x0417
+#define	BCM94306MP_BOARD	0x0418
+
+#define	BCM94710D_BOARD		0x041a
+#define	BCM94710R1_BOARD	0x041b
+#define	BCM94710R4_BOARD	0x041c
+#define	BCM94710AP_BOARD	0x041d
+
+#define	BU2050_BOARD		0x041f
+
+#define	BCM94306P50_BOARD	0x0420
+
+#define	BCM94309G_BOARD		0x0421
+
+#define	BU4704_BOARD		0x0423
+#define	BU4702_BOARD		0x0424
+
+#define	BCM94306PC_BOARD	0x0425		/* pcmcia 3.3v 4306 card */
+
+#define	MPSG4306_BOARD		0x0427
+
+#define	BCM94702MN_BOARD	0x0428
+
+/* BCM4702 1U CompactPCI Board */
+#define	BCM94702CPCI_BOARD	0x0429
+
+/* BCM4702 with BCM95380 VLAN Router */
+#define	BCM95380RR_BOARD	0x042a
+
+/* cb4306 with SiGe PA */
+#define	BCM94306CBSG_BOARD	0x042b
+
+/* cb4306 with SiGe PA */
+#define	PCSG94306_BOARD		0x042d
+
+/* bu4704 with sdram */
+#define	BU4704SD_BOARD		0x042e
+
+/* Dual 11a/11g Router */
+#define	BCM94704AGR_BOARD	0x042f
+
+/* 11a-only minipci */
+#define	BCM94308MP_BOARD	0x0430
+
+/* 4306/gprs combo */
+#define	BCM94306GPRS_BOARD	0x0432
+
+/* BCM5365/BCM4704 FPGA Bringup Board */
+#define BU5365_FPGA_BOARD	0x0433
+
+#define BU4712_BOARD		0x0444
+#define	BU4712SD_BOARD		0x045d
+#define	BU4712L_BOARD		0x045f
+
+/* BCM4712 boards */
+#define BCM94712AP_BOARD	0x0445
+#define BCM94712P_BOARD		0x0446
+
+/* BCM4318 boards */
+#define BU4318_BOARD		0x0447
+#define CB4318_BOARD		0x0448
+#define MPG4318_BOARD		0x0449
+#define MP4318_BOARD		0x044a
+#define SD4318_BOARD		0x044b
+
+/* BCM4313 boards */
+#define BCM94313BU_BOARD	0x050f
+#define BCM94313HM_BOARD	0x0510
+#define BCM94313EPA_BOARD	0x0511
+#define BCM94313HMG_BOARD       0x051C
+
+/* BCM63XX boards */
+#define BCM96338_BOARD		0x6338
+#define BCM96348_BOARD		0x6348
+#define BCM96358_BOARD		0x6358
+#define BCM96368_BOARD		0x6368
+
+/* Another mp4306 with SiGe */
+#define	BCM94306P_BOARD		0x044c
+
+/* mp4303 */
+#define	BCM94303MP_BOARD	0x044e
+
+/* mpsgh4306 */
+#define	BCM94306MPSGH_BOARD	0x044f
+
+/* BRCM 4306 w/ Front End Modules */
+#define BCM94306MPM		0x0450
+#define BCM94306MPL		0x0453
+
+/* 4712agr */
+#define	BCM94712AGR_BOARD	0x0451
+
+/* pcmcia 4303 */
+#define	PC4303_BOARD		0x0454
+
+/* 5350K */
+#define	BCM95350K_BOARD		0x0455
+
+/* 5350R */
+#define	BCM95350R_BOARD		0x0456
+
+/* 4306mplna */
+#define	BCM94306MPLNA_BOARD	0x0457
+
+/* 4320 boards */
+#define	BU4320_BOARD		0x0458
+#define	BU4320S_BOARD		0x0459
+#define	BCM94320PH_BOARD	0x045a
+
+/* 4306mph */
+#define	BCM94306MPH_BOARD	0x045b
+
+/* 4306pciv */
+#define	BCM94306PCIV_BOARD	0x045c
+
+#define	BU4712SD_BOARD		0x045d
+
+#define	BCM94320PFLSH_BOARD	0x045e
+
+#define	BU4712L_BOARD		0x045f
+#define	BCM94712LGR_BOARD	0x0460
+#define	BCM94320R_BOARD		0x0461
+
+#define	BU5352_BOARD		0x0462
+
+#define	BCM94318MPGH_BOARD	0x0463
+
+#define	BU4311_BOARD		0x0464
+#define	BCM94311MC_BOARD	0x0465
+#define	BCM94311MCAG_BOARD	0x0466
+
+#define	BCM95352GR_BOARD	0x0467
+
+/* bcm95351agr */
+#define	BCM95351AGR_BOARD	0x0470
+
+/* bcm94704mpcb */
+#define	BCM94704MPCB_BOARD	0x0472
+
+/* 4785 boards */
+#define BU4785_BOARD		0x0478
+
+/* 4321 boards */
+#define BU4321_BOARD		0x046b
+#define BU4321E_BOARD		0x047c
+#define MP4321_BOARD		0x046c
+#define CB2_4321_BOARD		0x046d
+#define CB2_4321_AG_BOARD	0x0066
+#define MC4321_BOARD		0x046e
+
+/* 4328 boards */
+#define BU4328_BOARD		0x0481
+#define BCM4328SDG_BOARD	0x0482
+#define BCM4328SDAG_BOARD	0x0483
+#define BCM4328UG_BOARD		0x0484
+#define BCM4328UAG_BOARD	0x0485
+#define BCM4328PC_BOARD		0x0486
+#define BCM4328CF_BOARD		0x0487
+
+/* 4325 boards */
+#define BCM94325DEVBU_BOARD	0x0490
+#define BCM94325BGABU_BOARD	0x0491
+
+#define BCM94325SDGWB_BOARD	0x0492
+
+#define BCM94325SDGMDL_BOARD	0x04aa
+#define BCM94325SDGMDL2_BOARD	0x04c6
+#define BCM94325SDGMDL3_BOARD	0x04c9
+
+#define BCM94325SDABGWBA_BOARD	0x04e1
+
+/* 4322 boards */
+#define BCM94322MC_SSID		0x04a4
+#define BCM94322USB_SSID	0x04a8	/* dualband */
+#define BCM94322HM_SSID		0x04b0
+#define BCM94322USB2D_SSID	0x04bf	/* single band discrete front end */
+
+/* 4312 boards */
+#define	BCM4312MCGSG_BOARD	0x04b5
+
+/* 4315 boards */
+#define BCM94315DEVBU_SSID	0x04c2
+#define BCM94315USBGP_SSID	0x04c7
+#define BCM94315BGABU_SSID	0x04ca
+#define BCM94315USBGP41_SSID	0x04cb
+
+/* 4319 boards */
+#define BCM94319DEVBU_SSID	0X04e5
+#define BCM94319USB_SSID	0X04e6
+#define BCM94319SD_SSID		0X04e7
+
+/* 4716 boards */
+#define BCM94716NR2_SSID	0x04cd
+
+/* 4319 boards */
+#define BCM94319DEVBU_SSID	0X04e5
+#define BCM94319USBNP4L_SSID	0X04e6
+#define BCM94319WLUSBN4L_SSID	0X04e7
+#define BCM94319SDG_SSID	0X04ea
+#define BCM94319LCUSBSDN4L_SSID	0X04eb
+#define BCM94319USBB_SSID       0x04ee
+#define BCM94319LCSDN4L_SSID	0X0507
+#define BCM94319LSUSBN4L_SSID	0X0508
+#define BCM94319SDNA4L_SSID	0X0517
+#define BCM94319SDELNA4L_SSID	0X0518
+#define BCM94319SDELNA6L_SSID	0X0539
+#define BCM94319ARCADYAN_SSID	0X0546
+#define BCM94319WINDSOR_SSID    0x0561
+#define BCM94319MLAP_SSID       0x0562
+#define BCM94319SDNA_SSID       0x058b
+#define BCM94319BHEMU3_SSID     0x0563
+#define BCM94319SDHMB_SSID     0x058c
+#define BCM94319SDBREF_SSID     0x05a1
+#define BCM94319USBSDB_SSID     0x05a2
+
+
+/* 4329 boards */
+#define BCM94329AGB_SSID	0X04b9
+#define BCM94329TDKMDL1_SSID	0X04ba
+#define BCM94329TDKMDL11_SSID	0X04fc
+#define BCM94329OLYMPICN18_SSID	0X04fd
+#define BCM94329OLYMPICN90_SSID	0X04fe
+#define BCM94329OLYMPICN90U_SSID 0X050c
+#define BCM94329OLYMPICN90M_SSID 0X050b
+#define BCM94329AGBF_SSID	0X04ff
+#define BCM94329OLYMPICX17_SSID	0X0504
+#define BCM94329OLYMPICX17M_SSID	0X050a
+#define BCM94329OLYMPICX17U_SSID	0X0509
+#define BCM94329OLYMPICUNO_SSID	0X0564
+#define BCM94329MOTOROLA_SSID   0X0565
+#define BCM94329OLYMPICLOCO_SSID	0X0568
+/* 4336 SDIO board types */
+#define BCM94336SD_WLBGABU_SSID		0x0511
+#define BCM94336SD_WLBGAREF_SSID	0x0519
+#define BCM94336SDGP_SSID	0x0538
+#define BCM94336SDG_SSID	0x0519
+#define BCM94336SDGN_SSID	0x0538
+#define BCM94336SDGFC_SSID	0x056B
+
+/* 4330 SDIO board types */
+#define BCM94330SDG_SSID	0x0528
+#define BCM94330SD_FCBGABU_SSID	0x052e
+#define BCM94330SD_WLBGABU_SSID	0x052f
+#define BCM94330SD_FCBGA_SSID	0x0530
+#define BCM94330FCSDAGB_SSID		0x0532
+#define BCM94330OLYMPICAMG_SSID		0x0549
+#define BCM94330OLYMPICAMGEPA_SSID		0x054F
+#define BCM94330OLYMPICUNO3_SSID	0x0551
+#define BCM94330WLSDAGB_SSID	0x0547
+#define BCM94330CSPSDAGBB_SSID	0x054A
+
+/* 43224 boards */
+#define BCM943224X21        0x056e
+#define BCM943224X21_FCC    0x00d1
+#define BCM943224X21B	    0x00e9
+#define BCM943224M93	    0x008b
+#define BCM943224M93A	    0x0090
+#define BCM943224X16	    0x0093
+#define BCM94322X9		    0x008d
+#define BCM94322M35e	    0x008e
+
+/* 43228 Boards */
+#define BCM943228BU8_SSID	0x0540
+#define BCM943228BU9_SSID	0x0541
+#define BCM943228BU_SSID	0x0542
+#define BCM943227HM4L_SSID	0x0543
+#define BCM943227HMB_SSID	0x0544
+#define BCM943228HM4L_SSID	0x0545
+#define BCM943228SD_SSID	0x0573
+
+/* 43239 Boards */
+#define BCM943239MOD_SSID	0x05ac
+#define BCM943239REF_SSID	0x05aa
+
+/* 4331 boards */
+#define BCM94331X19               0x00D6	/* X19B */
+#define BCM94331X28               0x00E4	/* X28 */
+#define BCM94331X28B              0x010E	/* X28B */
+#define BCM94331PCIEBT3Ax_SSID    BCM94331X28
+#define BCM94331X12_2G_SSID       0x00EC	/* X12 2G */
+#define BCM94331X12_5G_SSID       0x00ED	/* X12 5G */
+#define BCM94331X29B              0x00EF	/* X29B */
+#define BCM94331X29D              0x010F	/* X29D */
+#define BCM94331CSAX_SSID         BCM94331X29B
+#define BCM94331X19C              0x00F5	/* X19C */
+#define BCM94331X33	          0x00F4	/* X33 */
+#define BCM94331BU_SSID           0x0523
+#define BCM94331S9BU_SSID         0x0524
+#define BCM94331MC_SSID           0x0525
+#define BCM94331MCI_SSID          0x0526
+#define BCM94331PCIEBT4_SSID      0x0527
+#define BCM94331HM_SSID           0x0574
+#define BCM94331PCIEDUAL_SSID     0x059B
+#define BCM94331MCH5_SSID         0x05A9
+#define BCM94331CS_SSID           0x05C6
+#define BCM94331CD_SSID           0x05DA
+
+/* 4314 Boards */
+#define BCM94314BU_SSID         0x05b1
+
+/* 53572 Boards */
+#define BCM953572BU_SSID       0x058D
+#define BCM953572NR2_SSID      0x058E
+#define BCM947188NR2_SSID      0x058F
+#define BCM953572SDRNR2_SSID   0x0590
+
+/* 43236 boards */
+#define BCM943236OLYMPICSULLEY_SSID 0x594
+#define BCM943236PREPROTOBLU2O3_SSID 0x5b9
+#define BCM943236USBELNA_SSID 0x5f8
+
+/* 4314 Boards */
+#define BCM94314BUSDIO_SSID	0x05c8
+#define BCM94314BGABU_SSID	0x05c9
+#define BCM94314HMEPA_SSID	0x05ca
+#define BCM94314HMEPABK_SSID	0x05cb
+#define BCM94314SUHMEPA_SSID	0x05cc
+#define BCM94314SUHM_SSID	0x05cd
+#define BCM94314HM_SSID		0x05d1
+
+/* 4334 Boards */
+#define BCM94334FCAGBI_SSID	0x05df
+#define BCM94334WLAGBI_SSID	0x05dd
+
+/* 4335 Boards */
+#define BCM94335X52             0x0114
+
+/* 4345 Boards */
+#define BCM94345_SSID           0x0687
+
+/* 4360 Boards */
+#define BCM94360X52C            0X0117
+#define BCM94360X52D            0X0137
+#define BCM94360X29C            0X0112
+#define BCM94360X29CP2          0X0134
+#define BCM94360X29CP3          0X013B
+#define BCM94360X51             0x0111
+#define BCM94360X51P2           0x0129
+#define BCM94360X51P3           0x0142
+#define BCM94360X51A            0x0135
+#define BCM94360X51B            0x0136
+#define BCM94360CS              0x061B
+#define BCM94360J28_D11AC2G     0x0c00
+#define BCM94360J28_D11AC5G     0x0c01
+#define BCM94360USBH5_D11AC5G   0x06aa
+#define BCM94360MCM5            0x06d8
+
+/* 4350 Boards */
+#define BCM94350X52B            0X0116
+#define BCM94350X14             0X0131
+
+/* 43217 Boards */
+#define BCM943217BU_SSID	0x05d5
+#define BCM943217HM2L_SSID	0x05d6
+#define BCM943217HMITR2L_SSID	0x05d7
+
+/* 43142 Boards */
+#define BCM943142HM_SSID	0x05e0
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* 43341 Boards */
+#define BCM943341WLABGS_SSID	0x062d
+
+/* 43342 Boards */
+#define BCM943342FCAGBI_SSID	0x0641
+
+/* 43602 Boards, unclear yet what boards will be created. */
+#define BCM943602RSVD1_SSID	0x06a5
+#define BCM943602RSVD2_SSID	0x06a6
+#define BCM943602X87            0X0133
+#define BCM943602X238           0X0132
+
+/* # of GPIO pins */
+#define GPIO_NUMPINS		32
+
+/* These values are used by the dhd host driver. */
+#define RDL_RAM_BASE_4319 0x60000000
+#define RDL_RAM_BASE_4329 0x60000000
+#define RDL_RAM_SIZE_4319 0x48000
+#define RDL_RAM_SIZE_4329  0x48000
+#define RDL_RAM_SIZE_43236 0x70000
+#define RDL_RAM_BASE_43236 0x60000000
+#define RDL_RAM_SIZE_4328 0x60000
+#define RDL_RAM_BASE_4328 0x80000000
+#define RDL_RAM_SIZE_4322 0x60000
+#define RDL_RAM_BASE_4322 0x60000000
+#define RDL_RAM_SIZE_4360  0xA0000
+#define RDL_RAM_BASE_4360  0x60000000
+#define RDL_RAM_SIZE_43242  0x90000
+#define RDL_RAM_BASE_43242  0x60000000
+#define RDL_RAM_SIZE_43143  0x70000
+#define RDL_RAM_BASE_43143  0x60000000
+#define RDL_RAM_SIZE_4350  0xC0000
+#define RDL_RAM_BASE_4350  0x180800
+
+/* Generic defs for nvram "muxenab" bits.
+ * Note: these differ for 4335a0; refer to bcmchipc.h for the specific mux options.
+ */
+#define MUXENAB_UART		0x00000001
+#define MUXENAB_GPIO		0x00000002
+#define MUXENAB_ERCX		0x00000004	/* External Radio BT coex */
+#define MUXENAB_JTAG		0x00000008
+#define MUXENAB_HOST_WAKE	0x00000010	/* configure GPIO for SDIO host_wake */
+#define MUXENAB_I2S_EN		0x00000020
+#define MUXENAB_I2S_MASTER	0x00000040
+#define MUXENAB_I2S_FULL	0x00000080
+#define MUXENAB_SFLASH		0x00000100
+#define MUXENAB_RFSWCTRL0	0x00000200
+#define MUXENAB_RFSWCTRL1	0x00000400
+#define MUXENAB_RFSWCTRL2	0x00000800
+#define MUXENAB_SECI		0x00001000
+#define MUXENAB_BT_LEGACY	0x00002000
+#define MUXENAB_HOST_WAKE1	0x00004000	/* configure alternative GPIO for SDIO host_wake */
+
+/* Boot flags */
+#define FLASH_KERNEL_NFLASH	0x00000001
+#define FLASH_BOOT_NFLASH	0x00000002
+
+#endif /* _BCMDEVS_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmendian.h b/drivers/net/wireless/bcmdhd/include/bcmendian.h
new file mode 100644
index 0000000000000000000000000000000000000000..5dbf675f9ca3fd0b747b7211ea4f562bb0584ccd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmendian.h
@@ -0,0 +1,311 @@
+/*
+ * Byte order utilities
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ *  $Id: bcmendian.h 402715 2013-05-16 18:50:09Z $
+ *
+ * This file by default provides proper behavior on little-endian architectures.
+ * On big-endian architectures, IL_BIGENDIAN should be defined.
+ */
+
+#ifndef _BCMENDIAN_H_
+#define _BCMENDIAN_H_
+
+#include <typedefs.h>
+
+/* Reverse the bytes in a 16-bit value */
+#define BCMSWAP16(val) \
+	((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
+		  (((uint16)(val) & (uint16)0xff00U) >> 8)))
+
+/* Reverse the bytes in a 32-bit value */
+#define BCMSWAP32(val) \
+	((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
+		  (((uint32)(val) & (uint32)0x0000ff00U) <<  8) | \
+		  (((uint32)(val) & (uint32)0x00ff0000U) >>  8) | \
+		  (((uint32)(val) & (uint32)0xff000000U) >> 24)))
+
+/* Reverse the two 16-bit halves of a 32-bit value */
+#define BCMSWAP32BY16(val) \
+	((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
+		  (((uint32)(val) & (uint32)0xffff0000U) >> 16)))
+
+/* Reverse the bytes in a 64-bit value */
+#define BCMSWAP64(val) \
+	((uint64)((((uint64)(val) & 0x00000000000000ffULL) << 56) | \
+	          (((uint64)(val) & 0x000000000000ff00ULL) << 40) | \
+	          (((uint64)(val) & 0x0000000000ff0000ULL) << 24) | \
+	          (((uint64)(val) & 0x00000000ff000000ULL) <<  8) | \
+	          (((uint64)(val) & 0x000000ff00000000ULL) >>  8) | \
+	          (((uint64)(val) & 0x0000ff0000000000ULL) >> 24) | \
+	          (((uint64)(val) & 0x00ff000000000000ULL) >> 40) | \
+	          (((uint64)(val) & 0xff00000000000000ULL) >> 56)))
+
+/* Reverse the two 32-bit halves of a 64-bit value */
+#define BCMSWAP64BY32(val) \
+	((uint64)((((uint64)(val) & 0x00000000ffffffffULL) << 32) | \
+	          (((uint64)(val) & 0xffffffff00000000ULL) >> 32)))
+
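+/* Illustrative results of the swap macros above (worked examples, not used by the code):
+ *     BCMSWAP16(0x1234)         == 0x3412
+ *     BCMSWAP32(0x12345678)     == 0x78563412
+ *     BCMSWAP32BY16(0x12345678) == 0x56781234
+ *     BCMSWAP64BY32(0x0123456789abcdefULL) == 0x89abcdef01234567ULL
+ */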
+
+/* Byte swapping macros
+ *    Host <=> Network (Big Endian) for 16- and 32-bit values
+ *    Host <=> Little-Endian for 16- and 32-bit values
+ */
+#ifndef hton16
+#define HTON16(i) BCMSWAP16(i)
+#define	hton16(i) bcmswap16(i)
+#define	HTON32(i) BCMSWAP32(i)
+#define	hton32(i) bcmswap32(i)
+#define	NTOH16(i) BCMSWAP16(i)
+#define	ntoh16(i) bcmswap16(i)
+#define	NTOH32(i) BCMSWAP32(i)
+#define	ntoh32(i) bcmswap32(i)
+#define LTOH16(i) (i)
+#define ltoh16(i) (i)
+#define LTOH32(i) (i)
+#define ltoh32(i) (i)
+#define HTOL16(i) (i)
+#define htol16(i) (i)
+#define HTOL32(i) (i)
+#define htol32(i) (i)
+#define HTOL64(i) (i)
+#define htol64(i) (i)
+#endif /* hton16 */
+
+#define ltoh16_buf(buf, i)
+#define htol16_buf(buf, i)
+
+/* Unaligned loads and stores in host byte order */
+#define load32_ua(a)		ltoh32_ua(a)
+#define store32_ua(a, v)	htol32_ua_store(v, a)
+#define load16_ua(a)		ltoh16_ua(a)
+#define store16_ua(a, v)	htol16_ua_store(v, a)
+
+#define _LTOH16_UA(cp)	((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp)	((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp)	(((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp)	(((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
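+
+/* Worked example: for uint8 cp[] = { 0x78, 0x56, 0x34, 0x12 },
+ *     _LTOH32_UA(cp) == 0x12345678   (little-endian interpretation)
+ *     _NTOH32_UA(cp) == 0x78563412   (network/big-endian interpretation)
+ */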
+
+#define ltoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \
+	 *(uint8 *)0)
+
+#define ntoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)(ptr)) : \
+	 *(uint8 *)0)
+
+#ifdef __GNUC__
+
+/* GNU macro versions avoid referencing the argument multiple times, while also
+ * avoiding the -fno-inline used in ROM builds.
+ */
+
+#define bcmswap16(val) ({ \
+	uint16 _val = (val); \
+	BCMSWAP16(_val); \
+})
+
+#define bcmswap32(val) ({ \
+	uint32 _val = (val); \
+	BCMSWAP32(_val); \
+})
+
+#define bcmswap64(val) ({ \
+	uint64 _val = (val); \
+	BCMSWAP64(_val); \
+})
+
+#define bcmswap32by16(val) ({ \
+	uint32 _val = (val); \
+	BCMSWAP32BY16(_val); \
+})
+
+#define bcmswap16_buf(buf, len) ({ \
+	uint16 *_buf = (uint16 *)(buf); \
+	uint _wds = (len) / 2; \
+	while (_wds--) { \
+		*_buf = bcmswap16(*_buf); \
+		_buf++; \
+	} \
+})
+
+#define htol16_ua_store(val, bytes) ({ \
+	uint16 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val & 0xff; \
+	_bytes[1] = _val >> 8; \
+})
+
+#define htol32_ua_store(val, bytes) ({ \
+	uint32 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val & 0xff; \
+	_bytes[1] = (_val >> 8) & 0xff; \
+	_bytes[2] = (_val >> 16) & 0xff; \
+	_bytes[3] = _val >> 24; \
+})
+
+#define hton16_ua_store(val, bytes) ({ \
+	uint16 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val >> 8; \
+	_bytes[1] = _val & 0xff; \
+})
+
+#define hton32_ua_store(val, bytes) ({ \
+	uint32 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val >> 24; \
+	_bytes[1] = (_val >> 16) & 0xff; \
+	_bytes[2] = (_val >> 8) & 0xff; \
+	_bytes[3] = _val & 0xff; \
+})
+
+#define ltoh16_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_LTOH16_UA(_bytes); \
+})
+
+#define ltoh32_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_LTOH32_UA(_bytes); \
+})
+
+#define ntoh16_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_NTOH16_UA(_bytes); \
+})
+
+#define ntoh32_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_NTOH32_UA(_bytes); \
+})
+
+#else /* !__GNUC__ */
+
+/* Inline versions avoid referencing the argument multiple times */
+static INLINE uint16
+bcmswap16(uint16 val)
+{
+	return BCMSWAP16(val);
+}
+
+static INLINE uint32
+bcmswap32(uint32 val)
+{
+	return BCMSWAP32(val);
+}
+
+static INLINE uint64
+bcmswap64(uint64 val)
+{
+	return BCMSWAP64(val);
+}
+
+static INLINE uint32
+bcmswap32by16(uint32 val)
+{
+	return BCMSWAP32BY16(val);
+}
+
+/* Reverse pairs of bytes in a buffer (not for high-performance use) */
+/* buf	- start of buffer of shorts to swap */
+/* len  - byte length of buffer */
+static INLINE void
+bcmswap16_buf(uint16 *buf, uint len)
+{
+	len = len / 2;
+
+	while (len--) {
+		*buf = bcmswap16(*buf);
+		buf++;
+	}
+}
+
+/*
+ * Store 16-bit value to unaligned little-endian byte array.
+ */
+static INLINE void
+htol16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = val >> 8;
+}
+
+/*
+ * Store 32-bit value to unaligned little-endian byte array.
+ */
+static INLINE void
+htol32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = (val >> 8) & 0xff;
+	bytes[2] = (val >> 16) & 0xff;
+	bytes[3] = val >> 24;
+}
+
+/*
+ * Store 16-bit value to unaligned network-(big-)endian byte array.
+ */
+static INLINE void
+hton16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val >> 8;
+	bytes[1] = val & 0xff;
+}
+
+/*
+ * Store 32-bit value to unaligned network-(big-)endian byte array.
+ */
+static INLINE void
+hton32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val >> 24;
+	bytes[1] = (val >> 16) & 0xff;
+	bytes[2] = (val >> 8) & 0xff;
+	bytes[3] = val & 0xff;
+}
+
+/*
+ * Load 16-bit value from unaligned little-endian byte array.
+ */
+static INLINE uint16
+ltoh16_ua(const void *bytes)
+{
+	return _LTOH16_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 32-bit value from unaligned little-endian byte array.
+ */
+static INLINE uint32
+ltoh32_ua(const void *bytes)
+{
+	return _LTOH32_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 16-bit value from unaligned big-(network-)endian byte array.
+ */
+static INLINE uint16
+ntoh16_ua(const void *bytes)
+{
+	return _NTOH16_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 32-bit value from unaligned big-(network-)endian byte array.
+ */
+static INLINE uint32
+ntoh32_ua(const void *bytes)
+{
+	return _NTOH32_UA((const uint8 *)bytes);
+}
+
+#endif /* !__GNUC__ */
+#endif /* !_BCMENDIAN_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h b/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h
new file mode 100644
index 0000000000000000000000000000000000000000..fc014ae89bda48f5c0174b7ded71cdb621c5ba84
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h
@@ -0,0 +1,728 @@
+/*
+ * MSGBUF network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmmsgbuf.h 490808 2014-07-12 00:33:13Z $
+ */
+#ifndef _bcmmsgbuf_h_
+#define	_bcmmsgbuf_h_
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+#include <bcmpcie.h>
+
+#define MSGBUF_MAX_MSG_SIZE   ETHER_MAX_LEN
+
+#define D2H_EPOCH_MODULO			253 /* sequence number wrap */
+#define D2H_EPOCH_INIT_VAL			(D2H_EPOCH_MODULO + 1)
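+
+/*
+ * Illustrative sketch (an editorial assumption, not taken from this header):
+ * the epoch/sequence number carried in each work item presumably wraps at
+ * D2H_EPOCH_MODULO, e.g. a producer would advance it as
+ *
+ *	seqnum = (seqnum + 1) % D2H_EPOCH_MODULO;
+ */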
+
+#define H2DRING_TXPOST_ITEMSIZE		48
+#define H2DRING_RXPOST_ITEMSIZE		32
+#define H2DRING_CTRL_SUB_ITEMSIZE	40
+#define D2HRING_TXCMPLT_ITEMSIZE	16
+#define D2HRING_RXCMPLT_ITEMSIZE	32
+#define D2HRING_CTRL_CMPLT_ITEMSIZE	24
+
+#define H2DRING_TXPOST_MAX_ITEM			512
+#define H2DRING_RXPOST_MAX_ITEM			256
+#define H2DRING_CTRL_SUB_MAX_ITEM		20
+#define D2HRING_TXCMPLT_MAX_ITEM		1024
+#define D2HRING_RXCMPLT_MAX_ITEM		256
+#define D2HRING_CTRL_CMPLT_MAX_ITEM		20
+enum {
+	DNGL_TO_HOST_MSGBUF,
+	HOST_TO_DNGL_MSGBUF
+};
+
+enum {
+	HOST_TO_DNGL_TXP_DATA,
+	HOST_TO_DNGL_RXP_DATA,
+	HOST_TO_DNGL_CTRL,
+	DNGL_TO_HOST_DATA,
+	DNGL_TO_HOST_CTRL
+};
+
+#define MESSAGE_PAYLOAD(a) (((a) & MSG_TYPE_INTERNAL_USE_START) ? TRUE : FALSE)
+
+#ifdef PCIE_API_REV1
+
+#define BCMMSGBUF_DUMMY_REF(a, b)	do {BCM_REFERENCE((a));BCM_REFERENCE((b));}  while (0)
+
+#define BCMMSGBUF_API_IFIDX(a)		0
+#define BCMMSGBUF_API_SEQNUM(a)		0
+#define BCMMSGBUF_IOCTL_XTID(a)		0
+#define BCMMSGBUF_IOCTL_PKTID(a)	((a)->cmd_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b)	BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_SET_API_SEQNUM(a, b)	BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b)	(BCMMSGBUF_IOCTL_PKTID(a) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b)	BCMMSGBUF_DUMMY_REF(a, b)
+
+#else /* PCIE_API_REV1 */
+
+#define BCMMSGBUF_API_IFIDX(a)		((a)->if_id)
+#define BCMMSGBUF_IOCTL_PKTID(a)	((a)->pkt_id)
+#define BCMMSGBUF_API_SEQNUM(a)		((a)->u.seq.seq_no)
+#define BCMMSGBUF_IOCTL_XTID(a)		((a)->xt_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b)	(BCMMSGBUF_API_IFIDX((a)) = (b))
+#define BCMMSGBUF_SET_API_SEQNUM(a, b)	(BCMMSGBUF_API_SEQNUM((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b)	(BCMMSGBUF_IOCTL_PKTID((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b)	(BCMMSGBUF_IOCTL_XTID((a)) = (b))
+
+#endif /* PCIE_API_REV1 */
+
+/* utility data structures */
+union addr64 {
+	struct {
+		uint32 low;
+		uint32 high;
+	};
+	struct {
+		uint32 low_addr;
+		uint32 high_addr;
+	};
+	uint64 u64;
+} DECLSPEC_ALIGN(8);
+
+typedef union addr64 addr64_t;
+
+/* IOCTL req Hdr */
+/* cmn Msg Hdr */
+typedef struct cmn_msg_hdr {
+	/* message type */
+	uint8 msg_type;
+	/* interface index this is valid for */
+	uint8 if_id;
+	/* flags */
+	uint8 flags;
+	/* sequence number */
+	uint8 epoch;
+	/* packet Identifier for the associated host buffer */
+	uint32 request_id;
+} cmn_msg_hdr_t;
+
+/* message type */
+typedef enum bcmpcie_msgtype {
+	MSG_TYPE_GEN_STATUS 		= 0x1,
+	MSG_TYPE_RING_STATUS		= 0x2,
+	MSG_TYPE_FLOW_RING_CREATE	= 0x3,
+	MSG_TYPE_FLOW_RING_CREATE_CMPLT	= 0x4,
+	MSG_TYPE_FLOW_RING_DELETE	= 0x5,
+	MSG_TYPE_FLOW_RING_DELETE_CMPLT	= 0x6,
+	MSG_TYPE_FLOW_RING_FLUSH	= 0x7,
+	MSG_TYPE_FLOW_RING_FLUSH_CMPLT	= 0x8,
+	MSG_TYPE_IOCTLPTR_REQ		= 0x9,
+	MSG_TYPE_IOCTLPTR_REQ_ACK	= 0xA,
+	MSG_TYPE_IOCTLRESP_BUF_POST	= 0xB,
+	MSG_TYPE_IOCTL_CMPLT		= 0xC,
+	MSG_TYPE_EVENT_BUF_POST		= 0xD,
+	MSG_TYPE_WL_EVENT		= 0xE,
+	MSG_TYPE_TX_POST		= 0xF,
+	MSG_TYPE_TX_STATUS		= 0x10,
+	MSG_TYPE_RXBUF_POST		= 0x11,
+	MSG_TYPE_RX_CMPLT		= 0x12,
+	MSG_TYPE_LPBK_DMAXFER 		= 0x13,
+	MSG_TYPE_LPBK_DMAXFER_CMPLT	= 0x14,
+	MSG_TYPE_API_MAX_RSVD		= 0x3F
+} bcmpcie_msg_type_t;
+
+typedef enum bcmpcie_msgtype_int {
+	MSG_TYPE_INTERNAL_USE_START	= 0x40,
+	MSG_TYPE_EVENT_PYLD		= 0x41,
+	MSG_TYPE_IOCT_PYLD		= 0x42,
+	MSG_TYPE_RX_PYLD		= 0x43,
+	MSG_TYPE_HOST_FETCH		= 0x44,
+	MSG_TYPE_LPBK_DMAXFER_PYLD	= 0x45,
+	MSG_TYPE_TXMETADATA_PYLD	= 0x46,
+	MSG_TYPE_HOSTDMA_PTRS		= 0x47
+} bcmpcie_msgtype_int_t;
+
+typedef enum bcmpcie_msgtype_u {
+	MSG_TYPE_TX_BATCH_POST		= 0x80,
+	MSG_TYPE_IOCTL_REQ		= 0x81,
+	MSG_TYPE_HOST_EVNT		= 0x82,
+	MSG_TYPE_LOOPBACK		= 0x83
+} bcmpcie_msgtype_u_t;
+
+
+/* if_id */
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT	5
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX	0x7
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MASK	\
+	(BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT)
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT	0
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX	0x1F
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MASK	\
+	(BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX << BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT)
+
+/* flags */
+#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX		0x1
+#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX_INTR	0x2
+#define BCMPCIE_CMNHDR_FLAGS_PHASE_BIT		0x80
+
+
+/* IOCTL request message */
+typedef struct ioctl_req_msg {
+	/* common message header */
+	cmn_msg_hdr_t 	cmn_hdr;
+
+	/* ioctl command type */
+	uint32		cmd;
+	/* ioctl transaction ID, to pair with an ioctl response */
+	uint16		trans_id;
+	/* input arguments buffer len */
+	uint16		input_buf_len;
+	/* expected output len */
+	uint16		output_buf_len;
+	/* to align the host address on 8 byte boundary */
+	uint16		rsvd[3];
+	/* always align on 8 byte boundary */
+	addr64_t	host_input_buf_addr;
+	/* rsvd */
+	uint32		rsvd1[2];
+} ioctl_req_msg_t;
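+
+/*
+ * Illustrative host-side sketch (editorial, not from the source): an IOCTL
+ * request work item is built by filling the common header and pointing
+ * host_input_buf_addr at a DMA-able input buffer.  "slot", "input_pa", "cmd",
+ * "buf_len", "resp_len" and "trans_id" are hypothetical, and the message type
+ * used depends on the API revision in effect.
+ *
+ *	ioctl_req_msg_t *req = (ioctl_req_msg_t *)slot;
+ *	req->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
+ *	req->cmn_hdr.if_id = 0;
+ *	req->cmd = cmd;
+ *	req->trans_id = trans_id;
+ *	req->input_buf_len = buf_len;
+ *	req->output_buf_len = resp_len;
+ *	req->host_input_buf_addr.low = (uint32)(input_pa & 0xffffffff);
+ *	req->host_input_buf_addr.high = (uint32)(input_pa >> 32);
+ */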
+
+/* buffer post messages used by the device to return IOCTL responses and events */
+typedef struct ioctl_resp_evt_buf_post_msg {
+	/* common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+	/* length of the host buffer supplied */
+	uint16		host_buf_len;
+	/* to align the host address on 8 byte boundary */
+	uint16		reserved[3];
+	/* always align on 8 byte boundary */
+	addr64_t	host_buf_addr;
+	uint32		rsvd[4];
+} ioctl_resp_evt_buf_post_msg_t;
+
+
+typedef struct pcie_dma_xfer_params {
+	/* common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+
+	/* always align on 8 byte boundary */
+	addr64_t	host_input_buf_addr;
+
+	/* always align on 8 byte boundary */
+	addr64_t	host_ouput_buf_addr;
+
+	/* length of transfer */
+	uint32		xfer_len;
+	/* delay before doing the src txfer */
+	uint32		srcdelay;
+	/* delay before doing the dest txfer */
+	uint32		destdelay;
+	uint32		rsvd;
+} pcie_dma_xfer_params_t;
+
+/* Complete msgbuf hdr for flow ring update from host to dongle */
+typedef struct tx_flowring_create_request {
+	cmn_msg_hdr_t   msg;
+	uint8	da[ETHER_ADDR_LEN];
+	uint8	sa[ETHER_ADDR_LEN];
+	uint8	tid;
+	uint8 	if_flags;
+	uint16	flow_ring_id;
+	uint8 	tc;
+	uint8	priority;
+	uint16 	int_vector;
+	uint16	max_items;
+	uint16	len_item;
+	addr64_t flow_ring_ptr;
+} tx_flowring_create_request_t;
+
+typedef struct tx_flowring_delete_request {
+	cmn_msg_hdr_t   msg;
+	uint16	flow_ring_id;
+	uint16 	reason;
+	uint32	rsvd[7];
+} tx_flowring_delete_request_t;
+
+typedef struct tx_flowring_flush_request {
+	cmn_msg_hdr_t   msg;
+	uint16	flow_ring_id;
+	uint16 	reason;
+	uint32	rsvd[7];
+} tx_flowring_flush_request_t;
+
+typedef union ctrl_submit_item {
+	ioctl_req_msg_t			ioctl_req;
+	ioctl_resp_evt_buf_post_msg_t	resp_buf_post;
+	pcie_dma_xfer_params_t		dma_xfer;
+	tx_flowring_create_request_t	flow_create;
+	tx_flowring_delete_request_t	flow_delete;
+	tx_flowring_flush_request_t	flow_flush;
+	unsigned char			check[H2DRING_CTRL_SUB_ITEMSIZE];
+} ctrl_submit_item_t;
+
+/* Control Completion messages (20 bytes) */
+typedef struct compl_msg_hdr {
+	/* status for the completion */
+	int16	status;
+	/* submission flow ring id which generated this status */
+	uint16	flow_ring_id;
+} compl_msg_hdr_t;
+
+/* XOR checksum or a magic number to audit DMA done */
+typedef uint32 dma_done_t;
+
+/* completion header status codes */
+#define	BCMPCIE_SUCCESS			0
+#define BCMPCIE_NOTFOUND		1
+#define BCMPCIE_NOMEM			2
+#define BCMPCIE_BADOPTION		3
+#define BCMPCIE_RING_IN_USE		4
+#define BCMPCIE_RING_ID_INVALID		5
+#define BCMPCIE_PKT_FLUSH		6
+#define BCMPCIE_NO_EVENT_BUF		7
+#define BCMPCIE_NO_RX_BUF		8
+#define BCMPCIE_NO_IOCTLRESP_BUF	9
+#define BCMPCIE_MAX_IOCTLRESP_BUF	10
+#define BCMPCIE_MAX_EVENT_BUF		11
+
+/* IOCTL completion response */
+typedef struct ioctl_compl_resp_msg {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	/* response buffer len where a host buffer is involved */
+	uint16			resp_len;
+	/* transaction id to pair with a request */
+	uint16			trans_id;
+	/* cmd id */
+	uint32			cmd;
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} ioctl_comp_resp_msg_t;
+
+/* IOCTL request acknowledgement */
+typedef struct ioctl_req_ack_msg {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t 	compl_hdr;
+	/* cmd id */
+	uint32			cmd;
+	uint32			rsvd[1];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} ioctl_req_ack_msg_t;
+
+/* WL event message: send from device to host */
+typedef struct wlevent_req_msg {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	/* length of valid event data in the event buffer */
+	uint16			event_data_len;
+	/* sequence number */
+	uint16			seqnum;
+	/* rsvd	*/
+	uint32			rsvd;
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} wlevent_req_msg_t;
+
+/* dma xfer complete message */
+typedef struct pcie_dmaxfer_cmplt {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} pcie_dmaxfer_cmplt_t;
+
+/* general status message */
+typedef struct pcie_gen_status {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} pcie_gen_status_t;
+
+/* ring status message */
+typedef struct pcie_ring_status {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	/* message which firmware couldn't decode */
+	uint16			write_idx;
+	uint16			rsvd[3];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} pcie_ring_status_t;
+
+typedef struct tx_flowring_create_response {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t 	cmplt;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} tx_flowring_create_response_t;
+typedef struct tx_flowring_delete_response {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t 	cmplt;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} tx_flowring_delete_response_t;
+
+typedef struct tx_flowring_flush_response {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t 	cmplt;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} tx_flowring_flush_response_t;
+
+/* Common layout of all d2h control messages */
+typedef struct ctrl_compl_msg {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} ctrl_compl_msg_t;
+
+typedef union ctrl_completion_item {
+	ioctl_comp_resp_msg_t		ioctl_resp;
+	wlevent_req_msg_t		event;
+	ioctl_req_ack_msg_t		ioct_ack;
+	pcie_dmaxfer_cmplt_t		pcie_xfer_cmplt;
+	pcie_gen_status_t		pcie_gen_status;
+	pcie_ring_status_t		pcie_ring_status;
+	tx_flowring_create_response_t	txfl_create_resp;
+	tx_flowring_delete_response_t	txfl_delete_resp;
+	tx_flowring_flush_response_t	txfl_flush_resp;
+	ctrl_compl_msg_t		ctrl_compl;
+	unsigned char		check[D2HRING_CTRL_CMPLT_ITEMSIZE];
+} ctrl_completion_item_t;
+
+/* H2D Rxpost ring work items */
+typedef struct host_rxbuf_post {
+	/* common message header */
+	cmn_msg_hdr_t   cmn_hdr;
+	/* provided meta data buffer len */
+	uint16		metadata_buf_len;
+	/* provided data buffer len to receive data */
+	uint16		data_buf_len;
+	/* alignment to make the host buffers start on 8 byte boundary */
+	uint32		rsvd;
+	/* provided meta data buffer */
+	addr64_t	metadata_buf_addr;
+	/* provided data buffer to receive data */
+	addr64_t	data_buf_addr;
+} host_rxbuf_post_t;
+
+typedef union rxbuf_submit_item {
+	host_rxbuf_post_t	rxpost;
+	unsigned char		check[H2DRING_RXPOST_ITEMSIZE];
+} rxbuf_submit_item_t;
+
+
+/* D2H Rxcompletion ring work items */
+typedef struct host_rxbuf_cmpl {
+	/* common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t	compl_hdr;
+	/*  filled up meta data len */
+	uint16		metadata_len;
+	/* filled up buffer len to receive data */
+	uint16		data_len;
+	/* offset in the host rx buffer where the data starts */
+	uint16		data_offset;
+	/* rx flags */
+	uint16		flags;
+	/* rx status */
+	uint32		rx_status_0;
+	uint32		rx_status_1;
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} host_rxbuf_cmpl_t;
+
+typedef union rxbuf_complete_item {
+	host_rxbuf_cmpl_t	rxcmpl;
+	unsigned char		check[D2HRING_RXCMPLT_ITEMSIZE];
+} rxbuf_complete_item_t;
+
+
+typedef struct host_txbuf_post {
+	/* common message header */
+	cmn_msg_hdr_t   cmn_hdr;
+	/* eth header */
+	uint8		txhdr[ETHER_HDR_LEN];
+	/* flags */
+	uint8		flags;
+	/* number of segments */
+	uint8		seg_cnt;
+
+	/* provided meta data buffer for txstatus */
+	addr64_t	metadata_buf_addr;
+	/* provided data buffer holding the frame to transmit */
+	addr64_t	data_buf_addr;
+	/* provided meta data buffer len */
+	uint16		metadata_buf_len;
+	/* length of the data to transmit */
+	uint16		data_len;
+	uint32		rsvd;
+} host_txbuf_post_t;
+
+#define BCMPCIE_PKT_FLAGS_FRAME_802_3	0x01
+#define BCMPCIE_PKT_FLAGS_FRAME_802_11	0x02
+
+#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK	0x03	/* Exempt uses 2 bits */
+#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT	0x02	/* needs to be shifted past other bits */
+
+
+#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT		5
+#define BCMPCIE_PKT_FLAGS_PRIO_MASK		(7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT)
+
+/* These are added to fix up the compile issues */
+#define BCMPCIE_TXPOST_FLAGS_FRAME_802_3	BCMPCIE_PKT_FLAGS_FRAME_802_3
+#define BCMPCIE_TXPOST_FLAGS_FRAME_802_11	BCMPCIE_PKT_FLAGS_FRAME_802_11
+#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT		BCMPCIE_PKT_FLAGS_PRIO_SHIFT
+#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK		BCMPCIE_PKT_FLAGS_PRIO_MASK
+
+/* H2D Txpost ring work items */
+typedef union txbuf_submit_item {
+	host_txbuf_post_t	txpost;
+	unsigned char		check[H2DRING_TXPOST_ITEMSIZE];
+} txbuf_submit_item_t;
+
+/* D2H Txcompletion ring work items */
+typedef struct host_txbuf_cmpl {
+	/* common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t	compl_hdr;
+	union {
+		struct {
+			/* provided meta data len */
+			uint16	metadata_len;
+			/* WLAN side txstatus */
+			uint16	tx_status;
+		};
+		/* XOR checksum or a magic number to audit DMA done */
+		dma_done_t		marker;
+	};
+} host_txbuf_cmpl_t;
+
+typedef union txbuf_complete_item {
+	host_txbuf_cmpl_t	txcmpl;
+	unsigned char		check[D2HRING_TXCMPLT_ITEMSIZE];
+} txbuf_complete_item_t;
+
+#define BCMPCIE_D2H_METADATA_HDRLEN	4
+#define BCMPCIE_D2H_METADATA_MINLEN	(BCMPCIE_D2H_METADATA_HDRLEN + 4)
+
+/* ret buf struct */
+typedef struct ret_buf_ptr {
+	uint32 low_addr;
+	uint32 high_addr;
+} ret_buf_t;
+
+#ifdef PCIE_API_REV1
+/* ioctl specific hdr */
+typedef struct ioctl_hdr {
+	uint16 		cmd;
+	uint16		retbuf_len;
+	uint32		cmd_id;
+} ioctl_hdr_t;
+typedef struct ioctlptr_hdr {
+	uint16 		cmd;
+	uint16		retbuf_len;
+	uint16 		buflen;
+	uint16		rsvd;
+	uint32		cmd_id;
+} ioctlptr_hdr_t;
+#else /* PCIE_API_REV1 */
+typedef struct ioctl_req_hdr {
+	uint32		pkt_id; /* Packet ID */
+	uint32 		cmd; /* IOCTL ID */
+	uint16		retbuf_len;
+	uint16 		buflen;
+	uint16		xt_id; /* transaction ID */
+	uint16		rsvd[1];
+} ioctl_req_hdr_t;
+#endif /* PCIE_API_REV1 */
+
+
+/* Complete msgbuf hdr for ioctl from host to dongle */
+typedef struct ioct_reqst_hdr {
+	cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+	ioctl_hdr_t ioct_hdr;
+#else
+	ioctl_req_hdr_t ioct_hdr;
+#endif
+	ret_buf_t ret_buf;
+} ioct_reqst_hdr_t;
+typedef struct ioctptr_reqst_hdr {
+	cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+	ioctlptr_hdr_t ioct_hdr;
+#else
+	ioctl_req_hdr_t ioct_hdr;
+#endif
+	ret_buf_t ret_buf;
+	ret_buf_t ioct_buf;
+} ioctptr_reqst_hdr_t;
+
+/* ioctl response header */
+typedef struct ioct_resp_hdr {
+	cmn_msg_hdr_t   msg;
+#ifdef PCIE_API_REV1
+	uint32	cmd_id;
+#else
+	uint32	pkt_id;
+#endif
+	uint32	status;
+	uint32	ret_len;
+	uint32  inline_data;
+#ifdef PCIE_API_REV1
+#else
+	uint16	xt_id;	/* transaction ID */
+	uint16	rsvd[1];
+#endif
+} ioct_resp_hdr_t;
+
+/* ioct resp header used in dongle */
+/* ret buf hdr will be stripped off inside dongle itself */
+typedef struct msgbuf_ioctl_resp {
+	ioct_resp_hdr_t	ioct_hdr;
+	ret_buf_t	ret_buf;	/* ret buf pointers */
+} msgbuf_ioct_resp_t;
+
+/* WL event hdr info */
+typedef struct wl_event_hdr {
+	cmn_msg_hdr_t   msg;
+	uint16 event;
+	uint8 flags;
+	uint8 rsvd;
+	uint16 retbuf_len;
+	uint16 rsvd1;
+	uint32 rxbufid;
+} wl_event_hdr_t;
+
+#define TXDESCR_FLOWID_PCIELPBK_1	0xFF
+#define TXDESCR_FLOWID_PCIELPBK_2	0xFE
+
+typedef struct txbatch_lenptr_tup {
+	uint32 pktid;
+	uint16 pktlen;
+	uint16 rsvd;
+	ret_buf_t	ret_buf;	/* ret buf pointers */
+} txbatch_lenptr_tup_t;
+
+typedef struct txbatch_cmn_msghdr {
+	cmn_msg_hdr_t   msg;
+	uint8 priority;
+	uint8 hdrlen;
+	uint8 pktcnt;
+	uint8 flowid;
+	uint8 txhdr[ETHER_HDR_LEN];
+	uint16 rsvd;
+} txbatch_cmn_msghdr_t;
+
+typedef struct txbatch_msghdr {
+	txbatch_cmn_msghdr_t txcmn;
+	txbatch_lenptr_tup_t tx_tup[0]; /* Based on packet count */
+} txbatch_msghdr_t;
+
+/* TX desc posting header */
+typedef struct tx_lenptr_tup {
+	uint16 pktlen;
+	uint16 rsvd;
+	ret_buf_t	ret_buf;	/* ret buf pointers */
+} tx_lenptr_tup_t;
+
+typedef struct txdescr_cmn_msghdr {
+	cmn_msg_hdr_t   msg;
+	uint8 priority;
+	uint8 hdrlen;
+	uint8 descrcnt;
+	uint8 flowid;
+	uint32 pktid;
+} txdescr_cmn_msghdr_t;
+
+typedef struct txdescr_msghdr {
+	txdescr_cmn_msghdr_t txcmn;
+	uint8 txhdr[ETHER_HDR_LEN];
+	uint16 rsvd;
+	tx_lenptr_tup_t tx_tup[0]; /* Based on descriptor count */
+} txdescr_msghdr_t;
+
+/* Tx status header info */
+typedef struct txstatus_hdr {
+	cmn_msg_hdr_t   msg;
+	uint32 pktid;
+} txstatus_hdr_t;
+/* RX bufid-len-ptr tuple */
+typedef struct rx_lenptr_tup {
+	uint32 rxbufid;
+	uint16 len;
+	uint16 rsvd2;
+	ret_buf_t	ret_buf;	/* ret buf pointers */
+} rx_lenptr_tup_t;
+/* Rx descr Post hdr info */
+typedef struct rxdesc_msghdr {
+	cmn_msg_hdr_t   msg;
+	uint16 rsvd0;
+	uint8 rsvd1;
+	uint8 descnt;
+	rx_lenptr_tup_t rx_tup[0];
+} rxdesc_msghdr_t;
+
+/* RX complete tuples */
+typedef struct rxcmplt_tup {
+	uint16 retbuf_len;
+	uint16 data_offset;
+	uint32 rxstatus0;
+	uint32 rxstatus1;
+	uint32 rxbufid;
+} rxcmplt_tup_t;
+/* RX complete message hdr */
+typedef struct rxcmplt_hdr {
+	cmn_msg_hdr_t   msg;
+	uint16 rsvd0;
+	uint16 rxcmpltcnt;
+	rxcmplt_tup_t rx_tup[0];
+} rxcmplt_hdr_t;
+typedef struct hostevent_hdr {
+	cmn_msg_hdr_t   msg;
+	uint32 evnt_pyld;
+} hostevent_hdr_t;
+
+typedef struct dma_xfer_params {
+	uint32 src_physaddr_hi;
+	uint32 src_physaddr_lo;
+	uint32 dest_physaddr_hi;
+	uint32 dest_physaddr_lo;
+	uint32 len;
+	uint32 srcdelay;
+	uint32 destdelay;
+} dma_xfer_params_t;
+
+enum {
+	HOST_EVENT_CONS_CMD = 1
+};
+
+/* defines for flags */
+#define MSGBUF_IOC_ACTION_MASK 0x1
+
+#endif /* _bcmmsgbuf_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcie.h b/drivers/net/wireless/bcmdhd/include/bcmpcie.h
new file mode 100644
index 0000000000000000000000000000000000000000..14ccdcda2fea4e5ddd70e1fb267ef82968e5f5f4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmpcie.h
@@ -0,0 +1,197 @@
+/*
+ * Broadcom PCIE
+ * Software-specific definitions shared between device and host side
+ * Describes the area shared between the host and the dongle
+ * $Copyright Open 2005 Broadcom Corporation$
+ *
+ * $Id: bcmpcie.h 490808 2014-07-12 00:33:13Z $
+ */
+
+#ifndef	_bcmpcie_h_
+#define	_bcmpcie_h_
+
+#include <bcmutils.h>
+
+#define ADDR_64(x)			(x.addr)
+#define HIGH_ADDR_32(x)     ((uint32) (((sh_addr_t) x).high_addr))
+#define LOW_ADDR_32(x)      ((uint32) (((sh_addr_t) x).low_addr))
+
+typedef struct {
+	uint32 low_addr;
+	uint32 high_addr;
+} sh_addr_t;
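+
+/*
+ * Illustrative sketch (editorial assumption): a 64-bit host physical address
+ * "pa" would be split into an sh_addr_t before being written into the shared
+ * structures below, e.g.
+ *
+ *	sh_addr_t sh;
+ *	sh.low_addr  = (uint32)(pa & 0xffffffff);
+ *	sh.high_addr = (uint32)(pa >> 32);
+ */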
+
+
+
+#ifdef BCMPCIE_SUPPORT_TX_PUSH_RING
+#define BCMPCIE_PUSH_TX_RING	1
+#else
+#define BCMPCIE_PUSH_TX_RING	0
+#endif /* BCMPCIE_SUPPORT_TX_PUSH_RING */
+
+/* May be overridden by 43xxxxx-roml.mk */
+#if !defined(BCMPCIE_MAX_TX_FLOWS)
+#define BCMPCIE_MAX_TX_FLOWS	40
+#endif /* ! BCMPCIE_MAX_TX_FLOWS */
+
+#define PCIE_SHARED_VERSION		0x00005
+#define PCIE_SHARED_VERSION_MASK	0x000FF
+#define PCIE_SHARED_ASSERT_BUILT	0x00100
+#define PCIE_SHARED_ASSERT		0x00200
+#define PCIE_SHARED_TRAP		0x00400
+#define PCIE_SHARED_IN_BRPT		0x00800
+#define PCIE_SHARED_SET_BRPT		0x01000
+#define PCIE_SHARED_PENDING_BRPT	0x02000
+#define PCIE_SHARED_TXPUSH_SPRT		0x04000
+#define PCIE_SHARED_EVT_SEQNUM		0x08000
+#define PCIE_SHARED_DMA_INDEX		0x10000
+
+#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT		0
+#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT		1
+#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE		2
+#define BCMPCIE_D2H_MSGRING_TX_COMPLETE			3
+#define BCMPCIE_D2H_MSGRING_RX_COMPLETE			4
+#define BCMPCIE_COMMON_MSGRING_MAX_ID			4
+
+/* Added only for single tx ring */
+#define BCMPCIE_H2D_TXFLOWRINGID			5
+
+#define BCMPCIE_H2D_COMMON_MSGRINGS			2
+#define BCMPCIE_D2H_COMMON_MSGRINGS			3
+#define BCMPCIE_COMMON_MSGRINGS				5
+
+enum h2dring_idx {
+	BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX = 0,
+	BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX =	1,
+	BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START = 2
+};
+
+enum d2hring_idx {
+	BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX = 0,
+	BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX = 1,
+	BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX = 2
+};
+
+typedef struct ring_mem {
+	uint16		idx;
+	uint8		type;
+	uint8		rsvd;
+	uint16		max_item;
+	uint16		len_items;
+	sh_addr_t	base_addr;
+} ring_mem_t;
+
+#define RINGSTATE_INITED	1
+
+typedef struct ring_state {
+	uint8 idx;
+	uint8 state;
+	uint16 r_offset;
+	uint16 w_offset;
+	uint16 e_offset;
+} ring_state_t;
+
+
+
+typedef struct ring_info {
+	/* locations in TCM where the ring memory and ring state are defined */
+	uint32		ringmem_ptr;	/* ring mem location in TCM */
+	uint32		h2d_w_idx_ptr;
+
+	uint32		h2d_r_idx_ptr;
+	uint32		d2h_w_idx_ptr;
+
+	uint32		d2h_r_idx_ptr;
+	/* host locations where the DMA of read/write indices are */
+	sh_addr_t	h2d_w_idx_hostaddr;
+	sh_addr_t	h2d_r_idx_hostaddr;
+	sh_addr_t	d2h_w_idx_hostaddr;
+	sh_addr_t	d2h_r_idx_hostaddr;
+	uint16		max_sub_queues;
+	uint16		rsvd;
+} ring_info_t;
+
+typedef struct {
+	/* shared area version captured at flags 7:0 */
+	uint32	flags;
+
+	uint32  trap_addr;
+	uint32  assert_exp_addr;
+	uint32  assert_file_addr;
+	uint32  assert_line;
+	uint32	console_addr;		/* Address of hnd_cons_t */
+
+	uint32  msgtrace_addr;
+
+	uint32  fwid;
+
+	/* Used for debug/flow control */
+	uint16  total_lfrag_pkt_cnt;
+	uint16  max_host_rxbufs; /* rsvd in spec */
+
+	uint32 dma_rxoffset; /* rsvd in spec */
+
+	/* these will be used for sleep request/ack, d3 req/ack */
+	uint32  h2d_mb_data_ptr;
+	uint32  d2h_mb_data_ptr;
+
+	/* information pertinent to host IPC/msgbuf channels */
+	/* location in the TCM memory which has the ring_info */
+	uint32	rings_info_ptr;
+
+	/* block of host memory for the scratch buffer */
+	uint32		host_dma_scratch_buffer_len;
+	sh_addr_t	host_dma_scratch_buffer;
+
+	/* block of host memory for the dongle to push the status into */
+	uint32		device_rings_stsblk_len;
+	sh_addr_t	device_rings_stsblk;
+#ifdef BCM_BUZZZ
+	uint32	buzzz;	/* BUZZZ state format strings and trace buffer */
+#endif
+} pciedev_shared_t;
+
+
+/* H2D mail box Data */
+#define H2D_HOST_D3_INFORM	0x00000001
+#define H2D_HOST_DS_ACK		0x00000002
+#define H2D_HOST_CONS_INT	0x80000000	/* h2d int for console cmds  */
+
+/* D2H mail box Data */
+#define D2H_DEV_D3_ACK		0x00000001
+#define D2H_DEV_DS_ENTER_REQ	0x00000002
+#define D2H_DEV_DS_EXIT_NOTE	0x00000004
+#define D2H_DEV_FWHALT		0x10000000
+
+
+extern pciedev_shared_t pciedev_shared;
+#define NEXTTXP(i, d)           ((((i)+1) >= (d)) ? 0 : ((i)+1))
+#define NTXPACTIVE(r, w, d)     (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w)))
+#define NTXPAVAIL(r, w, d)      (((d) - NTXPACTIVE((r), (w), (d))) > 1)
+
+/* Number of items readable from the ring as one contiguous chunk */
+#define READ_AVAIL_SPACE(w, r, d)		\
+			((w >= r) ? (w - r) : (d - r))
+
+#define WRT_PEND(x)	((x)->wr_pending)
+#define DNGL_RING_WPTR(msgbuf)		(*((msgbuf)->tcm_rs_w_ptr))
+#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a)	(DNGL_RING_WPTR(msgbuf) = (a))
+
+#define DNGL_RING_RPTR(msgbuf)		(*((msgbuf)->tcm_rs_r_ptr))
+#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a)	(DNGL_RING_RPTR(msgbuf) = (a))
+
+#define  RING_READ_PTR(x)	((x)->ringstate->r_offset)
+#define  RING_WRITE_PTR(x)	((x)->ringstate->w_offset)
+#define  RING_START_PTR(x)	((x)->ringmem->base_addr.low_addr)
+#define  RING_MAX_ITEM(x)	((x)->ringmem->max_item)
+#define  RING_LEN_ITEMS(x)	((x)->ringmem->len_items)
+#define	 HOST_RING_BASE(x)	((x)->ring_base.va)
+#define	 HOST_RING_END(x)	((uint8 *)HOST_RING_BASE((x)) + \
+			 ((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x))))
+
+#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d)		((w >= r) ? (d - w) : (r - w))
+#define WRITE_SPACE_AVAIL(r, w, d)	(d - (NTXPACTIVE(r, w, d)) - 1)
+#define CHECK_WRITE_SPACE(r, w, d)	\
+	MIN(WRITE_SPACE_AVAIL(r, w, d), WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d))
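+
+/*
+ * Worked example (editorial, not from the source) for the circular ring
+ * helpers above: with depth d = 8, read index r = 6 and write index w = 2,
+ *
+ *	NTXPACTIVE(6, 2, 8)                   = 8 - 6 + 2 = 4 items in flight
+ *	READ_AVAIL_SPACE(2, 6, 8)             = 8 - 6     = 2 items readable as one chunk
+ *	WRITE_SPACE_AVAIL(6, 2, 8)            = 8 - 4 - 1 = 3 free slots (one slot kept empty)
+ *	WRITE_SPACE_AVAIL_CONTINUOUS(6, 2, 8) = 6 - 2     = 4 slots before the wrap point
+ *	CHECK_WRITE_SPACE(6, 2, 8)            = MIN(3, 4) = 3
+ */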
+
+#endif	/* _bcmpcie_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
new file mode 100644
index 0000000000000000000000000000000000000000..d95f8127166b84639dd920ed9266d635bcf85d09
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
@@ -0,0 +1,163 @@
+/*
+ * Broadcom PCI-SPI Host Controller Register Definitions
+ *
+ * $ Copyright Open Broadcom Corporation $
+ *
+ * $Id: bcmpcispi.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef	_BCM_PCI_SPI_H
+#define	_BCM_PCI_SPI_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+
+typedef volatile struct {
+	uint32 spih_ctrl;		/* 0x00 SPI Control Register */
+	uint32 spih_stat;		/* 0x04 SPI Status Register */
+	uint32 spih_data;		/* 0x08 SPI Data Register, 32-bits wide */
+	uint32 spih_ext;		/* 0x0C SPI Extension Register */
+	uint32 PAD[4];			/* 0x10-0x1F PADDING */
+
+	uint32 spih_gpio_ctrl;		/* 0x20 SPI GPIO Control Register */
+	uint32 spih_gpio_data;		/* 0x24 SPI GPIO Data Register */
+	uint32 PAD[6];			/* 0x28-0x3F PADDING */
+
+	uint32 spih_int_edge;		/* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */
+	uint32 spih_int_pol;		/* 0x44 SPI Interrupt Polarity Register (0=Active Low, */
+							/* 1=Active High) */
+	uint32 spih_int_mask;		/* 0x48 SPI Interrupt Mask */
+	uint32 spih_int_status;		/* 0x4C SPI Interrupt Status */
+	uint32 PAD[4];			/* 0x50-0x5F PADDING */
+
+	uint32 spih_hex_disp;		/* 0x60 SPI 4-digit hex display value */
+	uint32 spih_current_ma;		/* 0x64 SPI SD card current consumption in mA */
+	uint32 PAD[1];			/* 0x68 PADDING */
+	uint32 spih_disp_sel;		/* 0x6c SPI 4-digit hex display mode select (1=current) */
+	uint32 PAD[4];			/* 0x70-0x7F PADDING */
+	uint32 PAD[8];			/* 0x80-0x9F PADDING */
+	uint32 PAD[8];			/* 0xA0-0xBF PADDING */
+	uint32 spih_pll_ctrl;	/* 0xC0 PLL Control Register */
+	uint32 spih_pll_status;	/* 0xC4 PLL Status Register */
+	uint32 spih_xtal_freq;	/* 0xC8 External Clock Frequency in units of 10000Hz */
+	uint32 spih_clk_count;	/* 0xCC External Clock Count Register */
+
+} spih_regs_t;
+
+typedef volatile struct {
+	uint32 cfg_space[0x40];		/* 0x000-0x0FF PCI Configuration Space (Read Only) */
+	uint32 P_IMG_CTRL0;		/* 0x100 PCI Image0 Control Register */
+
+	uint32 P_BA0;			/* 0x104 32 R/W PCI Image0 Base Address register */
+	uint32 P_AM0;			/* 0x108 32 R/W PCI Image0 Address Mask register */
+	uint32 P_TA0;			/* 0x10C 32 R/W PCI Image0 Translation Address register */
+	uint32 P_IMG_CTRL1;		/* 0x110 32 R/W PCI Image1 Control register */
+	uint32 P_BA1;			/* 0x114 32 R/W PCI Image1 Base Address register */
+	uint32 P_AM1;			/* 0x118 32 R/W PCI Image1 Address Mask register */
+	uint32 P_TA1;			/* 0x11C 32 R/W PCI Image1 Translation Address register */
+	uint32 P_IMG_CTRL2;		/* 0x120 32 R/W PCI Image2 Control register */
+	uint32 P_BA2;			/* 0x124 32 R/W PCI Image2 Base Address register */
+	uint32 P_AM2;			/* 0x128 32 R/W PCI Image2 Address Mask register */
+	uint32 P_TA2;			/* 0x12C 32 R/W PCI Image2 Translation Address register */
+	uint32 P_IMG_CTRL3;		/* 0x130 32 R/W PCI Image3 Control register */
+	uint32 P_BA3;			/* 0x134 32 R/W PCI Image3 Base Address register */
+	uint32 P_AM3;			/* 0x138 32 R/W PCI Image3 Address Mask register */
+	uint32 P_TA3;			/* 0x13C 32 R/W PCI Image3 Translation Address register */
+	uint32 P_IMG_CTRL4;		/* 0x140 32 R/W PCI Image4 Control register */
+	uint32 P_BA4;			/* 0x144 32 R/W PCI Image4 Base Address register */
+	uint32 P_AM4;			/* 0x148 32 R/W PCI Image4 Address Mask register */
+	uint32 P_TA4;			/* 0x14C 32 R/W PCI Image4 Translation Address register */
+	uint32 P_IMG_CTRL5;		/* 0x150 32 R/W PCI Image5 Control register */
+	uint32 P_BA5;			/* 0x154 32 R/W PCI Image5 Base Address register */
+	uint32 P_AM5;			/* 0x158 32 R/W PCI Image5 Address Mask register */
+	uint32 P_TA5;			/* 0x15C 32 R/W PCI Image5 Translation Address register */
+	uint32 P_ERR_CS;		/* 0x160 32 R/W PCI Error Control and Status register */
+	uint32 P_ERR_ADDR;		/* 0x164 32 R PCI Erroneous Address register */
+	uint32 P_ERR_DATA;		/* 0x168 32 R PCI Erroneous Data register */
+
+	uint32 PAD[5];			/* 0x16C-0x17F PADDING */
+
+	uint32 WB_CONF_SPC_BAR;		/* 0x180 32 R WISHBONE Configuration Space Base Address */
+	uint32 W_IMG_CTRL1;		/* 0x184 32 R/W WISHBONE Image1 Control register */
+	uint32 W_BA1;			/* 0x188 32 R/W WISHBONE Image1 Base Address register */
+	uint32 W_AM1;			/* 0x18C 32 R/W WISHBONE Image1 Address Mask register */
+	uint32 W_TA1;			/* 0x190 32 R/W WISHBONE Image1 Translation Address reg */
+	uint32 W_IMG_CTRL2;		/* 0x194 32 R/W WISHBONE Image2 Control register */
+	uint32 W_BA2;			/* 0x198 32 R/W WISHBONE Image2 Base Address register */
+	uint32 W_AM2;			/* 0x19C 32 R/W WISHBONE Image2 Address Mask register */
+	uint32 W_TA2;			/* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */
+	uint32 W_IMG_CTRL3;		/* 0x1A4 32 R/W WISHBONE Image3 Control register */
+	uint32 W_BA3;			/* 0x1A8 32 R/W WISHBONE Image3 Base Address register */
+	uint32 W_AM3;			/* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */
+	uint32 W_TA3;			/* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */
+	uint32 W_IMG_CTRL4;		/* 0x1B4 32 R/W WISHBONE Image4 Control register */
+	uint32 W_BA4;			/* 0x1B8 32 R/W WISHBONE Image4 Base Address register */
+	uint32 W_AM4;			/* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */
+	uint32 W_TA4;			/* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */
+	uint32 W_IMG_CTRL5;		/* 0x1C4 32 R/W WISHBONE Image5 Control register */
+	uint32 W_BA5;			/* 0x1C8 32 R/W WISHBONE Image5 Base Address register */
+	uint32 W_AM5;			/* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */
+	uint32 W_TA5;			/* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */
+	uint32 W_ERR_CS;		/* 0x1D4 32 R/W WISHBONE Error Control and Status reg */
+	uint32 W_ERR_ADDR;		/* 0x1D8 32 R WISHBONE Erroneous Address register */
+	uint32 W_ERR_DATA;		/* 0x1DC 32 R WISHBONE Erroneous Data register */
+	uint32 CNF_ADDR;		/* 0x1E0 32 R/W Configuration Cycle register */
+	uint32 CNF_DATA;		/* 0x1E4 32 R/W Configuration Cycle Generation Data reg */
+
+	uint32 INT_ACK;			/* 0x1E8 32 R Interrupt Acknowledge register */
+	uint32 ICR;			/* 0x1EC 32 R/W Interrupt Control register */
+	uint32 ISR;			/* 0x1F0 32 R/W Interrupt Status register */
+} spih_pciregs_t;
+
+/*
+ * PCI Core interrupt enable and status bit definitions.
+ */
+
+/* PCI Core ICR Register bit definitions */
+#define PCI_INT_PROP_EN		(1 << 0)	/* Interrupt Propagation Enable */
+#define PCI_WB_ERR_INT_EN	(1 << 1)	/* Wishbone Error Interrupt Enable */
+#define PCI_PCI_ERR_INT_EN	(1 << 2)	/* PCI Error Interrupt Enable */
+#define PCI_PAR_ERR_INT_EN	(1 << 3)	/* Parity Error Interrupt Enable */
+#define PCI_SYS_ERR_INT_EN	(1 << 4)	/* System Error Interrupt Enable */
+#define PCI_SOFTWARE_RESET	(1U << 31)	/* Software reset of the PCI Core. */
+
+
+/* PCI Core ISR Register bit definitions */
+#define PCI_INT_PROP_ST		(1 << 0)	/* Interrupt Propagation Status */
+#define PCI_WB_ERR_INT_ST	(1 << 1)	/* Wishbone Error Interrupt Status */
+#define PCI_PCI_ERR_INT_ST	(1 << 2)	/* PCI Error Interrupt Status */
+#define PCI_PAR_ERR_INT_ST	(1 << 3)	/* Parity Error Interrupt Status */
+#define PCI_SYS_ERR_INT_ST	(1 << 4)	/* System Error Interrupt Status */
+
+
+/* Registers on the Wishbone bus */
+#define SPIH_CTLR_INTR		(1 << 0)	/* SPI Host Controller Core Interrupt */
+#define SPIH_DEV_INTR		(1 << 1)	/* SPI Device Interrupt */
+#define SPIH_WFIFO_INTR		(1 << 2)	/* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */
+
+/* GPIO Bit definitions */
+#define SPIH_CS			(1 << 0)	/* SPI Chip Select (active low) */
+#define SPIH_SLOT_POWER		(1 << 1)	/* SD Card Slot Power Enable */
+#define SPIH_CARD_DETECT	(1 << 2)	/* SD Card Detect */
+
+/* SPI Status Register Bit definitions */
+#define SPIH_STATE_MASK		0x30		/* SPI Transfer State Machine state mask */
+#define SPIH_STATE_SHIFT	4		/* SPI Transfer State Machine state shift */
+#define SPIH_WFFULL		(1 << 3)	/* SPI Write FIFO Full */
+#define SPIH_WFEMPTY		(1 << 2)	/* SPI Write FIFO Empty */
+#define SPIH_RFFULL		(1 << 1)	/* SPI Read FIFO Full */
+#define SPIH_RFEMPTY		(1 << 0)	/* SPI Read FIFO Empty */
+
+#define SPIH_EXT_CLK		(1U << 31)	/* Use External Clock as PLL Clock source. */
+
+#define SPIH_PLL_NO_CLK		(1 << 1)	/* Set to 1 if the PLL's input clock is lost. */
+#define SPIH_PLL_LOCKED		(1 << 3)	/* Set to 1 when the PLL is locked. */
+
+/* Spin bit loop bound check */
+#define SPI_SPIN_BOUND		0xf4240		/* 1 million */
+
+#endif /* _BCM_PCI_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmperf.h b/drivers/net/wireless/bcmdhd/include/bcmperf.h
new file mode 100644
index 0000000000000000000000000000000000000000..39cfc4516d4f7a3d5e6c3ddb3237aefa81bd416c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmperf.h
@@ -0,0 +1,18 @@
+/*
+ * Performance counters software interface.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmperf.h 241182 2011-02-17 21:50:03Z $
+ */
+/* essai */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define	BCMPERF_GETICACHE_MISS(x)	((x) = 0)
+#define	BCMPERF_GETICACHE_HIT(x)	((x) = 0)
+#define	BCMPERF_GETINSTRCOUNT(x)	((x) = 0)
+#endif /* _BCMPERF_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
new file mode 100644
index 0000000000000000000000000000000000000000..a494ca28bde96b134a1e08d84ae4a15239471fe9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
@@ -0,0 +1,125 @@
+/*
+ * Definitions for API from sdio common code (bcmsdh) to individual
+ * host controller drivers.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmsdbus.h 408158 2013-06-17 22:15:35Z $
+ */
+
+#ifndef	_sdio_api_h_
+#define	_sdio_api_h_
+
+
+#define SDIOH_API_RC_SUCCESS                          (0x00)
+#define SDIOH_API_RC_FAIL	                      (0x01)
+#define SDIOH_API_SUCCESS(status) ((status) == 0)
+
+#define SDIOH_READ              0	/* Read request */
+#define SDIOH_WRITE             1	/* Write request */
+
+#define SDIOH_DATA_FIX          0	/* Fixed addressing */
+#define SDIOH_DATA_INC          1	/* Incremental addressing */
+
+#define SDIOH_CMD_TYPE_NORMAL   0       /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND   1       /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU  2       /* Cut-through command */
+
+#define SDIOH_DATA_PIO          0       /* PIO mode */
+#define SDIOH_DATA_DMA          1       /* DMA mode */
+
+/* Max number of glommed pkts */
+#ifdef CUSTOM_MAX_TXGLOM_SIZE
+#define SDPCM_MAXGLOM_SIZE  CUSTOM_MAX_TXGLOM_SIZE
+#else
+#define SDPCM_MAXGLOM_SIZE	40
+#endif /* CUSTOM_MAX_TXGLOM_SIZE */
+
+#define SDPCM_TXGLOM_CPY 0			/* SDIO 2.0 should use copy mode */
+#define SDPCM_TXGLOM_MDESC	1		/* SDIO 3.0 should use multi-desc mode */
+
+#ifdef CUSTOM_DEF_TXGLOM_SIZE
+#define SDPCM_DEFGLOM_SIZE  CUSTOM_DEF_TXGLOM_SIZE
+#else
+#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
+#endif /* CUSTOM_DEF_TXGLOM_SIZE */
+
+#if SDPCM_DEFGLOM_SIZE > SDPCM_MAXGLOM_SIZE
+#warning "SDPCM_DEFGLOM_SIZE cannot be higher than SDPCM_MAXGLOM_SIZE!!"
+#undef SDPCM_DEFGLOM_SIZE
+#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
+#endif
+
+typedef int SDIOH_API_RC;
+
+/* SDio Host structure */
+typedef struct sdioh_info sdioh_info_t;
+
+/* callback function, taking one arg */
+typedef void (*sdioh_cb_fn_t)(void *);
+
+extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
+extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
+
+/* query whether SD interrupt is enabled or not */
+extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
+
+/* enable or disable SD interrupt */
+extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
+
+#if defined(DHD_DEBUG)
+extern bool sdioh_interrupt_pending(sdioh_info_t *si);
+#endif
+
+/* read or write one byte using cmd52 */
+extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc,
+	uint addr, uint32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+	uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+	void *pkt);
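+
+/*
+ * Illustrative sketch (editorial example): a CMD52 single-byte read of a
+ * function-1 register; "si" and "regaddr" are hypothetical.
+ *
+ *	uint8 val;
+ *	SDIOH_API_RC rc = sdioh_request_byte(si, SDIOH_READ, 1, regaddr, &val);
+ *	if (!SDIOH_API_SUCCESS(rc))
+ *		... bail out, "val" is not valid ...
+ */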
+
+/* get cis data */
+extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length);
+
+extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+
+/* query number of io functions */
+extern uint sdioh_query_iofnum(sdioh_info_t *si);
+
+/* handle iovars */
+extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                          void *params, int plen, void *arg, int len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int sdioh_abort(sdioh_info_t *si, uint fnc);
+
+/* Start and Stop SDIO without re-enumerating the SD card. */
+extern int sdioh_start(sdioh_info_t *si, int stage);
+extern int sdioh_stop(sdioh_info_t *si);
+
+/* Wait system lock free */
+extern int sdioh_waitlockfree(sdioh_info_t *si);
+
+/* Reset and re-initialize the device */
+extern int sdioh_sdio_reset(sdioh_info_t *si);
+
+
+
+#if defined(BCMSDIOH_STD)
+	#define SDIOH_SLEEP_ENABLED
+#endif
+extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab);
+
+/* GPIO support */
+extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd);
+extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab);
+
+#endif /* _sdio_api_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
new file mode 100644
index 0000000000000000000000000000000000000000..aeb7dc944d5049a9e0d43df36df251e3e78c75e0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
@@ -0,0 +1,239 @@
+/*
+ * SDIO host client driver interface of Broadcom HNBU
+ *     export functions to client drivers
+ *     abstract OS and BUS specific details of SDIO
+ *
+ * $ Copyright Open License Broadcom Corporation $
+ *
+ * $Id: bcmsdh.h 450676 2014-01-22 22:45:13Z $
+ */
+
+/**
+ * @file bcmsdh.h
+ */
+
+#ifndef	_bcmsdh_h_
+#define	_bcmsdh_h_
+
+#define BCMSDH_ERROR_VAL	0x0001 /* Error */
+#define BCMSDH_INFO_VAL		0x0002 /* Info */
+extern const uint bcmsdh_msglevel;
+
+#define BCMSDH_ERROR(x)
+#define BCMSDH_INFO(x)
+
+#if defined(BCMSDIO) && (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || \
+	defined(BCMSDIOH_SPI))
+#define BCMSDH_ADAPTER
+#endif /* BCMSDIO && (BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI) */
+
+/* forward declarations */
+typedef struct bcmsdh_info bcmsdh_info_t;
+typedef void (*bcmsdh_cb_fn_t)(void *);
+
+#if 0 && (NDISVER >= 0x0630) && 1
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl,
+	void **regsva, uint irq, shared_info_t *sh);
+#else
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva);
+/**
+ * BCMSDH API context
+ */
+struct bcmsdh_info
+{
+	bool	init_success;	/* underlying driver successfully attached */
+	void	*sdioh;		/* handler for sdioh */
+	uint32  vendevid;	/* Target Vendor and Device ID on SD bus */
+	osl_t   *osh;
+	bool	regfail;	/* Save status of last reg_read/reg_write call */
+	uint32	sbwad;		/* Save backplane window address */
+	void	*os_cxt;        /* Pointer to per-OS private data */
+};
+#endif 
+
+/* Detach - freeup resources allocated in attach */
+extern int bcmsdh_detach(osl_t *osh, void *sdh);
+
+/* Query if SD device interrupts are enabled */
+extern bool bcmsdh_intr_query(void *sdh);
+
+/* Enable/disable SD interrupt */
+extern int bcmsdh_intr_enable(void *sdh);
+extern int bcmsdh_intr_disable(void *sdh);
+
+/* Register/deregister device interrupt handler. */
+extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+extern int bcmsdh_intr_dereg(void *sdh);
+/* Enable/disable SD card interrupt forward */
+extern void bcmsdh_intr_forward(void *sdh, bool pass);
+
+#if defined(DHD_DEBUG)
+/* Query pending interrupt status from the host controller */
+extern bool bcmsdh_intr_pending(void *sdh);
+#endif
+
+/* Register a callback to be called if and when bcmsdh detects
+ * device removal. No-op in the case of non-removable/hardwired devices.
+ */
+extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ *   fn:   function number
+ *   addr: unmodified SDIO-space address
+ *   data: data byte to write
+ *   err:  pointer to error code (or NULL)
+ */
+extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err);
+extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err);
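+
+/*
+ * Illustrative sketch (editorial example): reading one byte of SDIO config
+ * space with error propagation; "sdh" and "cccr_offset" are hypothetical.
+ *
+ *	int err = 0;
+ *	uint8 val = bcmsdh_cfg_read(sdh, 0, cccr_offset, &err);
+ *	if (err)
+ *		... the read failed, "val" is not valid ...
+ */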
+
+/* Read/Write 4bytes from/to cfg space */
+extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err);
+extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err);
+
+/* Read CIS content for specified function.
+ *   fn:     function whose CIS is being requested (0 is common CIS)
+ *   cis:    pointer to memory location to place results
+ *   length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ *   addr: backplane address (i.e. >= regsva from attach)
+ *   size: register width in bytes (2 or 4)
+ *   data: data for register write
+ */
+extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size);
+extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data);
+
+/* set sb address window */
+extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set);
+
+/* Indicate if last reg read/write failed */
+extern bool bcmsdh_regfail(void *sdh);
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ *   fn:       function number
+ *   addr:     backplane address (i.e. >= regsva from attach)
+ *   flags:    backplane width, address increment, sync/async
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ *   pkt:      pointer to packet associated with buf (if any)
+ *   complete: callback function for command completion (async only)
+ *   handle:   handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting);
+extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete_fn, void *handle);
+extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete_fn, void *handle);
+
+extern void bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len);
+extern void bcmsdh_glom_clear(void *sdh);
+extern uint bcmsdh_set_mode(void *sdh, uint mode);
+extern bool bcmsdh_glom_enabled(void);
+/* Flags bits */
+#define SDIO_REQ_4BYTE	0x1	/* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED	0x2	/* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC	0x4	/* Async request (vs. sync request) */
+#define SDIO_BYTE_MODE	0x8	/* Byte mode request(non-block mode) */
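+
+/*
+ * Illustrative sketch (editorial example, synchronous use only): pulling
+ * "len" bytes from the F2 data port into "buf" with fixed-address 4-byte
+ * accesses; "sdh", "addr", "buf" and "len" are hypothetical.
+ *
+ *	int ret = bcmsdh_recv_buf(sdh, addr, 2, SDIO_REQ_4BYTE | SDIO_REQ_FIXED,
+ *	                          buf, len, NULL, NULL, NULL);
+ *	if (ret)
+ *		... transfer failed ...
+ */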
+
+/* Pending (non-error) return code */
+#define BCME_PENDING	1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ *   rw:       read or write (0/1)
+ *   addr:     direct SDIO address
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int bcmsdh_abort(void *sdh, uint fn);
+
+/* Start SDIO Host Controller communication */
+extern int bcmsdh_start(void *sdh, int stage);
+
+/* Stop SDIO Host Controller communication */
+extern int bcmsdh_stop(void *sdh);
+
+/* Wait system lock free */
+extern int bcmsdh_waitlockfree(void *sdh);
+
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int bcmsdh_query_device(void *sdh);
+
+/* Returns the number of IO functions reported by the device */
+extern uint bcmsdh_query_iofnum(void *sdh);
+
+/* Miscellaneous knob tweaker. */
+extern int bcmsdh_iovar_op(void *sdh, const char *name,
+                           void *params, int plen, void *arg, int len, bool set);
+
+/* Reset and reinitialize the device */
+extern int bcmsdh_reset(bcmsdh_info_t *sdh);
+
+/* helper functions */
+
+/* callback functions */
+typedef struct {
+	/* probe the device */
+	void *(*probe)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+	                uint16 func, uint bustype, void * regsva, osl_t * osh,
+	                void * param);
+	/* remove the device */
+	void (*remove)(void *context);
+	/* can we suspend now */
+	int (*suspend)(void *context);
+	/* resume from suspend */
+	int (*resume)(void *context);
+} bcmsdh_driver_t;
+
+/* platform specific/high level functions */
+extern int bcmsdh_register(bcmsdh_driver_t *driver);
+extern void bcmsdh_unregister(void);
+extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device);
+extern void bcmsdh_device_remove(void * sdh);
+
+extern int bcmsdh_reg_sdio_notify(void* semaphore);
+extern void bcmsdh_unreg_sdio_notify(void);
+
+#if defined(OOB_INTR_ONLY)
+extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+	void* oob_irq_handler_context);
+extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh);
+extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable);
+#endif 
+extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh);
+extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh);
+extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh);
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh);
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh);
+
+/* Function to pass device-status bits to DHD. */
+extern uint32 bcmsdh_get_dstatus(void *sdh);
+
+/* Function to return current window addr */
+extern uint32 bcmsdh_cur_sbwad(void *sdh);
+
+/* Function to pass chipid and rev to lower layers for controlling pr's */
+extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
+
+
+extern int bcmsdh_sleep(void *sdh, bool enab);
+
+/* GPIO support */
+extern int bcmsdh_gpio_init(void *sd);
+extern bool bcmsdh_gpioin(void *sd, uint32 gpio);
+extern int bcmsdh_gpioouten(void *sd, uint32 gpio);
+extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab);
+
+#endif	/* _bcmsdh_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
new file mode 100644
index 0000000000000000000000000000000000000000..69d8e7a8cae4a9ffd59c96aa156e6d3dcd722c8d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
@@ -0,0 +1,117 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.h 408158 2013-06-17 22:15:35Z $
+ */
+
+#ifndef __BCMSDH_SDMMC_H__
+#define __BCMSDH_SDMMC_H__
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SD4		2
+#define CLIENT_INTR			0x100	/* Get rid of this! */
+#define SDIOH_SDMMC_MAX_SG_ENTRIES	32
+
+struct sdioh_info {
+	osl_t		*osh;			/* osh handler */
+	void		*bcmsdh;		/* upper layer handle */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	uint16		intmask;		/* Current active interrupts */
+
+	int		intrcount;		/* Client interrupts */
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool		use_client_ints;	/* If this is false, make sure to restore */
+	int		sd_mode;		/* SD1/SD4/SPI */
+	int		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint8		num_funcs;		/* Supported funcs on client */
+	uint32		com_cis_ptr;
+	uint32		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	bool		use_rxchain;
+	struct scatterlist	sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES];
+	struct sdio_func	fake_func0;
+	struct sdio_func	*func[SDIOD_MAX_IOFUNCS];
+
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdh_sdmmc.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+
+extern sdioh_info_t *sdioh_attach(osl_t *osh, struct sdio_func *func);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#endif /* __BCMSDH_SDMMC_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
new file mode 100644
index 0000000000000000000000000000000000000000..932686c619fe8853207e8f8689fb6c2f994afffa
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
@@ -0,0 +1,260 @@
+/*
+ * Broadcom SDIO/PCMCIA
+ * Software-specific definitions shared between device and host side
+ *
+ * $Copyright Open 2005 Broadcom Corporation$
+ *
+ * $Id: bcmsdpcm.h 472405 2014-04-23 23:46:55Z $
+ */
+
+#ifndef	_bcmsdpcm_h_
+#define	_bcmsdpcm_h_
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_SMB_NAK	I_SMB_SW0	/* To SB Mailbox Frame NAK */
+#define I_SMB_INT_ACK	I_SMB_SW1	/* To SB Mailbox Host Interrupt ACK */
+#define I_SMB_USE_OOB	I_SMB_SW2	/* To SB Mailbox Use OOB Wakeup */
+#define I_SMB_DEV_INT	I_SMB_SW3	/* To SB Mailbox Miscellaneous Interrupt */
+
+#define I_TOSBMAIL      (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT)
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK		(1 << 0)	/* To SB Mailbox Frame NAK */
+#define SMB_INT_ACK	(1 << 1)	/* To SB Mailbox Host Interrupt ACK */
+#define SMB_USE_OOB	(1 << 2)	/* To SB Mailbox Use OOB Wakeup */
+#define SMB_DEV_INT	(1 << 3)	/* To SB Mailbox Miscellaneous Interrupt */
+#define SMB_MASK	0x0000000f	/* To SB Mailbox Mask */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_MASK	0x00ff0000	/* host protocol version (sent with F2 enable) */
+#define SMB_DATA_VERSION_SHIFT	16		/* host protocol version (sent with F2 enable) */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE	I_HMB_SW0	/* To Host Mailbox Flow Control State */
+#define I_HMB_FC_CHANGE	I_HMB_SW1	/* To Host Mailbox Flow Control State Changed */
+#define I_HMB_FRAME_IND	I_HMB_SW2	/* To Host Mailbox Frame Indication */
+#define I_HMB_HOST_INT	I_HMB_SW3	/* To Host Mailbox Miscellaneous Interrupt */
+
+#define I_TOHOSTMAIL    (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT)
+
+/* tohostmailbox bits corresponding to intstatus bits */
+#define HMB_FC_ON	(1 << 0)	/* To Host Mailbox Flow Control State */
+#define HMB_FC_CHANGE	(1 << 1)	/* To Host Mailbox Flow Control State Changed */
+#define HMB_FRAME_IND	(1 << 2)	/* To Host Mailbox Frame Indication */
+#define HMB_HOST_INT	(1 << 3)	/* To Host Mailbox Miscellaneous Interrupt */
+#define HMB_MASK	0x0000000f	/* To Host Mailbox Mask */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED	0x01	/* we're ready to retransmit NAK'd frame to host */
+#define HMB_DATA_DEVREADY	0x02	/* we're ready to talk to host after enable */
+#define HMB_DATA_FC		0x04	/* per prio flowcontrol update flag to host */
+#define HMB_DATA_FWREADY	0x08	/* firmware is ready for protocol activity */
+#define HMB_DATA_FWHALT		0x10	/* firmware has halted operation */
+
+#define HMB_DATA_FCDATA_MASK	0xff000000	/* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT	24		/* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK	0x00ff0000	/* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT	16		/* device protocol version (with devready) */
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION	4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK		0x000000ff	/* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK		0x00000f00	/* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT		8		/* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK		0x0000f000	/* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT		12		/* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK		0x00ff0000	/* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT		16		/* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET		2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET		3		/* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) 		(((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK		0xff000000
+#define SDPCM_DOFFSET_SHIFT		24
+
+#define SDPCM_FCMASK_OFFSET		4		/* Flow control */
+#define SDPCM_FCMASK_VALUE(p)		(((uint8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
+#define SDPCM_WINDOW_OFFSET		5		/* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p)		(((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET		6		/* Version # */
+#define SDPCM_VERSION_VALUE(p)		(((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET		7		/* Spare */
+#define SDPCM_UNUSED_VALUE(p)		(((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN	8	/* SW header is 64 bits */
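+
+/* Illustrative sketch (not part of the original header): decoding the SW
+ * frame header of a received SDPCM frame with the accessor macros above.
+ * 'hdr' is assumed to point at the first byte of the SW header; printf is
+ * assumed to be available in this build.
+ */
+#if 0	/* example only */
+static INLINE void
+sdpcm_hdr_decode_example(uint8 *hdr)
+{
+	uint8  seq     = SDPCM_PACKET_SEQUENCE(hdr);	/* 8-bit frame sequence number */
+	uint8  chan    = SDPCM_PACKET_CHANNEL(hdr);	/* logical channel, 0..SDPCM_MAX_CHANNEL */
+	uint16 nextlen = SDPCM_NEXTLEN_VALUE(hdr);	/* next-frame length hint, in bytes */
+	uint8  doff    = SDPCM_DOFFSET_VALUE(hdr);	/* payload offset from start of frame */
+
+	printf("sdpcm: seq %u chan %u nextlen %u doff %u\n", seq, chan, nextlen, doff);
+}
+#endif	/* example only */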
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL	0	/* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL	1	/* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL	2	/* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL	3	/* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL	15	/* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL	15
+
+#define SDPCM_SEQUENCE_WRAP	256	/* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0	0x01
+#define SDPCM_FLAG_RESVD1	0x02
+#define SDPCM_FLAG_GSPI_TXENAB	0x04
+#define SDPCM_FLAG_GLOMDESC	0x08	/* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG	(SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p)	(((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN		4	/* Generally: Cmd(1), Ext(1), Len(2);
+						 * Semantics of Ext byte depend on command.
+						 * Len is current or requested frame length, not
+						 * including test header; sent little-endian.
+						 */
+#define SDPCM_TEST_PKT_CNT_FLD_LEN	4	/* Packet count field length */
+#define SDPCM_TEST_DISCARD		0x01	/* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ		0x02	/* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP		0x03	/* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST		0x04	/* Receiver to send a burst. Ext is a frame count
+						 * (Backward compatibility) Set frame count in a
+						 * 4 byte field adjacent to the HDR
+						 */
+#define SDPCM_TEST_SEND			0x05	/* Receiver sets send mode. Ext is boolean on/off
+						 * Set frame count in a 4 byte field adjacent to
+						 * the HDR
+						 */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id)	((uint8)(id + byteno))
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+	uint32 cmd52rd;		/* Cmd52RdCount, SDIO: cmd52 reads */
+	uint32 cmd52wr;		/* Cmd52WrCount, SDIO: cmd52 writes */
+	uint32 cmd53rd;		/* Cmd53RdCount, SDIO: cmd53 reads */
+	uint32 cmd53wr;		/* Cmd53WrCount, SDIO: cmd53 writes */
+	uint32 abort;		/* AbortCount, SDIO: aborts */
+	uint32 datacrcerror;	/* DataCrcErrorCount, SDIO: frames w/CRC error */
+	uint32 rdoutofsync;	/* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+	uint32 wroutofsync;	/* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+	uint32 writebusy;	/* WriteBusyCount, SDIO: device asserted "busy" */
+	uint32 readwait;	/* ReadWaitCount, SDIO: no data ready for a read cmd */
+	uint32 readterm;	/* ReadTermCount, SDIO: read frame termination cmds */
+	uint32 writeterm;	/* WriteTermCount, SDIO: write frames termination cmds */
+	uint32 rxdescuflo;	/* receive descriptor underflows */
+	uint32 rxfifooflo;	/* receive fifo overflows */
+	uint32 txfifouflo;	/* transmit fifo underflows */
+	uint32 runt;		/* runt (too short) frames recv'd from bus */
+	uint32 badlen;		/* frame's rxh len does not match its hw tag len */
+	uint32 badcksum;	/* frame's hw tag chksum doesn't agree with len value */
+	uint32 seqbreak;	/* break in sequence # space from one rx frame to the next */
+	uint32 rxfcrc;		/* frame rx header indicates crc error */
+	uint32 rxfwoos;		/* frame rx header indicates write out of sync */
+	uint32 rxfwft;		/* frame rx header indicates write frame termination */
+	uint32 rxfabort;	/* frame rx header indicates frame aborted */
+	uint32 woosint;		/* write out of sync interrupt */
+	uint32 roosint;		/* read out of sync interrupt */
+	uint32 rftermint;	/* read frame terminate interrupt */
+	uint32 wftermint;	/* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val)	((var) == (val))
+#define SDIODREV_GE(var, val)	((var) >= (val))
+#define SDIODREV_GT(var, val)	((var) > (val))
+#define SDIODREV_LT(var, val)	((var) < (val))
+#define SDIODREV_LE(var, val)	((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+	(SDIODREV_LT((h)->corerev, 1) ? \
+	 SDIODDMAREG32((h), (dir), (chnl)) : \
+	 SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODDMAREG(h, dir, chnl) : \
+	 PCMDDMAREG(h, dir, chnl))
+
+#define SDIODFIFOREG(h, corerev) \
+	(SDIODREV_LT((corerev), 1) ? \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo)))
+
+#define PCMDFIFOREG(h) \
+	((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo))
+
+#define SDPCMFIFOREG(h, coreid, corerev) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODFIFOREG(h, corerev) : \
+	 PCMDFIFOREG(h))
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION       0x0001
+#define SDPCM_SHARED_VERSION_MASK  0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT  0x0100
+#define SDPCM_SHARED_ASSERT        0x0200
+#define SDPCM_SHARED_TRAP          0x0400
+#define SDPCM_SHARED_IN_BRPT       0x0800
+#define SDPCM_SHARED_SET_BRPT      0x1000
+#define SDPCM_SHARED_PENDING_BRPT  0x2000
+
+typedef struct {
+	uint32	flags;
+	uint32  trap_addr;
+	uint32  assert_exp_addr;
+	uint32  assert_file_addr;
+	uint32  assert_line;
+	uint32	console_addr;		/* Address of hnd_cons_t */
+	uint32  msgtrace_addr;
+	uint32  fwid;
+} sdpcm_shared_t;
+
+extern sdpcm_shared_t sdpcm_shared;
+
+#endif	/* _bcmsdpcm_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c082ec1364f247ab9f8a51fa8f90434b477a2e6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
@@ -0,0 +1,117 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * $ Copyright Open Broadcom Corporation $
+ *
+ * $Id: bcmsdspi.h 294363 2011-11-06 23:02:20Z $
+ */
+#ifndef	_BCM_SD_SPI_H
+#define	_BCM_SD_SPI_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#undef ERROR
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint		bar0;			/* BAR0 for PCI Device */
+	osl_t 		*osh;			/* osh handler */
+	void		*controller;	/* Pointer to SPI Controller's private data struct */
+
+	uint		lockcount; 		/* nest count of sdspi_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint32		target_dev;		/* Target device ID */
+	uint32		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint 		irq;			/* Client irq */
+	uint32 		intrcount;		/* Client interrupts */
+	uint32 		local_intrcount;	/* Controller interrupts */
+	bool 		host_init_done;		/* Controller initted */
+	bool 		card_init_done;		/* Client SDIO interface initted */
+	bool 		polled_mode;		/* polling for command completion */
+
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+	bool		got_hcint;		/* Host Controller interrupt. */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int 		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32 		data_xfer_count;	/* Current register transfer size */
+	uint32		cmd53_wr_data;		/* Used to pass CMD53 write data */
+	uint32		card_response;		/* Used to pass back response status byte */
+	uint32		card_rsp_data;		/* Used to pass back response data word */
+	uint16 		card_rca;		/* Current Address */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;
+	ulong		dma_phys;
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
new file mode 100644
index 0000000000000000000000000000000000000000..c1562a6eb3c343dbc6422f2a70f39421d9cc3337
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
@@ -0,0 +1,264 @@
+/*
+ *  'Standard' SDIO HOST CONTROLLER driver
+ *
+ * $ Copyright Open Broadcom Corporation $
+ *
+ * $Id: bcmsdstd.h 455390 2014-02-13 22:14:56Z $
+ */
+#ifndef	_BCM_SD_STD_H
+#define	_BCM_SD_STD_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+#define SDIOH_MODE_SD1		1
+#define SDIOH_MODE_SD4		2
+
+#define MAX_SLOTS 6 	/* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ	0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK	1
+#define SDIOH_TYPE_BCM27XX	2
+#define SDIOH_TYPE_TI_PCIXX21	4	/* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822	5	/* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON	6	/* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#define BCMSDYIELD
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS   0x00001E00
+
+#define RETRIES_LARGE 100000
+#define sdstd_os_yield(sd)	do {} while (0)
+#define RETRIES_SMALL 100
+
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+#define USE_FIFO		0x8	/* Fifo vs non-fifo */
+
+#define CLIENT_INTR 		0x100	/* Get rid of this! */
+
+#define HC_INTR_RETUNING	0x1000
+
+
+#ifdef BCMSDIOH_TXGLOM
+/* Total glom pkt cannot exceed 64K;
+ * need one more slot for the glom padding packet
+ */
+#define SDIOH_MAXGLOM_SIZE	(40+1)
+
+typedef struct glom_buf {
+	uint32 count;				/* Total number of pkts queued */
+	void *dma_buf_arr[SDIOH_MAXGLOM_SIZE];	/* Frame address */
+	ulong dma_phys_arr[SDIOH_MAXGLOM_SIZE]; /* DMA_MAPed address of frames */
+	uint16 nbytes[SDIOH_MAXGLOM_SIZE];	/* Size of each frame */
+} glom_buf_t;
+#endif
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint32 curr_caps;                    	/* max current capabilities reg */
+
+	osl_t 		*osh;			/* osh handler */
+	volatile char 	*mem_space;		/* pci device memory va */
+	uint		lockcount; 		/* nest count of sdstd_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint		target_dev;		/* Target device ID */
+	uint16		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+	void		*bcmsdh;		/* handler to upper layer stack (bcmsdh) */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint		irq;			/* Client irq */
+	int		intrcount;		/* Client interrupts */
+	int		local_intrcount;	/* Controller interrupts */
+	bool		host_init_done;		/* Controller initted */
+	bool		card_init_done;		/* Client SDIO interface initted */
+	bool		polled_mode;		/* polling for command completion */
+
+	bool		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool		use_client_ints;	/* If this is false, make sure to restore */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int		sd_mode;		/* SD1/SD4/SPI */
+	int		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32		data_xfer_count;	/* Current transfer */
+	uint16		card_rca;		/* Current Address */
+	int8		sd_dma_mode;		/* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+	uint8		num_funcs;		/* Supported funcs on client */
+	uint32		com_cis_ptr;
+	uint32		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;		/* DMA Buffer virtual address */
+	ulong		dma_phys;		/* DMA Buffer physical address */
+	void		*adma2_dscr_buf;	/* ADMA2 Descriptor Buffer virtual address */
+	ulong		adma2_dscr_phys;	/* ADMA2 Descriptor Buffer physical address */
+
+	/* adjustments needed to make the dma align properly */
+	void		*dma_start_buf;
+	ulong		dma_start_phys;
+	uint		alloced_dma_size;
+	void		*adma2_dscr_start_buf;
+	ulong		adma2_dscr_start_phys;
+	uint		alloced_adma2_dscr_size;
+
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+	bool		got_hcint;		/* local interrupt flag */
+	uint16		last_intrstatus;	/* to cache intrstatus */
+	int 	host_UHSISupported;		/* whether UHSI is supported for HC. */
+	int 	card_UHSI_voltage_Supported; 	/* whether UHSI is supported for
+						 * Card in terms of Voltage [1.8 or 3.3].
+						 */
+	int	global_UHSI_Supp;	/* type of UHSI support in both host and card.
+					 * HOST_SDR_UNSUPP: capabilities not supported/matched
+					 * HOST_SDR_12_25: SDR12 and SDR25 supported
+					 * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd
+					 */
+	volatile int	sd3_dat_state; 		/* data transfer state used for retuning check */
+	volatile int	sd3_tun_state; 		/* tuning state used for retuning check */
+	bool	sd3_tuning_reqd; 	/* tuning requirement parameter */
+	uint32	caps3;			/* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
+#ifdef BCMSDIOH_TXGLOM
+	glom_buf_t glom_info;		/* pkt information used for glomming */
+	uint	txglom_mode;		/* Txglom mode: 0 - copy, 1 - multi-descriptor */
+#endif
+};
+
+#define DMA_MODE_NONE	0
+#define DMA_MODE_SDMA	1
+#define DMA_MODE_ADMA1	2
+#define DMA_MODE_ADMA2	3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO	-1
+
+#define USE_DMA(sd)		((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* States for Tuning and corr data */
+#define TUNING_IDLE 			0
+#define TUNING_START 			1
+#define TUNING_START_AFTER_DAT 	2
+#define TUNING_ONGOING 			3
+
+#define DATA_TRANSFER_IDLE 		0
+#define DATA_TRANSFER_ONGOING	1
+
+#define CHECK_TUNING_PRE_DATA	1
+#define CHECK_TUNING_POST_DATA	2
+
+
+#ifdef DHD_DEBUG
+#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01
+#define SD_DHD_ENABLE_PERIODIC_TUNING  0x00
+#endif
+
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, ulong addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, ulong addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+extern void sdstd_waitlockfree(sdioh_info_t *sd);
+
+/* OS-specific wrappers for safe concurrent register access */
+extern void sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags);
+extern void sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits);
+
+/* used by bcmsdstd_linux [implemented in sdstd] */
+extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd);
+extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd);
+extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd);
+extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param);
+extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd);
+extern int sdstd_3_get_tune_state(sdioh_info_t *sd);
+extern int sdstd_3_get_data_state(sdioh_info_t *sd);
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state);
+extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state);
+extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd);
+extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd);
+extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode);
+
+/* used by sdstd [implemented in bcmsdstd_linux/ndis] */
+extern void sdstd_3_start_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osinit_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osclean_tuning(sdioh_info_t *sd);
+
+extern void sdstd_enable_disable_periodic_timer(sdioh_info_t * sd, uint val);
+
+extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#endif /* _BCM_SD_STD_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmspi.h b/drivers/net/wireless/bcmdhd/include/bcmspi.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb0ee1503ee2963b221becedf2a459e3c7e1c032
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmspi.h
@@ -0,0 +1,22 @@
+/*
+ * Broadcom SPI Low-Level Hardware Driver API
+ *
+ * $ Copyright Open Broadcom Corporation $
+ *
+ * $Id: bcmspi.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef	_BCM_SPI_H
+#define	_BCM_SPI_H
+
+extern void spi_devintr_off(sdioh_info_t *sd);
+extern void spi_devintr_on(sdioh_info_t *sd);
+extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor);
+extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr);
+extern bool spi_hw_attach(sdioh_info_t *sd);
+extern bool spi_hw_detach(sdioh_info_t *sd);
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+extern void spi_spinbits(sdioh_info_t *sd);
+extern void spi_waitbits(sdioh_info_t *sd, bool yield);
+
+#endif /* _BCM_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/net/wireless/bcmdhd/include/bcmutils.h
new file mode 100644
index 0000000000000000000000000000000000000000..f0efe3d560a3b4270721641cda22d0fa8a3a5dab
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmutils.h
@@ -0,0 +1,1145 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmutils.h 490808 2014-07-12 00:33:13Z $
+ */
+
+#ifndef	_bcmutils_h_
+#define	_bcmutils_h_
+
+#define bcm_strcpy_s(dst, noOfElements, src)            strcpy((dst), (src))
+#define bcm_strncpy_s(dst, noOfElements, src, count)    strncpy((dst), (src), (count))
+#define bcm_strcat_s(dst, noOfElements, src)            strcat((dst), (src))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifdef PKTQ_LOG
+#include <wlioctl.h>
+#endif
+
+/* ctype replacement */
+#define _BCM_U	0x01	/* upper */
+#define _BCM_L	0x02	/* lower */
+#define _BCM_D	0x04	/* digit */
+#define _BCM_C	0x08	/* cntrl */
+#define _BCM_P	0x10	/* punct */
+#define _BCM_S	0x20	/* white space (space/lf/tab) */
+#define _BCM_X	0x40	/* hex digit */
+#define _BCM_SP	0x80	/* hard space (0x20) */
+
+extern const unsigned char bcm_ctype[];
+#define bcm_ismask(x)	(bcm_ctype[(int)(unsigned char)(x)])
+
+#define bcm_isalnum(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_isalpha(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0)
+#define bcm_iscntrl(c)	((bcm_ismask(c)&(_BCM_C)) != 0)
+#define bcm_isdigit(c)	((bcm_ismask(c)&(_BCM_D)) != 0)
+#define bcm_isgraph(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_islower(c)	((bcm_ismask(c)&(_BCM_L)) != 0)
+#define bcm_isprint(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0)
+#define bcm_ispunct(c)	((bcm_ismask(c)&(_BCM_P)) != 0)
+#define bcm_isspace(c)	((bcm_ismask(c)&(_BCM_S)) != 0)
+#define bcm_isupper(c)	((bcm_ismask(c)&(_BCM_U)) != 0)
+#define bcm_isxdigit(c)	((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0)
+#define bcm_tolower(c)	(bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#define bcm_toupper(c)	(bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+
+#define CIRCULAR_ARRAY_FULL(rd_idx, wr_idx, max) ((wr_idx + 1)%max == rd_idx)
+
+/* Buffer structure for collecting string-formatted data
+ * using the bcm_bprintf() API.
+ * Use bcm_binit() to initialize before use.
+ */
+
+struct bcmstrbuf {
+	char *buf;	/* pointer to current position in origbuf */
+	unsigned int size;	/* current (residual) size in bytes */
+	char *origbuf;	/* unmodified pointer to original buffer */
+	unsigned int origsize;	/* unmodified original buffer size in bytes */
+};
+
+/* ** driver-only section ** */
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <hnd_pktq.h>
+#include <hnd_pktpool.h>
+
+#define GPIO_PIN_NOTDEFINED 	0x20	/* Pin not defined */
+
+/*
+ * Spin at most 'us' microseconds while 'exp' is true.
+ * Caller should explicitly test 'exp' when this completes
+ * and take appropriate error action if 'exp' is still true.
+ */
+#ifndef SPINWAIT_POLL_PERIOD
+#define SPINWAIT_POLL_PERIOD	10
+#endif
+
+#define SPINWAIT(exp, us) { \
+	uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1); \
+	while ((exp) && (countdown >= SPINWAIT_POLL_PERIOD)) { \
+		OSL_DELAY(SPINWAIT_POLL_PERIOD); \
+		countdown -= SPINWAIT_POLL_PERIOD; \
+	} \
+}
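+
+/* Illustrative usage sketch (not part of the original header): poll a
+ * condition for up to 100us with SPINWAIT, then re-test it to detect a
+ * timeout.  The device_ready() predicate is hypothetical.
+ */
+#if 0	/* example only */
+	SPINWAIT(!device_ready(sd), 100);
+	if (!device_ready(sd))
+		err = BCME_NOTREADY;	/* 'exp' still true => the spinwait timed out */
+#endif	/* example only */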
+
+/* forward definition of ether_addr structure used by some function prototypes */
+
+struct ether_addr;
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+#define BCM_MAC_RXCPL_IDX_BITS			12
+#define BCM_MAX_RXCPL_IDX_INVALID		0
+#define BCM_MAC_RXCPL_IFIDX_BITS		3
+#define BCM_MAC_RXCPL_DOT11_BITS		1
+#define BCM_MAX_RXCPL_IFIDX			((1 << BCM_MAC_RXCPL_IFIDX_BITS) - 1)
+#define BCM_MAC_RXCPL_FLAG_BITS			4
+#define BCM_RXCPL_FLAGS_IN_TRANSIT		0x1
+#define BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST	0x2
+#define BCM_RXCPL_FLAGS_RXCPLVALID		0x4
+#define BCM_RXCPL_FLAGS_RSVD			0x8
+
+#define BCM_RXCPL_SET_IN_TRANSIT(a)	((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_IN_TRANSIT)
+#define BCM_RXCPL_CLR_IN_TRANSIT(a)	((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_IN_TRANSIT)
+#define BCM_RXCPL_IN_TRANSIT(a)		((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_IN_TRANSIT)
+
+#define BCM_RXCPL_SET_FRST_IN_FLUSH(a)	((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST)
+#define BCM_RXCPL_CLR_FRST_IN_FLUSH(a)	((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST)
+#define BCM_RXCPL_FRST_IN_FLUSH(a)	((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST)
+
+#define BCM_RXCPL_SET_VALID_INFO(a)	((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_RXCPLVALID)
+#define BCM_RXCPL_CLR_VALID_INFO(a)	((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_RXCPLVALID)
+#define BCM_RXCPL_VALID_INFO(a) (((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_RXCPLVALID) ? TRUE : FALSE)
+
+
+struct reorder_rxcpl_id_list {
+	uint16 head;
+	uint16 tail;
+	uint32 cnt;
+};
+
+typedef struct rxcpl_id {
+	uint32		idx : BCM_MAC_RXCPL_IDX_BITS;
+	uint32		next_idx : BCM_MAC_RXCPL_IDX_BITS;
+	uint32		ifidx : BCM_MAC_RXCPL_IFIDX_BITS;
+	uint32		dot11 : BCM_MAC_RXCPL_DOT11_BITS;
+	uint32		flags : BCM_MAC_RXCPL_FLAG_BITS;
+} rxcpl_idx_id_t;
+
+typedef struct rxcpl_data_len {
+	uint32		metadata_len_w : 6;
+	uint32		dataoffset: 10;
+	uint32		datalen : 16;
+} rxcpl_data_len_t;
+
+typedef struct rxcpl_info {
+	rxcpl_idx_id_t		rxcpl_id;
+	uint32		host_pktref;
+	union {
+		rxcpl_data_len_t	rxcpl_len;
+		struct rxcpl_info	*free_next;
+	};
+} rxcpl_info_t;
+
+/* rx completion list */
+typedef struct bcm_rxcplid_list {
+	uint32			max;
+	uint32			avail;
+	rxcpl_info_t		*rxcpl_ptr;
+	rxcpl_info_t		*free_list;
+} bcm_rxcplid_list_t;
+
+extern bool bcm_alloc_rxcplid_list(osl_t *osh, uint32 max);
+extern rxcpl_info_t * bcm_alloc_rxcplinfo(void);
+extern void bcm_free_rxcplinfo(rxcpl_info_t *ptr);
+extern void bcm_chain_rxcplid(uint16 first,  uint16 next);
+extern rxcpl_info_t *bcm_id2rxcplinfo(uint16 id);
+extern uint16 bcm_rxcplinfo2id(rxcpl_info_t *ptr);
+extern rxcpl_info_t *bcm_rxcpllist_end(rxcpl_info_t *ptr, uint32 *count);
+
+/* externs */
+/* packet */
+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+extern uint pktsegcnt(osl_t *osh, void *p);
+extern uint pktsegcnt_war(osl_t *osh, void *p);
+extern uint8 *pktdataoffset(osl_t *osh, void *p,  uint offset);
+extern void *pktoffset(osl_t *osh, void *p,  uint offset);
+
+/* Get priority from a packet and pass it back in scb (or equiv) */
+#define	PKTPRIO_VDSCP	0x100		/* DSCP prio found after VLAN tag */
+#define	PKTPRIO_VLAN	0x200		/* VLAN prio found */
+#define	PKTPRIO_UPD	0x400		/* DSCP used to update VLAN prio */
+#define	PKTPRIO_DSCP	0x800		/* DSCP prio found */
+
+/* DSCP type definitions (RFC4594) */
+/* AF1x: High-Throughput Data (RFC2597) */
+#define DSCP_AF11	0x0A
+#define DSCP_AF12	0x0C
+#define DSCP_AF13	0x0E
+/* AF2x: Low-Latency Data (RFC2597) */
+#define DSCP_AF21	0x12
+#define DSCP_AF22	0x14
+#define DSCP_AF23	0x16
+/* AF3x: Multimedia Streaming (RFC2597) */
+#define DSCP_AF31	0x1A
+#define DSCP_AF32	0x1C
+#define DSCP_AF33	0x1E
+/* EF: Telephony (RFC3246) */
+#define DSCP_EF		0x2E
+
+extern uint pktsetprio(void *pkt, bool update_vtag);
+extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp);
+
+/* string */
+extern int bcm_atoi(const char *s);
+extern ulong bcm_strtoul(const char *cp, char **endp, uint base);
+extern char *bcmstrstr(const char *haystack, const char *needle);
+extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len);
+extern char *bcmstrcat(char *dest, const char *src);
+extern char *bcmstrncat(char *dest, const char *src, uint size);
+extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
+char* bcmstrtok(char **string, const char *delimiters, char *tokdelim);
+int bcmstricmp(const char *s1, const char *s2);
+int bcmstrnicmp(const char* s1, const char* s2, int cnt);
+
+
+/* ethernet address */
+extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
+extern int bcm_ether_atoe(const char *p, struct ether_addr *ea);
+
+/* ip address */
+struct ipv4_addr;
+extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+extern char *bcm_ipv6_ntoa(void *ipv6, char *buf);
+extern int bcm_atoipv4(const char *p, struct ipv4_addr *ip);
+
+/* delay */
+extern void bcm_mdelay(uint ms);
+/* variable access */
+#define NVRAM_RECLAIM_CHECK(name)
+
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
+extern int getintvararray(char *vars, const char *name, int index);
+extern int getintvararraysize(char *vars, const char *name);
+extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
+#define bcm_perf_enable()
+#define bcmstats(fmt)
+#define	bcmlog(fmt, a1, a2)
+#define	bcmdumplog(buf, size)	*buf = '\0'
+#define	bcmdumplogent(buf, idx)	-1
+
+#define TSF_TICKS_PER_MS	1000
+#define TS_ENTER		0xdeadbeef	/* Timestamp profiling enter */
+#define TS_EXIT			0xbeefcafe	/* Timestamp profiling exit */
+
+#define bcmtslog(tstamp, fmt, a1, a2)
+#define bcmprinttslogs()
+#define bcmprinttstamp(us)
+#define bcmdumptslog(buf, size)
+
+extern char *bcm_nvram_vars(uint *length);
+extern int bcm_nvram_cache(void *sih);
+
+/* Support for sharing code across in-driver iovar implementations.
+ * The intent is that a driver use this structure to map iovar names
+ * to its (private) iovar identifiers, and the lookup function to
+ * find the entry.  Macros are provided to map ids and get/set actions
+ * into a single number space for a switch statement.
+ */
+
+/* iovar structure */
+typedef struct bcm_iovar {
+	const char *name;	/* name for lookup and display */
+	uint16 varid;		/* id for switch */
+	uint16 flags;		/* driver-specific flag bits */
+	uint16 type;		/* base type of argument */
+	uint16 minlen;		/* min length for buffer vars */
+} bcm_iovar_t;
+
+/* varid definitions are per-driver, may use these get/set bits */
+
+/* IOVar action bits for id mapping */
+#define IOV_GET 0 /* Get an iovar */
+#define IOV_SET 1 /* Set an iovar */
+
+/* Varid to actionid mapping */
+#define IOV_GVAL(id)		((id) * 2)
+#define IOV_SVAL(id)		((id) * 2 + IOV_SET)
+#define IOV_ISSET(actionid)	((actionid & IOV_SET) == IOV_SET)
+#define IOV_ID(actionid)	(actionid >> 1)
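+
+/* Illustrative sketch (not part of the original header): a driver-private
+ * iovar table plus the usual switch over actionids built with IOV_GVAL()/
+ * IOV_SVAL().  IOV_EXAMPLE and do_example_iovar() are hypothetical names.
+ */
+#if 0	/* example only */
+enum { IOV_EXAMPLE = 1 };
+
+static const bcm_iovar_t example_iovars[] = {
+	{"example", IOV_EXAMPLE, 0, IOVT_UINT32, 0},
+	{NULL, 0, 0, 0, 0}
+};
+
+static int
+do_example_iovar(uint32 actionid, int32 *val)
+{
+	switch (actionid) {
+	case IOV_GVAL(IOV_EXAMPLE):		/* "get" action for IOV_EXAMPLE */
+		*val = 0;
+		return BCME_OK;
+	case IOV_SVAL(IOV_EXAMPLE):		/* "set" action for IOV_EXAMPLE */
+		return BCME_OK;
+	default:
+		return BCME_UNSUPPORTED;
+	}
+}
+#endif	/* example only */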
+
+/* flags are per-driver based on driver attributes */
+
+extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
+extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set);
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
+#endif 
+#endif	/* BCMDRIVER */
+
+/* Base type definitions */
+#define IOVT_VOID	0	/* no value (implicitly set only) */
+#define IOVT_BOOL	1	/* any value ok (zero/nonzero) */
+#define IOVT_INT8	2	/* integer values are range-checked */
+#define IOVT_UINT8	3	/* unsigned int 8 bits */
+#define IOVT_INT16	4	/* int 16 bits */
+#define IOVT_UINT16	5	/* unsigned int 16 bits */
+#define IOVT_INT32	6	/* int 32 bits */
+#define IOVT_UINT32	7	/* unsigned int 32 bits */
+#define IOVT_BUFFER	8	/* buffer is size-checked as per minlen */
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+/* Initializer for IOV type strings */
+#define BCM_IOV_TYPE_INIT { \
+	"void", \
+	"bool", \
+	"int8", \
+	"uint8", \
+	"int16", \
+	"uint16", \
+	"int32", \
+	"uint32", \
+	"buffer", \
+	"" }
+
+#define BCM_IOVT_IS_INT(type) (\
+	(type == IOVT_BOOL) || \
+	(type == IOVT_INT8) || \
+	(type == IOVT_UINT8) || \
+	(type == IOVT_INT16) || \
+	(type == IOVT_UINT16) || \
+	(type == IOVT_INT32) || \
+	(type == IOVT_UINT32))
+
+/* ** driver/apps-shared section ** */
+
+#define BCME_STRLEN 		64	/* Max string length for BCM errors */
+#define VALID_BCMERROR(e)  ((e <= 0) && (e >= BCME_LAST))
+
+
+/*
+ * Error codes may be added, but the defined ones shouldn't be changed or deleted.
+ * These error codes are exposed to user code.
+ * Whenever a new error code is added to this list,
+ * please update the error string table with the related error string and
+ * update the osl files with the OS-specific error code map.
+ */
+
+#define BCME_OK				0	/* Success */
+#define BCME_ERROR			-1	/* Error generic */
+#define BCME_BADARG			-2	/* Bad Argument */
+#define BCME_BADOPTION			-3	/* Bad option */
+#define BCME_NOTUP			-4	/* Not up */
+#define BCME_NOTDOWN			-5	/* Not down */
+#define BCME_NOTAP			-6	/* Not AP */
+#define BCME_NOTSTA			-7	/* Not STA  */
+#define BCME_BADKEYIDX			-8	/* BAD Key Index */
+#define BCME_RADIOOFF 			-9	/* Radio Off */
+#define BCME_NOTBANDLOCKED		-10	/* Not  band locked */
+#define BCME_NOCLK			-11	/* No Clock */
+#define BCME_BADRATESET			-12	/* BAD Rate valueset */
+#define BCME_BADBAND			-13	/* BAD Band */
+#define BCME_BUFTOOSHORT		-14	/* Buffer too short */
+#define BCME_BUFTOOLONG			-15	/* Buffer too long */
+#define BCME_BUSY			-16	/* Busy */
+#define BCME_NOTASSOCIATED		-17	/* Not Associated */
+#define BCME_BADSSIDLEN			-18	/* Bad SSID len */
+#define BCME_OUTOFRANGECHAN		-19	/* Out of Range Channel */
+#define BCME_BADCHAN			-20	/* Bad Channel */
+#define BCME_BADADDR			-21	/* Bad Address */
+#define BCME_NORESOURCE			-22	/* Not Enough Resources */
+#define BCME_UNSUPPORTED		-23	/* Unsupported */
+#define BCME_BADLEN			-24	/* Bad length */
+#define BCME_NOTREADY			-25	/* Not Ready */
+#define BCME_EPERM			-26	/* Not Permitted */
+#define BCME_NOMEM			-27	/* No Memory */
+#define BCME_ASSOCIATED			-28	/* Associated */
+#define BCME_RANGE			-29	/* Not In Range */
+#define BCME_NOTFOUND			-30	/* Not Found */
+#define BCME_WME_NOT_ENABLED		-31	/* WME Not Enabled */
+#define BCME_TSPEC_NOTFOUND		-32	/* TSPEC Not Found */
+#define BCME_ACM_NOTSUPPORTED		-33	/* ACM Not Supported */
+#define BCME_NOT_WME_ASSOCIATION	-34	/* Not WME Association */
+#define BCME_SDIO_ERROR			-35	/* SDIO Bus Error */
+#define BCME_DONGLE_DOWN		-36	/* Dongle Not Accessible */
+#define BCME_VERSION			-37 	/* Incorrect version */
+#define BCME_TXFAIL			-38 	/* TX failure */
+#define BCME_RXFAIL			-39	/* RX failure */
+#define BCME_NODEVICE			-40 	/* Device not present */
+#define BCME_NMODE_DISABLED		-41 	/* NMODE disabled */
+#define BCME_NONRESIDENT		-42 /* access to nonresident overlay */
+#define BCME_SCANREJECT			-43 	/* reject scan request */
+#define BCME_USAGE_ERROR                -44     /* WLCMD usage error */
+#define BCME_IOCTL_ERROR                -45     /* WLCMD ioctl error */
+#define BCME_SERIAL_PORT_ERR            -46     /* RWL serial port error */
+#define BCME_DISABLED			-47     /* Disabled in this build */
+#define BCME_DECERR				-48		/* Decrypt error */
+#define BCME_ENCERR				-49		/* Encrypt error */
+#define BCME_MICERR				-50		/* Integrity/MIC error */
+#define BCME_REPLAY				-51		/* Replay */
+#define BCME_IE_NOTFOUND		-52		/* IE not found */
+#define BCME_LAST			BCME_IE_NOTFOUND
+
+#define BCME_NOTENABLED BCME_DISABLED
+
+/* These are collection of BCME Error strings */
+#define BCMERRSTRINGTABLE {		\
+	"OK",				\
+	"Undefined error",		\
+	"Bad Argument",			\
+	"Bad Option",			\
+	"Not up",			\
+	"Not down",			\
+	"Not AP",			\
+	"Not STA",			\
+	"Bad Key Index",		\
+	"Radio Off",			\
+	"Not band locked",		\
+	"No clock",			\
+	"Bad Rate valueset",		\
+	"Bad Band",			\
+	"Buffer too short",		\
+	"Buffer too long",		\
+	"Busy",				\
+	"Not Associated",		\
+	"Bad SSID len",			\
+	"Out of Range Channel",		\
+	"Bad Channel",			\
+	"Bad Address",			\
+	"Not Enough Resources",		\
+	"Unsupported",			\
+	"Bad length",			\
+	"Not Ready",			\
+	"Not Permitted",		\
+	"No Memory",			\
+	"Associated",			\
+	"Not In Range",			\
+	"Not Found",			\
+	"WME Not Enabled",		\
+	"TSPEC Not Found",		\
+	"ACM Not Supported",		\
+	"Not WME Association",		\
+	"SDIO Bus Error",		\
+	"Dongle Not Accessible",	\
+	"Incorrect version",		\
+	"TX Failure",			\
+	"RX Failure",			\
+	"Device Not Present",		\
+	"NMODE Disabled",		\
+	"Nonresident overlay access", \
+	"Scan Rejected",		\
+	"WLCMD usage error",		\
+	"WLCMD ioctl error",		\
+	"RWL serial port error", 	\
+	"Disabled",			\
+	"Decrypt error", \
+	"Encrypt error", \
+	"MIC error", \
+	"Replay", \
+	"IE not found", \
+}
+
+#ifndef ABS
+#define	ABS(a)			(((a) < 0) ? -(a) : (a))
+#endif /* ABS */
+
+#ifndef MIN
+#define	MIN(a, b)		(((a) < (b)) ? (a) : (b))
+#endif /* MIN */
+
+#ifndef MAX
+#define	MAX(a, b)		(((a) > (b)) ? (a) : (b))
+#endif /* MAX */
+
+/* limit to [min, max] */
+#ifndef LIMIT_TO_RANGE
+#define LIMIT_TO_RANGE(x, min, max) \
+	((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
+#endif /* LIMIT_TO_RANGE */
+
+/* limit to  max */
+#ifndef LIMIT_TO_MAX
+#define LIMIT_TO_MAX(x, max) \
+	(((x) > (max) ? (max) : (x)))
+#endif /* LIMIT_TO_MAX */
+
+/* limit to min */
+#ifndef LIMIT_TO_MIN
+#define LIMIT_TO_MIN(x, min) \
+	(((x) < (min) ? (min) : (x)))
+#endif /* LIMIT_TO_MIN */
+
+#define DELTA(curr, prev) ((curr) > (prev) ? ((curr) - (prev)) : \
+	(0xffffffff - (prev) + (curr) + 1))
+#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))
+#define ROUNDUP(x, y)		((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDDN(p, align)	((p) & ~((align) - 1))
+#define	ISALIGNED(a, x)		(((uintptr)(a) & ((x) - 1)) == 0)
+#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
+	                                         & ~((boundary) - 1))
+#define ALIGN_SIZE(size, boundary) (((size) + (boundary) - 1) \
+	                                         & ~((boundary) - 1))
+#define	ISPOWEROF2(x)		((((x) - 1) & (x)) == 0)
+#define VALID_MASK(mask)	!((mask) & ((mask) + 1))
+
+#ifndef OFFSETOF
+#ifdef __ARMCC_VERSION
+/*
+ * The ARM RVCT compiler complains when using OFFSETOF where a constant
+ * expression is expected, such as an initializer for a static object.
+ * offsetof from the runtime library doesn't have that problem.
+ */
+#include <stddef.h>
+#define	OFFSETOF(type, member)	offsetof(type, member)
+#else
+#  if ((__GNUC__ >= 4) && (__GNUC_MINOR__ >= 8))
+/* GCC 4.8+ complains when using our OFFSETOF macro in array length declarations. */
+#    define	OFFSETOF(type, member)	__builtin_offsetof(type, member)
+#  else
+#    define	OFFSETOF(type, member)	((uint)(uintptr)&((type *)0)->member)
+#  endif /* GCC 4.8 or newer */
+#endif /* __ARMCC_VERSION */
+#endif /* OFFSETOF */
+
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a)		(sizeof(a) / sizeof(a[0]))
+#endif
+
+#ifndef ARRAYLAST /* returns pointer to last array element */
+#define ARRAYLAST(a)		(&a[ARRAYSIZE(a)-1])
+#endif
+
+/* Reference a function; used to prevent a static function from being optimized out */
+extern void *_bcmutils_dummy_fn;
+#define REFERENCE_FUNCTION(f)	(_bcmutils_dummy_fn = (void *)(f))
+
+/* bit map related macros */
+#ifndef setbit
+#ifndef NBBY		/* the BSD family defines NBBY */
+#define	NBBY	8	/* 8 bits per byte */
+#endif /* #ifndef NBBY */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+extern void setbit(void *array, uint bit);
+extern void clrbit(void *array, uint bit);
+extern bool isset(const void *array, uint bit);
+extern bool isclr(const void *array, uint bit);
+#else
+#define	setbit(a, i)	(((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY))
+#define	clrbit(a, i)	(((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
+#define	isset(a, i)	(((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY)))
+#define	isclr(a, i)	((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)
+#endif
+#endif /* setbit */
+extern void set_bitrange(void *array, uint start, uint end, uint maxbit);
+
+#define	isbitset(a, i)	(((a) & (1 << (i))) != 0)
+
+#define	NBITS(type)	(sizeof(type) * 8)
+#define NBITVAL(nbits)	(1 << (nbits))
+#define MAXBITVAL(nbits)	((1 << (nbits)) - 1)
+#define	NBITMASK(nbits)	MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte)	MAXBITVAL((nbyte) * 8)
+
+extern void bcm_bitprint32(const uint32 u32);
+
+/*
+ * ----------------------------------------------------------------------------
+ * Multiword map of 2bits, nibbles
+ * setbit2 setbit4 (void *ptr, uint32 ix, uint32 val)
+ * getbit2 getbit4 (void *ptr, uint32 ix)
+ * ----------------------------------------------------------------------------
+ */
+
+#define DECLARE_MAP_API(NB, RSH, LSH, OFF, MSK)                     \
+static INLINE void setbit##NB(void *ptr, uint32 ix, uint32 val)     \
+{                                                                   \
+	uint32 *addr = (uint32 *)ptr;                                   \
+	uint32 *a = addr + (ix >> RSH); /* (ix / 2^RSH) */              \
+	uint32 pos = (ix & OFF) << LSH; /* (ix % 2^RSH) * 2^LSH */      \
+	uint32 mask = (MSK << pos);                                     \
+	uint32 tmp = *a & ~mask;                                        \
+	*a = tmp | (val << pos);                                        \
+}                                                                   \
+static INLINE uint32 getbit##NB(void *ptr, uint32 ix)               \
+{                                                                   \
+	uint32 *addr = (uint32 *)ptr;                                   \
+	uint32 *a = addr + (ix >> RSH);                                 \
+	uint32 pos = (ix & OFF) << LSH;                                 \
+	return ((*a >> pos) & MSK);                                     \
+}
+
+DECLARE_MAP_API(2, 4, 1, 15U, 0x0003) /* setbit2() and getbit2() */
+DECLARE_MAP_API(4, 3, 2, 7U, 0x000F) /* setbit4() and getbit4() */
+DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF) /* setbit8() and getbit8() */
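+
+/* Illustrative usage sketch (not part of the original header): a packed
+ * nibble map driven through the setbit4()/getbit4() accessors generated
+ * above.  Each uint32 word holds eight 4-bit entries; bzero()/ASSERT() are
+ * assumed to come from the osl.
+ */
+#if 0	/* example only */
+	uint32 nibble_map[4];			/* room for 32 four-bit entries */
+
+	bzero(nibble_map, sizeof(nibble_map));
+	setbit4(nibble_map, 5, 0xA);		/* store value 0xA at index 5 */
+	ASSERT(getbit4(nibble_map, 5) == 0xA);
+#endif	/* example only */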
+
+/* basic mux operation - can be optimized on several architectures */
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+/* modulo inc/dec - assumes x E [0, bound - 1] */
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+/* modulo inc/dec, bound = 2^k */
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+/* modulo add/sub - assumes x, y E [0, bound - 1] */
+#define MODADD(x, y, bound) \
+    MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+    MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+/* modulo add/sub, bound = 2^k */
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
+
+/* crc defines */
+#define CRC8_INIT_VALUE  0xff		/* Initial CRC8 checksum value */
+#define CRC8_GOOD_VALUE  0x9f		/* Good final CRC8 checksum value */
+#define CRC16_INIT_VALUE 0xffff		/* Initial CRC16 checksum value */
+#define CRC16_GOOD_VALUE 0xf0b8		/* Good final CRC16 checksum value */
+#define CRC32_INIT_VALUE 0xffffffff	/* Initial CRC32 checksum value */
+#define CRC32_GOOD_VALUE 0xdebb20e3	/* Good final CRC32 checksum value */
+
+/* use for direct output of MAC address in printf etc */
+#define MACF				"%02x:%02x:%02x:%02x:%02x:%02x"
+#define ETHERP_TO_MACF(ea)	((struct ether_addr *) (ea))->octet[0], \
+							((struct ether_addr *) (ea))->octet[1], \
+							((struct ether_addr *) (ea))->octet[2], \
+							((struct ether_addr *) (ea))->octet[3], \
+							((struct ether_addr *) (ea))->octet[4], \
+							((struct ether_addr *) (ea))->octet[5]
+
+#define ETHER_TO_MACF(ea) 	(ea).octet[0], \
+							(ea).octet[1], \
+							(ea).octet[2], \
+							(ea).octet[3], \
+							(ea).octet[4], \
+							(ea).octet[5]
+#if !defined(SIMPLE_MAC_PRINT)
+#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC2STRDBG(ea) (ea)[0], (ea)[1], (ea)[2], (ea)[3], (ea)[4], (ea)[5]
+#else
+#define MACDBG				"%02x:%02x:%02x"
+#define MAC2STRDBG(ea) (ea)[0], (ea)[4], (ea)[5]
+#endif /* SIMPLE_MAC_PRINT */
+
+/* bcm_format_flags() bit description structure */
+typedef struct bcm_bit_desc {
+	uint32	bit;
+	const char* name;
+} bcm_bit_desc_t;
+
+/* bcm_format_field */
+typedef struct bcm_bit_desc_ex {
+	uint32 mask;
+	const bcm_bit_desc_t *bitfield;
+} bcm_bit_desc_ex_t;
+
+/* buffer length for ethernet address from bcm_ether_ntoa() */
+#define ETHER_ADDR_STR_LEN	18	/* 18-byte buffer length for an Ethernet address string */
+
+/* crypto utility function */
+/* 128-bit xor: *dst = *src1 xor *src2. dst, src1 and src2 may have any alignment */
+static INLINE void
+xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
+{
+	if (
+#ifdef __i386__
+	    1 ||
+#endif
+	    (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
+		/* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */
+		/* x86 supports unaligned.  This version runs 6x-9x faster on x86. */
+		((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0];
+		((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1];
+		((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2];
+		((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3];
+	} else {
+		/* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */
+		int k;
+		for (k = 0; k < 16; k++)
+			dst[k] = src1[k] ^ src2[k];
+	}
+}
+
+/* externs */
+/* crc */
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
+
+/* format/print */
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+	defined(WLMSG_ASSOC)
+/* print out the value a field has: fields may have 1-32 bits and may hold any value */
+extern int bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 field, char* buf, int len);
+/* print out which bits in flags are set */
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
+#endif
+
+extern int bcm_format_hex(char *str, const void *bytes, int len);
+
+extern const char *bcm_crypto_algo_name(uint algo);
+extern char *bcm_chipname(uint chipid, char *buf, uint len);
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+extern void prhex(const char *msg, uchar *buf, uint len);
+
+/* IE parsing */
+
+/* tag_ID/length/value_buffer tuple */
+typedef struct bcm_tlv {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} bcm_tlv_t;
+
+/* bcm tlv w/ 16 bit id/len */
+typedef struct bcm_xtlv {
+	uint16	id;
+	uint16	len;
+	uint8	data[1];
+} bcm_xtlv_t;
+
+/* descriptor of xtlv data src or dst  */
+typedef struct {
+	uint16	type;
+	uint16	len;
+	void	*ptr; /* ptr to memory location */
+} xtlv_desc_t;
+
+/*  set a var from xtlv buffer */
+typedef int
+(bcm_set_var_from_tlv_cbfn_t)(void *ctx, void **tlv_buf, uint16 type, uint16 len);
+
+struct bcm_tlvbuf {
+    uint16 size;
+    uint8 *head; /* point to head of buffer */
+    uint8 *buf; /* current position of buffer */
+	/* followed by the allocated buffer */
+};
+
+#define BCM_TLV_MAX_DATA_SIZE (255)
+#define BCM_XTLV_MAX_DATA_SIZE (65535)
+#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data))
+
+#define BCM_XTLV_HDR_SIZE (OFFSETOF(bcm_xtlv_t, data))
+#define BCM_XTLV_LEN(elt) ltoh16_ua(&(elt->len))
+#define BCM_XTLV_ID(elt) ltoh16_ua(&(elt->id))
+#define BCM_XTLV_SIZE(elt) (BCM_XTLV_HDR_SIZE + BCM_XTLV_LEN(elt))
+
+/* Check that bcm_tlv_t fits into the given buflen */
+#define bcm_valid_tlv(elt, buflen) (\
+	 ((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \
+	 ((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE + (elt)->len)))
+
+#define bcm_valid_xtlv(elt, buflen) (\
+	 ((int)(buflen) >= (int)BCM_XTLV_HDR_SIZE) && \
+	 ((int)(buflen) >= (int)BCM_XTLV_SIZE(elt)))
+
+extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen);
+
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
+
+extern bcm_tlv_t *bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type,
+	int type_len);
+
+extern uint8 *bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst);
+extern uint8 *bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst,
+	int dst_maxlen);
+
+extern uint8 *bcm_copy_tlv(const void *src, uint8 *dst);
+extern uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen);
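+
+/* Illustrative sketch (not part of the original header): walking a buffer of
+ * id/len/value tuples with bcm_valid_tlv() and bcm_next_tlv().  printf is
+ * assumed to be available in this build.
+ */
+#if 0	/* example only */
+static INLINE void
+tlv_walk_example(void *buf, int buflen)
+{
+	bcm_tlv_t *elt = (bcm_tlv_t *)buf;
+
+	while (elt != NULL && bcm_valid_tlv(elt, buflen)) {
+		printf("tlv id %u len %u\n", elt->id, elt->len);
+		elt = bcm_next_tlv(elt, &buflen);	/* advance elt, shrink buflen */
+	}
+}
+#endif	/* example only */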
+
+/* xtlv */
+extern bcm_xtlv_t *bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen);
+extern struct bcm_tlvbuf *bcm_xtlv_buf_alloc(void *osh, uint16 len);
+extern void bcm_xtlv_buf_free(void *osh, struct bcm_tlvbuf *tbuf);
+extern uint16 bcm_xtlv_buf_len(struct bcm_tlvbuf *tbuf);
+extern uint16 bcm_xtlv_buf_rlen(struct bcm_tlvbuf *tbuf);
+extern uint8 *bcm_xtlv_buf(struct bcm_tlvbuf *tbuf);
+extern uint8 *bcm_xtlv_head(struct bcm_tlvbuf *tbuf);
+extern int bcm_xtlv_put_data(struct bcm_tlvbuf *tbuf, uint16 type, const void *data, uint16 dlen);
+extern int bcm_xtlv_put_8(struct bcm_tlvbuf *tbuf, uint16 type, const int8 data);
+extern int bcm_xtlv_put_16(struct bcm_tlvbuf *tbuf, uint16 type, const int16 data);
+extern int bcm_xtlv_put_32(struct bcm_tlvbuf *tbuf, uint16 type, const int32 data);
+extern int bcm_unpack_xtlv_entry(void **tlv_buf, uint16 xpct_type, uint16 xpct_len, void *dst);
+extern int bcm_skip_xtlv(void **tlv_buf);
+extern int bcm_pack_xtlv_entry(void **tlv_buf, uint16 *buflen, uint16 type, uint16 len, void *src);
+extern int bcm_unpack_xtlv_buf(void *ctx,
+	void *tlv_buf, uint16 buflen, bcm_set_var_from_tlv_cbfn_t *cbfn);
+extern int
+bcm_unpack_xtlv_buf_to_mem(void *tlv_buf, int *buflen, xtlv_desc_t *items);
+extern int
+bcm_pack_xtlv_buf_from_mem(void **tlv_buf, uint16 *buflen, xtlv_desc_t *items);
+extern int
+bcm_pack_xtlv_entry_from_hex_string(void **tlv_buf, uint16 *buflen, uint16 type, char *hex);
+
+/* bcmerror */
+extern const char *bcmerrorstr(int bcmerror);
+
+/* multi-bool data type: set of bools, mbool is true if any is set */
+typedef uint32 mbool;
+#define mboolset(mb, bit)		((mb) |= (bit))		/* set one bool */
+#define mboolclr(mb, bit)		((mb) &= ~(bit))	/* clear one bool */
+#define mboolisset(mb, bit)		(((mb) & (bit)) != 0)	/* TRUE if one bool is set */
+#define	mboolmaskset(mb, mask, val)	((mb) = (((mb) & ~(mask)) | (val)))
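+
+/* Illustrative usage sketch (not part of the original header): tracking two
+ * independent request sources in a single mbool; the 0x1/0x2 flag values and
+ * 'still_pending' are hypothetical.
+ */
+#if 0	/* example only */
+	mbool wake_req = 0;
+	bool still_pending;
+
+	mboolset(wake_req, 0x1);			/* request from source A */
+	mboolset(wake_req, 0x2);			/* request from source B */
+	mboolclr(wake_req, 0x1);			/* source A satisfied */
+	still_pending = mboolisset(wake_req, 0x2);	/* TRUE: source B outstanding */
+#endif	/* example only */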
+
+/* generic datastruct to help dump routines */
+struct fielddesc {
+	const char *nameandfmt;
+	uint32 	offset;
+	uint32 	len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len);
+
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(const char *name, const uchar *cdata, int len);
+
+typedef  uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+                          char *buf, uint32 bufsize);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
+
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
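+
+/* Illustrative usage sketch (not part of the original header): collecting
+ * formatted output into a fixed-size buffer with bcm_binit()/bcm_bprintf(),
+ * then emitting it with printf (assumed available in this build).
+ */
+#if 0	/* example only */
+	char dumpbuf[128];
+	struct bcmstrbuf b;
+
+	bcm_binit(&b, dumpbuf, sizeof(dumpbuf));
+	bcm_bprintf(&b, "errors %d resets %d\n", 0, 0);
+	printf("%s", dumpbuf);
+#endif	/* example only */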
+
+/* power conversion */
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
+
+unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+
+/* calculate a * b + c */
+extern void bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c);
+/* calculate a / b */
+extern void bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+
+/* Public domain bit twiddling hacks/utilities: Sean Eron Anderson */
+
+/* Table driven count set bits. */
+static const uint8 /* Table only for use by bcm_cntsetbits */
+_CSBTBL[256] =
+{
+#	define B2(n)    n,     n + 1,     n + 1,     n + 2
+#	define B4(n) B2(n), B2(n + 1), B2(n + 1), B2(n + 2)
+#	define B6(n) B4(n), B4(n + 1), B4(n + 1), B4(n + 2)
+	B6(0), B6(0 + 1), B6(0 + 1), B6(0 + 2)
+};
+
+static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */
+bcm_cntsetbits(const uint32 u32)
+{
+	/* function local scope declaration of const _CSBTBL[] */
+	const uint8 * p = (const uint8 *)&u32;
+	return (_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]);
+}
+
+
+static INLINE int /* C equivalent count of leading 0's in a u32 */
+C_bcm_count_leading_zeros(uint32 u32)
+{
+	int shifts = 0;
+	while (u32) {
+		shifts++; u32 >>= 1;
+	}
+	return (32U - shifts);
+}
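+
+/*
+ * Example values (illustrative):
+ *     bcm_cntsetbits(0x000000F1) == 5
+ *     C_bcm_count_leading_zeros(0x00000001) == 31
+ *     C_bcm_count_leading_zeros(0x80000000) == 0
+ */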
+
+#ifdef BCMDRIVER
+/*
+ * Assembly instructions: Count Leading Zeros
+ * "clz"	: MIPS, ARM
+ * "cntlzw"	: PowerPC
+ * "BSF"	: x86
+ * "lzcnt"	: AMD, SPARC
+ */
+
+#if defined(__arm__)
+
+#if defined(__ARM_ARCH_7M__) /* Cortex M3 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7M__ */
+
+#if defined(__ARM_ARCH_7R__) /* Cortex R4 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7R__ */
+
+#endif /* __arm__ */
+
+static INLINE int
+bcm_count_leading_zeros(uint32 u32)
+{
+#if defined(__USE_ASM_CLZ__)
+	int zeros;
+	__asm__ volatile("clz    %0, %1 \n" : "=r" (zeros) : "r"  (u32));
+	return zeros;
+#else	/* C equivalent */
+	return C_bcm_count_leading_zeros(u32);
+#endif  /* C equivalent */
+}
+
+/* INTERFACE: Multiword bitmap based small id allocator. */
+struct bcm_mwbmap;	/* forward declaration for use as an opaque mwbmap handle */
+
+#define BCM_MWBMAP_INVALID_HDL	((struct bcm_mwbmap *)NULL)
+#define BCM_MWBMAP_INVALID_IDX	((uint32)(~0U))
+
+/* Incarnate a multiword bitmap based small index allocator */
+extern struct bcm_mwbmap * bcm_mwbmap_init(osl_t * osh, uint32 items_max);
+
+/* Free up the multiword bitmap index allocator */
+extern void bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl);
+
+/* Allocate a unique small index using a multiword bitmap index allocator */
+extern uint32 bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Force an index at a specified position to be in use */
+extern void bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+extern void bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+extern uint32 bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Determine whether an index is inuse or free */
+extern bool bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Debug dump a multiword bitmap allocator */
+extern void bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl);
+
+extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl);
+/* End - Multiword bitmap based small Id allocator. */
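+
+/*
+ * Illustrative mwbmap usage sketch; "osh" is an osl handle, error handling is
+ * elided, and the failure return of bcm_mwbmap_alloc() is assumed to be
+ * BCM_MWBMAP_INVALID_IDX.
+ *
+ *     struct bcm_mwbmap *map = bcm_mwbmap_init(osh, 256);
+ *     uint32 idx = bcm_mwbmap_alloc(map);
+ *     ... use idx, then release it ...
+ *     bcm_mwbmap_free(map, idx);
+ *     bcm_mwbmap_fini(osh, map);
+ */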
+
+
+/* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */
+
+#define ID16_INVALID                ((uint16)(~0))
+
+/*
+ * Construct a 16bit id allocator, managing 16bit ids in the range:
+ *    [start_val16 .. start_val16+total_ids)
+ * Note: start_val16 is inclusive.
+ * Returns an opaque handle to the 16bit id allocator.
+ */
+extern void * id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16);
+extern void * id16_map_fini(osl_t *osh, void * id16_map_hndl);
+
+/* Allocate a unique 16bit id */
+extern uint16 id16_map_alloc(void * id16_map_hndl);
+
+/* Free a 16bit id value into the id16 allocator */
+extern void id16_map_free(void * id16_map_hndl, uint16 val16);
+
+/* Get the number of failures encountered during id allocation. */
+extern uint32 id16_map_failures(void * id16_map_hndl);
+
+/* Audit the 16bit id allocator state. */
+extern bool id16_map_audit(void * id16_map_hndl);
+/* End - Simple 16bit Id Allocator. */
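+
+/*
+ * Illustrative id16 usage sketch; "osh" is an osl handle and the failure
+ * return of id16_map_alloc() is assumed to be ID16_INVALID.
+ *
+ *     void *hndl = id16_map_init(osh, 32, 0x100);	// ids 0x100 .. 0x11f
+ *     uint16 id = id16_map_alloc(hndl);
+ *     ... use id, then release it ...
+ *     id16_map_free(hndl, id);
+ *     hndl = id16_map_fini(osh, hndl);
+ */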
+
+#endif /* BCMDRIVER */
+
+extern void bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+void bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+void bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16 bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum);
+
+#ifndef _dll_t_
+#define _dll_t_
+/*
+ * -----------------------------------------------------------------------------
+ *                      Double Linked List Macros
+ * -----------------------------------------------------------------------------
+ *
+ * All dll operations must be performed on a pre-initialized node.
+ * Inserting an uninitialized node into a list effectively initializes it.
+ *
+ * When a node is deleted from a list, you may initialize it to avoid corruption
+ * incurred by double deletion. You may skip initialization if the node is
+ * immediately inserted into another list.
+ *
+ * By placing a dll_t element at the start of a struct, you may cast a dll_t *
+ * to the struct or vice versa.
+ *
+ * Example of declaring and initializing someList and inserting nodeA, nodeB, nodeC
+ *
+ *     typedef struct item {
+ *         dll_t node;
+ *         int someData;
+ *     } Item_t;
+ *     Item_t nodeA, nodeB, nodeC;
+ *     nodeA.someData = 11111, nodeB.someData = 22222, nodeC.someData = 33333;
+ *
+ *     dll_t someList;
+ *     dll_init(&someList);
+ *
+ *     dll_append(&someList, (dll_t *) &nodeA);
+ *     dll_prepend(&someList, &nodeB.node);
+ *     dll_insert((dll_t *)&nodeC, &nodeA.node);
+ *
+ *     dll_delete((dll_t *) &nodeB);
+ *
+ * Example of a for loop to walk someList of node_p
+ *
+ *   extern void mydisplay(Item_t * item_p);
+ *
+ *   dll_t * item_p, * next_p;
+ *   for (item_p = dll_head_p(&someList); ! dll_end(&someList, item_p);
+ *        item_p = next_p)
+ *   {
+ *       next_p = dll_next_p(item_p);
+ *       ... use item_p at will, including removing it from list ...
+ *       mydisplay((Item_t *)item_p);
+ *   }
+ *
+ * -----------------------------------------------------------------------------
+ */
+typedef struct dll {
+	struct dll * next_p;
+	struct dll * prev_p;
+} dll_t;
+
+static INLINE void
+dll_init(dll_t *node_p)
+{
+	node_p->next_p = node_p;
+	node_p->prev_p = node_p;
+}
+/* dll macros returning a pointer to dll_t */
+
+static INLINE dll_t *
+dll_head_p(dll_t *list_p)
+{
+	return list_p->next_p;
+}
+
+
+static INLINE dll_t *
+dll_tail_p(dll_t *list_p)
+{
+	return (list_p)->prev_p;
+}
+
+
+static INLINE dll_t *
+dll_next_p(dll_t *node_p)
+{
+	return (node_p)->next_p;
+}
+
+
+static INLINE dll_t *
+dll_prev_p(dll_t *node_p)
+{
+	return (node_p)->prev_p;
+}
+
+
+static INLINE bool
+dll_empty(dll_t *list_p)
+{
+	return ((list_p)->next_p == (list_p));
+}
+
+
+static INLINE bool
+dll_end(dll_t *list_p, dll_t * node_p)
+{
+	return (list_p == node_p);
+}
+
+
+/* inserts the node new_p "after" the node at_p */
+static INLINE void
+dll_insert(dll_t *new_p, dll_t * at_p)
+{
+	new_p->next_p = at_p->next_p;
+	new_p->prev_p = at_p;
+	at_p->next_p = new_p;
+	(new_p->next_p)->prev_p = new_p;
+}
+
+static INLINE void
+dll_append(dll_t *list_p, dll_t *node_p)
+{
+	dll_insert(node_p, dll_tail_p(list_p));
+}
+
+static INLINE void
+dll_prepend(dll_t *list_p, dll_t *node_p)
+{
+	dll_insert(node_p, list_p);
+}
+
+
+/* deletes a node from any list that it "may" be in, if at all. */
+static INLINE void
+dll_delete(dll_t *node_p)
+{
+	node_p->prev_p->next_p = node_p->next_p;
+	node_p->next_p->prev_p = node_p->prev_p;
+}
+#endif  /* ! defined(_dll_t_) */
+
+/* Elements managed in a double linked list */
+
+typedef struct dll_pool {
+	dll_t       free_list;
+	uint16      free_count;
+	uint16      elems_max;
+	uint16      elem_size;
+	dll_t       elements[1];
+} dll_pool_t;
+
+dll_pool_t * dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size);
+void * dll_pool_alloc(dll_pool_t * dll_pool_p);
+void dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p);
+void dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p);
+typedef void (* dll_elem_dump)(void * elem_p);
+void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size);
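+
+/*
+ * Illustrative dll_pool usage sketch; "osh" is an osl handle and elem_t is a
+ * hypothetical element type whose first member is a dll_t. A NULL return from
+ * dll_pool_alloc() on exhaustion is assumed.
+ *
+ *     dll_pool_t *pool = dll_pool_init(osh, 16, sizeof(elem_t));
+ *     elem_t *e = (elem_t *)dll_pool_alloc(pool);
+ *     ... use e, then return it to the pool ...
+ *     dll_pool_free(pool, e);
+ *     dll_pool_detach(osh, pool, 16, sizeof(elem_t));
+ */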
+
+#ifdef __cplusplus
+	}
+#endif
+
+/* #define DEBUG_COUNTER */
+#ifdef DEBUG_COUNTER
+#define CNTR_TBL_MAX 10
+typedef struct _counter_tbl_t {
+	char name[16];				/* name of this counter table */
+	uint32 prev_log_print;		/* Internal use. Timestamp of the previous log print */
+	uint log_print_interval;	/* Desired interval to print logs in ms */
+	uint needed_cnt;			/* How many counters need to be used */
+	uint32 cnt[CNTR_TBL_MAX];		/* Counting entries to increase at desired places */
+	bool enabled;				/* Whether to enable printing log */
+} counter_tbl_t;
+
+
+void counter_printlog(counter_tbl_t *ctr_tbl);
+#endif /* DEBUG_COUNTER */
+
+#endif	/* _bcmutils_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmwifi_channels.h b/drivers/net/wireless/bcmdhd/include/bcmwifi_channels.h
new file mode 100644
index 0000000000000000000000000000000000000000..b96aee6e1f8e95b595aff5070bfde33e1f95f0d9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmwifi_channels.h
@@ -0,0 +1,530 @@
+/*
+ * Misc utility routines for WL and Apps
+ * This header file houses the defines and function prototypes used by
+ * both the wl driver, tools & Apps.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmwifi_channels.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef	_bcmwifi_channels_h_
+#define	_bcmwifi_channels_h_
+
+
+/* A chanspec holds the channel number, band, bandwidth and control sideband */
+typedef uint16 chanspec_t;
+
+/* channel defines */
+#define CH_UPPER_SB			0x01
+#define CH_LOWER_SB			0x02
+#define CH_EWA_VALID			0x04
+#define CH_80MHZ_APART			16
+#define CH_40MHZ_APART			8
+#define CH_20MHZ_APART			4
+#define CH_10MHZ_APART			2
+#define CH_5MHZ_APART			1	/* 2G band channels are 5 Mhz apart */
+#define CH_MAX_2G_CHANNEL		14	/* Max channel in 2G band */
+#define MAXCHANNEL		224	/* max # supported channels: the highest supported channel
+					 * number plus 1, rounded up to a multiple of NBBY (8).
+					 * DO NOT MAKE it > 255: channels are uint8's all over
+					 */
+#define MAXCHANNEL_NUM	(MAXCHANNEL - 1)	/* max channel number */
+
+/* make sure channel num is within valid range */
+#define CH_NUM_VALID_RANGE(ch_num) ((ch_num) > 0 && (ch_num) <= MAXCHANNEL_NUM)
+
+#define CHSPEC_CTLOVLP(sp1, sp2, sep)	(ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < \
+				  (sep))
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef  D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#define WL_CHANSPEC_CHAN_MASK		0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT		0
+#define WL_CHANSPEC_CHAN1_MASK		0x000f
+#define WL_CHANSPEC_CHAN1_SHIFT		0
+#define WL_CHANSPEC_CHAN2_MASK		0x00f0
+#define WL_CHANSPEC_CHAN2_SHIFT		4
+
+#define WL_CHANSPEC_CTL_SB_MASK		0x0700
+#define WL_CHANSPEC_CTL_SB_SHIFT	8
+#define WL_CHANSPEC_CTL_SB_LLL		0x0000
+#define WL_CHANSPEC_CTL_SB_LLU		0x0100
+#define WL_CHANSPEC_CTL_SB_LUL		0x0200
+#define WL_CHANSPEC_CTL_SB_LUU		0x0300
+#define WL_CHANSPEC_CTL_SB_ULL		0x0400
+#define WL_CHANSPEC_CTL_SB_ULU		0x0500
+#define WL_CHANSPEC_CTL_SB_UUL		0x0600
+#define WL_CHANSPEC_CTL_SB_UUU		0x0700
+#define WL_CHANSPEC_CTL_SB_LL		WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_LU		WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_UL		WL_CHANSPEC_CTL_SB_LUL
+#define WL_CHANSPEC_CTL_SB_UU		WL_CHANSPEC_CTL_SB_LUU
+#define WL_CHANSPEC_CTL_SB_L		WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_U		WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_LOWER	WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_UPPER	WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_NONE		WL_CHANSPEC_CTL_SB_LLL
+
+#define WL_CHANSPEC_BW_MASK		0x3800
+#define WL_CHANSPEC_BW_SHIFT		11
+#define WL_CHANSPEC_BW_5		0x0000
+#define WL_CHANSPEC_BW_10		0x0800
+#define WL_CHANSPEC_BW_20		0x1000
+#define WL_CHANSPEC_BW_40		0x1800
+#define WL_CHANSPEC_BW_80		0x2000
+#define WL_CHANSPEC_BW_160		0x2800
+#define WL_CHANSPEC_BW_8080		0x3000
+
+#define WL_CHANSPEC_BAND_MASK		0xc000
+#define WL_CHANSPEC_BAND_SHIFT		14
+#define WL_CHANSPEC_BAND_2G		0x0000
+#define WL_CHANSPEC_BAND_3G		0x4000
+#define WL_CHANSPEC_BAND_4G		0x8000
+#define WL_CHANSPEC_BAND_5G		0xc000
+#define INVCHANSPEC			255
+
+/* channel defines */
+#define LOWER_20_SB(channel)		(((channel) > CH_10MHZ_APART) ? \
+					((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel)		(((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+					((channel) + CH_10MHZ_APART) : 0)
+
+#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? ((channel) - 3 * CH_10MHZ_APART) : 0)
+#define UU_20_SB(channel) 	(((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? \
+				((channel) + 3 * CH_10MHZ_APART) : 0)
+#define LU_20_SB(channel) LOWER_20_SB(channel)
+#define UL_20_SB(channel) UPPER_20_SB(channel)
+
+#define LOWER_40_SB(channel)		((channel) - CH_20MHZ_APART)
+#define UPPER_40_SB(channel)		((channel) + CH_20MHZ_APART)
+#define CHSPEC_WLCBANDUNIT(chspec)	(CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel)		(chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+					(((channel) <= CH_MAX_2G_CHANNEL) ? \
+					WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel)	(((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+					((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+					((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+					WL_CHANSPEC_BAND_5G))
+#define CH80MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | \
+					 WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G)
+#define CH160MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | \
+					 WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G)
+
+/* simple MACROs to get different fields of chanspec */
+#ifdef WL11AC_80P80
+#define CHSPEC_CHANNEL(chspec)	wf_chspec_channel(chspec)
+#else
+#define CHSPEC_CHANNEL(chspec)	((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#endif
+#define CHSPEC_CHAN1(chspec)	((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT
+#define CHSPEC_CHAN2(chspec)	((chspec) & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT
+#define CHSPEC_BAND(chspec)		((chspec) & WL_CHANSPEC_BAND_MASK)
+#define CHSPEC_CTL_SB(chspec)	((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec)		((chspec) & WL_CHANSPEC_BW_MASK)
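+
+/*
+ * Illustrative example: a 20 MHz chanspec for 5 GHz channel 36 and how its
+ * fields decompose.
+ *
+ *     chanspec_t cs = CH20MHZ_CHSPEC(36);	// 0x0024 | 0x1000 | 0xc000 = 0xd024
+ *     CHSPEC_CHANNEL(cs);			// 36
+ *     CHSPEC_BW(cs) == WL_CHANSPEC_BW_20;	// TRUE
+ *     CHSPEC_BAND(cs) == WL_CHANSPEC_BAND_5G;	// TRUE
+ */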
+
+#ifdef WL11N_20MHZONLY
+
+#define CHSPEC_IS10(chspec)	0
+#define CHSPEC_IS20(chspec)	1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	0
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec)	0
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec)	0
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec)	0
+#endif
+
+#else /* !WL11N_20MHZONLY */
+
+#define CHSPEC_IS10(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80)
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160)
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080)
+#endif
+
+#endif /* !WL11N_20MHZONLY */
+
+#define CHSPEC_IS5G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_UPPER(chspec)	\
+	((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \
+	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC_SB_LOWER(chspec)	\
+	((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \
+	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+
+/**
+ * Number of chars needed for wf_chspec_ntoa() destination character buffer.
+ */
+#define CHANSPEC_STR_LEN    20
+
+
+#define CHSPEC_IS_BW_160_WIDE(chspec) (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\
+	CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080)
+
+/* BW inequality comparisons, LE (<=), GE (>=), LT (<), GT (>). Comparisons can be made
+* as simple numeric comparisons, with the exception that 160 is the same BW as 80+80
+* even though the two have different numeric values (WL_CHANSPEC_BW_160 < WL_CHANSPEC_BW_8080).
+*
+* The LT/LE/GT/GE macros therefore first check whether both the chspec bandwidth and bw
+* are 160-wide; if they are not both 160-wide, the plain numeric comparison is made.
+*/
+#define CHSPEC_BW_GE(chspec, bw) \
+	((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) ||\
+	(CHSPEC_BW(chspec) >= bw))
+
+#define CHSPEC_BW_LE(chspec, bw) \
+	((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) ||\
+	(CHSPEC_BW(chspec) <= bw))
+
+#define CHSPEC_BW_GT(chspec, bw) \
+	(!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) &&\
+	(CHSPEC_BW(chspec) > bw))
+
+#define CHSPEC_BW_LT(chspec, bw) \
+	(!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) &&\
+	(CHSPEC_BW(chspec) < bw))
+
+/* Legacy Chanspec defines
+ * These are the defines for the previous format of the chanspec_t
+ */
+#define WL_LCHANSPEC_CHAN_MASK		0x00ff
+#define WL_LCHANSPEC_CHAN_SHIFT		     0
+
+#define WL_LCHANSPEC_CTL_SB_MASK	0x0300
+#define WL_LCHANSPEC_CTL_SB_SHIFT	     8
+#define WL_LCHANSPEC_CTL_SB_LOWER	0x0100
+#define WL_LCHANSPEC_CTL_SB_UPPER	0x0200
+#define WL_LCHANSPEC_CTL_SB_NONE	0x0300
+
+#define WL_LCHANSPEC_BW_MASK		0x0C00
+#define WL_LCHANSPEC_BW_SHIFT		    10
+#define WL_LCHANSPEC_BW_10		0x0400
+#define WL_LCHANSPEC_BW_20		0x0800
+#define WL_LCHANSPEC_BW_40		0x0C00
+
+#define WL_LCHANSPEC_BAND_MASK		0xf000
+#define WL_LCHANSPEC_BAND_SHIFT		    12
+#define WL_LCHANSPEC_BAND_5G		0x1000
+#define WL_LCHANSPEC_BAND_2G		0x2000
+
+#define LCHSPEC_CHANNEL(chspec)	((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK))
+#define LCHSPEC_BAND(chspec)	((chspec) & WL_LCHANSPEC_BAND_MASK)
+#define LCHSPEC_CTL_SB(chspec)	((chspec) & WL_LCHANSPEC_CTL_SB_MASK)
+#define LCHSPEC_BW(chspec)	((chspec) & WL_LCHANSPEC_BW_MASK)
+#define LCHSPEC_IS10(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_10)
+#define LCHSPEC_IS20(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20)
+#define LCHSPEC_IS40(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)
+#define LCHSPEC_IS5G(chspec)	(((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G)
+#define LCHSPEC_IS2G(chspec)	(((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G)
+
+#define LCHSPEC_SB_UPPER(chspec)	\
+	((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_UPPER) && \
+	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+#define LCHSPEC_SB_LOWER(chspec)	\
+	((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_LOWER) && \
+	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+
+#define LCHSPEC_CREATE(chan, band, bw, sb)  ((uint16)((chan) | (sb) | (bw) | (band)))
+
+#define CH20MHZ_LCHSPEC(channel) \
+	(chanspec_t)((chanspec_t)(channel) | WL_LCHANSPEC_BW_20 | \
+	WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+	WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G))
+
+/*
+ * WF_CHAN_FACTOR_* constants are used to calculate channel frequency
+ * given a channel number.
+ * chan_freq (MHz) = chan_factor * 500 kHz + chan_number * 5 MHz
+ */
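+
+/*
+ * Worked example (illustrative): 2.4 GHz channel 6 uses WF_CHAN_FACTOR_2_4_G,
+ * so chan_freq = 4814 * 500 kHz + 6 * 5 MHz = 2407 MHz + 30 MHz = 2437 MHz.
+ */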
+
+/**
+ * Channel Factor for the starting frequency of 2.4 GHz channels.
+ * The value corresponds to 2407 MHz.
+ */
+#define WF_CHAN_FACTOR_2_4_G		4814	/* 2.4 GHz band, 2407 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 5 GHz channels.
+ * The value corresponds to 5000 MHz.
+ */
+#define WF_CHAN_FACTOR_5_G		10000	/* 5   GHz band, 5000 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 4.9 GHz channels.
+ * The value corresponds to 4000 MHz.
+ */
+#define WF_CHAN_FACTOR_4_G		8000	/* 4.9 GHz band for Japan */
+
+#define WLC_2G_25MHZ_OFFSET		5	/* 2.4GHz band channel offset */
+
+/**
+ *  Number of sub-band values for the specified MHz chanspec
+ */
+#define WF_NUM_SIDEBANDS_40MHZ   2
+#define WF_NUM_SIDEBANDS_80MHZ   4
+#define WF_NUM_SIDEBANDS_8080MHZ 4
+#define WF_NUM_SIDEBANDS_160MHZ  8
+
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param	chspec		chanspec format
+ * @param	buf		ascii string of chanspec
+ *
+ * @return	pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *		Original chanspec in case of error
+ *
+ * @see		CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa_ex(chanspec_t chspec, char *buf);
+
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param	chspec		chanspec format
+ * @param	buf		ascii string of chanspec
+ *
+ * @return	pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *		NULL in case of error
+ *
+ * @see		CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+/**
+ * Convert ascii string to chanspec
+ *
+ * @param	a     pointer to input string
+ *
+ * @return	a valid chanspec if successful or 0 otherwise
+ */
+extern chanspec_t wf_chspec_aton(const char *a);
+
+/**
+ * Verify the chanspec fields are valid.
+ *
+ * Verify the chanspec uses a legal set of field values, i.e. that the chanspec
+ * specifies a band, bw, ctl_sb and channel, and that the combination could be
+ * legal given some set of circumstances.
+ *
+ * @param	chanspec   input chanspec to verify
+ *
+ * @return TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+extern bool wf_chspec_malformed(chanspec_t chanspec);
+
+/**
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ *
+ * @param	chanspec   input chanspec to verify
+ *
+ * @return TRUE if the chanspec is a valid 802.11 channel
+ */
+extern bool wf_chspec_valid(chanspec_t chanspec);
+
+/**
+ * Return the primary (control) channel.
+ *
+ * This function returns the channel number of the primary 20MHz channel. For
+ * 20MHz channels this is just the channel number. For 40MHz or wider channels
+ * it is the primary 20MHz channel specified by the chanspec.
+ *
+ * @param	chspec    input chanspec
+ *
+ * @return Returns the channel number of the primary 20MHz channel
+ */
+extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
+
+/**
+ * Return the bandwidth string.
+ *
+ * This function returns the bandwidth string for the passed chanspec.
+ *
+ * @param	chspec    input chanspec
+ *
+ * @return Returns the bandwidth string
+ */
+extern char * wf_chspec_to_bw_str(chanspec_t chspec);
+
+/**
+ * Return the primary (control) chanspec.
+ *
+ * This function returns the chanspec of the primary 20MHz channel. For 20MHz
+ * channels this is just the chanspec. For 40MHz or wider channels it is the
+ * chanspec of the primary 20MHZ channel specified by the chanspec.
+ *
+ * @param	chspec    input chanspec
+ *
+ * @return Returns the chanspec of the primary 20MHz channel
+ */
+extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
+
+/**
+ * Return the primary 40MHz chanspec of an 80MHz chanspec.
+ *
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec);
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not and even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param	freq          frequency in MHz
+ * @param	start_factor  base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a channel number
+ *
+ * @see  WF_CHAN_FACTOR_2_4_G
+ * @see  WF_CHAN_FACTOR_5_G
+ */
+extern int wf_mhz2channel(uint freq, uint start_factor);
+
+/**
+ * Return the center frequency in MHz of the given channel and base frequency.
+ *
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param	channel       input channel number
+ * @param	start_factor  base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a frequency in MHz
+ *
+ * @see  WF_CHAN_FACTOR_2_4_G
+ * @see  WF_CHAN_FACTOR_5_G
+ */
+extern int wf_channel2mhz(uint channel, uint start_factor);
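+
+/*
+ * Illustrative values:
+ *     wf_mhz2channel(2437, WF_CHAN_FACTOR_2_4_G) returns 6
+ *     wf_channel2mhz(36, WF_CHAN_FACTOR_5_G) returns 5180
+ */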
+
+/**
+ * Returns the chanspec 80Mhz channel corresponding to the following input
+ * parameters
+ *
+ *	primary_channel - primary 20Mhz channel
+ *	center_channel   - center channel of the 80 MHz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ */
+extern chanspec_t wf_chspec_80(uint8 center_channel, uint8 primary_channel);
+
+/**
+ * Convert ctl chan and bw to chanspec
+ *
+ * @param	ctl_ch		channel
+ * @param	bw	        bandwidth
+ *
+ * @return	> 0 if successful or 0 otherwise
+ *
+ */
+extern uint16 wf_channel2chspec(uint ctl_ch, uint bw);
+
+extern uint wf_channel2freq(uint channel);
+extern uint wf_freq2channel(uint freq);
+
+/*
+ * Returns the 80+80 MHz chanspec corresponding to the following input parameters
+ *
+ *    primary_20mhz - Primary 20 MHz channel
+ *    chan0_80MHz - center channel number of one frequency segment
+ *    chan1_80MHz - center channel number of the other frequency segment
+ *
+ * Parameters chan0_80MHz and chan1_80MHz are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to IEEE802.11ac section 22.3.14 "Channelization".
+ */
+extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz,
+	uint8 chan0_80Mhz, uint8 chan1_80Mhz);
+
+/*
+ * Returns the primary 80 Mhz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved
+ *
+ *  returns -1 in case the provided channel is 20/40 Mhz chanspec
+ */
+extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec);
+
+/*
+ * Returns the secondary 80 Mhz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved
+ *
+ *  returns -1 in case the provided channel is 20/40 Mhz chanspec
+ */
+extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec);
+
+/*
+ * This function returns the chanspec for the primary 80MHz of an 160MHz or 80+80 channel.
+ */
+extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec);
+
+#ifdef WL11AC_80P80
+/*
+ * This function returns the center channel for the given chanspec.
+ * In case of an 80+80 chanspec it returns the primary 80 MHz center channel
+ */
+extern uint8 wf_chspec_channel(chanspec_t chspec);
+#endif
+#endif	/* _bcmwifi_channels_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmwifi_rates.h b/drivers/net/wireless/bcmdhd/include/bcmwifi_rates.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf5f88d78cf7034394b817e44d8a938c53ca2007
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmwifi_rates.h
@@ -0,0 +1,452 @@
+/*
+ * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmwifi_rates.h 5187 2012-06-29 06:17:50Z $
+ */
+
+#ifndef _bcmwifi_rates_h_
+#define _bcmwifi_rates_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+#define WL_RATESET_SZ_DSSS		4
+#define WL_RATESET_SZ_OFDM		8
+#define WL_RATESET_SZ_VHT_MCS	10
+
+#if defined(WLPROPRIETARY_11N_RATES)
+#define WL_RATESET_SZ_HT_MCS	WL_RATESET_SZ_VHT_MCS
+#else
+#define WL_RATESET_SZ_HT_MCS	8
+#endif
+
+#define WL_RATESET_SZ_HT_IOCTL	8	/* MAC histogram, compatibility with wl utility */
+
+#define WL_TX_CHAINS_MAX	3
+
+#define WL_RATE_DISABLED		(-128) /* Power value corresponding to unsupported rate */
+
+/* Transmit channel bandwidths */
+typedef enum wl_tx_bw {
+	WL_TX_BW_20,
+	WL_TX_BW_40,
+	WL_TX_BW_80,
+	WL_TX_BW_20IN40,
+	WL_TX_BW_20IN80,
+	WL_TX_BW_40IN80,
+	WL_TX_BW_160,
+	WL_TX_BW_20IN160,
+	WL_TX_BW_40IN160,
+	WL_TX_BW_80IN160,
+	WL_TX_BW_ALL,
+	WL_TX_BW_8080,
+	WL_TX_BW_8080CHAN2,
+	WL_TX_BW_20IN8080,
+	WL_TX_BW_40IN8080,
+	WL_TX_BW_80IN8080
+} wl_tx_bw_t;
+
+
+/*
+ * Transmit modes.
+ * Not all modes are listed here, only those required for disambiguation. e.g. SPEXP is not listed
+ */
+typedef enum wl_tx_mode {
+	WL_TX_MODE_NONE,
+	WL_TX_MODE_STBC,
+	WL_TX_MODE_CDD,
+	WL_TX_MODE_TXBF,
+	WL_NUM_TX_MODES
+} wl_tx_mode_t;
+
+
+/* Number of transmit chains */
+typedef enum wl_tx_chains {
+	WL_TX_CHAINS_1 = 1,
+	WL_TX_CHAINS_2,
+	WL_TX_CHAINS_3
+} wl_tx_chains_t;
+
+
+/* Number of transmit streams */
+typedef enum wl_tx_nss {
+	WL_TX_NSS_1 = 1,
+	WL_TX_NSS_2,
+	WL_TX_NSS_3
+} wl_tx_nss_t;
+
+
+typedef enum clm_rates {
+	/************
+	* 1 chain  *
+	************
+	*/
+
+	/* 1 Stream */
+	WL_RATE_1X1_DSSS_1         = 0,
+	WL_RATE_1X1_DSSS_2         = 1,
+	WL_RATE_1X1_DSSS_5_5       = 2,
+	WL_RATE_1X1_DSSS_11        = 3,
+
+	WL_RATE_1X1_OFDM_6         = 4,
+	WL_RATE_1X1_OFDM_9         = 5,
+	WL_RATE_1X1_OFDM_12        = 6,
+	WL_RATE_1X1_OFDM_18        = 7,
+	WL_RATE_1X1_OFDM_24        = 8,
+	WL_RATE_1X1_OFDM_36        = 9,
+	WL_RATE_1X1_OFDM_48        = 10,
+	WL_RATE_1X1_OFDM_54        = 11,
+
+	WL_RATE_1X1_MCS0           = 12,
+	WL_RATE_1X1_MCS1           = 13,
+	WL_RATE_1X1_MCS2           = 14,
+	WL_RATE_1X1_MCS3           = 15,
+	WL_RATE_1X1_MCS4           = 16,
+	WL_RATE_1X1_MCS5           = 17,
+	WL_RATE_1X1_MCS6           = 18,
+	WL_RATE_1X1_MCS7           = 19,
+
+	WL_RATE_1X1_VHT0SS1        = 12,
+	WL_RATE_1X1_VHT1SS1        = 13,
+	WL_RATE_1X1_VHT2SS1        = 14,
+	WL_RATE_1X1_VHT3SS1        = 15,
+	WL_RATE_1X1_VHT4SS1        = 16,
+	WL_RATE_1X1_VHT5SS1        = 17,
+	WL_RATE_1X1_VHT6SS1        = 18,
+	WL_RATE_1X1_VHT7SS1        = 19,
+	WL_RATE_1X1_VHT8SS1        = 20,
+	WL_RATE_1X1_VHT9SS1        = 21,
+
+
+	/************
+	* 2 chains *
+	************
+	*/
+
+	/* 1 Stream expanded + 1 */
+	WL_RATE_1X2_DSSS_1         = 22,
+	WL_RATE_1X2_DSSS_2         = 23,
+	WL_RATE_1X2_DSSS_5_5       = 24,
+	WL_RATE_1X2_DSSS_11        = 25,
+
+	WL_RATE_1X2_CDD_OFDM_6     = 26,
+	WL_RATE_1X2_CDD_OFDM_9     = 27,
+	WL_RATE_1X2_CDD_OFDM_12    = 28,
+	WL_RATE_1X2_CDD_OFDM_18    = 29,
+	WL_RATE_1X2_CDD_OFDM_24    = 30,
+	WL_RATE_1X2_CDD_OFDM_36    = 31,
+	WL_RATE_1X2_CDD_OFDM_48    = 32,
+	WL_RATE_1X2_CDD_OFDM_54    = 33,
+
+	WL_RATE_1X2_CDD_MCS0       = 34,
+	WL_RATE_1X2_CDD_MCS1       = 35,
+	WL_RATE_1X2_CDD_MCS2       = 36,
+	WL_RATE_1X2_CDD_MCS3       = 37,
+	WL_RATE_1X2_CDD_MCS4       = 38,
+	WL_RATE_1X2_CDD_MCS5       = 39,
+	WL_RATE_1X2_CDD_MCS6       = 40,
+	WL_RATE_1X2_CDD_MCS7       = 41,
+
+	WL_RATE_1X2_VHT0SS1        = 34,
+	WL_RATE_1X2_VHT1SS1        = 35,
+	WL_RATE_1X2_VHT2SS1        = 36,
+	WL_RATE_1X2_VHT3SS1        = 37,
+	WL_RATE_1X2_VHT4SS1        = 38,
+	WL_RATE_1X2_VHT5SS1        = 39,
+	WL_RATE_1X2_VHT6SS1        = 40,
+	WL_RATE_1X2_VHT7SS1        = 41,
+	WL_RATE_1X2_VHT8SS1        = 42,
+	WL_RATE_1X2_VHT9SS1        = 43,
+
+	/* 2 Streams */
+	WL_RATE_2X2_STBC_MCS0      = 44,
+	WL_RATE_2X2_STBC_MCS1      = 45,
+	WL_RATE_2X2_STBC_MCS2      = 46,
+	WL_RATE_2X2_STBC_MCS3      = 47,
+	WL_RATE_2X2_STBC_MCS4      = 48,
+	WL_RATE_2X2_STBC_MCS5      = 49,
+	WL_RATE_2X2_STBC_MCS6      = 50,
+	WL_RATE_2X2_STBC_MCS7      = 51,
+
+	WL_RATE_2X2_STBC_VHT0SS1   = 44,
+	WL_RATE_2X2_STBC_VHT1SS1   = 45,
+	WL_RATE_2X2_STBC_VHT2SS1   = 46,
+	WL_RATE_2X2_STBC_VHT3SS1   = 47,
+	WL_RATE_2X2_STBC_VHT4SS1   = 48,
+	WL_RATE_2X2_STBC_VHT5SS1   = 49,
+	WL_RATE_2X2_STBC_VHT6SS1   = 50,
+	WL_RATE_2X2_STBC_VHT7SS1   = 51,
+	WL_RATE_2X2_STBC_VHT8SS1   = 52,
+	WL_RATE_2X2_STBC_VHT9SS1   = 53,
+
+	WL_RATE_2X2_SDM_MCS8       = 54,
+	WL_RATE_2X2_SDM_MCS9       = 55,
+	WL_RATE_2X2_SDM_MCS10      = 56,
+	WL_RATE_2X2_SDM_MCS11      = 57,
+	WL_RATE_2X2_SDM_MCS12      = 58,
+	WL_RATE_2X2_SDM_MCS13      = 59,
+	WL_RATE_2X2_SDM_MCS14      = 60,
+	WL_RATE_2X2_SDM_MCS15      = 61,
+
+	WL_RATE_2X2_VHT0SS2        = 54,
+	WL_RATE_2X2_VHT1SS2        = 55,
+	WL_RATE_2X2_VHT2SS2        = 56,
+	WL_RATE_2X2_VHT3SS2        = 57,
+	WL_RATE_2X2_VHT4SS2        = 58,
+	WL_RATE_2X2_VHT5SS2        = 59,
+	WL_RATE_2X2_VHT6SS2        = 60,
+	WL_RATE_2X2_VHT7SS2        = 61,
+	WL_RATE_2X2_VHT8SS2        = 62,
+	WL_RATE_2X2_VHT9SS2        = 63,
+
+	/************
+	* 3 chains *
+	************
+	*/
+
+	/* 1 Stream expanded + 2 */
+	WL_RATE_1X3_DSSS_1         = 64,
+	WL_RATE_1X3_DSSS_2         = 65,
+	WL_RATE_1X3_DSSS_5_5       = 66,
+	WL_RATE_1X3_DSSS_11        = 67,
+
+	WL_RATE_1X3_CDD_OFDM_6     = 68,
+	WL_RATE_1X3_CDD_OFDM_9     = 69,
+	WL_RATE_1X3_CDD_OFDM_12    = 70,
+	WL_RATE_1X3_CDD_OFDM_18    = 71,
+	WL_RATE_1X3_CDD_OFDM_24    = 72,
+	WL_RATE_1X3_CDD_OFDM_36    = 73,
+	WL_RATE_1X3_CDD_OFDM_48    = 74,
+	WL_RATE_1X3_CDD_OFDM_54    = 75,
+
+	WL_RATE_1X3_CDD_MCS0       = 76,
+	WL_RATE_1X3_CDD_MCS1       = 77,
+	WL_RATE_1X3_CDD_MCS2       = 78,
+	WL_RATE_1X3_CDD_MCS3       = 79,
+	WL_RATE_1X3_CDD_MCS4       = 80,
+	WL_RATE_1X3_CDD_MCS5       = 81,
+	WL_RATE_1X3_CDD_MCS6       = 82,
+	WL_RATE_1X3_CDD_MCS7       = 83,
+
+	WL_RATE_1X3_VHT0SS1        = 76,
+	WL_RATE_1X3_VHT1SS1        = 77,
+	WL_RATE_1X3_VHT2SS1        = 78,
+	WL_RATE_1X3_VHT3SS1        = 79,
+	WL_RATE_1X3_VHT4SS1        = 80,
+	WL_RATE_1X3_VHT5SS1        = 81,
+	WL_RATE_1X3_VHT6SS1        = 82,
+	WL_RATE_1X3_VHT7SS1        = 83,
+	WL_RATE_1X3_VHT8SS1        = 84,
+	WL_RATE_1X3_VHT9SS1        = 85,
+
+	/* 2 Streams expanded + 1 */
+	WL_RATE_2X3_STBC_MCS0      = 86,
+	WL_RATE_2X3_STBC_MCS1      = 87,
+	WL_RATE_2X3_STBC_MCS2      = 88,
+	WL_RATE_2X3_STBC_MCS3      = 89,
+	WL_RATE_2X3_STBC_MCS4      = 90,
+	WL_RATE_2X3_STBC_MCS5      = 91,
+	WL_RATE_2X3_STBC_MCS6      = 92,
+	WL_RATE_2X3_STBC_MCS7      = 93,
+
+	WL_RATE_2X3_STBC_VHT0SS1   = 86,
+	WL_RATE_2X3_STBC_VHT1SS1   = 87,
+	WL_RATE_2X3_STBC_VHT2SS1   = 88,
+	WL_RATE_2X3_STBC_VHT3SS1   = 89,
+	WL_RATE_2X3_STBC_VHT4SS1   = 90,
+	WL_RATE_2X3_STBC_VHT5SS1   = 91,
+	WL_RATE_2X3_STBC_VHT6SS1   = 92,
+	WL_RATE_2X3_STBC_VHT7SS1   = 93,
+	WL_RATE_2X3_STBC_VHT8SS1   = 94,
+	WL_RATE_2X3_STBC_VHT9SS1   = 95,
+
+	WL_RATE_2X3_SDM_MCS8       = 96,
+	WL_RATE_2X3_SDM_MCS9       = 97,
+	WL_RATE_2X3_SDM_MCS10      = 98,
+	WL_RATE_2X3_SDM_MCS11      = 99,
+	WL_RATE_2X3_SDM_MCS12      = 100,
+	WL_RATE_2X3_SDM_MCS13      = 101,
+	WL_RATE_2X3_SDM_MCS14      = 102,
+	WL_RATE_2X3_SDM_MCS15      = 103,
+
+	WL_RATE_2X3_VHT0SS2        = 96,
+	WL_RATE_2X3_VHT1SS2        = 97,
+	WL_RATE_2X3_VHT2SS2        = 98,
+	WL_RATE_2X3_VHT3SS2        = 99,
+	WL_RATE_2X3_VHT4SS2        = 100,
+	WL_RATE_2X3_VHT5SS2        = 101,
+	WL_RATE_2X3_VHT6SS2        = 102,
+	WL_RATE_2X3_VHT7SS2        = 103,
+	WL_RATE_2X3_VHT8SS2        = 104,
+	WL_RATE_2X3_VHT9SS2        = 105,
+
+	/* 3 Streams */
+	WL_RATE_3X3_SDM_MCS16      = 106,
+	WL_RATE_3X3_SDM_MCS17      = 107,
+	WL_RATE_3X3_SDM_MCS18      = 108,
+	WL_RATE_3X3_SDM_MCS19      = 109,
+	WL_RATE_3X3_SDM_MCS20      = 110,
+	WL_RATE_3X3_SDM_MCS21      = 111,
+	WL_RATE_3X3_SDM_MCS22      = 112,
+	WL_RATE_3X3_SDM_MCS23      = 113,
+
+	WL_RATE_3X3_VHT0SS3        = 106,
+	WL_RATE_3X3_VHT1SS3        = 107,
+	WL_RATE_3X3_VHT2SS3        = 108,
+	WL_RATE_3X3_VHT3SS3        = 109,
+	WL_RATE_3X3_VHT4SS3        = 110,
+	WL_RATE_3X3_VHT5SS3        = 111,
+	WL_RATE_3X3_VHT6SS3        = 112,
+	WL_RATE_3X3_VHT7SS3        = 113,
+	WL_RATE_3X3_VHT8SS3        = 114,
+	WL_RATE_3X3_VHT9SS3        = 115,
+
+
+	/****************************
+	 * TX Beamforming, 2 chains *
+	 ****************************
+	 */
+
+	/* 1 Stream expanded + 1 */
+
+	WL_RATE_1X2_TXBF_OFDM_6    = 116,
+	WL_RATE_1X2_TXBF_OFDM_9    = 117,
+	WL_RATE_1X2_TXBF_OFDM_12   = 118,
+	WL_RATE_1X2_TXBF_OFDM_18   = 119,
+	WL_RATE_1X2_TXBF_OFDM_24   = 120,
+	WL_RATE_1X2_TXBF_OFDM_36   = 121,
+	WL_RATE_1X2_TXBF_OFDM_48   = 122,
+	WL_RATE_1X2_TXBF_OFDM_54   = 123,
+
+	WL_RATE_1X2_TXBF_MCS0      = 124,
+	WL_RATE_1X2_TXBF_MCS1      = 125,
+	WL_RATE_1X2_TXBF_MCS2      = 126,
+	WL_RATE_1X2_TXBF_MCS3      = 127,
+	WL_RATE_1X2_TXBF_MCS4      = 128,
+	WL_RATE_1X2_TXBF_MCS5      = 129,
+	WL_RATE_1X2_TXBF_MCS6      = 130,
+	WL_RATE_1X2_TXBF_MCS7      = 131,
+
+	WL_RATE_1X2_TXBF_VHT0SS1   = 124,
+	WL_RATE_1X2_TXBF_VHT1SS1   = 125,
+	WL_RATE_1X2_TXBF_VHT2SS1   = 126,
+	WL_RATE_1X2_TXBF_VHT3SS1   = 127,
+	WL_RATE_1X2_TXBF_VHT4SS1   = 128,
+	WL_RATE_1X2_TXBF_VHT5SS1   = 129,
+	WL_RATE_1X2_TXBF_VHT6SS1   = 130,
+	WL_RATE_1X2_TXBF_VHT7SS1   = 131,
+	WL_RATE_1X2_TXBF_VHT8SS1   = 132,
+	WL_RATE_1X2_TXBF_VHT9SS1   = 133,
+
+	/* 2 Streams */
+
+	WL_RATE_2X2_TXBF_SDM_MCS8  = 134,
+	WL_RATE_2X2_TXBF_SDM_MCS9  = 135,
+	WL_RATE_2X2_TXBF_SDM_MCS10 = 136,
+	WL_RATE_2X2_TXBF_SDM_MCS11 = 137,
+	WL_RATE_2X2_TXBF_SDM_MCS12 = 138,
+	WL_RATE_2X2_TXBF_SDM_MCS13 = 139,
+	WL_RATE_2X2_TXBF_SDM_MCS14 = 140,
+	WL_RATE_2X2_TXBF_SDM_MCS15 = 141,
+
+	WL_RATE_2X2_TXBF_VHT0SS2   = 134,
+	WL_RATE_2X2_TXBF_VHT1SS2   = 135,
+	WL_RATE_2X2_TXBF_VHT2SS2   = 136,
+	WL_RATE_2X2_TXBF_VHT3SS2   = 137,
+	WL_RATE_2X2_TXBF_VHT4SS2   = 138,
+	WL_RATE_2X2_TXBF_VHT5SS2   = 139,
+	WL_RATE_2X2_TXBF_VHT6SS2   = 140,
+	WL_RATE_2X2_TXBF_VHT7SS2   = 141,
+
+
+	/****************************
+	 * TX Beamforming, 3 chains *
+	 ****************************
+	 */
+
+	/* 1 Stream expanded + 2 */
+
+	WL_RATE_1X3_TXBF_OFDM_6    = 142,
+	WL_RATE_1X3_TXBF_OFDM_9    = 143,
+	WL_RATE_1X3_TXBF_OFDM_12   = 144,
+	WL_RATE_1X3_TXBF_OFDM_18   = 145,
+	WL_RATE_1X3_TXBF_OFDM_24   = 146,
+	WL_RATE_1X3_TXBF_OFDM_36   = 147,
+	WL_RATE_1X3_TXBF_OFDM_48   = 148,
+	WL_RATE_1X3_TXBF_OFDM_54   = 149,
+
+	WL_RATE_1X3_TXBF_MCS0      = 150,
+	WL_RATE_1X3_TXBF_MCS1      = 151,
+	WL_RATE_1X3_TXBF_MCS2      = 152,
+	WL_RATE_1X3_TXBF_MCS3      = 153,
+	WL_RATE_1X3_TXBF_MCS4      = 154,
+	WL_RATE_1X3_TXBF_MCS5      = 155,
+	WL_RATE_1X3_TXBF_MCS6      = 156,
+	WL_RATE_1X3_TXBF_MCS7      = 157,
+
+	WL_RATE_1X3_TXBF_VHT0SS1   = 150,
+	WL_RATE_1X3_TXBF_VHT1SS1   = 151,
+	WL_RATE_1X3_TXBF_VHT2SS1   = 152,
+	WL_RATE_1X3_TXBF_VHT3SS1   = 153,
+	WL_RATE_1X3_TXBF_VHT4SS1   = 154,
+	WL_RATE_1X3_TXBF_VHT5SS1   = 155,
+	WL_RATE_1X3_TXBF_VHT6SS1   = 156,
+	WL_RATE_1X3_TXBF_VHT7SS1   = 157,
+	WL_RATE_1X3_TXBF_VHT8SS1   = 158,
+	WL_RATE_1X3_TXBF_VHT9SS1   = 159,
+
+	/* 2 Streams expanded + 1 */
+
+	WL_RATE_2X3_TXBF_SDM_MCS8  = 160,
+	WL_RATE_2X3_TXBF_SDM_MCS9  = 161,
+	WL_RATE_2X3_TXBF_SDM_MCS10 = 162,
+	WL_RATE_2X3_TXBF_SDM_MCS11 = 163,
+	WL_RATE_2X3_TXBF_SDM_MCS12 = 164,
+	WL_RATE_2X3_TXBF_SDM_MCS13 = 165,
+	WL_RATE_2X3_TXBF_SDM_MCS14 = 166,
+	WL_RATE_2X3_TXBF_SDM_MCS15 = 167,
+
+	WL_RATE_2X3_TXBF_VHT0SS2   = 160,
+	WL_RATE_2X3_TXBF_VHT1SS2   = 161,
+	WL_RATE_2X3_TXBF_VHT2SS2   = 162,
+	WL_RATE_2X3_TXBF_VHT3SS2   = 163,
+	WL_RATE_2X3_TXBF_VHT4SS2   = 164,
+	WL_RATE_2X3_TXBF_VHT5SS2   = 165,
+	WL_RATE_2X3_TXBF_VHT6SS2   = 166,
+	WL_RATE_2X3_TXBF_VHT7SS2   = 167,
+	WL_RATE_2X3_TXBF_VHT8SS2   = 168,
+	WL_RATE_2X3_TXBF_VHT9SS2   = 169,
+
+	/* 3 Streams */
+
+	WL_RATE_3X3_TXBF_SDM_MCS16 = 170,
+	WL_RATE_3X3_TXBF_SDM_MCS17 = 171,
+	WL_RATE_3X3_TXBF_SDM_MCS18 = 172,
+	WL_RATE_3X3_TXBF_SDM_MCS19 = 173,
+	WL_RATE_3X3_TXBF_SDM_MCS20 = 174,
+	WL_RATE_3X3_TXBF_SDM_MCS21 = 175,
+	WL_RATE_3X3_TXBF_SDM_MCS22 = 176,
+	WL_RATE_3X3_TXBF_SDM_MCS23 = 177,
+
+	WL_RATE_3X3_TXBF_VHT0SS3   = 170,
+	WL_RATE_3X3_TXBF_VHT1SS3   = 171,
+	WL_RATE_3X3_TXBF_VHT2SS3   = 172,
+	WL_RATE_3X3_TXBF_VHT3SS3   = 173,
+	WL_RATE_3X3_TXBF_VHT4SS3   = 174,
+	WL_RATE_3X3_TXBF_VHT5SS3   = 175,
+	WL_RATE_3X3_TXBF_VHT6SS3   = 176,
+	WL_RATE_3X3_TXBF_VHT7SS3   = 177
+} clm_rates_t;
+
+/* Number of rate codes */
+#define WL_NUMRATES 178
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _bcmwifi_rates_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h b/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h
new file mode 100644
index 0000000000000000000000000000000000000000..5a63facfa2574379559c964db88f9b1d1058d414
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h
@@ -0,0 +1,45 @@
+/*
+ * Definitions for nl80211 vendor command/event access to host driver
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: brcm_nl80211.h 487126 2014-06-24 23:06:12Z $
+ *
+ */
+
+#ifndef _brcm_nl80211_h_
+#define _brcm_nl80211_h_
+
+#define OUI_BRCM  0x001018
+
+enum wl_vendor_subcmd {
+	BRCM_VENDOR_SCMD_UNSPEC,
+	BRCM_VENDOR_SCMD_PRIV_STR
+};
+
+struct bcm_nlmsg_hdr {
+	uint cmd;	/* common ioctl definition */
+	uint len;	/* expected return buffer length */
+	uint offset;	/* user buffer offset */
+	uint set;	/* get or set request optional */
+	uint magic;	/* magic number for verification */
+};
+
+enum bcmnl_attrs {
+	BCM_NLATTR_UNSPEC,
+
+	BCM_NLATTR_LEN,
+	BCM_NLATTR_DATA,
+
+	__BCM_NLATTR_AFTER_LAST,
+	BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1
+};
+
+struct nl_prv_data {
+	int err;			/* return result */
+	void *data;			/* ioctl return buffer pointer */
+	uint len;			/* ioctl return buffer length */
+	struct bcm_nlmsg_hdr *nlioc;	/* bcm_nlmsg_hdr header pointer */
+};
+
+#endif /* _brcm_nl80211_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/circularbuf.h b/drivers/net/wireless/bcmdhd/include/circularbuf.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa939ca3a6774309609e3635a1a1a5a5694841df
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/circularbuf.h
@@ -0,0 +1,97 @@
+/*
+ * Circular buffer support routines used by the MsgBuf (message buffer) implementation.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: circularbuf.h 452258 2014-01-29 19:17:57Z $
+ */
+
+#ifndef __CIRCULARBUF_H_INCLUDED__
+#define __CIRCULARBUF_H_INCLUDED__
+
+#include <osl.h>
+#include <typedefs.h>
+#include <bcmendian.h>
+
+/* Enumerations of return values provided by MsgBuf implementation */
+typedef enum {
+	CIRCULARBUF_FAILURE = -1,
+	CIRCULARBUF_SUCCESS
+} circularbuf_ret_t;
+
+/* Core circularbuf circular buffer structure */
+typedef struct circularbuf_s
+{
+	uint16 depth;	/* Depth of circular buffer */
+	uint16 r_ptr;	/* Read Ptr */
+	uint16 w_ptr;	/* Write Ptr */
+	uint16 e_ptr;	/* End Ptr */
+	uint16 wp_ptr;	/* wp_ptr/pending - scheduled for DMA. But, not yet complete. */
+	uint16 rp_ptr;	/* rp_ptr/pending - scheduled for DMA. But, not yet complete. */
+
+	uint8  *buf_addr;
+	void  *mb_ctx;
+	void  (*mb_ring_bell)(void *ctx);
+} circularbuf_t;
+
+#define CBUF_ERROR_VAL   0x00000001      /* Error level tracing */
+#define CBUF_TRACE_VAL   0x00000002      /* Function level tracing */
+#define CBUF_INFORM_VAL  0x00000004      /* debug level tracing */
+
+extern int cbuf_msg_level;
+
+#define CBUF_ERROR(args)         do {if (cbuf_msg_level & CBUF_ERROR_VAL) printf args;} while (0)
+#define CBUF_TRACE(args)         do {if (cbuf_msg_level & CBUF_TRACE_VAL) printf args;} while (0)
+#define CBUF_INFO(args)          do {if (cbuf_msg_level & CBUF_INFORM_VAL) printf args;} while (0)
+
+#define     CIRCULARBUF_START(x)     ((x)->buf_addr)
+#define     CIRCULARBUF_WRITE_PTR(x) ((x)->w_ptr)
+#define     CIRCULARBUF_READ_PTR(x)  ((x)->r_ptr)
+#define     CIRCULARBUF_END_PTR(x)   ((x)->e_ptr)
+
+#define circularbuf_debug_print(handle)                                 \
+			CBUF_INFO(("%s:%d:\t%p  rp=%4d  r=%4d  wp=%4d  w=%4d  e=%4d\n", \
+					__FUNCTION__, __LINE__,                             \
+					(void *) CIRCULARBUF_START(handle),                 \
+					(int) (handle)->rp_ptr, (int) (handle)->r_ptr,          \
+					(int) (handle)->wp_ptr, (int) (handle)->w_ptr,          \
+					(int) (handle)->e_ptr));
+
+
+/* Callback registered by application/mail-box with the circularbuf implementation.
+ * This will be invoked by the circularbuf implementation when write is complete and
+ * ready for informing the peer
+ */
+typedef void (*mb_ring_t)(void *ctx);
+
+
+/* Public Functions exposed by circularbuf */
+void
+circularbuf_init(circularbuf_t *handle, void *buf_base_addr, uint16 total_buf_len);
+void
+circularbuf_register_cb(circularbuf_t *handle, mb_ring_t mb_ring_func, void *ctx);
+
+/* Write Functions */
+void *
+circularbuf_reserve_for_write(circularbuf_t *handle, uint16 size);
+void
+circularbuf_write_complete(circularbuf_t *handle, uint16 bytes_written);
+
+/* Read Functions */
+void *
+circularbuf_get_read_ptr(circularbuf_t *handle, uint16 *avail_len);
+circularbuf_ret_t
+circularbuf_read_complete(circularbuf_t *handle, uint16 bytes_read);
+
+/*
+ * circularbuf_get_read_ptr() advances rp_ptr by the amount the consumer is
+ * expected to read. If the consumer reads less than that amount, a
+ * circularbuf_revert_rp_ptr() call must follow the corresponding
+ * circularbuf_get_read_ptr() call to move rp_ptr back to the point up to
+ * which data has actually been processed.
+ * The revert is not valid if it is preceded by multiple get_read_ptr() calls.
+ */
+circularbuf_ret_t
+circularbuf_revert_rp_ptr(circularbuf_t *handle, uint16 bytes);
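+
+/*
+ * Illustrative producer-side flow sketch; "cbuf", "buf_base", "buf_len",
+ * "msg"/"msg_len", "ctx" and ring_doorbell() are hypothetical, and error
+ * handling is elided.
+ *
+ *     circularbuf_init(&cbuf, buf_base, buf_len);
+ *     circularbuf_register_cb(&cbuf, ring_doorbell, ctx);
+ *     void *dst = circularbuf_reserve_for_write(&cbuf, msg_len);
+ *     if (dst != NULL) {
+ *         memcpy(dst, msg, msg_len);
+ *         circularbuf_write_complete(&cbuf, msg_len);	// peer can now be notified
+ *     }
+ */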
+
+#endif /* __CIRCULARBUF_H_INCLUDED__ */
diff --git a/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
new file mode 100644
index 0000000000000000000000000000000000000000..74334961a41d739a39e96542c008eaf4ca330c7c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
@@ -0,0 +1,121 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhdioctl.h 438755 2013-11-22 23:20:40Z $
+ */
+
+#ifndef _dhdioctl_h_
+#define	_dhdioctl_h_
+
+#include <typedefs.h>
+
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+	uint cmd;	/* common ioctl definition */
+	void *buf;	/* pointer to user buffer */
+	uint len;	/* length of user buffer */
+	bool set;	/* get or set request (optional) */
+	uint used;	/* bytes read or written (optional) */
+	uint needed;	/* bytes needed (optional) */
+	uint driver;	/* to identify target driver */
+} dhd_ioctl_t;
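+
+/*
+ * Illustrative GET_VAR request sketch; the iovar name is only an example and
+ * how the structure reaches the driver (e.g. via a private ioctl) is
+ * platform-specific and not shown.
+ *
+ *     char buf[DHD_IOCTL_SMLEN] = "version";	// iovar name in, value out
+ *     dhd_ioctl_t ioc = {0};
+ *     ioc.cmd = DHD_GET_VAR;
+ *     ioc.buf = buf;
+ *     ioc.len = sizeof(buf);
+ *     ioc.set = FALSE;
+ *     ioc.driver = DHD_IOCTL_MAGIC;	// identify the target driver (assumed usage)
+ */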
+
+/* Underlying BUS definition */
+enum {
+	BUS_TYPE_USB = 0, /* for USB dongles */
+	BUS_TYPE_SDIO, /* for SDIO dongles */
+	BUS_TYPE_PCIE /* for PCIE dongles */
+};
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC		0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION	1
+
+#define	DHD_IOCTL_MAXLEN	8192		/* max length ioctl buffer required */
+#define	DHD_IOCTL_SMLEN		256		/* "small" length ioctl buffer required */
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC				0
+#define DHD_GET_VERSION				1
+#define DHD_GET_VAR				2
+#define DHD_SET_VAR				3
+
+/* message levels */
+#define DHD_ERROR_VAL	0x0001
+#define DHD_TRACE_VAL	0x0002
+#define DHD_INFO_VAL	0x0004
+#define DHD_DATA_VAL	0x0008
+#define DHD_CTL_VAL	0x0010
+#define DHD_TIMER_VAL	0x0020
+#define DHD_HDRS_VAL	0x0040
+#define DHD_BYTES_VAL	0x0080
+#define DHD_INTR_VAL	0x0100
+#define DHD_LOG_VAL	0x0200
+#define DHD_GLOM_VAL	0x0400
+#define DHD_EVENT_VAL	0x0800
+#define DHD_BTA_VAL	0x1000
+#if 0 && (NDISVER >= 0x0630) && 1
+#define DHD_SCAN_VAL	0x2000
+#else
+#define DHD_ISCAN_VAL	0x2000
+#endif
+#define DHD_ARPOE_VAL	0x4000
+#define DHD_REORDER_VAL	0x8000
+#define DHD_WL_VAL		0x10000
+#define DHD_NOCHECKDIED_VAL		0x20000 /* UTF WAR */
+#define DHD_WL_VAL2		0x40000
+#define DHD_PNO_VAL		0x80000
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+	uint version;		/* To allow structure change tracking */
+	uint freq;		/* Max ticks between tx/rx attempts */
+	uint count;		/* Test packets to send/rcv each attempt */
+	uint print;		/* Print counts every <print> attempts */
+	uint total;		/* Total packets (or bursts) */
+	uint minlen;		/* Minimum length of packets to send */
+	uint maxlen;		/* Maximum length of packets to send */
+	uint numsent;		/* Count of test packets sent */
+	uint numrcvd;		/* Count of test packets received */
+	uint numfail;		/* Count of test send failures */
+	uint mode;		/* Test mode (type of test packets) */
+	uint stop;		/* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO		1 /* Send echo requests */
+#define DHD_PKTGEN_SEND 	2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST	3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV		4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE	(-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE	0	/* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP   (-1)	/* Request SD clock be stopped (and use SD1 mode) */
+
+
+/* require default structure packing */
+#include <packed_section_end.h>
+
+#endif /* _dhdioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h b/drivers/net/wireless/bcmdhd/include/epivers.h
new file mode 100644
index 0000000000000000000000000000000000000000..101944c0f5d6750d2cedd54b827ce8f26fa8fffe
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/epivers.h
@@ -0,0 +1,30 @@
+/*
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $
+ *
+*/
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define	EPI_MAJOR_VERSION	1
+
+#define	EPI_MINOR_VERSION	201
+
+#define	EPI_RC_NUMBER		34
+
+#define	EPI_INCREMENTAL_NUMBER	0
+
+#define	EPI_BUILD_NUMBER	0
+
+#define	EPI_VERSION		1, 201, 34, 0
+
+#define	EPI_VERSION_NUM		0x01c92200
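+
+/* EPI_VERSION_NUM appears to pack major.minor.rc.incremental as one byte each
+ * (assumed layout): (1 << 24) | (201 << 16) | (34 << 8) | 0 == 0x01c92200
+ */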
+
+#define EPI_VERSION_DEV		1.201.34
+
+/* Driver Version String, ASCII, 32 chars max */
+#define	EPI_VERSION_STR		"1.201.34 (r491657)"
+
+#endif /* _epivers_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h.in b/drivers/net/wireless/bcmdhd/include/epivers.h.in
new file mode 100644
index 0000000000000000000000000000000000000000..9897e987a87c736df789bc2ae46cdd3bbcd9fb20
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/epivers.h.in
@@ -0,0 +1,30 @@
+/*
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $
+ *
+*/
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define	EPI_MAJOR_VERSION	@EPI_MAJOR_VERSION@
+
+#define	EPI_MINOR_VERSION	@EPI_MINOR_VERSION@
+
+#define	EPI_RC_NUMBER		@EPI_RC_NUMBER@
+
+#define	EPI_INCREMENTAL_NUMBER	@EPI_INCREMENTAL_NUMBER@
+
+#define	EPI_BUILD_NUMBER	@EPI_BUILD_NUMBER@
+
+#define	EPI_VERSION		@EPI_VERSION@
+
+#define	EPI_VERSION_NUM		@EPI_VERSION_NUM@
+
+#define EPI_VERSION_DEV		@EPI_VERSION_DEV@
+
+/* Driver Version String, ASCII, 32 chars max */
+#define	EPI_VERSION_STR		"@EPI_VERSION_STR@@EPI_VERSION_TYPE@ (@VC_VERSION_NUM@)"
+
+#endif /* _epivers_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/epivers.sh b/drivers/net/wireless/bcmdhd/include/epivers.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9a723527b5347804bf2c13f9fc8aedd032a33f9d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/epivers.sh
@@ -0,0 +1,333 @@
+#! /bin/bash
+#
+# Create the epivers.h file from epivers.h.in
+#
+# Epivers.h version generation supports svn/sparse/gclient workspaces
+#
+# $Id: epivers.sh 389103 2013-03-05 17:24:49Z $
+#
+# Version generation works off of svn property HeadURL, if
+# not set it keys its versions from current svn workspace or
+# via .gclient_info deps contents
+#
+# GetCompVer.py return value and action needed
+#    i. trunk => use current date as version string
+#   ii. local => use SVNURL expanded by HeadURL keyword
+#  iii. <tag> => use it as as is
+#                (some components can override and say give me native ver)
+#   iv. empty =>
+#             a) If TAG is specified use it
+#             b) If no TAG is specified use date
+#
+# Contact: Prakash Dhavali
+# Contact: hnd-software-scm-list
+#
+
+# If the version header file already exists, increment its build number.
+# Otherwise, create a new file.
+if [ -f epivers.h ]; then
+
+	# If REUSE_VERSION is set, epivers iteration is not incremented
+	# This can be used precommit and continuous integration projects
+	if [ -n "$REUSE_VERSION" ]; then
+		echo "Previous epivers.h exists. Skipping version increment"
+		exit 0
+	fi
+
+	build=$(grep EPI_BUILD_NUMBER epivers.h | sed -e "s,.*BUILD_NUMBER[ 	]*,,")
+	build=$(expr ${build} + 1)
+	echo build=${build}
+	sed -e "s,.*_BUILD_NUMBER.*,#define EPI_BUILD_NUMBER	${build}," \
+		< epivers.h > epivers.h.new
+	cp -p epivers.h epivers.h.prev
+	mv epivers.h.new epivers.h
+	exit 0
+
+else # epivers.h doesn't exist
+
+	SVNCMD=${SVNCMD:-"svn --non-interactive"}
+	SRCBASE=${SRCBASE:-..}
+	NULL=/dev/null
+	[ -z "$VERBOSE" ] || NULL=/dev/stderr
+
+	# Check for the in file, if not there we're in the wrong directory
+	if [ ! -f epivers.h.in ]; then
+		echo "ERROR: No epivers.h.in found"
+		exit 1
+	fi
+
+	# Following SVNURL should be expanded on checkout
+	SVNURL='$HeadURL: http://svn.sj.broadcom.com/svn/wlansvn/proj/tags/DHD/DHD_REL_1_201_34/src/include/epivers.sh $'
+
+	# .gclient_info is created by gclient checkout/sync steps
+	# and contains "DEPS='<deps-url1> <deps-url2> ..." entry
+	GCLIENT_INFO=${GCLIENT_INFO:-${SRCBASE}/../.gclient_info}
+
+	# In gclient, derive SVNURL from gclient_info file
+	if [ -s "${GCLIENT_INFO}" ]; then
+		source ${GCLIENT_INFO}
+		if [ -z "$DEPS" ]; then
+			echo "ERROR: DEPS entry missing in $GCLIENT_INFO"
+			exit 1
+		else
+			for dep in $DEPS; do
+				SVNURL=${SVNURL:-$dep}
+				# Set SVNURL to first DEPS with /tags/ (if any)
+				if [[ $dep == */tags/* ]]; then
+					SVNURL=$dep
+					echo "INFO: Found gclient DEPS: $SVNURL"
+					break
+				fi
+			done
+		fi
+	elif [ -f "${GCLIENT_INFO}" ]; then
+		echo "ERROR: $GCLIENT_INFO exists, but it is empty"
+		exit 1
+	fi
+
+	# If SVNURL isn't expanded, extract it from svn info
+	if echo "$SVNURL" | egrep -vq 'HeadURL.*epivers.sh.*|http://.*/DEPS'; then
+		[ -n "$VERBOSE" ] && \
+			echo "DBG: SVN URL ($SVNURL) wasn't expanded. Getting it from svn info"
+		SVNURL=$($SVNCMD info epivers.sh 2> $NULL | egrep "^URL:")
+	fi
+
+	if echo "${TAG}" | grep -q "_BRANCH_\|_TWIG_"; then
+		branchtag=$TAG
+	else
+		branchtag=""
+	fi
+
+	# If this is a tagged build, use the tag to supply the numbers
+	# Tag should be in the form
+	#    <NAME>_REL_<MAJ>_<MINOR>
+	# or
+	#    <NAME>_REL_<MAJ>_<MINOR>_RC<RCNUM>
+	# or
+	#    <NAME>_REL_<MAJ>_<MINOR>_RC<RCNUM>_<INCREMENTAL>
+
+	MERGERLOG=${SRCBASE}/../merger_sources.log
+	GETCOMPVER=getcompver.py
+	GETCOMPVER_NET=/projects/hnd_software/gallery/src/tools/build/$GETCOMPVER
+	GETCOMPVER_NET_WIN=Z:${GETCOMPVER_NET}
+
+	#
+	# If there is a local copy GETCOMPVER use it ahead of network copy
+	#
+	if [ -s "$GETCOMPVER" ]; then
+	        GETCOMPVER_PATH="$GETCOMPVER"
+	elif [ -s "${SRCBASE}/../src/tools/build/$GETCOMPVER" ]; then
+	        GETCOMPVER_PATH="${SRCBASE}/../src/tools/build/$GETCOMPVER"
+	elif [ -s "$GETCOMPVER_NET" ]; then
+	        GETCOMPVER_PATH="$GETCOMPVER_NET"
+	elif [ -s "$GETCOMPVER_NET_WIN" ]; then
+	        GETCOMPVER_PATH="$GETCOMPVER_NET_WIN"
+	fi
+
+	#
+	# If $GETCOMPVER isn't found, fetch it from SVN
+	# (this should be very rare)
+	#
+	if [ ! -s "$GETCOMPVER_PATH" ]; then
+		[ -n "$VERBOSE" ] && \
+			echo "DBG: Fetching $GETCOMPVER from trunk"
+
+		$SVNCMD export -q \
+			^/proj/trunk/src/tools/build/${GETCOMPVER} \
+			${GETCOMPVER} 2> $NULL
+
+		GETCOMPVER_PATH=$GETCOMPVER
+	fi
+
+	# Now get tag for src/include from automerger log
+	[ -n "$VERBOSE" ] && \
+		echo "DBG: python $GETCOMPVER_PATH $MERGERLOG src/include"
+
+	COMPTAG=$(python $GETCOMPVER_PATH $MERGERLOG src/include 2> $NULL | sed -e 's/[[:space:]]*//g')
+
+	echo "DBG: Component Tag String Derived = $COMPTAG"
+
+	# Process COMPTAG values
+	# Rule:
+	# If trunk is returned, use date as component tag
+	# If LOCAL_COMPONENT is returned, use SVN URL to get native tag
+	# If component is returned or empty, assign it to SVNTAG
+	# GetCompVer.py return value and action needed
+	#    i. trunk => use current date as version string
+	#   ii. local => use SVNURL expanded by HeadURL keyword
+	#  iii. <tag> => use it as is
+	#   iv. empty =>
+	#             a) If TAG is specified, use it
+	#             b) If no TAG is specified, use SVNURL from HeadURL
+
+	SVNURL_VER=false
+
+	if [ "$COMPTAG" == "" ]; then
+		SVNURL_VER=true
+	elif [ "$COMPTAG" == "LOCAL_COMPONENT" ]; then
+		SVNURL_VER=true
+	elif [ "$COMPTAG" == "trunk" ]; then
+		SVNTAG=$(date '+TRUNKCOMP_REL_%Y_%m_%d')
+	else
+		SVNTAG=$COMPTAG
+	fi
+
+	# Given SVNURL path conventions or naming conventions, derive SVNTAG
+	# TO-DO: SVNTAG derivation logic can move to a central common API
+	# TO-DO: ${SRCBASE}/tools/build/svnurl2tag.sh
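+	# e.g. (illustrative) a URL containing .../proj/tags/DHD/DHD_REL_1_201_34/...
+	# matches the *_REL_* arm below and yields SVNTAG=DHD_REL_1_201_34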
+	if [ "$SVNURL_VER" == "true" ]; then
+		case "${SVNURL}" in
+			*_BRANCH_*)
+				SVNTAG=$(echo $SVNURL | tr '/' '\n' | awk '/_BRANCH_/{printf "%s",$1}')
+				;;
+			*_TWIG_*)
+				SVNTAG=$(echo $SVNURL | tr '/' '\n' | awk '/_TWIG_/{printf "%s",$1}')
+				;;
+			*_REL_*)
+				SVNTAG=$(echo $SVNURL | tr '/' '\n' | awk '/_REL_/{printf "%s",$1}')
+				;;
+			*/branches/*)
+				SVNTAG=${SVNURL#*/branches/}
+				SVNTAG=${SVNTAG%%/*}
+				;;
+			*/proj/tags/*|*/deps/tags/*)
+				SVNTAG=${SVNURL#*/tags/*/}
+				SVNTAG=${SVNTAG%%/*}
+				;;
+			*/trunk/*)
+				SVNTAG=$(date '+TRUNKURL_REL_%Y_%m_%d')
+				;;
+			*)
+				SVNTAG=$(date '+OTHER_REL_%Y_%m_%d')
+				;;
+		esac
+		echo "DBG: Native Tag String Derived from URL: $SVNTAG"
+	else
+		echo "DBG: Native Tag String Derived: $SVNTAG"
+	fi
+
+	TAG=${SVNTAG}
+
+	# Normalize the branch name portion to "D11" in case it has underscores in it
+	branch_name=$(expr match "$TAG" '\(.*\)_\(BRANCH\|TWIG\|REL\)_.*')
+	TAG=$(echo $TAG | sed -e "s%^$branch_name%D11%")
+
+	# Split the tag into an array on underbar or whitespace boundaries.
+	IFS="_	     " tag=(${TAG})
+	unset IFS
+
+	tagged=1
+	if [ ${#tag[*]} -eq 0 ]; then
+	   tag=($(date '+TOT REL %Y %m %d 0 %y'));
+	   # reconstruct a TAG from the date
+	   TAG=${tag[0]}_${tag[1]}_${tag[2]}_${tag[3]}_${tag[4]}_${tag[5]}
+	   tagged=0
+	fi
+
+	# Allow environment variable to override values.
+	# Missing values default to 0
+	#
+	maj=${EPI_MAJOR_VERSION:-${tag[2]:-0}}
+	min=${EPI_MINOR_VERSION:-${tag[3]:-0}}
+	rcnum=${EPI_RC_NUMBER:-${tag[4]:-0}}
+
+	# If increment field is 0, set it to date suffix if on TOB
+	if [ -n "$branchtag" ]; then
+		[ "${tag[5]:-0}" -eq 0 ] && echo "Using date suffix for incr"
+		today=${EPI_DATE_STR:-$(date '+%Y%m%d')}
+		incremental=${EPI_INCREMENTAL_NUMBER:-${tag[5]:-${today:-0}}}
+	else
+		incremental=${EPI_INCREMENTAL_NUMBER:-${tag[5]:-0}}
+	fi
+	origincr=${EPI_INCREMENTAL_NUMBER:-${tag[5]:-0}}
+	build=${EPI_BUILD_NUMBER:-0}
+
+	# Strip 'RC' from front of rcnum if present
+	rcnum=${rcnum/#RC/}
+
+	# strip the leading zero off the numbers (otherwise they look like octal)
+	maj=${maj/#0/}
+	min=${min/#0/}
+	rcnum=${rcnum/#0/}
+	incremental=${incremental/#0/}
+	origincr=${origincr/#0/}
+	build=${build/#0/}
+
+	# Some numbers may now be null; replace them with zero.
+	maj=${maj:-0}
+	min=${min:-0}
+
+	rcnum=${rcnum:-0}
+	incremental=${incremental:-0}
+	origincr=${origincr:-0}
+	build=${build:-0}
+
+	if [ -n "$EPI_VERSION_NUM" ]; then
+	    vernum=$EPI_VERSION_NUM
+	elif [ ${tagged} -eq 1 ]; then
+	    # vernum is at most 32 bits
+	    vernum=$(printf "0x%02x%02x%02x%02x" ${maj} ${min} ${rcnum} ${origincr})
+	else
+	    vernum=$(printf "0x00%02x%02x%02x" ${tag[7]} ${min} ${rcnum})
+	fi
+
+	# Make sure vernum fits within 32 bits; otherwise, truncate it.
+	# The version string keeps the full information.
+	vernum=${vernum:0:10}
+
+	# Build the string directly from the tag, irrespective of its length:
+	# remove the name and the tag type, then replace every _ with .
+	tag_ver_str=${TAG/${tag[0]}_}
+	tag_ver_str=${tag_ver_str/${tag[1]}_}
+	tag_ver_str=${tag_ver_str//_/.}
+
+	# record tag type
+	tagtype=
+
+	if [ "${tag[1]}" = "BRANCH" -o "${tag[1]}" = "TWIG" ]; then
+	   tagtype=" (TOB)"
+	   echo "tag type: $tagtype"
+	fi
+
+	echo "Effective version string: $tag_ver_str"
+
+	if [ "$(uname -s)" == "Darwin" ]; then
+	   # Mac does not accept minor numbers longer than two digits, so scale
+	   # them down: 5.100 becomes 5.1
+	   if [ $min -gt 99 ]; then
+	       minmac=$(expr $min / 100)
+	   else
+	       minmac=$min
+	   fi
+	   epi_ver_dev="${maj}.${minmac}.0"
+	else
+	   epi_ver_dev="${maj}.${min}.${rcnum}"
+	fi
+
+	# Finally get version control revision number of <SRCBASE> (if any)
+	vc_version_num=$($SVNCMD info ${SRCBASE} 2> $NULL | awk -F': ' '/^Last Changed Rev: /{printf "%s", $2}')
+
+	# OK, go do it
+	echo "maj=${maj}, min=${min}, rc=${rcnum}, inc=${incremental}, build=${build}"
+
+	sed \
+		-e "s;@EPI_MAJOR_VERSION@;${maj};" \
+		-e "s;@EPI_MINOR_VERSION@;${min};" \
+		-e "s;@EPI_RC_NUMBER@;${rcnum};" \
+		-e "s;@EPI_INCREMENTAL_NUMBER@;${incremental};" \
+		-e "s;@EPI_BUILD_NUMBER@;${build};" \
+		-e "s;@EPI_VERSION@;${maj}, ${min}, ${rcnum}, ${incremental};" \
+		-e "s;@EPI_VERSION_STR@;${tag_ver_str};" \
+		-e "s;@EPI_VERSION_TYPE@;${tagtype};" \
+		-e "s;@VERSION_TYPE@;${tagtype};" \
+		-e "s;@EPI_VERSION_NUM@;${vernum};" \
+		-e "s;@EPI_VERSION_DEV@;${epi_ver_dev};" \
+		-e "s;@VC_VERSION_NUM@;r${vc_version_num};" \
+		< epivers.h.in > epivers.h
+
+	# In shared workspaces across different platforms, ensure that a
+	# Windows-generated file is made platform neutral (no CRLF line endings)
+	if uname -s | egrep -i -q "cygwin"; then
+	   dos2unix epivers.h > $NULL 2>&1
+	fi
+fi # epivers.h
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h b/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h
new file mode 100644
index 0000000000000000000000000000000000000000..69738bb1fdca1fa10b152fa702cc78f9316e0acb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h
@@ -0,0 +1,70 @@
+/*
+ * HND arm trap handling.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: hnd_armtrap.h 470663 2014-04-16 00:24:43Z $
+ */
+
+#ifndef	_hnd_armtrap_h_
+#define	_hnd_armtrap_h_
+
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define	TRAP_STRIDE	4
+#define FIRST_TRAP	TR_RST
+#define LAST_TRAP	(TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_4T__)
+#define	MAX_TRAP_TYPE	(TR_FIQ + 1)
+#elif defined(__ARM_ARCH_7M__)
+#define	MAX_TRAP_TYPE	(TR_ISR + ARMCM3_NUMINTS)
+#endif	/* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define	TR_TYPE		0x00
+#define	TR_EPC		0x04
+#define	TR_CPSR		0x08
+#define	TR_SPSR		0x0c
+#define	TR_REGS		0x10
+#define	TR_REG(n)	(TR_REGS + (n) * 4)
+#define	TR_SP		TR_REG(13)
+#define	TR_LR		TR_REG(14)
+#define	TR_PC		TR_REG(15)
+
+#define	TRAP_T_SIZE	80
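+/* 80 bytes = 20 uint32 fields: type, epc, cpsr, spsr plus r0-r14 and pc (see trap_t below) */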
+
+#ifndef	_LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+	uint32		type;
+	uint32		epc;
+	uint32		cpsr;
+	uint32		spsr;
+	uint32		r0;	/* a1 */
+	uint32		r1;	/* a2 */
+	uint32		r2;	/* a3 */
+	uint32		r3;	/* a4 */
+	uint32		r4;	/* v1 */
+	uint32		r5;	/* v2 */
+	uint32		r6;	/* v3 */
+	uint32		r7;	/* v4 */
+	uint32		r8;	/* v5 */
+	uint32		r9;	/* sb/v6 */
+	uint32		r10;	/* sl/v7 */
+	uint32		r11;	/* fp/v8 */
+	uint32		r12;	/* ip */
+	uint32		r13;	/* sp */
+	uint32		r14;	/* lr */
+	uint32		pc;	/* r15 */
+} trap_t;
+
+#endif	/* !_LANGUAGE_ASSEMBLY */
+
+#endif	/* _hnd_armtrap_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_cons.h b/drivers/net/wireless/bcmdhd/include/hnd_cons.h
new file mode 100644
index 0000000000000000000000000000000000000000..dbc83052d980ac4612d4b298ea7a44fcdaa57ee5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_cons.h
@@ -0,0 +1,59 @@
+/*
+ * Console support for RTE - for host use only.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: hnd_cons.h 473343 2014-04-29 01:45:22Z $
+ */
+#ifndef	_hnd_cons_h_
+#define	_hnd_cons_h_
+
+#include <typedefs.h>
+#include <siutils.h>
+
+#define CBUF_LEN	(128)
+
+#define LOG_BUF_LEN	1024
+
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+#undef RWL_MAX_DATA_LEN
+#undef CBUF_LEN
+#undef LOG_BUF_LEN
+#define RWL_MAX_DATA_LEN (4 * 1024 + 8)
+#define CBUF_LEN	(RWL_MAX_DATA_LEN + 64)
+#define LOG_BUF_LEN (16 * 1024)
+#endif
+
+typedef struct {
+	uint32		buf;		/* Can't be pointer on (64-bit) hosts */
+	uint		buf_size;
+	uint		idx;
+	uint		out_idx;	/* output index */
+} hnd_log_t;
+
+typedef struct {
+	/* Virtual UART
+	 *   When there is no UART (e.g. Quickturn), the host should write a complete
+	 *   input line directly into cbuf and then write the length into vcons_in.
+	 *   This may also be used when there is a real UART (at risk of conflicting with
+	 *   the real UART).  vcons_out is currently unused.
+	 */
+	volatile uint	vcons_in;
+	volatile uint	vcons_out;
+
+	/* Output (logging) buffer
+	 *   Console output is written to a ring buffer log_buf at index log_idx.
+	 *   The host may read the output when it sees log_idx advance.
+	 *   Output will be lost if the output wraps around faster than the host polls.
+	 */
+	hnd_log_t	log;
+
+	/* Console input line buffer
+	 *   Characters are read one at a time into cbuf until <CR> is received, then
+	 *   the buffer is processed as a command line.  Also used for virtual UART.
+	 */
+	uint		cbuf_idx;
+	char		cbuf[CBUF_LEN];
+} hnd_cons_t;
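+
+/*
+ * Host-side polling sketch (illustrative only; rd32()/rd8()/print_char() are
+ * placeholders for whatever bus-read and output primitives the host uses and
+ * are not part of this header):
+ *
+ *   uint idx = rd32(&cons->log.idx);
+ *   while (last != idx) {
+ *       print_char(rd8(cons->log.buf + last));
+ *       last = (last + 1) % rd32(&cons->log.buf_size);
+ *   }
+ */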
+
+#endif /* _hnd_cons_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h b/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h
new file mode 100644
index 0000000000000000000000000000000000000000..3e6878a1174717715972e6aec394f196b56df4ff
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h
@@ -0,0 +1,186 @@
+/*
+ * HND generic packet pool operation primitives
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: $
+ */
+
+#ifndef _hnd_pktpool_h_
+#define _hnd_pktpool_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef BCMPKTPOOL
+#define POOL_ENAB(pool)		((pool) && (pool)->inited)
+#define SHARED_POOL		(pktpool_shared)
+#else /* BCMPKTPOOL */
+#define POOL_ENAB(bus)		0
+#define SHARED_POOL		((struct pktpool *)NULL)
+#endif /* BCMPKTPOOL */
+
+#ifdef BCMFRAGPOOL
+#define SHARED_FRAG_POOL	(pktpool_shared_lfrag)
+#endif
+#define SHARED_RXFRAG_POOL	(pktpool_shared_rxlfrag)
+
+
+#ifndef PKTPOOL_LEN_MAX
+#define PKTPOOL_LEN_MAX		40
+#endif /* PKTPOOL_LEN_MAX */
+#define PKTPOOL_CB_MAX		3
+
+/* forward declaration */
+struct pktpool;
+
+typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg);
+typedef struct {
+	pktpool_cb_t cb;
+	void *arg;
+} pktpool_cbinfo_t;
+/* callback fn extension to populate the host address in a pool pkt */
+typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, bool arg2);
+typedef struct {
+	pktpool_cb_extn_t cb;
+	void *arg;
+} pktpool_cbextn_info_t;
+
+
+#ifdef BCMDBG_POOL
+/* pkt pool debug states */
+#define POOL_IDLE	0
+#define POOL_RXFILL	1
+#define POOL_RXDH	2
+#define POOL_RXD11	3
+#define POOL_TXDH	4
+#define POOL_TXD11	5
+#define POOL_AMPDU	6
+#define POOL_TXENQ	7
+
+typedef struct {
+	void *p;
+	uint32 cycles;
+	uint32 dur;
+} pktpool_dbg_t;
+
+typedef struct {
+	uint8 txdh;	/* tx to host */
+	uint8 txd11;	/* tx to d11 */
+	uint8 enq;	/* waiting in q */
+	uint8 rxdh;	/* rx from host */
+	uint8 rxd11;	/* rx from d11 */
+	uint8 rxfill;	/* dma_rxfill */
+	uint8 idle;	/* avail in pool */
+} pktpool_stats_t;
+#endif /* BCMDBG_POOL */
+
+typedef struct pktpool {
+	bool inited;            /* pktpool_init was successful */
+	uint8 type;             /* type of lbuf: basic, frag, etc */
+	uint8 id;               /* pktpool ID:  index in registry */
+	bool istx;              /* direction: transmit or receive data path */
+
+	void * freelist;        /* free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */
+	uint16 avail;           /* number of packets in pool's free list */
+	uint16 len;             /* number of packets managed by pool */
+	uint16 maxlen;          /* maximum size of pool <= PKTPOOL_LEN_MAX */
+	uint16 plen;            /* size of pkt buffer, excluding lbuf|lbuf_frag */
+
+	bool empty;
+	uint8 cbtoggle;
+	uint8 cbcnt;
+	uint8 ecbcnt;
+	bool emptycb_disable;
+	pktpool_cbinfo_t *availcb_excl;
+	pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX];
+	pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX];
+	pktpool_cbextn_info_t cbext;
+	pktpool_cbextn_info_t rxcplidfn;
+#ifdef BCMDBG_POOL
+	uint8 dbg_cbcnt;
+	pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
+	uint16 dbg_qlen;
+	pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
+#endif
+	pktpool_cbinfo_t dmarxfill;
+} pktpool_t;
+
+extern pktpool_t *pktpool_shared;
+#ifdef BCMFRAGPOOL
+extern pktpool_t *pktpool_shared_lfrag;
+#endif
+extern pktpool_t *pktpool_shared_rxlfrag;
+
+/* Incarnate a pktpool registry. On success returns total_pools. */
+extern int pktpool_attach(osl_t *osh, uint32 total_pools);
+extern int pktpool_dettach(osl_t *osh); /* Relinquish registry */
+
+extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx, uint8 type);
+extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
+extern void* pktpool_get(pktpool_t *pktp);
+extern void pktpool_free(pktpool_t *pktp, void *p);
+extern int pktpool_add(pktpool_t *pktp, void *p);
+extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb);
+extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen);
+extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen);
+extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
+extern bool pktpool_emptycb_disabled(pktpool_t *pktp);
+extern int pktpool_hostaddr_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg1);
+extern int pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg);
+extern void pktpool_invoke_dmarxfill(pktpool_t *pktp);
+extern int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
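+
+/*
+ * Typical lifecycle sketch (illustrative only; npkts, plen and lbuf_type are
+ * placeholders, not symbols defined in this header):
+ *   pktpool_attach(osh, 1);
+ *   pktpool_init(osh, pktp, &npkts, plen, TRUE, lbuf_type);
+ *   p = pktpool_get(pktp);
+ *   ...
+ *   pktpool_free(pktp, p);
+ *   pktpool_deinit(osh, pktp);
+ *   pktpool_dettach(osh);
+ */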
+
+#define POOLPTR(pp)         ((pktpool_t *)(pp))
+#define POOLID(pp)          (POOLPTR(pp)->id)
+
+#define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid))
+
+#define pktpool_len(pp)     (POOLPTR(pp)->len)
+#define pktpool_avail(pp)   (POOLPTR(pp)->avail)
+#define pktpool_plen(pp)    (POOLPTR(pp)->plen)
+#define pktpool_maxlen(pp)  (POOLPTR(pp)->maxlen)
+
+
+/*
+ * ----------------------------------------------------------------------------
+ * A pool ID is assigned with a pkt pool during pool initialization. This is
+ * done by maintaining a registry of all initialized pools, and the registry
+ * index at which the pool is registered is used as the pool's unique ID.
+ * ID 0 is reserved and is used to signify an invalid pool ID.
+ * All packets henceforth allocated from a pool will be tagged with the pool's
+ * unique ID. Packets allocated from the heap will use the reserved ID = 0.
+ * Packets with non-zero pool id signify that they were allocated from a pool.
+ * A maximum of 15 pools are supported, allowing a 4bit pool ID to be used
+ * in place of a 32bit pool pointer in each packet.
+ * ----------------------------------------------------------------------------
+ */
+#define PKTPOOL_INVALID_ID          (0)
+#define PKTPOOL_MAXIMUM_ID          (15)
+
+/* Registry of pktpool(s) */
+extern pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1];
+
+/* Pool ID to/from Pool Pointer converters */
+#define PKTPOOL_ID2PTR(id)          (pktpools_registry[id])
+#define PKTPOOL_PTR2ID(pp)          (POOLID(pp))
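+
+/* Illustrative use: recover the pool a packet came from via its 4-bit pool id
+ *   if (poolid != PKTPOOL_INVALID_ID)
+ *       pktpool_free(PKTPOOL_ID2PTR(poolid), p);
+ */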
+
+
+#ifdef BCMDBG_POOL
+extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
+extern int pktpool_dbg_dump(pktpool_t *pktp);
+extern int pktpool_dbg_notify(pktpool_t *pktp);
+extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
+#endif /* BCMDBG_POOL */
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif /* _hnd_pktpool_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktq.h b/drivers/net/wireless/bcmdhd/include/hnd_pktq.h
new file mode 100644
index 0000000000000000000000000000000000000000..c27a21d581bab5a3300f6df9b754fa03e0894db8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_pktq.h
@@ -0,0 +1,168 @@
+/*
+ * HND generic pktq operation primitives
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: $
+ */
+
+#ifndef _hnd_pktq_h_
+#define _hnd_pktq_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* osl multi-precedence packet queue */
+#define PKTQ_LEN_MAX            0xFFFF  /* Max uint16 65535 packets */
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT        128	/* Max 128 packets */
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC           16	/* Maximum precedence levels */
+#endif
+
+typedef struct pktq_prec {
+	void *head;     /* first packet to dequeue */
+	void *tail;     /* last packet to dequeue */
+	uint16 len;     /* number of queued packets */
+	uint16 max;     /* maximum number of queued packets */
+} pktq_prec_t;
+
+#ifdef PKTQ_LOG
+typedef struct {
+	uint32 requested;    /* packets requested to be stored */
+	uint32 stored;	     /* packets stored */
+	uint32 saved;	     /* packets saved,
+	                            because a lowest priority queue has given away one packet
+	                      */
+	uint32 selfsaved;    /* packets saved,
+	                            because an older packet from the same queue has been dropped
+	                      */
+	uint32 full_dropped; /* packets dropped,
+	                            because pktq is full with higher precedence packets
+	                      */
+	uint32 dropped;      /* packets dropped because pktq per that precedence is full */
+	uint32 sacrificed;   /* packets dropped,
+	                            in order to save one from a queue of a highest priority
+	                      */
+	uint32 busy;         /* packets dropped because of hardware/transmission error */
+	uint32 retry;        /* packets re-sent because they were not received */
+	uint32 ps_retry;     /* packets retried again prior to moving power save mode */
+	uint32 suppress;     /* packets which were suppressed and not transmitted */
+	uint32 retry_drop;   /* packets finally dropped after retry limit */
+	uint32 max_avail;    /* the high-water mark of the queue capacity for packets -
+	                            goes to zero as queue fills
+	                      */
+	uint32 max_used;     /* the high-water mark of the queue utilisation for packets -
+	                            increases with use ('inverse' of max_avail)
+	                      */
+	uint32 queue_capacity; /* the maximum capacity of the queue */
+	uint32 rtsfail;        /* count of rts attempts that failed to receive cts */
+	uint32 acked;          /* count of packets sent (acked) successfully */
+	uint32 txrate_succ;    /* running total of phy rate of packets sent successfully */
+	uint32 txrate_main;    /* running total of primary phy rate of all packets */
+	uint32 throughput;     /* actual data transferred successfully */
+	uint32 airtime;        /* cumulative total medium access delay in useconds */
+	uint32  _logtime;      /* timestamp of last counter clear  */
+} pktq_counters_t;
+
+typedef struct {
+	uint32                  _prec_log;
+	pktq_counters_t*        _prec_cnt[PKTQ_MAX_PREC];     /* Counters per queue  */
+} pktq_log_t;
+#endif /* PKTQ_LOG */
+
+
+#define PKTQ_COMMON	\
+	uint16 num_prec;        /* number of precedences in use */			\
+	uint16 hi_prec;         /* rapid dequeue hint (>= highest non-empty prec) */	\
+	uint16 max;             /* total max packets */					\
+	uint16 len;             /* total number of packets */
+
+/* multi-priority pkt queue */
+struct pktq {
+	PKTQ_COMMON
+	/* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+	struct pktq_prec q[PKTQ_MAX_PREC];
+#ifdef PKTQ_LOG
+	pktq_log_t*      pktqlog;
+#endif
+};
+
+/* simple, non-priority pkt queue */
+struct spktq {
+	PKTQ_COMMON
+	/* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+	struct pktq_prec q[1];
+};
+
+#define PKTQ_PREC_ITER(pq, prec)        for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+/* fn(pkt, arg).  return true if pkt belongs to the interface (if) */
+typedef bool (*ifpkt_cb_t)(void*, int);
+
+/* operations on a specific precedence in packet queue */
+
+#define pktq_psetmax(pq, prec, _max)	((pq)->q[prec].max = (_max))
+#define pktq_pmax(pq, prec)		((pq)->q[prec].max)
+#define pktq_plen(pq, prec)		((pq)->q[prec].len)
+#define pktq_pavail(pq, prec)		((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec)		((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec)		((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec)		((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec)	((pq)->q[prec].tail)
+
+extern void  pktq_append(struct pktq *pq, int prec, struct spktq *list);
+extern void  pktq_prepend(struct pktq *pq, int prec, struct spktq *list);
+
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p);
+extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+/* Empty the queue at particular precedence level */
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir,
+	ifpkt_cb_t fn, int arg);
+/* Remove a specified packet from its queue */
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
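+
+/* Illustrative sketch: flush every precedence, highest first (PKTFREE() is
+ * the OSL packet-free primitive, declared elsewhere):
+ *   int prec; void *p;
+ *   PKTQ_PREC_ITER(pq, prec) {
+ *       while ((p = pktq_pdeq(pq, prec)) != NULL)
+ *           PKTFREE(osh, p, TRUE);
+ *   }
+ */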
+
+/* operations on a set of precedences in packet queue */
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+/* operations on packet queue as a whole */
+
+#define pktq_len(pq)		((int)(pq)->len)
+#define pktq_max(pq)		((int)(pq)->max)
+#define pktq_avail(pq)		((int)((pq)->max - (pq)->len))
+#define pktq_full(pq)		((pq)->len >= (pq)->max)
+#define pktq_empty(pq)		((pq)->len == 0)
+
+/* operations for single precedence queues */
+#define pktenq(pq, p)		pktq_penq(((struct pktq *)(void *)pq), 0, (p))
+#define pktenq_head(pq, p)	pktq_penq_head(((struct pktq *)(void *)pq), 0, (p))
+#define pktdeq(pq)		pktq_pdeq(((struct pktq *)(void *)pq), 0)
+#define pktdeq_tail(pq)		pktq_pdeq_tail(((struct pktq *)(void *)pq), 0)
+#define pktqflush(osh, pq)	pktq_flush(osh, ((struct pktq *)(void *)pq), TRUE, NULL, 0)
+#define pktqinit(pq, len)	pktq_init(((struct pktq *)(void *)pq), 1, len)
+
+extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
+extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len);
+
+/* prec_out may be NULL if caller is not interested in return value */
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg);
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif /* _hnd_pktq_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hndpmu.h b/drivers/net/wireless/bcmdhd/include/hndpmu.h
new file mode 100644
index 0000000000000000000000000000000000000000..fc402b07228fabae16c764f590bb682a1845a495
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndpmu.h
@@ -0,0 +1,23 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: hndpmu.h 471127 2014-04-17 23:24:23Z $
+ */
+
+#ifndef _hndpmu_h_
+#define _hndpmu_h_
+
+#include <typedefs.h>
+#include <osl_decl.h>
+#include <siutils.h>
+
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask);
+extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+
+extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear);
+extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh);
+
+#endif /* _hndpmu_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hndsoc.h b/drivers/net/wireless/bcmdhd/include/hndsoc.h
new file mode 100644
index 0000000000000000000000000000000000000000..a44c2f7c2b5f1d45d8f3899a0c16755951e40a28
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndsoc.h
@@ -0,0 +1,268 @@
+/*
+ * Broadcom HND chip & on-chip-interconnect-related definitions.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: hndsoc.h 473238 2014-04-28 19:14:56Z $
+ */
+
+#ifndef	_HNDSOC_H
+#define	_HNDSOC_H
+
+/* Include the soci specific files */
+#include <sbconfig.h>
+#include <aidmp.h>
+
+/*
+ * SOC Interconnect Address Map.
+ * All regions may not exist on all chips.
+ */
+#define SI_SDRAM_BASE		0x00000000	/* Physical SDRAM */
+#define SI_PCI_MEM		0x08000000	/* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI_MEM_SZ		(64 * 1024 * 1024)
+#define SI_PCI_CFG		0x0c000000	/* Host Mode sb2pcitranslation1 (64 MB) */
+#define	SI_SDRAM_SWAPPED	0x10000000	/* Byteswapped Physical SDRAM */
+#define SI_SDRAM_R2		0x80000000	/* Region 2 for sdram (512 MB) */
+
+#define SI_ENUM_BASE    	0x18000000	/* Enumeration space base */
+
+#define SI_WRAP_BASE    	0x18100000	/* Wrapper space base */
+#define SI_CORE_SIZE    	0x1000		/* each core gets 4Kbytes for registers */
+
+#ifndef SI_MAXCORES
+#define	SI_MAXCORES		32		/* NorthStar has more cores */
+#endif /* SI_MAXCORES */
+
+#define	SI_FASTRAM		0x19000000	/* On-chip RAM on chips that also have DDR */
+#define	SI_FASTRAM_SWAPPED	0x19800000
+
+#define	SI_FLASH2		0x1c000000	/* Flash Region 2 (region 1 shadowed here) */
+#define	SI_FLASH2_SZ		0x02000000	/* Size of Flash Region 2 */
+#define	SI_ARMCM3_ROM		0x1e000000	/* ARM Cortex-M3 ROM */
+#define	SI_FLASH1		0x1fc00000	/* MIPS Flash Region 1 */
+#define	SI_FLASH1_SZ		0x00400000	/* MIPS Size of Flash Region 1 */
+#define	SI_FLASH_WINDOW		0x01000000	/* Flash XIP Window */
+
+#define SI_NS_NANDFLASH		0x1c000000	/* NorthStar NAND flash base */
+#define SI_NS_NORFLASH		0x1e000000	/* NorthStar NOR flash base */
+#define SI_NS_ROM		0xfffd0000	/* NorthStar ROM */
+#define	SI_NS_FLASH_WINDOW	0x02000000	/* Flash XIP Window */
+
+#define	SI_ARM7S_ROM		0x20000000	/* ARM7TDMI-S ROM */
+#define	SI_ARMCR4_ROM		0x000f0000	/* ARM Cortex-R4 ROM */
+#define	SI_ARMCM3_SRAM2		0x60000000	/* ARM Cortex-M3 SRAM Region 2 */
+#define	SI_ARM7S_SRAM2		0x80000000	/* ARM7TDMI-S SRAM Region 2 */
+#define	SI_ARM_FLASH1		0xffff0000	/* ARM Flash Region 1 */
+#define	SI_ARM_FLASH1_SZ	0x00010000	/* ARM Size of Flash Region 1 */
+
+#define SI_SFLASH		0x14000000
+#define SI_PCI_DMA		0x40000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA2		0x80000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA_SZ		0x40000000	/* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCIE_DMA_L32		0x00000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), low 32 bits
+						 */
+#define SI_PCIE_DMA_H32		0x80000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), high 32 bits
+						 */
+/* core codes */
+#define	NODEV_CORE_ID		0x700		/* Invalid coreid */
+#define	CC_CORE_ID		0x800		/* chipcommon core */
+#define	ILINE20_CORE_ID		0x801		/* iline20 core */
+#define	SRAM_CORE_ID		0x802		/* sram core */
+#define	SDRAM_CORE_ID		0x803		/* sdram core */
+#define	PCI_CORE_ID		0x804		/* pci core */
+#define	MIPS_CORE_ID		0x805		/* mips core */
+#define	ENET_CORE_ID		0x806		/* enet mac core */
+#define	CODEC_CORE_ID		0x807		/* v90 codec core */
+#define	USB_CORE_ID		0x808		/* usb 1.1 host/device core */
+#define	ADSL_CORE_ID		0x809		/* ADSL core */
+#define	ILINE100_CORE_ID	0x80a		/* iline100 core */
+#define	IPSEC_CORE_ID		0x80b		/* ipsec core */
+#define	UTOPIA_CORE_ID		0x80c		/* utopia core */
+#define	PCMCIA_CORE_ID		0x80d		/* pcmcia core */
+#define	SOCRAM_CORE_ID		0x80e		/* internal memory core */
+#define	MEMC_CORE_ID		0x80f		/* memc sdram core */
+#define	OFDM_CORE_ID		0x810		/* OFDM phy core */
+#define	EXTIF_CORE_ID		0x811		/* external interface core */
+#define	D11_CORE_ID		0x812		/* 802.11 MAC core */
+#define	APHY_CORE_ID		0x813		/* 802.11a phy core */
+#define	BPHY_CORE_ID		0x814		/* 802.11b phy core */
+#define	GPHY_CORE_ID		0x815		/* 802.11g phy core */
+#define	MIPS33_CORE_ID		0x816		/* mips3302 core */
+#define	USB11H_CORE_ID		0x817		/* usb 1.1 host core */
+#define	USB11D_CORE_ID		0x818		/* usb 1.1 device core */
+#define	USB20H_CORE_ID		0x819		/* usb 2.0 host core */
+#define	USB20D_CORE_ID		0x81a		/* usb 2.0 device core */
+#define	SDIOH_CORE_ID		0x81b		/* sdio host core */
+#define	ROBO_CORE_ID		0x81c		/* roboswitch core */
+#define	ATA100_CORE_ID		0x81d		/* parallel ATA core */
+#define	SATAXOR_CORE_ID		0x81e		/* serial ATA & XOR DMA core */
+#define	GIGETH_CORE_ID		0x81f		/* gigabit ethernet core */
+#define	PCIE_CORE_ID		0x820		/* pci express core */
+#define	NPHY_CORE_ID		0x821		/* 802.11n 2x2 phy core */
+#define	SRAMC_CORE_ID		0x822		/* SRAM controller core */
+#define	MINIMAC_CORE_ID		0x823		/* MINI MAC/phy core */
+#define	ARM11_CORE_ID		0x824		/* ARM 1176 core */
+#define	ARM7S_CORE_ID		0x825		/* ARM7tdmi-s core */
+#define	LPPHY_CORE_ID		0x826		/* 802.11a/b/g phy core */
+#define	PMU_CORE_ID		0x827		/* PMU core */
+#define	SSNPHY_CORE_ID		0x828		/* 802.11n single-stream phy core */
+#define	SDIOD_CORE_ID		0x829		/* SDIO device core */
+#define	ARMCM3_CORE_ID		0x82a		/* ARM Cortex M3 core */
+#define	HTPHY_CORE_ID		0x82b		/* 802.11n 4x4 phy core */
+#define	MIPS74K_CORE_ID		0x82c		/* mips 74k core */
+#define	GMAC_CORE_ID		0x82d		/* Gigabit MAC core */
+#define	DMEMC_CORE_ID		0x82e		/* DDR1/2 memory controller core */
+#define	PCIERC_CORE_ID		0x82f		/* PCIE Root Complex core */
+#define	OCP_CORE_ID		0x830		/* OCP2OCP bridge core */
+#define	SC_CORE_ID		0x831		/* shared common core */
+#define	AHB_CORE_ID		0x832		/* OCP2AHB bridge core */
+#define	SPIH_CORE_ID		0x833		/* SPI host core */
+#define	I2S_CORE_ID		0x834		/* I2S core */
+#define	DMEMS_CORE_ID		0x835		/* SDR/DDR1 memory controller core */
+#define	DEF_SHIM_COMP		0x837		/* SHIM component in ubus/6362 */
+
+#define ACPHY_CORE_ID		0x83b		/* Dot11 ACPHY */
+#define PCIE2_CORE_ID		0x83c		/* pci express Gen2 core */
+#define USB30D_CORE_ID		0x83d		/* usb 3.0 device core */
+#define ARMCR4_CORE_ID		0x83e		/* ARM CR4 CPU */
+#define GCI_CORE_ID		0x840		/* GCI Core */
+#define M2MDMA_CORE_ID          0x844           /* memory to memory dma */
+#define APB_BRIDGE_CORE_ID	0x135		/* APB bridge core ID */
+#define AXI_CORE_ID		0x301		/* AXI/GPV core ID */
+#define EROM_CORE_ID		0x366		/* EROM core ID */
+#define OOB_ROUTER_CORE_ID	0x367		/* OOB router core ID */
+#define DEF_AI_COMP		0xfff		/* Default component, in ai chips it maps all
+						 * unused address ranges
+						 */
+
+#define CC_4706_CORE_ID		0x500		/* chipcommon core */
+#define NS_PCIEG2_CORE_ID	0x501		/* PCIE Gen 2 core */
+#define NS_DMA_CORE_ID		0x502		/* DMA core */
+#define NS_SDIO3_CORE_ID	0x503		/* SDIO3 core */
+#define NS_USB20_CORE_ID	0x504		/* USB2.0 core */
+#define NS_USB30_CORE_ID	0x505		/* USB3.0 core */
+#define NS_A9JTAG_CORE_ID	0x506		/* ARM Cortex A9 JTAG core */
+#define NS_DDR23_CORE_ID	0x507		/* Denali DDR2/DDR3 memory controller */
+#define NS_ROM_CORE_ID		0x508		/* ROM core */
+#define NS_NAND_CORE_ID		0x509		/* NAND flash controller core */
+#define NS_QSPI_CORE_ID		0x50a		/* SPI flash controller core */
+#define NS_CCB_CORE_ID		0x50b		/* ChipcommonB core */
+#define SOCRAM_4706_CORE_ID	0x50e		/* internal memory core */
+#define NS_SOCRAM_CORE_ID	SOCRAM_4706_CORE_ID
+#define	ARMCA9_CORE_ID		0x510		/* ARM Cortex A9 core (ihost) */
+#define	NS_IHOST_CORE_ID	ARMCA9_CORE_ID	/* ARM Cortex A9 core (ihost) */
+#define GMAC_COMMON_4706_CORE_ID	0x5dc		/* Gigabit MAC core */
+#define GMAC_4706_CORE_ID	0x52d		/* Gigabit MAC core */
+#define AMEMC_CORE_ID		0x52e		/* DDR1/2 memory controller core */
+#define ALTA_CORE_ID		0x534		/* I2S core */
+#define DDR23_PHY_CORE_ID	0x5dd
+
+#define SI_PCI1_MEM     0x40000000  /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI1_CFG     0x44000000  /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_PCIE1_DMA_H32		0xc0000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), high 32 bits
+						 */
+#define CC_4706B0_CORE_REV	0x8000001f		/* chipcommon core */
+#define SOCRAM_4706B0_CORE_REV	0x80000005		/* internal memory core */
+#define GMAC_4706B0_CORE_REV	0x80000000		/* Gigabit MAC core */
+#define NS_PCIEG2_CORE_REV_B0	0x7		/* NS-B0 PCIE Gen 2 core rev */
+
+/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
+ * and chipcommon being the first core:
+ */
+#define	SI_CC_IDX		0
+/* SOC Interconnect types (aka chip types) */
+#define	SOCI_SB			0
+#define	SOCI_AI			1
+#define	SOCI_UBUS		2
+#define	SOCI_NAI		3
+
+/* Common core control flags */
+#define	SICF_BIST_EN		0x8000
+#define	SICF_PME_EN		0x4000
+#define	SICF_CORE_BITS		0x3ffc
+#define	SICF_FGC		0x0002
+#define	SICF_CLOCK_EN		0x0001
+
+/* Common core status flags */
+#define	SISF_BIST_DONE		0x8000
+#define	SISF_BIST_ERROR		0x4000
+#define	SISF_GATED_CLK		0x2000
+#define	SISF_DMA64		0x1000
+#define	SISF_CORE_BITS		0x0fff
+
+/* NorthStar core status flags */
+#define SISF_NS_BOOTDEV_MASK	0x0003	/* ROM core */
+#define SISF_NS_BOOTDEV_NOR	0x0000	/* ROM core */
+#define SISF_NS_BOOTDEV_NAND	0x0001	/* ROM core */
+#define SISF_NS_BOOTDEV_ROM	0x0002	/* ROM core */
+#define SISF_NS_BOOTDEV_OFFLOAD	0x0003	/* ROM core */
+#define SISF_NS_SKUVEC_MASK	0x000c	/* ROM core */
+
+/* A register that is common to all cores to
+ * communicate w/PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST		0x1e0		/* clock control and status */
+#define SI_PWR_CTL_ST		0x1e8		/* For memory clock gating */
+
+/* clk_ctl_st register */
+#define	CCS_FORCEALP		0x00000001	/* force ALP request */
+#define	CCS_FORCEHT		0x00000002	/* force HT request */
+#define	CCS_FORCEILP		0x00000004	/* force ILP request */
+#define	CCS_ALPAREQ		0x00000008	/* ALP Avail Request */
+#define	CCS_HTAREQ		0x00000010	/* HT Avail Request */
+#define	CCS_FORCEHWREQOFF	0x00000020	/* Force HW Clock Request Off */
+#define CCS_HQCLKREQ		0x00000040	/* HQ Clock Required */
+#define CCS_USBCLKREQ		0x00000100	/* USB Clock Req */
+#define CCS_SECICLKREQ		0x00000100	/* SECI Clock Req */
+#define CCS_ARMFASTCLOCKREQ	0x00000100	/* ARM CR4 fast clock request */
+#define CCS_AVBCLKREQ		0x00000400	/* AVB Clock enable request */
+#define CCS_ERSRC_REQ_MASK	0x00000700	/* external resource requests */
+#define CCS_ERSRC_REQ_SHIFT	8
+#define	CCS_ALPAVAIL		0x00010000	/* ALP is available */
+#define	CCS_HTAVAIL		0x00020000	/* HT is available */
+#define CCS_BP_ON_APL		0x00040000	/* RO: Backplane is running on ALP clock */
+#define CCS_BP_ON_HT		0x00080000	/* RO: Backplane is running on HT clock */
+#define CCS_ARMFASTCLOCKSTATUS	0x01000000	/* Fast CPU clock is running */
+#define CCS_ERSRC_STS_MASK	0x07000000	/* external resource status */
+#define CCS_ERSRC_STS_SHIFT	24
+
+#define	CCS0_HTAVAIL		0x00010000	/* HT avail in chipc and pcmcia on 4328a0 */
+#define	CCS0_ALPAVAIL		0x00020000	/* ALP avail in chipc and pcmcia on 4328a0 */
+
+/* Not really related to SOC Interconnect, but a couple of software
+ * conventions for the use of the flash space:
+ */
+
+/* Minimum amount of flash we support */
+#define FLASH_MIN		0x00020000	/* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size  */
+#define	BISZ_OFFSET		0x3e0		/* At this offset into the binary */
+#define	BISZ_MAGIC		0x4249535a	/* Marked with this value: 'BISZ' */
+#define	BISZ_MAGIC_IDX		0		/* Word 0: magic */
+#define	BISZ_TXTST_IDX		1		/*	1: text start */
+#define	BISZ_TXTEND_IDX		2		/*	2: text end */
+#define	BISZ_DATAST_IDX		3		/*	3: data start */
+#define	BISZ_DATAEND_IDX	4		/*	4: data end */
+#define	BISZ_BSSST_IDX		5		/*	5: bss start */
+#define	BISZ_BSSEND_IDX		6		/*	6: bss end */
+#define BISZ_SIZE		7		/* descriptor size in 32-bit integers */
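+
+/* Illustrative use (image_base is a hypothetical pointer to the loaded image):
+ *   uint32 *bisz = (uint32 *)((uint8 *)image_base + BISZ_OFFSET);
+ *   if (bisz[BISZ_MAGIC_IDX] == BISZ_MAGIC)
+ *       data_len = bisz[BISZ_DATAEND_IDX] - bisz[BISZ_DATAST_IDX];
+ */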
+
+/* Boot/Kernel related definitions and functions */
+#define	SOC_BOOTDEV_ROM		0x00000001
+#define	SOC_BOOTDEV_PFLASH	0x00000002
+#define	SOC_BOOTDEV_SFLASH	0x00000004
+#define	SOC_BOOTDEV_NANDFLASH	0x00000008
+
+#define	SOC_KNLDEV_NORFLASH	0x00000002
+#define	SOC_KNLDEV_NANDFLASH	0x00000004
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+int soc_boot_dev(void *sih);
+int soc_knl_dev(void *sih);
+#endif	/* !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) */
+
+#endif /* _HNDSOC_H */
diff --git a/drivers/net/wireless/bcmdhd/include/linux_osl.h b/drivers/net/wireless/bcmdhd/include/linux_osl.h
new file mode 100644
index 0000000000000000000000000000000000000000..a7e2fe374be9a3508f1d3a776cc4d4d1987550c8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linux_osl.h
@@ -0,0 +1,985 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: linux_osl.h 491170 2014-07-15 06:23:58Z $
+ */
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+#define DECLSPEC_ALIGN(x)	__attribute__ ((aligned(x)))
+
+/* Linux Kernel: File Operations: start */
+extern void * osl_os_open_image(char * filename);
+extern int osl_os_get_image_block(char * buf, int len, void * image);
+extern void osl_os_close_image(void * image);
+extern int osl_os_image_size(void *image);
+/* Linux Kernel: File Operations: end */
+
+#ifdef BCMDRIVER
+
+/* OSL initialization */
+#ifdef SHARED_OSL_CMN
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn);
+#else
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+#endif /* SHARED_OSL_CMN */
+
+extern void osl_detach(osl_t *osh);
+extern int osl_static_mem_init(osl_t *osh, void *adapter);
+extern int osl_static_mem_deinit(osl_t *osh, void *adapter);
+extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
+extern void* osl_get_bus_handle(osl_t *osh);
+#ifdef EXYNOS5433_PCIE_WAR
+extern void exynos_pcie_set_l1_exit(void);
+extern void exynos_pcie_clear_l1_exit(void);
+#endif /* EXYNOS5433_PCIE_WAR */
+
+/* Global ASSERT type */
+extern uint32 g_assert_type;
+
+/* ASSERT */
+#if defined(BCMASSERT_LOG)
+	#define ASSERT(exp) \
+	  do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(const char *exp, const char *file, int line);
+#else
+	#ifdef __GNUC__
+		#define GCC_VERSION \
+			(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+		#if GCC_VERSION > 30100
+			#define ASSERT(exp)	do {} while (0)
+		#else
+			/* ASSERT could cause segmentation fault on GCC3.1, use empty instead */
+			#define ASSERT(exp)
+		#endif /* GCC_VERSION > 30100 */
+	#endif /* __GNUC__ */
+#endif
+
+/* bcm_prefetch_32B */
+static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B)
+{
+#if defined(BCM47XX_CA9) && (__LINUX_ARM_ARCH__ >= 5)
+	switch (cachelines_32B) {
+		case 4: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 96) : "cc");
+		case 3: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 64) : "cc");
+		case 2: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 32) : "cc");
+		case 1: __asm__ __volatile__("pld\t%a0" :: "p"(addr +  0) : "cc");
+	}
+#endif
+}
+
+/* microsecond delay */
+#define	OSL_DELAY(usec)		osl_delay(usec)
+extern void osl_delay(uint usec);
+
+#define OSL_SLEEP(ms)			osl_sleep(ms)
+extern void osl_sleep(uint ms);
+
+#define	OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_read_attr((osh), (offset), (buf), (size))
+#define	OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_write_attr((osh), (offset), (buf), (size))
+extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
+extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
+
+/* PCI configuration space access macros */
+#define	OSL_PCI_READ_CONFIG(osh, offset, size) \
+	osl_pci_read_config((osh), (offset), (size))
+#define	OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+	osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+/* PCI device bus # and slot # */
+#define OSL_PCI_BUS(osh)	osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh)	osl_pci_slot(osh)
+#define OSL_PCIE_DOMAIN(osh)	osl_pcie_domain(osh)
+#define OSL_PCIE_BUS(osh)	osl_pcie_bus(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+extern uint osl_pcie_domain(osl_t *osh);
+extern uint osl_pcie_bus(osl_t *osh);
+extern struct pci_dev *osl_pci_device(osl_t *osh);
+
+
+/* Pkttag flag should be part of public information */
+typedef struct {
+	bool pkttag;
+	bool mmbus;		/* Bus supports memory-mapped register accesses */
+	pktfree_cb_fn_t tx_fn;  /* Callback function for PKTFREE */
+	void *tx_ctx;		/* Context to the callback function */
+	void	*unused[3];
+} osl_pubinfo_t;
+
+extern void osl_flag_set(osl_t *osh, uint32 mask);
+extern bool osl_is_flag_set(osl_t *osh, uint32 mask);
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx)		\
+	do {						\
+	   ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn;	\
+	   ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx;	\
+	} while (0)
+
+
+/* host/bus architecture-specific byte swap */
+#define BUS_SWAP32(v)		(v)
+	#define MALLOC(osh, size)	osl_malloc((osh), (size))
+	#define MALLOCZ(osh, size)	osl_mallocz((osh), (size))
+	#define MFREE(osh, addr, size)	osl_mfree((osh), (addr), (size))
+	#define MALLOCED(osh)		osl_malloced((osh))
+	#define MEMORY_LEFTOVER(osh) osl_check_memleak(osh)
+	extern void *osl_malloc(osl_t *osh, uint size);
+	extern void *osl_mallocz(osl_t *osh, uint size);
+	extern void osl_mfree(osl_t *osh, void *addr, uint size);
+	extern uint osl_malloced(osl_t *osh);
+	extern uint osl_check_memleak(osl_t *osh);
+
+
+#define	MALLOC_FAILED(osh)	osl_malloc_failed((osh))
+extern uint osl_malloc_failed(osl_t *osh);
+
+/* allocate/free shared (dma-able) consistent memory */
+#define	DMA_CONSISTENT_ALIGN	osl_dma_consistent_align()
+#define	DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
+	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define	DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+
+#define	DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \
+	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define	DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \
+	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+
+extern uint osl_dma_consistent_align(void);
+extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align,
+	uint *tot, dmaaddr_t *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
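+
+/* Usage sketch (illustrative only):
+ *   uint alloced; dmaaddr_t pa;
+ *   void *va = DMA_ALLOC_CONSISTENT(osh, size, 4, &alloced, &pa, NULL);
+ *   if (va != NULL) {
+ *       ... hand pa to the device, access the buffer through va ...
+ *       DMA_FREE_CONSISTENT(osh, va, size, pa, NULL);
+ *   }
+ */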
+
+/* map/unmap direction */
+#define	DMA_TX	1	/* TX direction for DMA */
+#define	DMA_RX	2	/* RX direction for DMA */
+
+/* map/unmap shared (dma-able) memory */
+#define	DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+	osl_dma_unmap((osh), (pa), (size), (direction))
+extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
+	hnddma_seg_map_t *txp_dmah);
+extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
+
+/* API for DMA addressing capability */
+#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);})
+
+#if (defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__))
+	extern void osl_cache_flush(void *va, uint size);
+	extern void osl_cache_inv(void *va, uint size);
+	extern void osl_prefetch(const void *ptr);
+	#define OSL_CACHE_FLUSH(va, len)	osl_cache_flush((void *) va, len)
+	#define OSL_CACHE_INV(va, len)		osl_cache_inv((void *) va, len)
+	#define OSL_PREFETCH(ptr)			osl_prefetch(ptr)
+#ifdef __ARM_ARCH_7A__
+	extern int osl_arch_is_coherent(void);
+	#define OSL_ARCH_IS_COHERENT()		osl_arch_is_coherent()
+#else
+	#define OSL_ARCH_IS_COHERENT()		NULL
+#endif /* __ARM_ARCH_7A__ */
+#else
+	#define OSL_CACHE_FLUSH(va, len)	BCM_REFERENCE(va)
+	#define OSL_CACHE_INV(va, len)		BCM_REFERENCE(va)
+	#define OSL_PREFETCH(ptr)		BCM_REFERENCE(ptr)
+
+	#define OSL_ARCH_IS_COHERENT()		NULL
+#endif
+
+/* register access macros */
+#if defined(BCMSDIO)
+	#include <bcmsdh.h>
+	#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \
+		(uintptr)(r), sizeof(*(r)), (v)))
+	#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \
+		(uintptr)(r), sizeof(*(r))))
+#elif defined(BCM47XX_ACP_WAR)
+extern void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size);
+
+#define OSL_READ_REG(osh, r) \
+	({\
+		__typeof(*(r)) __osl_v; \
+		osl_pcie_rreg(osh, (uintptr)(r), (void *)&__osl_v, sizeof(*(r))); \
+		__osl_v; \
+	})
+#endif
+
+#if defined(BCM47XX_ACP_WAR)
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;})
+#else
+
+#if defined(BCMSDIO)
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+		mmap_op else bus_op
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+		mmap_op : bus_op
+#else
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+#endif
+#endif /* BCM47XX_ACP_WAR */
+
+#define OSL_ERROR(bcmerror)	osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+/* the largest reasonable packet buffer the driver uses for ethernet MTU, in bytes */
+#define	PKTBUFSZ	2048   /* largest reasonable packet buffer, driver uses for ethernet MTU */
+
+#define OSH_NULL   NULL
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ * Macros expand to calls to functions defined in linux_osl.c .
+ */
+#include <linuxver.h>           /* use current 2.4.x calling conventions */
+#include <linux/kernel.h>       /* for vsn/printf's */
+#include <linux/string.h>       /* for mem*, str* */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29)
+#define OSL_SYSUPTIME()		((uint32)jiffies_to_msecs(jiffies))
+#else
+#define OSL_SYSUPTIME()		((uint32)jiffies * (1000 / HZ))
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */
+#define	printf(fmt, args...)	printk("BCMDHD:"fmt , ## args)
+#include <linux/kernel.h>	/* for vsn/printf's */
+#include <linux/string.h>	/* for mem*, str* */
+/* bcopy's: Linux kernel doesn't provide these (anymore) */
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
+#define	bzero(b, len)		memset((b), '\0', (len))
+
+/* register access macros */
+
+#ifdef EXYNOS5433_PCIE_WAR
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+		({ \
+			__typeof(*(r)) __osl_v; \
+			exynos_pcie_set_l1_exit();	\
+			switch (sizeof(*(r))) { \
+				case sizeof(uint8):	__osl_v = \
+					readb((volatile uint8*)(r)); break; \
+				case sizeof(uint16):	__osl_v = \
+					readw((volatile uint16*)(r)); break; \
+				case sizeof(uint32):	__osl_v = \
+					readl((volatile uint32*)(r)); break; \
+			} \
+			exynos_pcie_clear_l1_exit();	\
+			__osl_v; \
+		}), \
+		OSL_READ_REG(osh, r)) \
+)
+#else
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+		({ \
+			__typeof(*(r)) __osl_v; \
+			switch (sizeof(*(r))) { \
+				case sizeof(uint8):	__osl_v = \
+					readb((volatile uint8*)(r)); break; \
+				case sizeof(uint16):	__osl_v = \
+					readw((volatile uint16*)(r)); break; \
+				case sizeof(uint32):	__osl_v = \
+					readl((volatile uint32*)(r)); break; \
+			} \
+			__osl_v; \
+		}), \
+		OSL_READ_REG(osh, r)) \
+)
+#endif /* EXYNOS5433_PCIE_WAR */
+
+#ifdef EXYNOS5433_PCIE_WAR
+#define W_REG(osh, r, v) do { \
+	exynos_pcie_set_l1_exit();	\
+	SELECT_BUS_WRITE(osh, \
+		switch (sizeof(*(r))) { \
+			case sizeof(uint8):	writeb((uint8)(v), (volatile uint8*)(r)); break; \
+			case sizeof(uint16):	writew((uint16)(v), (volatile uint16*)(r)); break; \
+			case sizeof(uint32):	writel((uint32)(v), (volatile uint32*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+		exynos_pcie_clear_l1_exit();	\
+	} while (0)
+#else
+#define W_REG(osh, r, v) do { \
+	SELECT_BUS_WRITE(osh, \
+		switch (sizeof(*(r))) { \
+			case sizeof(uint8):	writeb((uint8)(v), (volatile uint8*)(r)); break; \
+			case sizeof(uint16):	writew((uint16)(v), (volatile uint16*)(r)); break; \
+			case sizeof(uint32):	writel((uint32)(v), (volatile uint32*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+#endif /* EXYNOS5433_PCIE_WAR */
+
+#define	AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#define	OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
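+
+/* Usage sketch (illustrative; regs and CTRL_ENABLE are hypothetical names):
+ *   uint32 v = R_REG(osh, &regs->control);
+ *   W_REG(osh, &regs->control, v | CTRL_ENABLE);
+ * or, equivalently:
+ *   OR_REG(osh, &regs->control, CTRL_ENABLE);
+ */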
+
+/* bcopy, bcmp, and bzero functions */
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
+#define	bzero(b, len)		memset((b), '\0', (len))
+
+/* uncached/cached virtual address */
+#define OSL_UNCACHED(va)	((void *)va)
+#define OSL_CACHED(va)		((void *)va)
+
+#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va)
+#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va)
+
+/* get processor cycle count */
+#if defined(__i386__)
+#define	OSL_GETCYCLES(x)	rdtscl((x))
+#else
+#define OSL_GETCYCLES(x)	((x) = 0)
+#endif
+
+/* dereference an address that may cause a bus exception */
+#define	BUSPROBE(val, addr)	({ (val) = R_REG(NULL, (addr)); 0; })
+
+/* map/unmap physical to virtual I/O */
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define	REG_MAP(pa, size)	ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size)       (void *)(0)
+#endif /* !defined(CONFIG_MMC_MSM7X00A) */
+#define	REG_UNMAP(va)		iounmap((va))
+
+/* shared (dma-able) memory access macros */
+#define	R_SM(r)			*(r)
+#define	W_SM(r, v)		(*(r) = (v))
+#define	BZERO_SM(r, len)	memset((r), '\0', (len))
+
+/* Because the non-BINOSL implementation of the PKT OSL routines consists of macros (for
+ * performance reasons), we need the Linux headers.
+ */
+#include <linuxver.h>		/* use current 2.4.x calling conventions */
+
+/* packet primitives */
+#ifdef BCMDBG_CTRACE
+#define	PKTGET(osh, len, send)		osl_pktget((osh), (len), __LINE__, __FILE__)
+#define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb), __LINE__, __FILE__)
+#else
+#define	PKTGET(osh, len, send)		osl_pktget((osh), (len))
+#define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb))
+#endif /* BCMDBG_CTRACE */
+#define PKTLIST_DUMP(osh, buf)		BCM_REFERENCE(osh)
+#define PKTDBG_TRACE(osh, pkt, bit)	BCM_REFERENCE(osh)
+#define	PKTFREE(osh, skb, send)		osl_pktfree((osh), (skb), (send))
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define	PKTGET_STATIC(osh, len, send)		osl_pktget_static((osh), (len))
+#define	PKTFREE_STATIC(osh, skb, send)		osl_pktfree_static((osh), (skb), (send))
+#else
+#define	PKTGET_STATIC	PKTGET
+#define	PKTFREE_STATIC	PKTFREE
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+#define	PKTDATA(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);})
+#define	PKTLEN(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);})
+#define PKTHEADROOM(osh, skb)		(PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTEXPHEADROOM(osh, skb, b)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_realloc_headroom((struct sk_buff*)(skb), (b)); \
+	 })
+#define PKTTAILROOM(osh, skb)		\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_tailroom((struct sk_buff*)(skb)); \
+	 })
+#define PKTPADTAILROOM(osh, skb, padlen) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_pad((struct sk_buff*)(skb), (padlen)); \
+	 })
+#define	PKTNEXT(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);})
+#define	PKTSETNEXT(osh, skb, x)		\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \
+	 })
+#define	PKTSETLEN(osh, skb, len)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 __skb_trim((struct sk_buff*)(skb), (len)); \
+	 })
+#define	PKTPUSH(osh, skb, bytes)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_push((struct sk_buff*)(skb), (bytes)); \
+	 })
+#define	PKTPULL(osh, skb, bytes)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_pull((struct sk_buff*)(skb), (bytes)); \
+	 })
+#define	PKTTAG(skb)			((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTSETPOOL(osh, skb, x, y)	BCM_REFERENCE(osh)
+#define	PKTPOOL(osh, skb)		({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#define PKTFREELIST(skb)        PKTLINK(skb)
+#define PKTSETFREELIST(skb, x)  PKTSETLINK((skb), (x))
+#define PKTPTR(skb)             (skb)
+#define PKTID(skb)              ({BCM_REFERENCE(skb); 0;})
+#define PKTSETID(skb, id)       ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
+#define PKTSHRINK(osh, m)		({BCM_REFERENCE(osh); m;})
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+#define PKTORPHAN(skb)          skb_orphan(skb)
+#else
+#define PKTORPHAN(skb)          ({BCM_REFERENCE(skb); 0;})
+#endif /* LINUX VERSION >= 3.6 */
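+
+/* Usage sketch (illustrative): allocate a buffer, fill it, then free it
+ *   void *p = PKTGET(osh, PKTBUFSZ, TRUE);
+ *   if (p != NULL) {
+ *       uint8 *data = PKTDATA(osh, p);
+ *       ... fill data, then PKTSETLEN(osh, p, used) ...
+ *       PKTFREE(osh, p, TRUE);
+ *   }
+ */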
+
+
+#ifdef BCMDBG_CTRACE
+#define	DEL_CTRACE(zosh, zskb) { \
+	unsigned long zflags; \
+	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
+	list_del(&(zskb)->ctrace_list); \
+	(zosh)->ctrace_num--; \
+	(zskb)->ctrace_start = 0; \
+	(zskb)->ctrace_count = 0; \
+	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define	UPDATE_CTRACE(zskb, zfile, zline) { \
+	struct sk_buff *_zskb = (struct sk_buff *)(zskb); \
+	if (_zskb->ctrace_count < CTRACE_NUM) { \
+		_zskb->func[_zskb->ctrace_count] = zfile; \
+		_zskb->line[_zskb->ctrace_count] = zline; \
+		_zskb->ctrace_count++; \
+	} \
+	else { \
+		_zskb->func[_zskb->ctrace_start] = zfile; \
+		_zskb->line[_zskb->ctrace_start] = zline; \
+		_zskb->ctrace_start++; \
+		if (_zskb->ctrace_start >= CTRACE_NUM) \
+			_zskb->ctrace_start = 0; \
+	} \
+}
+
+#define	ADD_CTRACE(zosh, zskb, zfile, zline) { \
+	unsigned long zflags; \
+	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
+	list_add(&(zskb)->ctrace_list, &(zosh)->ctrace_list); \
+	(zosh)->ctrace_num++; \
+	UPDATE_CTRACE(zskb, zfile, zline); \
+	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define PKTCALLER(zskb)	UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__)
+#endif /* BCMDBG_CTRACE */
+
+#ifdef CTFPOOL
+#define	CTFPOOL_REFILL_THRESH	3
+typedef struct ctfpool {
+	void		*head;
+	spinlock_t	lock;
+	uint		max_obj;
+	uint		curr_obj;
+	uint		obj_size;
+	uint		refills;
+	uint		fast_allocs;
+	uint 		fast_frees;
+	uint 		slow_allocs;
+} ctfpool_t;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	FASTBUF	(1 << 0)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->pktc_flags)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	FASTBUF	(1 << 16)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->mac_len)
+#else
+#define	FASTBUF	(1 << 0)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->__unused)
+#endif /* 2.6.22 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	CTFPOOLPTR(osh, skb)	(((struct sk_buff*)(skb))->ctfpool)
+#define	CTFPOOLHEAD(osh, skb)	(((ctfpool_t *)((struct sk_buff*)(skb))->ctfpool)->head)
+#else
+#define	CTFPOOLPTR(osh, skb)	(((struct sk_buff*)(skb))->sk)
+#define	CTFPOOLHEAD(osh, skb)	(((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head)
+#endif
+
+extern void *osl_ctfpool_add(osl_t *osh);
+extern void osl_ctfpool_replenish(osl_t *osh, uint thresh);
+extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
+extern void osl_ctfpool_cleanup(osl_t *osh);
+extern void osl_ctfpool_stats(osl_t *osh, void *b);
+#else /* CTFPOOL */
+#define	PKTSETFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#endif /* CTFPOOL */
+
+#define	PKTSETCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+
+#ifdef HNDCTF
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	SKIPCT	(1 << 2)
+#define	CHAINED	(1 << 3)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->pktc_flags & CHAINED)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	SKIPCT	(1 << 18)
+#define	CHAINED	(1 << 19)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->mac_len & CHAINED)
+#else /* 2.6.22 */
+#define	SKIPCT	(1 << 2)
+#define	CHAINED	(1 << 3)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->__unused & CHAINED)
+#endif /* 2.6.22 */
+typedef struct ctf_mark {
+	uint32	value;
+}	ctf_mark_t;
+#define CTF_MARK(m)				(m.value)
+#else /* HNDCTF */
+#define	PKTSETSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define CTF_MARK(m)		({BCM_REFERENCE(m); 0;})
+#endif /* HNDCTF */
+
+#if defined(BCM_GMAC3)
+
+/** pktalloced accounting in devices using GMAC Bulk Forwarding to DHD */
+
+/* Account for packets delivered to downstream forwarder by GMAC interface. */
+extern void osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt);
+#define PKTTOFWDER(osh, skbs, skb_cnt)  \
+	osl_pkt_tofwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
+
+/* Account for packets received from downstream forwarder. */
+#if defined(BCMDBG_CTRACE) /* pkt logging */
+extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt,
+                             int line, char *file);
+#define PKTFRMFWDER(osh, skbs, skb_cnt) \
+	osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt), \
+	                 __LINE__, __FILE__)
+#else  /* ! BCMDBG_CTRACE */
+extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt);
+#define PKTFRMFWDER(osh, skbs, skb_cnt) \
+	osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
+#endif
+
+
+/** GMAC Forwarded packet tagging for reduced cache flush/invalidate.
+ * In a FWDERBUF-tagged packet, only FWDER_PKTMAPSZ bytes of data will have
+ * been accessed in the GMAC forwarder. This may be used to limit the number of
+ * cachelines that need to be flushed or invalidated.
+ * Packets sent to the DHD from a GMAC forwarder will be tagged with FWDERBUF.
+ * DHD may clear the FWDERBUF tag if more than FWDER_PKTMAPSZ bytes were accessed.
+ * Likewise, a debug print of a packet payload in, say, the ethernet driver must
+ * be accompanied by clearing the FWDERBUF tag.
+ */
+
+/** Forwarded packets have a HWRXOFF-sized rx header (etc.h) */
+#define FWDER_HWRXOFF       (30)
+
+/** Maximum amount of packet data that a downstream forwarder (GMAC) may have
+ * read into the L1 cache (not dirty). This may be used in reduced cache ops.
+ *
+ * Max 56: ET HWRXOFF[30] + BRCMHdr[4] + EtherHdr[14] + VlanHdr[4] + IP[4]
+ */
+#define FWDER_PKTMAPSZ      (FWDER_HWRXOFF + 4 + 14 + 4 + 4)
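+
+/* Illustrative sketch, not from the original sources: how an rx path might use
+ * the FWDERBUF tag to bound a cache invalidate. OSL_CACHE_INV() and the
+ * 'need_full_payload' flag are assumptions used only for this example.
+ *
+ *	uint sz = PKTISFWDERBUF(osh, skb) ? FWDER_PKTMAPSZ : PKTLEN(osh, skb);
+ *	OSL_CACHE_INV(PKTDATA(osh, skb), sz);
+ *	if (need_full_payload) {
+ *		PKTCLRFWDERBUF(osh, skb);	// more than FWDER_PKTMAPSZ accessed
+ *		OSL_CACHE_INV(PKTDATA(osh, skb), PKTLEN(osh, skb));
+ *	}
+ */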
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+
+#define FWDERBUF            (1 << 4)
+#define PKTSETFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= FWDERBUF); \
+	 })
+#define PKTCLRFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~FWDERBUF)); \
+	 })
+#define PKTISFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags & FWDERBUF); \
+	 })
+
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+
+#define FWDERBUF	        (1 << 20)
+#define PKTSETFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len |= FWDERBUF); \
+	 })
+#define PKTCLRFWDERBUF(osh, skb)  \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len &= (~FWDERBUF)); \
+	 })
+#define PKTISFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len & FWDERBUF); \
+	 })
+
+#else /* 2.6.22 */
+
+#define FWDERBUF            (1 << 4)
+#define PKTSETFWDERBUF(osh, skb)  \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused |= FWDERBUF); \
+	 })
+#define PKTCLRFWDERBUF(osh, skb)  \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused &= (~FWDERBUF)); \
+	 })
+#define PKTISFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused & FWDERBUF); \
+	 })
+
+#endif /* 2.6.22 */
+
+#else  /* ! BCM_GMAC3 */
+
+#define PKTSETFWDERBUF(osh, skb)  ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
+#define PKTCLRFWDERBUF(osh, skb)  ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
+#define PKTISFWDERBUF(osh, skb)   ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+
+#endif /* ! BCM_GMAC3 */
+
+
+#ifdef HNDCTF
+/* For broadstream iqos */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	TOBR		(1 << 5)
+#define	PKTSETTOBR(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= TOBR); \
+	 })
+#define	PKTCLRTOBR(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~TOBR)); \
+	 })
+#define	PKTISTOBR(skb)	(((struct sk_buff*)(skb))->pktc_flags & TOBR)
+#define	PKTSETCTFIPCTXIF(skb, ifp)	(((struct sk_buff*)(skb))->ctf_ipc_txif = ifp)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
+#define	PKTSETCTFIPCTXIF(skb, ifp)	({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
+#else /* 2.6.22 */
+#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
+#define	PKTSETCTFIPCTXIF(skb, ifp)	({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
+#endif /* 2.6.22 */
+#else /* HNDCTF */
+#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
+#endif /* HNDCTF */
+
+
+#ifdef BCMFA
+#ifdef BCMFA_HW_HASH
+#define PKTSETFAHIDX(skb, idx)	(((struct sk_buff*)(skb))->napt_idx = idx)
+#else
+#define PKTSETFAHIDX(skb, idx)	({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#endif /* BCMFA_HW_HASH */
+#define PKTGETFAHIDX(skb)	(((struct sk_buff*)(skb))->napt_idx)
+#define PKTSETFADEV(skb, imp)	(((struct sk_buff*)(skb))->dev = imp)
+#define PKTSETRXDEV(skb)	(((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev)
+
+#define	AUX_TCP_FIN_RST	(1 << 0)
+#define	AUX_FREED	(1 << 1)
+#define PKTSETFAAUX(skb)	(((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST)
+#define	PKTCLRFAAUX(skb)	(((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST))
+#define	PKTISFAAUX(skb)		(((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST)
+#define PKTSETFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags |= AUX_FREED)
+#define	PKTCLRFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED))
+#define	PKTISFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags & AUX_FREED)
+#define	PKTISFABRIDGED(skb)	PKTISFAAUX(skb)
+#else
+#define	PKTISFAAUX(skb)		({BCM_REFERENCE(skb); FALSE;})
+#define	PKTISFABRIDGED(skb)	({BCM_REFERENCE(skb); FALSE;})
+#define	PKTISFAFREED(skb)	({BCM_REFERENCE(skb); FALSE;})
+
+#define	PKTCLRFAAUX(skb)	BCM_REFERENCE(skb)
+#define PKTSETFAFREED(skb)	BCM_REFERENCE(skb)
+#define	PKTCLRFAFREED(skb)	BCM_REFERENCE(skb)
+#endif /* BCMFA */
+
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+extern void osl_pktclone(osl_t *osh, void **pkt);
+
+#ifdef BCMDBG_CTRACE
+#define PKT_CTRACE_DUMP(osh, b)	osl_ctrace_dump((osh), (b))
+extern void *osl_pktget(osl_t *osh, uint len, int line, char *file);
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
+extern int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
+struct bcmstrbuf;
+extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b);
+#else
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
+extern void *osl_pktget(osl_t *osh, uint len);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+#endif /* BCMDBG_CTRACE */
+extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
+#ifdef BCMDBG_CTRACE
+#define PKTFRMNATIVE(osh, skb)  osl_pkt_frmnative(((osl_t *)osh), \
+				(struct sk_buff*)(skb), __LINE__, __FILE__)
+#define	PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb))
+#else
+#define PKTFRMNATIVE(osh, skb)	osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
+#endif /* BCMDBG_CTRACE */
+#define PKTTONATIVE(osh, pkt)		osl_pkt_tonative((osl_t *)(osh), (pkt))
+
+#define	PKTLINK(skb)			(((struct sk_buff*)(skb))->prev)
+#define	PKTSETLINK(skb, x)		(((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define	PKTPRIO(skb)			(((struct sk_buff*)(skb))->priority)
+#define	PKTSETPRIO(skb, x)		(((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb)		(((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x)		(((struct sk_buff*)(skb))->ip_summed = \
+						((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
+#define PKTSHARED(skb)                  (((struct sk_buff*)(skb))->cloned)
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define PKTMARK(p)                     (((struct sk_buff *)(p))->mark)
+#define PKTSETMARK(p, m)               ((struct sk_buff *)(p))->mark = (m)
+#else /* !2.6.0 */
+#define PKTMARK(p)                     (((struct sk_buff *)(p))->nfmark)
+#define PKTSETMARK(p, m)               ((struct sk_buff *)(p))->nfmark = (m)
+#endif /* 2.6.0 */
+#else /* CONFIG_NF_CONNTRACK_MARK */
+#define PKTMARK(p)                     0
+#define PKTSETMARK(p, m)
+#endif /* CONFIG_NF_CONNTRACK_MARK */
+
+#define PKTALLOCED(osh)		osl_pktalloced(osh)
+extern uint osl_pktalloced(osl_t *osh);
+
+#define OSL_RAND()		osl_rand()
+extern uint32 osl_rand(void);
+
+#define	DMA_MAP(osh, va, size, direction, p, dmah) \
+	osl_dma_map((osh), (va), (size), (direction), (p), (dmah))
+
+#ifdef PKTC
+/* Use 8 bytes of the skb tstamp field to store the info below */
+struct chain_node {
+	struct sk_buff	*link;
+	unsigned int	flags:3, pkts:9, bytes:20;
+};
+
+#define CHAIN_NODE(skb)		((struct chain_node*)(((struct sk_buff*)skb)->pktc_cb))
+
+#define	PKTCSETATTR(s, f, p, b)	({CHAIN_NODE(s)->flags = (f); CHAIN_NODE(s)->pkts = (p); \
+	                         CHAIN_NODE(s)->bytes = (b);})
+#define	PKTCCLRATTR(s)		({CHAIN_NODE(s)->flags = CHAIN_NODE(s)->pkts = \
+	                         CHAIN_NODE(s)->bytes = 0;})
+#define	PKTCGETATTR(s)		(CHAIN_NODE(s)->flags << 29 | CHAIN_NODE(s)->pkts << 20 | \
+	                         CHAIN_NODE(s)->bytes)
+#define	PKTCCNT(skb)		(CHAIN_NODE(skb)->pkts)
+#define	PKTCLEN(skb)		(CHAIN_NODE(skb)->bytes)
+#define	PKTCGETFLAGS(skb)	(CHAIN_NODE(skb)->flags)
+#define	PKTCSETFLAGS(skb, f)	(CHAIN_NODE(skb)->flags = (f))
+#define	PKTCCLRFLAGS(skb)	(CHAIN_NODE(skb)->flags = 0)
+#define	PKTCFLAGS(skb)		(CHAIN_NODE(skb)->flags)
+#define	PKTCSETCNT(skb, c)	(CHAIN_NODE(skb)->pkts = (c))
+#define	PKTCINCRCNT(skb)	(CHAIN_NODE(skb)->pkts++)
+#define	PKTCADDCNT(skb, c)	(CHAIN_NODE(skb)->pkts += (c))
+#define	PKTCSETLEN(skb, l)	(CHAIN_NODE(skb)->bytes = (l))
+#define	PKTCADDLEN(skb, l)	(CHAIN_NODE(skb)->bytes += (l))
+#define	PKTCSETFLAG(skb, fb)	(CHAIN_NODE(skb)->flags |= (fb))
+#define	PKTCCLRFLAG(skb, fb)	(CHAIN_NODE(skb)->flags &= ~(fb))
+#define	PKTCLINK(skb)		(CHAIN_NODE(skb)->link)
+#define	PKTSETCLINK(skb, x)	(CHAIN_NODE(skb)->link = (struct sk_buff*)(x))
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+	for (; (skb) != NULL; (skb) = (nskb)) \
+		if ((nskb) = (PKTISCHAINED(skb) ? PKTCLINK(skb) : NULL), \
+		    PKTSETCLINK((skb), NULL), 1)
+#define	PKTCFREE(osh, skb, send) \
+do { \
+	void *nskb; \
+	ASSERT((skb) != NULL); \
+	FOREACH_CHAINED_PKT((skb), nskb) { \
+		PKTCLRCHAINED((osh), (skb)); \
+		PKTCCLRFLAGS((skb)); \
+		PKTFREE((osh), (skb), (send)); \
+	} \
+} while (0)
+#define PKTCENQTAIL(h, t, p) \
+do { \
+	if ((t) == NULL) { \
+		(h) = (t) = (p); \
+	} else { \
+		PKTSETCLINK((t), (p)); \
+		(t) = (p); \
+	} \
+} while (0)
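+
+/* Illustrative sketch, not from the original sources: building and walking a
+ * packet chain with the macros above. 'rx_one_pkt', 'head' and 'tail' are
+ * hypothetical names used only for this example.
+ *
+ *	void *head = NULL, *tail = NULL, *p, *n;
+ *	while ((p = rx_one_pkt()) != NULL) {
+ *		if (tail != NULL)
+ *			PKTSETCHAINED(osh, tail);	// every pkt except the last is chained
+ *		PKTCENQTAIL(head, tail, p);
+ *	}
+ *	FOREACH_CHAINED_PKT(head, n) {
+ *		PKTCLRCHAINED(osh, head);
+ *		// process 'head'; 'n' already points at the next packet in the chain
+ *	}
+ */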
+#endif /* PKTC */
+
+#else /* ! BCMDRIVER */
+
+
+/* ASSERT */
+	#define ASSERT(exp)	do {} while (0)
+
+/* MALLOC and MFREE */
+#define MALLOC(o, l) malloc(l)
+#define MFREE(o, p, l) free(p)
+#include <stdlib.h>
+
+/* str* and mem* functions */
+#include <string.h>
+
+/* *printf functions */
+#include <stdio.h>
+
+/* bcopy, bcmp, and bzero */
+extern void bcopy(const void *src, void *dst, size_t len);
+extern int bcmp(const void *b1, const void *b2, size_t len);
+extern void bzero(void *b, size_t len);
+#endif /* ! BCMDRIVER */
+
+#endif	/* _linux_osl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/linuxver.h b/drivers/net/wireless/bcmdhd/include/linuxver.h
new file mode 100644
index 0000000000000000000000000000000000000000..006c85dd8cacb0b6434b083b24a36f7c2fbcc8d7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linuxver.h
@@ -0,0 +1,731 @@
+/*
+ * Linux-specific abstractions to gain some independence from linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: linuxver.h 431983 2013-10-25 06:53:27Z $
+ */
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+#include <typedefs.h>
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+#include <linux/kconfig.h>
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_)	MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+		MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
+
+/* linux/malloc.h is deprecated, use linux/slab.h instead. */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/semaphore.h>
+#else
+#include <asm/semaphore.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#undef IP_TOS
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
+#include <asm/io.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#ifndef work_struct
+#define work_struct tq_struct
+#endif
+#ifndef INIT_WORK
+#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
+#endif
+#ifndef schedule_work
+#define schedule_work(_work) schedule_task((_work))
+#endif
+#ifndef flush_scheduled_work
+#define flush_scheduled_work() flush_scheduled_tasks()
+#endif
+#endif	/* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define DAEMONIZE(a)	do { \
+		allow_signal(SIGKILL);	\
+		allow_signal(SIGTERM);	\
+	} while (0)
+#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
+#define DAEMONIZE(a) daemonize(a); \
+	allow_signal(SIGKILL); \
+	allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+	do { if (a) \
+		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
+	} while (0);
+#endif /* LINUX_VERSION_CODE  */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func)
+#else
+#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func, _work)
+#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
+	(RHEL_MAJOR == 5))
+/* Exclude RHEL 5 */
+typedef void (*work_func_t)(void *work);
+#endif
+#endif	/* >= 2.6.19 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* Some distributions have their own 2.6.x compatibility layers */
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+#else
+typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define IRQF_SHARED	SA_SHIRQ
+#endif /* < 2.6.18 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
+#ifdef	CONFIG_NET_RADIO
+#define	CONFIG_WIRELESS_EXT
+#endif
+#endif	/* < 2.6.17 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+#include <linux/sched.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+#include <linux/sched/rt.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <net/lib80211.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <linux/ieee80211.h>
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+#include <net/ieee80211.h>
+#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
+
+
+#ifndef __exit
+#define __exit
+#endif
+#ifndef __devexit
+#define __devexit
+#endif
+#ifndef __devinit
+#  if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+#    define __devinit	__init
+#  else
+/* All devices are hotpluggable since linux 3.8.0 */
+#    define __devinit
+#  endif
+#endif /* !__devinit */
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+#ifndef __devexit_p
+#define __devexit_p(x)	x
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
+
+#define pci_get_drvdata(dev)		(dev)->sysdata
+#define pci_set_drvdata(dev, value)	(dev)->sysdata = (value)
+
+/*
+ * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
+ */
+
+struct pci_device_id {
+	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
+	unsigned int subvendor, subdevice;	/* Subsystem ID's or PCI_ANY_ID */
+	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
+	unsigned long driver_data;		/* Data private to the driver */
+};
+
+struct pci_driver {
+	struct list_head node;
+	char *name;
+	const struct pci_device_id *id_table;	/* NULL if wants all devices */
+	int (*probe)(struct pci_dev *dev,
+	             const struct pci_device_id *id); /* New device inserted */
+	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug
+						 * capable driver)
+						 */
+	void (*suspend)(struct pci_dev *dev);	/* Device suspended */
+	void (*resume)(struct pci_dev *dev);	/* Device woken up */
+};
+
+#define MODULE_DEVICE_TABLE(type, name)
+#define PCI_ANY_ID (~0)
+
+/* compatpci.c */
+#define pci_module_init pci_register_driver
+extern int pci_register_driver(struct pci_driver *drv);
+extern void pci_unregister_driver(struct pci_driver *drv);
+
+#endif /* PCI registration */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
+#define pci_module_init pci_register_driver
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
+#ifdef MODULE
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#else
+#define module_init(x)	__initcall(x);
+#define module_exit(x)	__exitcall(x);
+#endif
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+#define WL_USE_NETDEV_OPS
+#else
+#undef WL_USE_NETDEV_OPS
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
+#define WL_CONFIG_RFKILL
+#else
+#undef WL_CONFIG_RFKILL
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
+#define list_for_each(pos, head) \
+	for (pos = (head)->next; pos != (head); pos = pos->next)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
+#define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
+#define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
+#define pci_enable_device(dev) do { } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
+
+/*
+ * DMA mapping
+ *
+ * See linux/Documentation/DMA-mapping.txt
+ */
+
+#ifndef PCI_DMA_TODEVICE
+#define	PCI_DMA_TODEVICE	1
+#define	PCI_DMA_FROMDEVICE	2
+#endif
+
+typedef u32 dma_addr_t;
+
+/* Pure 2^n version of get_order */
+static inline int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size-1) >> (PAGE_SHIFT-1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	return order;
+}
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+                                         dma_addr_t *dma_handle)
+{
+	void *ret;
+	int gfp = GFP_ATOMIC | GFP_DMA;
+
+	ret = (void *)__get_free_pages(gfp, get_order(size));
+
+	if (ret != NULL) {
+		memset(ret, 0, size);
+		*dma_handle = virt_to_bus(ret);
+	}
+	return ret;
+}
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+                                       void *vaddr, dma_addr_t dma_handle)
+{
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+#define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+
+#endif /* DMA mapping */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
+
+#define dev_kfree_skb_any(a)		dev_kfree_skb(a)
+#define netif_down(dev)			do { (dev)->start = 0; } while (0)
+
+/* pcmcia-cs provides its own netdevice compatibility layer */
+#ifndef _COMPAT_NETDEVICE_H
+
+/*
+ * SoftNet
+ *
+ * For pre-softnet kernels we need to tell the upper layer not to
+ * re-enter start_xmit() while we are in there. However, softnet
+ * guarantees not to re-enter while we are in there, so there is no need
+ * to do the netif_stop_queue() dance unless the transmit queue really
+ * gets stuck. This should also improve performance according to tests
+ * done by Aman Singla.
+ */
+
+#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
+#define netif_wake_queue(dev) \
+		do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+	dev->tbusy = 0;
+	dev->interrupt = 0;
+	dev->start = 1;
+}
+
+#define netif_queue_stopped(dev)	(dev)->tbusy
+#define netif_running(dev)		(dev)->start
+
+#endif /* _COMPAT_NETDEVICE_H */
+
+#define netif_device_attach(dev)	netif_start_queue(dev)
+#define netif_device_detach(dev)	netif_stop_queue(dev)
+
+/* 2.4.x renamed bottom halves to tasklets */
+#define tasklet_struct				tq_struct
+static inline void tasklet_schedule(struct tasklet_struct *tasklet)
+{
+	queue_task(tasklet, &tq_immediate);
+	mark_bh(IMMEDIATE_BH);
+}
+
+static inline void tasklet_init(struct tasklet_struct *tasklet,
+                                void (*func)(unsigned long),
+                                unsigned long data)
+{
+	tasklet->next = NULL;
+	tasklet->sync = 0;
+	tasklet->routine = (void (*)(void *))func;
+	tasklet->data = (void *)data;
+}
+#define tasklet_kill(tasklet)	{ do {} while (0); }
+
+/* 2.4.x introduced del_timer_sync() */
+#define del_timer_sync(timer) del_timer(timer)
+
+#else
+
+#define netif_down(dev)
+
+#endif /* SoftNet */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+/*
+ * Emit code to initialise a tq_struct's routine and data pointers
+ */
+#define PREPARE_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		(_tq)->routine = _routine;			\
+		(_tq)->data = _data;				\
+	} while (0)
+
+/*
+ * Emit code to initialise all of a tq_struct
+ */
+#define INIT_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		INIT_LIST_HEAD(&(_tq)->list);			\
+		(_tq)->sync = 0;				\
+		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
+	} while (0)
+
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
+
+/* Power management related macro & routines */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
+#define	PCI_SAVE_STATE(a, b)	pci_save_state(a)
+#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a)
+#else
+#define	PCI_SAVE_STATE(a, b)	pci_save_state(a, b)
+#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a, b)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+static inline int
+pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_read_config_dword(dev, i * 4, &buffer[i]);
+	}
+	return 0;
+}
+
+static inline int
+pci_restore_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_write_config_dword(dev, i * 4, buffer[i]);
+	}
+	/*
+	 * otherwise, write the context information we know from bootup.
+	 * This works around a problem where warm-booting from Windows
+	 * combined with a D3(hot)->D0 transition causes PCI config
+	 * header data to be forgotten.
+	 */
+	else {
+		for (i = 0; i < 6; i ++)
+			pci_write_config_dword(dev,
+			                       PCI_BASE_ADDRESS_0 + (i * 4),
+			                       pci_resource_start(dev, i));
+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+	}
+	return 0;
+}
+#endif /* PCI power management */
+
+/* Old cp0 access macros deprecated in 2.4.19 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+/* Module refcount handled internally in 2.6.x */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT		do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT		do {} while (0)
+#endif
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT			do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT			do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)	do {} while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev)		kfree(dev)
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* struct packet_type redefined in 2.6.x */
+#define af_packet_priv			data
+#endif
+
+/* suspend args */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW	CHECKSUM_PARTIAL
+#endif
+
+typedef struct {
+	void	*parent;  /* some external entity that the thread is supposed to work for */
+	char	*proc_name;
+	struct	task_struct *p_task;
+	long	thr_pid;
+	int		prio; /* priority */
+	struct	semaphore sema;
+	int	terminated;
+	struct	completion completed;
+	spinlock_t	spinlock;
+	int		up_cnt;
+} tsk_ctl_t;
+
+
+/* requires a tsk_ctl_t tsk argument; the caller's priv data is passed in the owner ptr */
+/* note: this macro assumes there may be only one context waiting on the thread's completion */
+#ifdef DHD_DEBUG
+#define DBG_THR(x) printk x
+#else
+#define DBG_THR(x)
+#endif
+
+static inline bool binary_sema_down(tsk_ctl_t *tsk)
+{
+	if (down_interruptible(&tsk->sema) == 0) {
+		unsigned long flags = 0;
+		spin_lock_irqsave(&tsk->spinlock, flags);
+		if (tsk->up_cnt == 1)
+			tsk->up_cnt--;
+		else {
+			DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
+		}
+		spin_unlock_irqrestore(&tsk->spinlock, flags);
+		return false;
+	} else
+		return true;
+}
+
+static inline bool binary_sema_up(tsk_ctl_t *tsk)
+{
+	bool sem_up = false;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&tsk->spinlock, flags);
+	if (tsk->up_cnt == 0) {
+		tsk->up_cnt++;
+		sem_up = true;
+	} else if (tsk->up_cnt == 1) {
+		/* dhd_sched_dpc: dpc is already up! */
+	} else
+		DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
+
+	spin_unlock_irqrestore(&tsk->spinlock, flags);
+
+	if (sem_up)
+		up(&tsk->sema);
+
+	return sem_up;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
+#else
+#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
+#endif
+
+#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
+{ \
+	sema_init(&((tsk_ctl)->sema), 0); \
+	init_completion(&((tsk_ctl)->completed)); \
+	(tsk_ctl)->parent = owner; \
+	(tsk_ctl)->proc_name = name;  \
+	(tsk_ctl)->terminated = FALSE; \
+	(tsk_ctl)->p_task  = kthread_run(thread_func, tsk_ctl, (char*)name); \
+	(tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
+	spin_lock_init(&((tsk_ctl)->spinlock)); \
+	DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
+		(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+}
+
+#define PROC_STOP(tsk_ctl) \
+{ \
+	(tsk_ctl)->terminated = TRUE; \
+	smp_wmb(); \
+	up(&((tsk_ctl)->sema));	\
+	wait_for_completion(&((tsk_ctl)->completed)); \
+	DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
+			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+	(tsk_ctl)->thr_pid = -1; \
+}
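+
+/* Illustrative sketch, not from the original sources: a worker thread driven by
+ * the PROC_START/PROC_STOP and binary_sema_* helpers above. 'my_thread' and
+ * 'my_ctx' are hypothetical names, and the complete_and_exit() call reflects
+ * how the 'completed' member is assumed to be signalled.
+ *
+ *	static int my_thread(void *data)
+ *	{
+ *		tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ *
+ *		while (!binary_sema_down(tsk)) {	// false means the sema was taken
+ *			if (tsk->terminated)
+ *				break;
+ *			// do one unit of work on tsk->parent here
+ *		}
+ *		complete_and_exit(&tsk->completed, 0);
+ *	}
+ *
+ *	// from the driver:  PROC_START(my_thread, my_ctx, &thr_ctl, 0, "my_thread");
+ *	//                  ...
+ *	//                  PROC_STOP(&thr_ctl);
+ */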
+
+/*  ----------------------- */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+#define KILL_PROC(nr, sig) \
+{ \
+struct task_struct *tsk; \
+struct pid *pid;    \
+pid = find_get_pid((pid_t)nr);    \
+tsk = pid_task(pid, PIDTYPE_PID);    \
+if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(2, 6, 30))
+#define KILL_PROC(pid, sig) \
+{ \
+	struct task_struct *tsk; \
+	tsk = find_task_by_vpid(pid); \
+	if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#define KILL_PROC(pid, sig) \
+{ \
+	kill_proc(pid, sig, 1); \
+}
+#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#include <linux/time.h>
+#include <linux/wait.h>
+#else
+#include <linux/sched.h>
+
+#define __wait_event_interruptible_timeout(wq, condition, ret)		\
+do {									\
+	wait_queue_t __wait;						\
+	init_waitqueue_entry(&__wait, current);				\
+									\
+	add_wait_queue(&wq, &__wait);					\
+	for (;;) {							\
+		set_current_state(TASK_INTERRUPTIBLE);			\
+		if (condition)						\
+			break;						\
+		if (!signal_pending(current)) {				\
+			ret = schedule_timeout(ret);			\
+			if (!ret)					\
+				break;					\
+			continue;					\
+		}							\
+		ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	current->state = TASK_RUNNING;					\
+	remove_wait_queue(&wq, &__wait);				\
+} while (0)
+
+#define wait_event_interruptible_timeout(wq, condition, timeout)	\
+({									\
+	long __ret = timeout;						\
+	if (!(condition))						\
+		__wait_event_interruptible_timeout(wq, condition, __ret); \
+	__ret;								\
+})
+
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+
+/*
+For < 2.6.24, wl creates its own netdev but doesn't
+align the priv area like the genuine alloc_netdev().
+Since netdev_priv() always gives us the aligned address, it will
+not match our unaligned address for < 2.6.24
+*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define DEV_PRIV(dev)	(dev->priv)
+#else
+#define DEV_PRIV(dev)	netdev_priv(dev)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+#define WL_ISR(i, d, p)         wl_isr((i), (d))
+#else
+#define WL_ISR(i, d, p)         wl_isr((i), (d), (p))
+#endif  /* < 2.6.20 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+#define CAN_SLEEP()	((!in_atomic() && !irqs_disabled()))
+#else
+#define CAN_SLEEP()	(FALSE)
+#endif
+
+#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define RANDOM32	prandom_u32
+#else
+#define RANDOM32	random32
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define SRANDOM32(entropy)	prandom_seed(entropy)
+#else
+#define SRANDOM32(entropy)	srandom32(entropy)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+/*
+ * Override the latest kfifo functions with
+ * older versions so they work on older kernels
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
+#define kfifo_in_spinlocked(a, b, c, d)		kfifo_put(a, (u8 *)b, c)
+#define kfifo_out_spinlocked(a, b, c, d)	kfifo_get(a, (u8 *)b, c)
+#define kfifo_esize(a)				1
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) &&	!defined(WL_COMPAT_WIRELESS)
+#define kfifo_in_spinlocked(a, b, c, d)		kfifo_in_locked(a, b, c, d)
+#define kfifo_out_spinlocked(a, b, c, d)	kfifo_out_locked(a, b, c, d)
+#define kfifo_esize(a)				1
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+
+#endif /* _linuxver_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/miniopt.h b/drivers/net/wireless/bcmdhd/include/miniopt.h
new file mode 100644
index 0000000000000000000000000000000000000000..ab0360cebd10df3d5b4294dc42667af1e84d74f4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/miniopt.h
@@ -0,0 +1,61 @@
+/*
+ * Command line options parser.
+ *
+ * $Copyright Open Broadcom Corporation$
+ * $Id: miniopt.h 484281 2014-06-12 22:42:26Z $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY	128	/* Max options */
+typedef struct miniopt {
+
+	/* These are persistent after miniopt_init() */
+	const char* name;		/* name for prompt in error strings */
+	const char* flags;		/* option chars that take no args */
+	bool longflags;		/* long options may be flags */
+	bool opt_end;		/* at end of options (passed a "--") */
+
+	/* These are per-call to miniopt() */
+
+	int consumed;		/* number of argv entries consumed in
+				 * the most recent call to miniopt()
+				 */
+	bool positional;
+	bool good_int;		/* 'val' member is the result of a successful
+				 * strtol conversion of the option value
+				 */
+	char opt;
+	char key[MINIOPT_MAXKEY];
+	char* valstr;		/* positional param, or value for the option,
+				 * or null if the option had
+				 * no accompanying value
+				 */
+	uint uval;		/* strtol translation of valstr */
+	int  val;		/* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
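+
+/* Illustrative sketch, not from the original sources: a typical parsing loop.
+ * The option letters, the 'count'/'verbose' variables and the assumption that
+ * miniopt() returns non-zero when parsing should stop are for illustration only.
+ *
+ *	miniopt_t mo;
+ *	miniopt_init(&mo, "mytool", "v", FALSE);	// 'v' is a flag, takes no value
+ *	while (*argv != NULL) {
+ *		if (miniopt(&mo, argv) != 0)
+ *			break;
+ *		if (mo.opt == 'c' && mo.good_int)
+ *			count = mo.val;			// e.g. "-c 5"
+ *		else if (mo.opt == 'v')
+ *			verbose = TRUE;
+ *		argv += mo.consumed;
+ *	}
+ */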
+
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif  /* MINI_OPT_H  */
diff --git a/drivers/net/wireless/bcmdhd/include/msgtrace.h b/drivers/net/wireless/bcmdhd/include/msgtrace.h
new file mode 100644
index 0000000000000000000000000000000000000000..8a3f691dbb0b4c0bfe789da0b0bf5aaae83b582d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/msgtrace.h
@@ -0,0 +1,60 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: msgtrace.h 439681 2013-11-27 15:39:50Z $
+ */
+
+#ifndef	_MSGTRACE_H
+#define	_MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+/* for osl_t */
+#include <osl_decl.h>
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+	uint8	version;
+	uint8   trace_type;
+#define MSGTRACE_HDR_TYPE_MSG 0
+#define MSGTRACE_HDR_TYPE_LOG 1
+	uint16	len;	/* Len of the trace */
+	uint32	seqnum;	/* Sequence number of message. Useful if the message has been lost
+			 * because of DMA error or a bus reset (ex: SDIO Func2)
+			 */
+	/* Msgtrace type  only */
+	uint32  discarded_bytes;  /* Number of discarded bytes because of trace overflow  */
+	uint32  discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN 	sizeof(msgtrace_hdr_t)
+
+/* The hbus driver generates traces when sending a trace message, which would cause endless traces.
+ * This flag must be set to TRUE in any hbus traces; it is reset in the function msgtrace_put.
+ * This prevents endless traces, at the cost of possibly losing traces, but only in bus device code.
+ * It is recommended to set this flag in the macro SD_TRACE but not in SD_ERROR, to avoid missing
+ * hbus error traces; hbus error traces should not generate endless traces.
+ */
+extern bool msgtrace_hbus_trace;
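+
+/* Illustrative sketch, not from the original sources: how a bus trace macro
+ * might set the flag so its own output is not looped back into the trace
+ * stream (the flag is cleared again inside msgtrace_put). The SD_TRACE body
+ * below is hypothetical.
+ *
+ *	#define SD_TRACE(args)	do { msgtrace_hbus_trace = TRUE; printf args; } while (0)
+ */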
+
+typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr,
+                                     uint16 hdrlen, uint8 *buf, uint16 buflen);
+extern void msgtrace_start(void);
+extern void msgtrace_stop(void);
+extern int msgtrace_sent(void);
+extern void msgtrace_put(char *buf, int count);
+extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send);
+extern bool msgtrace_event_enabled(void);
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* _MSGTRACE_H */
diff --git a/drivers/net/wireless/bcmdhd/include/osl.h b/drivers/net/wireless/bcmdhd/include/osl.h
new file mode 100644
index 0000000000000000000000000000000000000000..12b8e00b3d61a81d1819eb96da0db8a22f6e6460
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl.h
@@ -0,0 +1,131 @@
+/*
+ * OS Abstraction Layer
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: osl.h 474639 2014-05-01 23:52:31Z $
+ */
+
+#ifndef _osl_h_
+#define _osl_h_
+
+#include <osl_decl.h>
+
+#define OSL_PKTTAG_SZ	32 /* Size of PktTag */
+
+/* Drivers use PKTFREESETCB to register a callback function that is invoked when a packet is freed by the OSL */
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+/* Drivers use REGOPSSET() to register their register read/write functions */
+typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size);
+typedef void  (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size);
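+
+/* Illustrative sketch, not from the original sources: shape of a packet-free
+ * callback matching pktfree_cb_fn_t. The callback name is hypothetical and the
+ * PKTFREESETCB() argument order shown is an assumption.
+ *
+ *	static void my_pktfree_cb(void *ctx, void *pkt, unsigned int status)
+ *	{
+ *		// 'pkt' is about to be freed by the OSL; release any driver
+ *		// state that still refers to it.
+ *	}
+ *
+ *	// PKTFREESETCB(osh, my_pktfree_cb, my_ctx);
+ */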
+
+
+
+#include <linux_osl.h>
+
+#ifndef PKTDBG_TRACE
+#define PKTDBG_TRACE(osh, pkt, bit)	BCM_REFERENCE(osh)
+#endif
+
+#define PKTCTFMAP(osh, p)		BCM_REFERENCE(osh)
+
+/* --------------------------------------------------------------------------
+** Register manipulation macros.
+*/
+
+#define	SET_REG(osh, r, mask, val)	W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif   /* !AND_REG */
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif   /* !OR_REG */
+
+#if !defined(OSL_SYSUPTIME)
+#define OSL_SYSUPTIME() (0)
+#define OSL_SYSUPTIME_SUPPORT FALSE
+#else
+#define OSL_SYSUPTIME_SUPPORT TRUE
+#endif /* OSL_SYSUPTIME */
+
+#if !defined(PKTC) && !defined(PKTC_DONGLE)
+#define	PKTCGETATTR(skb)	(0)
+#define	PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb)
+#define	PKTCCLRATTR(skb)	BCM_REFERENCE(skb)
+#define	PKTCCNT(skb)		(1)
+#define	PKTCLEN(skb)		PKTLEN(NULL, skb)
+#define	PKTCGETFLAGS(skb)	(0)
+#define	PKTCSETFLAGS(skb, f)	BCM_REFERENCE(skb)
+#define	PKTCCLRFLAGS(skb)	BCM_REFERENCE(skb)
+#define	PKTCFLAGS(skb)		(0)
+#define	PKTCSETCNT(skb, c)	BCM_REFERENCE(skb)
+#define	PKTCINCRCNT(skb)	BCM_REFERENCE(skb)
+#define	PKTCADDCNT(skb, c)	BCM_REFERENCE(skb)
+#define	PKTCSETLEN(skb, l)	BCM_REFERENCE(skb)
+#define	PKTCADDLEN(skb, l)	BCM_REFERENCE(skb)
+#define	PKTCSETFLAG(skb, fb)	BCM_REFERENCE(skb)
+#define	PKTCCLRFLAG(skb, fb)	BCM_REFERENCE(skb)
+#define	PKTCLINK(skb)		NULL
+#define	PKTSETCLINK(skb, x)	BCM_REFERENCE(skb)
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+	for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb))
+#define	PKTCFREE		PKTFREE
+#define PKTCENQTAIL(h, t, p) \
+do { \
+	if ((t) == NULL) { \
+		(h) = (t) = (p); \
+	} \
+} while (0)
+#endif /* !PKTC && !PKTC_DONGLE */
+
+#if !defined(HNDCTF) && !defined(PKTC_TX_DONGLE)
+#define PKTSETCHAINED(osh, skb)		BCM_REFERENCE(osh)
+#define PKTCLRCHAINED(osh, skb)		BCM_REFERENCE(osh)
+#define PKTISCHAINED(skb)		FALSE
+#endif
+
+/* Lbuf with fraglist */
+#define PKTFRAGPKTID(osh, lb)		(0)
+#define PKTSETFRAGPKTID(osh, lb, id)	BCM_REFERENCE(osh)
+#define PKTFRAGTOTNUM(osh, lb)		(0)
+#define PKTSETFRAGTOTNUM(osh, lb, tot)	BCM_REFERENCE(osh)
+#define PKTFRAGTOTLEN(osh, lb)		(0)
+#define PKTSETFRAGTOTLEN(osh, lb, len)	BCM_REFERENCE(osh)
+#define PKTIFINDEX(osh, lb)		(0)
+#define PKTSETIFINDEX(osh, lb, idx)	BCM_REFERENCE(osh)
+#define	PKTGETLF(osh, len, send, lbuf_type)	(0)
+
+/* in rx path, reuse totlen as used len */
+#define PKTFRAGUSEDLEN(osh, lb)			(0)
+#define PKTSETFRAGUSEDLEN(osh, lb, len)		BCM_REFERENCE(osh)
+
+#define PKTFRAGLEN(osh, lb, ix)			(0)
+#define PKTSETFRAGLEN(osh, lb, ix, len)		BCM_REFERENCE(osh)
+#define PKTFRAGDATA_LO(osh, lb, ix)		(0)
+#define PKTSETFRAGDATA_LO(osh, lb, ix, addr)	BCM_REFERENCE(osh)
+#define PKTFRAGDATA_HI(osh, lb, ix)		(0)
+#define PKTSETFRAGDATA_HI(osh, lb, ix, addr)	BCM_REFERENCE(osh)
+
+/* RX FRAG */
+#define PKTISRXFRAG(osh, lb)    	(0)
+#define PKTSETRXFRAG(osh, lb)		BCM_REFERENCE(osh)
+#define PKTRESETRXFRAG(osh, lb)		BCM_REFERENCE(osh)
+
+/* TX FRAG */
+#define PKTISTXFRAG(osh, lb)		(0)
+#define PKTSETTXFRAG(osh, lb)		BCM_REFERENCE(osh)
+
+/* Need Rx completion used for AMPDU reordering */
+#define PKTNEEDRXCPL(osh, lb)           (TRUE)
+#define PKTSETNORXCPL(osh, lb)          BCM_REFERENCE(osh)
+#define PKTRESETNORXCPL(osh, lb)        BCM_REFERENCE(osh)
+
+#define PKTISFRAG(osh, lb)		(0)
+#define PKTFRAGISCHAINED(osh, i)	(0)
+/* TRIM Tail bytes from lfrag */
+#define PKTFRAG_TRIM_TAILBYTES(osh, p, len)	PKTSETLEN(osh, p, PKTLEN(osh, p) - len)
+
+#endif	/* _osl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/osl_decl.h b/drivers/net/wireless/bcmdhd/include/osl_decl.h
new file mode 100644
index 0000000000000000000000000000000000000000..944945838e98c6eff4049ee90b91f97627d18c29
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl_decl.h
@@ -0,0 +1,16 @@
+/*
+ * osl forward declarations
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id$
+ */
+
+#ifndef _osl_decl_h_
+#define _osl_decl_h_
+
+/* osl handle type forward declaration */
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
new file mode 100644
index 0000000000000000000000000000000000000000..bde3959f350bd12d4004bb3b195cdedd64ddaf8c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
@@ -0,0 +1,41 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * $Copyright Open Broadcom Corporation$
+ * $Id: packed_section_end.h 437241 2013-11-18 07:39:24Z $
+ */
+
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is NOT defined at this
+ * point, then there is a missing include of packed_section_start.h.
+ */
+#ifdef BWL_PACKED_SECTION
+	#undef BWL_PACKED_SECTION
+#else
+	#error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+/* Compiler-specific directives for structure packing are declared in
+ * packed_section_start.h. This marks the end of the structure packing section,
+ * so undef them here.
+ */
+#undef	BWL_PRE_PACKED_STRUCT
+#undef	BWL_POST_PACKED_STRUCT
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c22dcb276a91f05e8e340e8b9a591661c065eb2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
@@ -0,0 +1,45 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * $Copyright Open Broadcom Corporation$
+ * $Id: packed_section_start.h 437241 2013-11-18 07:39:24Z $
+ */
+
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is already defined at this
+ * point, then there is a missing include of packed_section_end.h.
+ */
+#ifdef BWL_PACKED_SECTION
+	#error "BWL_PACKED_SECTION is already defined!"
+#else
+	#define BWL_PACKED_SECTION
+#endif
+
+
+
+
+/* Declare compiler-specific directives for structure packing. */
+#if defined(__GNUC__) || defined(__lint)
+	#define	BWL_PRE_PACKED_STRUCT
+	#define	BWL_POST_PACKED_STRUCT	__attribute__ ((packed))
+#elif defined(__CC_ARM)
+	#define	BWL_PRE_PACKED_STRUCT	__packed
+	#define	BWL_POST_PACKED_STRUCT
+#else
+	#error "Unknown compiler!"
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/pcicfg.h b/drivers/net/wireless/bcmdhd/include/pcicfg.h
new file mode 100644
index 0000000000000000000000000000000000000000..0fae96ce00237d59ec8a90cad344150c97f6778c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcicfg.h
@@ -0,0 +1,564 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: pcicfg.h 465082 2014-03-26 17:37:28Z $
+ */
+
+#ifndef	_h_pcicfg_
+#define	_h_pcicfg_
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* The following inside ifndef's so we don't collide with NTDDK.H */
+#ifndef PCI_MAX_BUS
+#define PCI_MAX_BUS		0x100
+#endif
+#ifndef PCI_MAX_DEVICES
+#define PCI_MAX_DEVICES		0x20
+#endif
+#ifndef PCI_MAX_FUNCTION
+#define PCI_MAX_FUNCTION	0x8
+#endif
+
+#ifndef PCI_INVALID_VENDORID
+#define PCI_INVALID_VENDORID	0xffff
+#endif
+#ifndef PCI_INVALID_DEVICEID
+#define PCI_INVALID_DEVICEID	0xffff
+#endif
+
+
+/* Convert between bus-slot-function-register and config addresses */
+
+#define	PCICFG_BUS_SHIFT	16	/* Bus shift */
+#define	PCICFG_SLOT_SHIFT	11	/* Slot shift */
+#define	PCICFG_FUN_SHIFT	8	/* Function shift */
+#define	PCICFG_OFF_SHIFT	0	/* Register shift */
+
+#define	PCICFG_BUS_MASK		0xff	/* Bus mask */
+#define	PCICFG_SLOT_MASK	0x1f	/* Slot mask */
+#define	PCICFG_FUN_MASK		7	/* Function mask */
+#define	PCICFG_OFF_MASK		0xff	/* Bus mask */
+
+#define	PCI_CONFIG_ADDR(b, s, f, o)					\
+		((((b) & PCICFG_BUS_MASK) << PCICFG_BUS_SHIFT)		\
+		 | (((s) & PCICFG_SLOT_MASK) << PCICFG_SLOT_SHIFT)	\
+		 | (((f) & PCICFG_FUN_MASK) << PCICFG_FUN_SHIFT)	\
+		 | (((o) & PCICFG_OFF_MASK) << PCICFG_OFF_SHIFT))
+
+#define	PCI_CONFIG_BUS(a)	(((a) >> PCICFG_BUS_SHIFT) & PCICFG_BUS_MASK)
+#define	PCI_CONFIG_SLOT(a)	(((a) >> PCICFG_SLOT_SHIFT) & PCICFG_SLOT_MASK)
+#define	PCI_CONFIG_FUN(a)	(((a) >> PCICFG_FUN_SHIFT) & PCICFG_FUN_MASK)
+#define	PCI_CONFIG_OFF(a)	(((a) >> PCICFG_OFF_SHIFT) & PCICFG_OFF_MASK)
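+
+/* Worked example, for illustration only: bus 1, device 2, function 0,
+ * register offset 0x10 packs as
+ *	PCI_CONFIG_ADDR(1, 2, 0, 0x10) = (1 << 16) | (2 << 11) | (0 << 8) | 0x10
+ *	                               = 0x11010
+ * and PCI_CONFIG_SLOT(0x11010) recovers the device number 2.
+ */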
+
+/* PCIE Config space accessing MACROS */
+
+#define	PCIECFG_BUS_SHIFT	24	/* Bus shift */
+#define	PCIECFG_SLOT_SHIFT	19	/* Slot/Device shift */
+#define	PCIECFG_FUN_SHIFT	16	/* Function shift */
+#define	PCIECFG_OFF_SHIFT	0	/* Register shift */
+
+#define	PCIECFG_BUS_MASK	0xff	/* Bus mask */
+#define	PCIECFG_SLOT_MASK	0x1f	/* Slot/Device mask */
+#define	PCIECFG_FUN_MASK	7	/* Function mask */
+#define	PCIECFG_OFF_MASK	0xfff	/* Register mask */
+
+#define	PCIE_CONFIG_ADDR(b, s, f, o)					\
+		((((b) & PCIECFG_BUS_MASK) << PCIECFG_BUS_SHIFT)		\
+		 | (((s) & PCIECFG_SLOT_MASK) << PCIECFG_SLOT_SHIFT)	\
+		 | (((f) & PCIECFG_FUN_MASK) << PCIECFG_FUN_SHIFT)	\
+		 | (((o) & PCIECFG_OFF_MASK) << PCIECFG_OFF_SHIFT))
+
+#define	PCIE_CONFIG_BUS(a)	(((a) >> PCIECFG_BUS_SHIFT) & PCIECFG_BUS_MASK)
+#define	PCIE_CONFIG_SLOT(a)	(((a) >> PCIECFG_SLOT_SHIFT) & PCIECFG_SLOT_MASK)
+#define	PCIE_CONFIG_FUN(a)	(((a) >> PCIECFG_FUN_SHIFT) & PCIECFG_FUN_MASK)
+#define	PCIE_CONFIG_OFF(a)	(((a) >> PCIECFG_OFF_SHIFT) & PCIECFG_OFF_MASK)
+
+/* The actual config space */
+
+#define	PCI_BAR_MAX		6
+
+#define	PCI_ROM_BAR		8
+
+#define	PCR_RSVDA_MAX		2
+
+/* Bits in PCI bars' flags */
+
+#define	PCIBAR_FLAGS		0xf
+#define	PCIBAR_IO		0x1
+#define	PCIBAR_MEM1M		0x2
+#define	PCIBAR_MEM64		0x4
+#define	PCIBAR_PREFETCH		0x8
+#define	PCIBAR_MEM32_MASK	0xFFFFFF80
+
+/* pci config status reg has a bit to indicate that capability ptr is present */
+
+#define PCI_CAPPTR_PRESENT	0x0010
+
+typedef struct _pci_config_regs {
+	uint16	vendor;
+	uint16	device;
+	uint16	command;
+	uint16	status;
+	uint8	rev_id;
+	uint8	prog_if;
+	uint8	sub_class;
+	uint8	base_class;
+	uint8	cache_line_size;
+	uint8	latency_timer;
+	uint8	header_type;
+	uint8	bist;
+	uint32	base[PCI_BAR_MAX];
+	uint32	cardbus_cis;
+	uint16	subsys_vendor;
+	uint16	subsys_id;
+	uint32	baserom;
+	uint32	rsvd_a[PCR_RSVDA_MAX];
+	uint8	int_line;
+	uint8	int_pin;
+	uint8	min_gnt;
+	uint8	max_lat;
+	uint8	dev_dep[192];
+} pci_config_regs;
+
+#define	SZPCR		(sizeof (pci_config_regs))
+#define	MINSZPCR	64		/* offsetof(dev_dep[0]) */
+
+#endif /* !LINUX_POSTMOGRIFY_REMOVAL */
+/* A structure for the config registers is nice, but in most
+ * systems the config space is not memory mapped, so we need
+ * field offsets. :-(
+ */
+#define	PCI_CFG_VID		0
+#define	PCI_CFG_DID		2
+#define	PCI_CFG_CMD		4
+#define	PCI_CFG_STAT		6
+#define	PCI_CFG_REV		8
+#define	PCI_CFG_PROGIF		9
+#define	PCI_CFG_SUBCL		0xa
+#define	PCI_CFG_BASECL		0xb
+#define	PCI_CFG_CLSZ		0xc
+#define	PCI_CFG_LATTIM		0xd
+#define	PCI_CFG_HDR		0xe
+#define	PCI_CFG_BIST		0xf
+#define	PCI_CFG_BAR0		0x10
+#define	PCI_CFG_BAR1		0x14
+#define	PCI_CFG_BAR2		0x18
+#define	PCI_CFG_BAR3		0x1c
+#define	PCI_CFG_BAR4		0x20
+#define	PCI_CFG_BAR5		0x24
+#define	PCI_CFG_CIS		0x28
+#define	PCI_CFG_SVID		0x2c
+#define	PCI_CFG_SSID		0x2e
+#define	PCI_CFG_ROMBAR		0x30
+#define PCI_CFG_CAPPTR		0x34
+#define	PCI_CFG_INT		0x3c
+#define	PCI_CFG_PIN		0x3d
+#define	PCI_CFG_MINGNT		0x3e
+#define	PCI_CFG_MAXLAT		0x3f
+#define	PCI_CFG_DEVCTRL		0xd8
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+
+
+/* Classes and subclasses */
+
+typedef enum {
+	PCI_CLASS_OLD = 0,
+	PCI_CLASS_DASDI,
+	PCI_CLASS_NET,
+	PCI_CLASS_DISPLAY,
+	PCI_CLASS_MMEDIA,
+	PCI_CLASS_MEMORY,
+	PCI_CLASS_BRIDGE,
+	PCI_CLASS_COMM,
+	PCI_CLASS_BASE,
+	PCI_CLASS_INPUT,
+	PCI_CLASS_DOCK,
+	PCI_CLASS_CPU,
+	PCI_CLASS_SERIAL,
+	PCI_CLASS_INTELLIGENT = 0xe,
+	PCI_CLASS_SATELLITE,
+	PCI_CLASS_CRYPT,
+	PCI_CLASS_DSP,
+	PCI_CLASS_XOR = 0xfe
+} pci_classes;
+
+typedef enum {
+	PCI_DASDI_SCSI,
+	PCI_DASDI_IDE,
+	PCI_DASDI_FLOPPY,
+	PCI_DASDI_IPI,
+	PCI_DASDI_RAID,
+	PCI_DASDI_OTHER = 0x80
+} pci_dasdi_subclasses;
+
+typedef enum {
+	PCI_NET_ETHER,
+	PCI_NET_TOKEN,
+	PCI_NET_FDDI,
+	PCI_NET_ATM,
+	PCI_NET_OTHER = 0x80
+} pci_net_subclasses;
+
+typedef enum {
+	PCI_DISPLAY_VGA,
+	PCI_DISPLAY_XGA,
+	PCI_DISPLAY_3D,
+	PCI_DISPLAY_OTHER = 0x80
+} pci_display_subclasses;
+
+typedef enum {
+	PCI_MMEDIA_VIDEO,
+	PCI_MMEDIA_AUDIO,
+	PCI_MMEDIA_PHONE,
+	PCI_MEDIA_OTHER = 0x80
+} pci_mmedia_subclasses;
+
+typedef enum {
+	PCI_MEMORY_RAM,
+	PCI_MEMORY_FLASH,
+	PCI_MEMORY_OTHER = 0x80
+} pci_memory_subclasses;
+
+typedef enum {
+	PCI_BRIDGE_HOST,
+	PCI_BRIDGE_ISA,
+	PCI_BRIDGE_EISA,
+	PCI_BRIDGE_MC,
+	PCI_BRIDGE_PCI,
+	PCI_BRIDGE_PCMCIA,
+	PCI_BRIDGE_NUBUS,
+	PCI_BRIDGE_CARDBUS,
+	PCI_BRIDGE_RACEWAY,
+	PCI_BRIDGE_OTHER = 0x80
+} pci_bridge_subclasses;
+
+typedef enum {
+	PCI_COMM_UART,
+	PCI_COMM_PARALLEL,
+	PCI_COMM_MULTIUART,
+	PCI_COMM_MODEM,
+	PCI_COMM_OTHER = 0x80
+} pci_comm_subclasses;
+
+typedef enum {
+	PCI_BASE_PIC,
+	PCI_BASE_DMA,
+	PCI_BASE_TIMER,
+	PCI_BASE_RTC,
+	PCI_BASE_PCI_HOTPLUG,
+	PCI_BASE_OTHER = 0x80
+} pci_base_subclasses;
+
+typedef enum {
+	PCI_INPUT_KBD,
+	PCI_INPUT_PEN,
+	PCI_INPUT_MOUSE,
+	PCI_INPUT_SCANNER,
+	PCI_INPUT_GAMEPORT,
+	PCI_INPUT_OTHER = 0x80
+} pci_input_subclasses;
+
+typedef enum {
+	PCI_DOCK_GENERIC,
+	PCI_DOCK_OTHER = 0x80
+} pci_dock_subclasses;
+
+typedef enum {
+	PCI_CPU_386,
+	PCI_CPU_486,
+	PCI_CPU_PENTIUM,
+	PCI_CPU_ALPHA = 0x10,
+	PCI_CPU_POWERPC = 0x20,
+	PCI_CPU_MIPS = 0x30,
+	PCI_CPU_COPROC = 0x40,
+	PCI_CPU_OTHER = 0x80
+} pci_cpu_subclasses;
+
+typedef enum {
+	PCI_SERIAL_IEEE1394,
+	PCI_SERIAL_ACCESS,
+	PCI_SERIAL_SSA,
+	PCI_SERIAL_USB,
+	PCI_SERIAL_FIBER,
+	PCI_SERIAL_SMBUS,
+	PCI_SERIAL_OTHER = 0x80
+} pci_serial_subclasses;
+
+typedef enum {
+	PCI_INTELLIGENT_I2O
+} pci_intelligent_subclasses;
+
+typedef enum {
+	PCI_SATELLITE_TV,
+	PCI_SATELLITE_AUDIO,
+	PCI_SATELLITE_VOICE,
+	PCI_SATELLITE_DATA,
+	PCI_SATELLITE_OTHER = 0x80
+} pci_satellite_subclasses;
+
+typedef enum {
+	PCI_CRYPT_NETWORK,
+	PCI_CRYPT_ENTERTAINMENT,
+	PCI_CRYPT_OTHER = 0x80
+} pci_crypt_subclasses;
+
+typedef enum {
+	PCI_DSP_DPIO,
+	PCI_DSP_OTHER = 0x80
+} pci_dsp_subclasses;
+
+typedef enum {
+	PCI_XOR_QDMA,
+	PCI_XOR_OTHER = 0x80
+} pci_xor_subclasses;
+
+/* Header types */
+#define	PCI_HEADER_MULTI	0x80
+#define	PCI_HEADER_MASK		0x7f
+typedef enum {
+	PCI_HEADER_NORMAL,
+	PCI_HEADER_BRIDGE,
+	PCI_HEADER_CARDBUS
+} pci_header_types;
+
+
+/* Overlay for a PCI-to-PCI bridge */
+
+#define	PPB_RSVDA_MAX		2
+#define	PPB_RSVDD_MAX		8
+
+typedef struct _ppb_config_regs {
+	uint16	vendor;
+	uint16	device;
+	uint16	command;
+	uint16	status;
+	uint8	rev_id;
+	uint8	prog_if;
+	uint8	sub_class;
+	uint8	base_class;
+	uint8	cache_line_size;
+	uint8	latency_timer;
+	uint8	header_type;
+	uint8	bist;
+	uint32	rsvd_a[PPB_RSVDA_MAX];
+	uint8	prim_bus;
+	uint8	sec_bus;
+	uint8	sub_bus;
+	uint8	sec_lat;
+	uint8	io_base;
+	uint8	io_lim;
+	uint16	sec_status;
+	uint16	mem_base;
+	uint16	mem_lim;
+	uint16	pf_mem_base;
+	uint16	pf_mem_lim;
+	uint32	pf_mem_base_hi;
+	uint32	pf_mem_lim_hi;
+	uint16	io_base_hi;
+	uint16	io_lim_hi;
+	uint16	subsys_vendor;
+	uint16	subsys_id;
+	uint32	rsvd_b;
+	uint8	rsvd_c;
+	uint8	int_pin;
+	uint16	bridge_ctrl;
+	uint8	chip_ctrl;
+	uint8	diag_ctrl;
+	uint16	arb_ctrl;
+	uint32	rsvd_d[PPB_RSVDD_MAX];
+	uint8	dev_dep[192];
+} ppb_config_regs;
+
+
+/* PCI CAPABILITY DEFINES */
+#define PCI_CAP_POWERMGMTCAP_ID		0x01
+#define PCI_CAP_MSICAP_ID		0x05
+#define PCI_CAP_VENDSPEC_ID		0x09
+#define PCI_CAP_PCIECAP_ID		0x10
+
+/* Data structure to define the Message Signalled Interrupt facility
+ * Valid for PCI and PCIE configurations
+ */
+typedef struct _pciconfig_cap_msi {
+	uint8	capID;
+	uint8	nextptr;
+	uint16	msgctrl;
+	uint32	msgaddr;
+} pciconfig_cap_msi;
+#define MSI_ENABLE	0x1		/* bit 0 of msgctrl */
+
+/* Data structure to define the Power management facility
+ * Valid for PCI and PCIE configurations
+ */
+typedef struct _pciconfig_cap_pwrmgmt {
+	uint8	capID;
+	uint8	nextptr;
+	uint16	pme_cap;
+	uint16	pme_sts_ctrl;
+	uint8	pme_bridge_ext;
+	uint8	data;
+} pciconfig_cap_pwrmgmt;
+
+#define PME_CAP_PM_STATES (0x1f << 27)	/* Bits 31:27 states that can generate PME */
+#define PME_CSR_OFFSET	    0x4		/* 4-byte offset */
+#define PME_CSR_PME_EN	  (1 << 8)	/* Bit 8 Enable generating of PME */
+#define PME_CSR_PME_STAT  (1 << 15)	/* Bit 15 PME got asserted */
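A minimal sketch of how these PME bits are typically exercised, assuming hypothetical 16-bit config read/write helpers and taking pm_capptr as the config offset of the power management capability. Per the PCI PM spec the PME status bit is write-one-to-clear, so it is written back before enabling.

/* Illustrative sketch: clear stale PME status and enable PME generation. */
extern uint16 cfg_read16(void *dev, uint32 offset);		/* hypothetical */
extern void cfg_write16(void *dev, uint32 offset, uint16 val);	/* hypothetical */

static void pme_enable_sketch(void *dev, uint32 pm_capptr)
{
	uint32 csr_off = pm_capptr + PME_CSR_OFFSET;
	uint16 csr = cfg_read16(dev, csr_off);

	csr |= PME_CSR_PME_STAT;	/* write 1 to clear any pending status */
	csr |= PME_CSR_PME_EN;		/* allow the function to assert PME */
	cfg_write16(dev, csr_off, csr);
}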
+
+/* Data structure to define the PCIE capability */
+typedef struct _pciconfig_cap_pcie {
+	uint8	capID;
+	uint8	nextptr;
+	uint16	pcie_cap;
+	uint32	dev_cap;
+	uint16	dev_ctrl;
+	uint16	dev_status;
+	uint32	link_cap;
+	uint16	link_ctrl;
+	uint16	link_status;
+	uint32	slot_cap;
+	uint16	slot_ctrl;
+	uint16	slot_status;
+	uint16	root_ctrl;
+	uint16	root_cap;
+	uint32	root_status;
+} pciconfig_cap_pcie;
+
+/* PCIE Enhanced CAPABILITY DEFINES */
+#define PCIE_EXTCFG_OFFSET	0x100
+#define PCIE_ADVERRREP_CAPID	0x0001
+#define PCIE_VC_CAPID		0x0002
+#define PCIE_DEVSNUM_CAPID	0x0003
+#define PCIE_PWRBUDGET_CAPID	0x0004
+
+/* PCIE Extended configuration */
+#define PCIE_ADV_CORR_ERR_MASK	0x114
+#define CORR_ERR_RE	(1 << 0) /* Receiver  */
+#define CORR_ERR_BT 	(1 << 6) /* Bad TLP  */
+#define CORR_ERR_BD	(1 << 7) /* Bad DLLP */
+#define CORR_ERR_RR	(1 << 8) /* REPLAY_NUM rollover */
+#define CORR_ERR_RT	(1 << 12) /* Replay timer timeout */
+#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \
+			 CORR_ERR_RR | CORR_ERR_RT)
+
+/* PCIE Root Control Register bits (Host mode only) */
+#define	PCIE_RC_CORR_SERR_EN		0x0001
+#define	PCIE_RC_NONFATAL_SERR_EN	0x0002
+#define	PCIE_RC_FATAL_SERR_EN		0x0004
+#define	PCIE_RC_PME_INT_EN		0x0008
+#define	PCIE_RC_CRS_EN			0x0010
+
+/* PCIE Root Capability Register bits (Host mode only) */
+#define	PCIE_RC_CRS_VISIBILITY		0x0001
+
+/* Header to define the PCIE specific capabilities in the extended config space */
+typedef struct _pcie_enhanced_caphdr {
+	uint16	capID;
+	uint16	cap_ver : 4;
+	uint16	next_ptr : 12;
+} pcie_enhanced_caphdr;
+
+
+/* Everything below is BRCM HND proprietary */
+
+
+/* Brcm PCI configuration registers */
+#define cap_list	rsvd_a[0]
+#define bar0_window	dev_dep[0x80 - 0x40]
+#define bar1_window	dev_dep[0x84 - 0x40]
+#define sprom_control	dev_dep[0x88 - 0x40]
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define	PCI_BAR0_WIN		0x80	/* backplane address space accessed by BAR0 */
+#define	PCI_BAR1_WIN		0x84	/* backplane address space accessed by BAR1 */
+#define	PCI_SPROM_CONTROL	0x88	/* sprom property control */
+#define	PCI_BAR1_CONTROL	0x8c	/* BAR1 region burst control */
+#define	PCI_INT_STATUS		0x90	/* PCI and other cores interrupts */
+#define	PCI_INT_MASK		0x94	/* mask of PCI and other cores interrupts */
+#define PCI_TO_SB_MB		0x98	/* signal backplane interrupts */
+#define PCI_BACKPLANE_ADDR	0xa0	/* address an arbitrary location on the system backplane */
+#define PCI_BACKPLANE_DATA	0xa4	/* data at the location specified by above address */
+#define	PCI_CLK_CTL_ST		0xa8	/* pci config space clock control/status (>=rev14) */
+#define	PCI_BAR0_WIN2		0xac	/* backplane address space accessed by second 4KB of BAR0 */
+#define	PCI_GPIO_IN		0xb0	/* pci config space gpio input (>=rev3) */
+#define	PCI_GPIO_OUT		0xb4	/* pci config space gpio output (>=rev3) */
+#define	PCI_GPIO_OUTEN		0xb8	/* pci config space gpio output enable (>=rev3) */
+#define	PCI_L1SS_CTRL2		0x24c	/* The L1 PM Substates Control register */
+
+/* Private Registers */
+#define	PCI_STAT_CTRL		0xa80
+#define	PCI_L0_EVENTCNT		0xa84
+#define	PCI_L0_STATETMR		0xa88
+#define	PCI_L1_EVENTCNT		0xa8c
+#define	PCI_L1_STATETMR		0xa90
+#define	PCI_L1_1_EVENTCNT	0xa94
+#define	PCI_L1_1_STATETMR	0xa98
+#define	PCI_L1_2_EVENTCNT	0xa9c
+#define	PCI_L1_2_STATETMR	0xaa0
+#define	PCI_L2_EVENTCNT		0xaa4
+#define	PCI_L2_STATETMR		0xaa8
+
+#define	PCI_PMCR_REFUP		0x1814	/* Trefup time */
+#define	PCI_PMCR_REFUP_EXT	0x1818	/* Trefup extend Max */
+#define PCI_TPOWER_SCALE_MASK 0x3
+#define PCI_TPOWER_SCALE_SHIFT 3 /* 0:1 is scale and 2 is rsvd */
+
+
+#define	PCI_BAR0_SHADOW_OFFSET	(2 * 1024)	/* bar0 + 2K accesses sprom shadow (in pci core) */
+#define	PCI_BAR0_SPROM_OFFSET	(4 * 1024)	/* bar0 + 4K accesses external sprom */
+#define	PCI_BAR0_PCIREGS_OFFSET	(6 * 1024)	/* bar0 + 6K accesses pci core registers */
+#define	PCI_BAR0_PCISBR_OFFSET	(4 * 1024)	/* pci core SB registers are at the end of the
+						 * 8KB window, so their address is the "regular"
+						 * address plus 4K
+						 */
+/*
+ * PCIE GEN2 changed some of the above locations for
+ * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase
+ * BAR0 maps 32K of register space
+*/
+#define PCIE2_BAR0_WIN2		0x70 /* backplane address space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN	0x74 /* backplane address space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN2	0x78 /* backplane address space accessed by second 4KB of BAR0 */
+
+#define PCI_BAR0_WINSZ		(16 * 1024)	/* bar0 window size, matches corerev >= 13 */
+/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
+#define	PCI_16KB0_PCIREGS_OFFSET (8 * 1024)	/* bar0 + 8K accesses pci/pcie core registers */
+#define	PCI_16KB0_CCREGS_OFFSET	(12 * 1024)	/* bar0 + 12K accesses chipc core registers */
+#define PCI_16KBB0_WINSZ	(16 * 1024)	/* bar0 window size */
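For illustration, a sketch of how the 16KB BAR0 offsets translate to register blocks once BAR0 has been mapped; bar0_va stands for a hypothetical virtual address of the mapped window.

/* Illustrative sketch: locate register blocks inside a mapped 16KB BAR0. */
static volatile void *pcie_core_regs(uint8 *bar0_va)
{
	return (volatile void *)(bar0_va + PCI_16KB0_PCIREGS_OFFSET);
}

static volatile void *chipc_core_regs(uint8 *bar0_va)
{
	return (volatile void *)(bar0_va + PCI_16KB0_CCREGS_OFFSET);
}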
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* On AI chips we have a second window where DMP regs are mapped: */
+#define	PCI_16KB0_WIN2_OFFSET	(4 * 1024)	/* bar0 + 4K is "Window 2" */
+
+/* PCI_INT_STATUS */
+#define	PCI_SBIM_STATUS_SERR	0x4	/* backplane SBErr interrupt status */
+
+/* PCI_INT_MASK */
+#define	PCI_SBIM_SHIFT		8	/* backplane core interrupt mask bits offset */
+#define	PCI_SBIM_MASK		0xff00	/* backplane core interrupt mask */
+#define	PCI_SBIM_MASK_SERR	0x4	/* backplane SBErr interrupt mask */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* PCI_SPROM_CONTROL */
+#define SPROM_SZ_MSK		0x02	/* SPROM Size Mask */
+#define SPROM_LOCKED		0x08	/* SPROM Locked */
+#define	SPROM_BLANK		0x04	/* indicating a blank SPROM */
+#define SPROM_WRITEEN		0x10	/* SPROM write enable */
+#define SPROM_BOOTROM_WE	0x20	/* external bootrom write enable */
+#define SPROM_BACKPLANE_EN	0x40	/* Enable indirect backplane access */
+#define SPROM_OTPIN_USE		0x80	/* device OTP In use */
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* Bits in PCI command and status regs */
+#define PCI_CMD_IO		0x00000001	/* I/O enable */
+#define PCI_CMD_MEMORY		0x00000002	/* Memory enable */
+#define PCI_CMD_MASTER		0x00000004	/* Master enable */
+#define PCI_CMD_SPECIAL		0x00000008	/* Special cycles enable */
+#define PCI_CMD_INVALIDATE	0x00000010	/* Memory Write and Invalidate enable */
+#define PCI_CMD_VGA_PAL		0x00000040	/* VGA Palette */
+#define PCI_STAT_TA		0x08000000	/* target abort status */
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define PCI_CONFIG_SPACE_SIZE	256
+#endif	/* _h_pcicfg_ */
diff --git a/drivers/net/wireless/bcmdhd/include/pcie_core.h b/drivers/net/wireless/bcmdhd/include/pcie_core.h
new file mode 100644
index 0000000000000000000000000000000000000000..73028da88e6e4a763905586cfa43587df6788b43
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcie_core.h
@@ -0,0 +1,624 @@
+/*
+ * BCM43XX PCIE core hardware definitions.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: pcie_core.h 483003 2014-06-05 19:57:46Z $
+ */
+#ifndef	_PCIE_CORE_H
+#define	_PCIE_CORE_H
+
+#include <sbhnddma.h>
+#include <siutils.h>
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif
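The effect of the PAD trick above is easiest to see expanded: each use pastes the current source line number onto "pad", so repeated reserved fields in one struct get distinct member names. A hedged illustration (the line numbers are made up):

/* Illustrative sketch: with PAD defined as above,
 *
 *	typedef struct example_regs {
 *		uint32 ctrl;
 *		uint32 PAD[2];		// suppose this is source line 230
 *		uint32 status;
 *		uint32 PAD[4];		// suppose this is source line 232
 *	} example_regs_t;
 *
 * preprocesses to members pad230[2] and pad232[4], so the two reserved
 * gaps do not collide; the two-level _XSTR/_PADLINE expansion is what
 * forces __LINE__ to be evaluated before the token paste.
 */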
+
+/* PCIE Enumeration space offsets */
+#define  PCIE_CORE_CONFIG_OFFSET	0x0
+#define  PCIE_FUNC0_CONFIG_OFFSET	0x400
+#define  PCIE_FUNC1_CONFIG_OFFSET	0x500
+#define  PCIE_FUNC2_CONFIG_OFFSET	0x600
+#define  PCIE_FUNC3_CONFIG_OFFSET	0x700
+#define  PCIE_SPROM_SHADOW_OFFSET	0x800
+#define  PCIE_SBCONFIG_OFFSET		0xE00
+
+
+#define PCIEDEV_MAX_DMAS			4
+
+/* PCIE Bar0 Address Mapping. Each function maps 16KB config space */
+#define PCIE_DEV_BAR0_SIZE		0x4000
+#define PCIE_BAR0_WINMAPCORE_OFFSET	0x0
+#define PCIE_BAR0_EXTSPROM_OFFSET	0x1000
+#define PCIE_BAR0_PCIECORE_OFFSET	0x2000
+#define PCIE_BAR0_CCCOREREG_OFFSET	0x3000
+
+/* different register spaces to access through pcie indirect access */
+#define PCIE_CONFIGREGS 	1		/* Access to config space */
+#define PCIE_PCIEREGS 		2		/* Access to pcie registers */
+
+/* dma regs to control the flow between host2dev and dev2host  */
+typedef struct pcie_devdmaregs {
+	dma64regs_t	tx;
+	uint32		PAD[2];
+	dma64regs_t	rx;
+	uint32		PAD[2];
+} pcie_devdmaregs_t;
+
+#define PCIE_DB_HOST2DEV_0		0x1
+#define PCIE_DB_HOST2DEV_1		0x2
+#define PCIE_DB_DEV2HOST_0		0x3
+#define PCIE_DB_DEV2HOST_1		0x4
+
+/* door bell register sets */
+typedef struct pcie_doorbell {
+	uint32		host2dev_0;
+	uint32		host2dev_1;
+	uint32		dev2host_0;
+	uint32		dev2host_1;
+} pcie_doorbell_t;
+
+/* SB side: PCIE core and host control registers */
+typedef struct sbpcieregs {
+	uint32 control;		/* host mode only */
+	uint32 iocstatus;	/* PCIE2: iostatus */
+	uint32 PAD[1];
+	uint32 biststatus;	/* bist Status: 0x00C */
+	uint32 gpiosel;		/* PCIE gpio sel: 0x010 */
+	uint32 gpioouten;	/* PCIE gpio outen: 0x14 */
+	uint32 PAD[2];
+	uint32 intstatus;	/* Interrupt status: 0x20 */
+	uint32 intmask;		/* Interrupt mask: 0x24 */
+	uint32 sbtopcimailbox;	/* sb to pcie mailbox: 0x028 */
+	uint32 obffcontrol;	/* PCIE2: 0x2C */
+	uint32 obffintstatus;	/* PCIE2: 0x30 */
+	uint32 obffdatastatus;	/* PCIE2: 0x34 */
+	uint32 PAD[2];
+	uint32 errlog;		/* PCIE2: 0x40 */
+	uint32 errlogaddr;	/* PCIE2: 0x44 */
+	uint32 mailboxint;	/* PCIE2: 0x48 */
+	uint32 mailboxintmsk; /* PCIE2: 0x4c */
+	uint32 ltrspacing;	/* PCIE2: 0x50 */
+	uint32 ltrhysteresiscnt;	/* PCIE2: 0x54 */
+	uint32 PAD[42];
+
+	uint32 sbtopcie0;	/* sb to pcie translation 0: 0x100 */
+	uint32 sbtopcie1;	/* sb to pcie translation 1: 0x104 */
+	uint32 sbtopcie2;	/* sb to pcie translation 2: 0x108 */
+	uint32 PAD[5];
+
+	/* pcie core supports indirect access to config space */
+	uint32 configaddr;	/* pcie config space access: Address field: 0x120 */
+	uint32 configdata;	/* pcie config space access: Data field: 0x124 */
+	union {
+		struct {
+			/* mdio access to serdes */
+			uint32 mdiocontrol;	/* controls the mdio access: 0x128 */
+			uint32 mdiodata;	/* Data to the mdio access: 0x12c */
+			/* pcie protocol phy/dllp/tlp register indirect access mechanism */
+			uint32 pcieindaddr; /* indirect access to the internal register: 0x130 */
+			uint32 pcieinddata;	/* Data to/from the internal register: 0x134 */
+			uint32 clkreqenctrl;	/* >= rev 6, Clkreq rdma control : 0x138 */
+			uint32 PAD[177];
+		} pcie1;
+		struct {
+			/* mdio access to serdes */
+			uint32 mdiocontrol;	/* controls the mdio access: 0x128 */
+			uint32 mdiowrdata;	/* write data to mdio 0x12C */
+			uint32 mdiorddata;	/* read data to mdio 0x130 */
+			uint32	PAD[3]; 	/* 0x134-0x138-0x13c */
+			/* door bell registers available from gen2 rev5 onwards */
+			pcie_doorbell_t	   dbls[PCIEDEV_MAX_DMAS]; /* 0x140 - 0x17F */
+			uint32	dataintf;	/* 0x180 */
+			uint32  PAD[1];		/* 0x184 */
+			uint32	d2h_intrlazy_0; /* 0x188 */
+			uint32	h2d_intrlazy_0; /* 0x18c */
+			uint32  h2d_intstat_0;  /* 0x190 */
+			uint32  h2d_intmask_0;	/* 0x194 */
+			uint32  d2h_intstat_0;  /* 0x198 */
+			uint32  d2h_intmask_0;  /* 0x19c */
+			uint32	ltr_state;	/* 0x1A0 */
+			uint32	pwr_int_status;	/* 0x1A4 */
+			uint32	pwr_int_mask;	/* 0x1A8 */
+			uint32  PAD[21]; 	/* 0x1AC - 0x200 */
+			pcie_devdmaregs_t  h2d0_dmaregs; /* 0x200 - 0x23c */
+			pcie_devdmaregs_t  d2h0_dmaregs; /* 0x240 - 0x27c */
+			pcie_devdmaregs_t  h2d1_dmaregs; /* 0x280 - 0x2bc */
+			pcie_devdmaregs_t  d2h1_dmaregs; /* 0x2c0 - 0x2fc */
+			pcie_devdmaregs_t  h2d2_dmaregs; /* 0x300 - 0x33c */
+			pcie_devdmaregs_t  d2h2_dmaregs; /* 0x340 - 0x37c */
+			pcie_devdmaregs_t  h2d3_dmaregs; /* 0x380 - 0x3bc */
+			pcie_devdmaregs_t  d2h3_dmaregs; /* 0x3c0 - 0x3fc */
+		} pcie2;
+	} u;
+	uint32 pciecfg[4][64];	/* 0x400 - 0x7FF, PCIE Cfg Space */
+	uint16 sprom[64];	/* SPROM shadow Area */
+} sbpcieregs_t;
+
+/* PCI control */
+#define PCIE_RST_OE	0x01	/* When set, drives PCI_RESET out to pin */
+#define PCIE_RST	0x02	/* Value driven out to pin */
+#define PCIE_SPERST	0x04	/* SurvivePeRst */
+#define PCIE_DISABLE_L1CLK_GATING	0x10
+#define PCIE_DLYPERST	0x100	/* Delay PeRst to CoE Core */
+#define PCIE_DISSPROMLD	0x200	/* DisableSpromLoadOnPerst */
+#define PCIE_WakeModeL2	0x1000	/* Wake on L2 */
+
+#define	PCIE_CFGADDR	0x120	/* offsetof(configaddr) */
+#define	PCIE_CFGDATA	0x124	/* offsetof(configdata) */
+
+/* Interrupt status/mask */
+#define PCIE_INTA	0x01	/* PCIE INTA message is received */
+#define PCIE_INTB	0x02	/* PCIE INTB message is received */
+#define PCIE_INTFATAL	0x04	/* PCIE INTFATAL message is received */
+#define PCIE_INTNFATAL	0x08	/* PCIE INTNONFATAL message is received */
+#define PCIE_INTCORR	0x10	/* PCIE INTCORR message is received */
+#define PCIE_INTPME	0x20	/* PCIE INTPME message is received */
+#define PCIE_PERST	0x40	/* PCIE Reset Interrupt */
+
+#define PCIE_INT_MB_FN0_0 0x0100 /* PCIE to SB Mailbox int Fn0.0 is received */
+#define PCIE_INT_MB_FN0_1 0x0200 /* PCIE to SB Mailbox int Fn0.1 is received */
+#define PCIE_INT_MB_FN1_0 0x0400 /* PCIE to SB Mailbox int Fn1.0 is received */
+#define PCIE_INT_MB_FN1_1 0x0800 /* PCIE to SB Mailbox int Fn1.1 is received */
+#define PCIE_INT_MB_FN2_0 0x1000 /* PCIE to SB Mailbox int Fn2.0 is received */
+#define PCIE_INT_MB_FN2_1 0x2000 /* PCIE to SB Mailbox int Fn2.1 is received */
+#define PCIE_INT_MB_FN3_0 0x4000 /* PCIE to SB Mailbox int Fn3.0 is received */
+#define PCIE_INT_MB_FN3_1 0x8000 /* PCIE to SB Mailbox int Fn3.1 is received */
+
+/* PCIE MailboxInt/MailboxIntMask register */
+#define PCIE_MB_TOSB_FN0_0   	0x0001 /* write to assert PCIEtoSB Mailbox interrupt */
+#define PCIE_MB_TOSB_FN0_1   	0x0002
+#define PCIE_MB_TOSB_FN1_0   	0x0004
+#define PCIE_MB_TOSB_FN1_1   	0x0008
+#define PCIE_MB_TOSB_FN2_0   	0x0010
+#define PCIE_MB_TOSB_FN2_1   	0x0020
+#define PCIE_MB_TOSB_FN3_0   	0x0040
+#define PCIE_MB_TOSB_FN3_1   	0x0080
+#define PCIE_MB_TOPCIE_FN0_0 	0x0100 /* int status/mask for SBtoPCIE Mailbox interrupts */
+#define PCIE_MB_TOPCIE_FN0_1 	0x0200
+#define PCIE_MB_TOPCIE_FN1_0 	0x0400
+#define PCIE_MB_TOPCIE_FN1_1 	0x0800
+#define PCIE_MB_TOPCIE_FN2_0 	0x1000
+#define PCIE_MB_TOPCIE_FN2_1 	0x2000
+#define PCIE_MB_TOPCIE_FN3_0 	0x4000
+#define PCIE_MB_TOPCIE_FN3_1 	0x8000
+#define	PCIE_MB_TOPCIE_D2H0_DB0	0x10000
+#define	PCIE_MB_TOPCIE_D2H0_DB1	0x20000
+#define	PCIE_MB_TOPCIE_D2H1_DB0	0x40000
+#define	PCIE_MB_TOPCIE_D2H1_DB1	0x80000
+#define	PCIE_MB_TOPCIE_D2H2_DB0	0x100000
+#define	PCIE_MB_TOPCIE_D2H2_DB1	0x200000
+#define	PCIE_MB_TOPCIE_D2H3_DB0	0x400000
+#define	PCIE_MB_TOPCIE_D2H3_DB1	0x800000
+
+#define PCIE_MB_D2H_MB_MASK		\
+	(PCIE_MB_TOPCIE_D2H0_DB0 | PCIE_MB_TOPCIE_D2H0_DB1 |	\
+	PCIE_MB_TOPCIE_D2H1_DB0  | PCIE_MB_TOPCIE_D2H1_DB1 |	\
+	PCIE_MB_TOPCIE_D2H2_DB0  | PCIE_MB_TOPCIE_D2H2_DB1 |	\
+	PCIE_MB_TOPCIE_D2H3_DB0  | PCIE_MB_TOPCIE_D2H3_DB1)
+
+/* SB to PCIE translation masks */
+#define SBTOPCIE0_MASK	0xfc000000
+#define SBTOPCIE1_MASK	0xfc000000
+#define SBTOPCIE2_MASK	0xc0000000
+
+/* Access type bits (0:1) */
+#define SBTOPCIE_MEM	0
+#define SBTOPCIE_IO	1
+#define SBTOPCIE_CFG0	2
+#define SBTOPCIE_CFG1	3
+
+/* Prefetch enable bit 2 */
+#define SBTOPCIE_PF		4
+
+/* Write Burst enable for memory write bit 3 */
+#define SBTOPCIE_WR_BURST	8
+
+/* config access */
+#define CONFIGADDR_FUNC_MASK	0x7000
+#define CONFIGADDR_FUNC_SHF	12
+#define CONFIGADDR_REG_MASK	0x0FFF
+#define CONFIGADDR_REG_SHF	0
+
+#define PCIE_CONFIG_INDADDR(f, r)	((((f) & CONFIGADDR_FUNC_MASK) << CONFIGADDR_FUNC_SHF) | \
+			                 (((r) & CONFIGADDR_REG_MASK) << CONFIGADDR_REG_SHF))
+
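A minimal sketch of the indirect config access sequence through the core's configaddr/configdata pair in sbpcieregs_t above. The field packing of configaddr is assumed to follow the masks just defined, and W_REG/R_REG/osl_t are the driver's usual OSL register accessors, taken as given here; the readback of configaddr is only an illustrative ordering guard.

/* Illustrative sketch: read one dword of a function's config space. */
static uint32 pcie_ind_cfg_read(osl_t *osh, sbpcieregs_t *regs,
                                uint32 func, uint32 offset)
{
	uint32 addr = ((func << CONFIGADDR_FUNC_SHF) & CONFIGADDR_FUNC_MASK) |
	              ((offset << CONFIGADDR_REG_SHF) & CONFIGADDR_REG_MASK);

	W_REG(osh, &regs->configaddr, addr);
	(void)R_REG(osh, &regs->configaddr);	/* ensure the address is posted */
	return R_REG(osh, &regs->configdata);
}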
+/* PCIE protocol regs Indirect Address */
+#define PCIEADDR_PROT_MASK	0x300
+#define PCIEADDR_PROT_SHF	8
+#define PCIEADDR_PL_TLP		0
+#define PCIEADDR_PL_DLLP	1
+#define PCIEADDR_PL_PLP		2
+
+/* PCIE protocol PHY diagnostic registers */
+#define	PCIE_PLP_MODEREG		0x200 /* Mode */
+#define	PCIE_PLP_STATUSREG		0x204 /* Status */
+#define PCIE_PLP_LTSSMCTRLREG		0x208 /* LTSSM control */
+#define PCIE_PLP_LTLINKNUMREG		0x20c /* Link Training Link number */
+#define PCIE_PLP_LTLANENUMREG		0x210 /* Link Training Lane number */
+#define PCIE_PLP_LTNFTSREG		0x214 /* Link Training N_FTS */
+#define PCIE_PLP_ATTNREG		0x218 /* Attention */
+#define PCIE_PLP_ATTNMASKREG		0x21C /* Attention Mask */
+#define PCIE_PLP_RXERRCTR		0x220 /* Rx Error */
+#define PCIE_PLP_RXFRMERRCTR		0x224 /* Rx Framing Error */
+#define PCIE_PLP_RXERRTHRESHREG		0x228 /* Rx Error threshold */
+#define PCIE_PLP_TESTCTRLREG		0x22C /* Test Control reg */
+#define PCIE_PLP_SERDESCTRLOVRDREG	0x230 /* SERDES Control Override */
+#define PCIE_PLP_TIMINGOVRDREG		0x234 /* Timing param override */
+#define PCIE_PLP_RXTXSMDIAGREG		0x238 /* RXTX State Machine Diag */
+#define PCIE_PLP_LTSSMDIAGREG		0x23C /* LTSSM State Machine Diag */
+
+/* PCIE protocol DLLP diagnostic registers */
+#define PCIE_DLLP_LCREG			0x100 /* Link Control */
+#define PCIE_DLLP_LSREG			0x104 /* Link Status */
+#define PCIE_DLLP_LAREG			0x108 /* Link Attention */
+#define PCIE_DLLP_LAMASKREG		0x10C /* Link Attention Mask */
+#define PCIE_DLLP_NEXTTXSEQNUMREG	0x110 /* Next Tx Seq Num */
+#define PCIE_DLLP_ACKEDTXSEQNUMREG	0x114 /* Acked Tx Seq Num */
+#define PCIE_DLLP_PURGEDTXSEQNUMREG	0x118 /* Purged Tx Seq Num */
+#define PCIE_DLLP_RXSEQNUMREG		0x11C /* Rx Sequence Number */
+#define PCIE_DLLP_LRREG			0x120 /* Link Replay */
+#define PCIE_DLLP_LACKTOREG		0x124 /* Link Ack Timeout */
+#define PCIE_DLLP_PMTHRESHREG		0x128 /* Power Management Threshold */
+#define PCIE_DLLP_RTRYWPREG		0x12C /* Retry buffer write ptr */
+#define PCIE_DLLP_RTRYRPREG		0x130 /* Retry buffer Read ptr */
+#define PCIE_DLLP_RTRYPPREG		0x134 /* Retry buffer Purged ptr */
+#define PCIE_DLLP_RTRRWREG		0x138 /* Retry buffer Read/Write */
+#define PCIE_DLLP_ECTHRESHREG		0x13C /* Error Count Threshold */
+#define PCIE_DLLP_TLPERRCTRREG		0x140 /* TLP Error Counter */
+#define PCIE_DLLP_ERRCTRREG		0x144 /* Error Counter */
+#define PCIE_DLLP_NAKRXCTRREG		0x148 /* NAK Received Counter */
+#define PCIE_DLLP_TESTREG		0x14C /* Test */
+#define PCIE_DLLP_PKTBIST		0x150 /* Packet BIST */
+#define PCIE_DLLP_PCIE11		0x154 /* DLLP PCIE 1.1 reg */
+
+#define PCIE_DLLP_LSREG_LINKUP		(1 << 16)
+
+/* PCIE protocol TLP diagnostic registers */
+#define PCIE_TLP_CONFIGREG		0x000 /* Configuration */
+#define PCIE_TLP_WORKAROUNDSREG		0x004 /* TLP Workarounds */
+#define PCIE_TLP_WRDMAUPPER		0x010 /* Write DMA Upper Address */
+#define PCIE_TLP_WRDMALOWER		0x014 /* Write DMA Lower Address */
+#define PCIE_TLP_WRDMAREQ_LBEREG	0x018 /* Write DMA Len/ByteEn Req */
+#define PCIE_TLP_RDDMAUPPER		0x01C /* Read DMA Upper Address */
+#define PCIE_TLP_RDDMALOWER		0x020 /* Read DMA Lower Address */
+#define PCIE_TLP_RDDMALENREG		0x024 /* Read DMA Len Req */
+#define PCIE_TLP_MSIDMAUPPER		0x028 /* MSI DMA Upper Address */
+#define PCIE_TLP_MSIDMALOWER		0x02C /* MSI DMA Lower Address */
+#define PCIE_TLP_MSIDMALENREG		0x030 /* MSI DMA Len Req */
+#define PCIE_TLP_SLVREQLENREG		0x034 /* Slave Request Len */
+#define PCIE_TLP_FCINPUTSREQ		0x038 /* Flow Control Inputs */
+#define PCIE_TLP_TXSMGRSREQ		0x03C /* Tx StateMachine and Gated Req */
+#define PCIE_TLP_ADRACKCNTARBLEN	0x040 /* Address Ack XferCnt and ARB Len */
+#define PCIE_TLP_DMACPLHDR0		0x044 /* DMA Completion Hdr 0 */
+#define PCIE_TLP_DMACPLHDR1		0x048 /* DMA Completion Hdr 1 */
+#define PCIE_TLP_DMACPLHDR2		0x04C /* DMA Completion Hdr 2 */
+#define PCIE_TLP_DMACPLMISC0		0x050 /* DMA Completion Misc0 */
+#define PCIE_TLP_DMACPLMISC1		0x054 /* DMA Completion Misc1 */
+#define PCIE_TLP_DMACPLMISC2		0x058 /* DMA Completion Misc2 */
+#define PCIE_TLP_SPTCTRLLEN		0x05C /* Split Controller Req len */
+#define PCIE_TLP_SPTCTRLMSIC0		0x060 /* Split Controller Misc 0 */
+#define PCIE_TLP_SPTCTRLMSIC1		0x064 /* Split Controller Misc 1 */
+#define PCIE_TLP_BUSDEVFUNC		0x068 /* Bus/Device/Func */
+#define PCIE_TLP_RESETCTR		0x06C /* Reset Counter */
+#define PCIE_TLP_RTRYBUF		0x070 /* Retry Buffer value */
+#define PCIE_TLP_TGTDEBUG1		0x074 /* Target Debug Reg1 */
+#define PCIE_TLP_TGTDEBUG2		0x078 /* Target Debug Reg2 */
+#define PCIE_TLP_TGTDEBUG3		0x07C /* Target Debug Reg3 */
+#define PCIE_TLP_TGTDEBUG4		0x080 /* Target Debug Reg4 */
+
+/* PCIE2 MDIO register offsets */
+#define PCIE2_MDIO_CONTROL    0x128
+#define PCIE2_MDIO_WR_DATA    0x12C
+#define PCIE2_MDIO_RD_DATA    0x130
+
+
+/* MDIO control */
+#define MDIOCTL_DIVISOR_MASK		0x7f	/* clock to be used on MDIO */
+#define MDIOCTL_DIVISOR_VAL		0x2
+#define MDIOCTL_PREAM_EN		0x80	/* Enable preamble sequence */
+#define MDIOCTL_ACCESS_DONE		0x100   /* Transaction complete */
+
+/* MDIO Data */
+#define MDIODATA_MASK			0x0000ffff	/* data 2 bytes */
+#define MDIODATA_TA			0x00020000	/* Turnaround */
+#define MDIODATA_REGADDR_SHF_OLD	18		/* Regaddr shift (rev < 10) */
+#define MDIODATA_REGADDR_MASK_OLD	0x003c0000	/* Regaddr Mask (rev < 10) */
+#define MDIODATA_DEVADDR_SHF_OLD	22		/* Physmedia devaddr shift (rev < 10) */
+#define MDIODATA_DEVADDR_MASK_OLD	0x0fc00000	/* Physmedia devaddr Mask (rev < 10) */
+#define MDIODATA_REGADDR_SHF		18		/* Regaddr shift */
+#define MDIODATA_REGADDR_MASK		0x007c0000	/* Regaddr Mask */
+#define MDIODATA_DEVADDR_SHF		23		/* Physmedia devaddr shift */
+#define MDIODATA_DEVADDR_MASK		0x0f800000	/* Physmedia devaddr Mask */
+#define MDIODATA_WRITE			0x10000000	/* write Transaction */
+#define MDIODATA_READ			0x20000000	/* Read Transaction */
+#define MDIODATA_START			0x40000000	/* start of Transaction */
+
+#define MDIODATA_DEV_ADDR		0x0		/* dev address for serdes */
+#define	MDIODATA_BLK_ADDR		0x1F		/* blk address for serdes */
+
+/* MDIO control/wrData/rdData register defines for PCIE Gen 2 */
+#define MDIOCTL2_DIVISOR_MASK		0x7f	/* clock to be used on MDIO */
+#define MDIOCTL2_DIVISOR_VAL		0x2
+#define MDIOCTL2_REGADDR_SHF		8		/* Regaddr shift */
+#define MDIOCTL2_REGADDR_MASK		0x00FFFF00	/* Regaddr Mask */
+#define MDIOCTL2_DEVADDR_SHF		24		/* Physmedia devaddr shift */
+#define MDIOCTL2_DEVADDR_MASK		0x0f000000	/* Physmedia devaddr Mask */
+#define MDIOCTL2_SLAVE_BYPASS		0x10000000	/* IP slave bypass */
+#define MDIOCTL2_READ			0x20000000	/* Read transaction */
+
+#define MDIODATA2_DONE			0x80000000	/* rd/wr transaction done */
+#define MDIODATA2_MASK			0x7FFFFFFF	/* rd/wr transaction data */
+#define MDIODATA2_DEVADDR_SHF		4		/* Physmedia devaddr shift */
+
+
+/* MDIO devices (SERDES modules)
+ *  Unlike old pcie cores (rev < 10), the rev10 pcie serdes organizes registers into a few blocks,
+ *  so a two-layer mapping (block idx, register offset) is required.
+ *  A composition sketch for the pre-rev10 addressing follows the device list below.
+ */
+#define MDIO_DEV_IEEE0		0x000
+#define MDIO_DEV_IEEE1		0x001
+#define MDIO_DEV_BLK0		0x800
+#define MDIO_DEV_BLK1		0x801
+#define MDIO_DEV_BLK2		0x802
+#define MDIO_DEV_BLK3		0x803
+#define MDIO_DEV_BLK4		0x804
+#define MDIO_DEV_TXPLL		0x808	/* TXPLL register block idx */
+#define MDIO_DEV_TXCTRL0	0x820
+#define MDIO_DEV_SERDESID	0x831
+#define MDIO_DEV_RXCTRL0	0x840
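For illustration, a sketch of how a pre-rev10 serdes MDIO write word would be composed from the bit fields above; the real transaction also programs mdiocontrol and polls MDIOCTL_ACCESS_DONE, which is omitted here.

/* Illustrative sketch: build the MDIO data word for a serdes register write
 * on cores with rev < 10 (device/register addressing, no block layer).
 */
static uint32 mdio_wr_word(uint32 devaddr, uint32 regaddr, uint16 val)
{
	return MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
	       ((devaddr << MDIODATA_DEVADDR_SHF_OLD) & MDIODATA_DEVADDR_MASK_OLD) |
	       ((regaddr << MDIODATA_REGADDR_SHF_OLD) & MDIODATA_REGADDR_MASK_OLD) |
	       (val & MDIODATA_MASK);
}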
+
+
+/* XgxsBlk1_A Register Offsets */
+#define BLK1_PWR_MGMT0		0x16
+#define BLK1_PWR_MGMT1		0x17
+#define BLK1_PWR_MGMT2		0x18
+#define BLK1_PWR_MGMT3		0x19
+#define BLK1_PWR_MGMT4		0x1A
+
+/* serdes regs (rev < 10) */
+#define MDIODATA_DEV_PLL       		0x1d	/* SERDES PLL Dev */
+#define MDIODATA_DEV_TX        		0x1e	/* SERDES TX Dev */
+#define MDIODATA_DEV_RX        		0x1f	/* SERDES RX Dev */
+	/* SERDES RX registers */
+#define SERDES_RX_CTRL			1	/* Rx cntrl */
+#define SERDES_RX_TIMER1		2	/* Rx Timer1 */
+#define SERDES_RX_CDR			6	/* CDR */
+#define SERDES_RX_CDRBW			7	/* CDR BW */
+
+	/* SERDES RX control register */
+#define SERDES_RX_CTRL_FORCE		0x80	/* rxpolarity_force */
+#define SERDES_RX_CTRL_POLARITY		0x40	/* rxpolarity_value */
+
+	/* SERDES PLL registers */
+#define SERDES_PLL_CTRL                 1       /* PLL control reg */
+#define PLL_CTRL_FREQDET_EN             0x4000  /* bit 14 is FREQDET on */
+
+/* Power management threshold */
+#define PCIE_L0THRESHOLDTIME_MASK       0xFF00	/* bits 0 - 7 */
+#define PCIE_L1THRESHOLDTIME_MASK       0xFF00	/* bits 8 - 15 */
+#define PCIE_L1THRESHOLDTIME_SHIFT      8	/* PCIE_L1THRESHOLDTIME_SHIFT */
+#define PCIE_L1THRESHOLD_WARVAL         0x72	/* WAR value */
+#define PCIE_ASPMTIMER_EXTEND		0x01000000	/* > rev7: enable extended ASPM timer */
+
+/* SPROM offsets */
+#define SRSH_ASPM_OFFSET		4	/* word 4 */
+#define SRSH_ASPM_ENB			0x18	/* bit 3, 4 */
+#define SRSH_ASPM_L1_ENB		0x10	/* bit 4 */
+#define SRSH_ASPM_L0s_ENB		0x8	/* bit 3 */
+#define SRSH_PCIE_MISC_CONFIG		5	/* word 5 */
+#define SRSH_L23READY_EXIT_NOPERST	0x8000	/* bit 15 */
+#define SRSH_CLKREQ_OFFSET_REV5		20	/* word 20 for srom rev <= 5 */
+#define SRSH_CLKREQ_OFFSET_REV8		52	/* word 52 for srom rev 8 */
+#define SRSH_CLKREQ_ENB			0x0800	/* bit 11 */
+#define SRSH_BD_OFFSET                  6       /* word 6 */
+#define SRSH_AUTOINIT_OFFSET            18      /* auto initialization enable */
+
+/* Linkcontrol reg offset in PCIE Cap */
+#define PCIE_CAP_LINKCTRL_OFFSET	16	/* linkctrl offset in pcie cap */
+#define PCIE_CAP_LCREG_ASPML0s		0x01	/* ASPM L0s in linkctrl */
+#define PCIE_CAP_LCREG_ASPML1		0x02	/* ASPM L1 in linkctrl */
+#define PCIE_CLKREQ_ENAB		0x100	/* CLKREQ Enab in linkctrl */
+#define PCIE_LINKSPEED_MASK       	0xF0000	/* bits 0 - 3 of high word */
+#define PCIE_LINKSPEED_SHIFT      	16	/* PCIE_LINKSPEED_SHIFT */
+
+/* Devcontrol reg offset in PCIE Cap */
+#define PCIE_CAP_DEVCTRL_OFFSET		8	/* devctrl offset in pcie cap */
+#define PCIE_CAP_DEVCTRL_MRRS_MASK	0x7000	/* Max read request size mask */
+#define PCIE_CAP_DEVCTRL_MRRS_SHIFT	12	/* Max read request size shift */
+#define PCIE_CAP_DEVCTRL_MRRS_128B	0	/* 128 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_256B	1	/* 256 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_512B	2	/* 512 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_1024B	3	/* 1024 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_MASK	0x00e0	/* Max payload size mask */
+#define PCIE_CAP_DEVCTRL_MPS_SHIFT	5	/* Max payload size shift */
+#define PCIE_CAP_DEVCTRL_MPS_128B	0	/* 128 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_256B	1	/* 256 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_512B	2	/* 512 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_1024B	3	/* 1024 Byte */
+
+#define PCIE_ASPM_ENAB			3	/* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L1_ENAB		2	/* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L0s_ENAB		1	/* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_DISAB			0	/* ASPM L0s & L1 in linkctrl */
+
+#define PCIE_ASPM_L11_ENAB		8	/* ASPM L1.1 in PML1_sub_control2 */
+#define PCIE_ASPM_L12_ENAB		4	/* ASPM L1.2 in PML1_sub_control2 */
+
+/* Devcontrol2 reg offset in PCIE Cap */
+#define PCIE_CAP_DEVCTRL2_OFFSET	0x28	/* devctrl2 offset in pcie cap */
+#define PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK	0x400	/* Latency Tolerance Reporting Enable */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT 13	/* Enable OBFF mechanism, select signaling method */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK 0x6000	/* Enable OBFF mechanism, select signaling method */
+
+/* LTR registers in PCIE Cap */
+#define PCIE_LTR0_REG_OFFSET	0x844	/* ltr0_reg offset in pcie cap */
+#define PCIE_LTR1_REG_OFFSET	0x848	/* ltr1_reg offset in pcie cap */
+#define PCIE_LTR2_REG_OFFSET	0x84c	/* ltr2_reg offset in pcie cap */
+#define PCIE_LTR0_REG_DEFAULT_60	0x883c883c	/* active latency default to 60usec */
+#define PCIE_LTR0_REG_DEFAULT_150	0x88968896	/* active latency default to 150usec */
+#define PCIE_LTR1_REG_DEFAULT		0x88648864	/* idle latency default to 100usec */
+#define PCIE_LTR2_REG_DEFAULT		0x90039003	/* sleep latency default to 3msec */
+
+/* Status reg PCIE_PLP_STATUSREG */
+#define PCIE_PLP_POLARITYINV_STAT	0x10
+
+
+/* PCIE BRCM Vendor CAP REVID reg  bits */
+#define BRCMCAP_PCIEREV_CT_MASK			0xF00
+#define BRCMCAP_PCIEREV_CT_SHIFT		8
+#define BRCMCAP_PCIEREV_REVID_MASK		0xFF
+#define BRCMCAP_PCIEREV_REVID_SHIFT		0
+
+#define PCIE_REVREG_CT_PCIE1		0
+#define PCIE_REVREG_CT_PCIE2		1
+
+/* PCIE GEN2 specific defines */
+/* PCIE BRCM Vendor Cap offsets w.r.t to vendor cap ptr */
+#define PCIE2R0_BRCMCAP_REVID_OFFSET		4
+#define PCIE2R0_BRCMCAP_BAR0_WIN0_WRAP_OFFSET	8
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_OFFSET	12
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_WRAP_OFFSET	16
+#define PCIE2R0_BRCMCAP_BAR0_WIN_OFFSET		20
+#define PCIE2R0_BRCMCAP_BAR1_WIN_OFFSET		24
+#define PCIE2R0_BRCMCAP_SPROM_CTRL_OFFSET	28
+#define PCIE2R0_BRCMCAP_BAR2_WIN_OFFSET		32
+#define PCIE2R0_BRCMCAP_INTSTATUS_OFFSET	36
+#define PCIE2R0_BRCMCAP_INTMASK_OFFSET		40
+#define PCIE2R0_BRCMCAP_PCIE2SB_MB_OFFSET	44
+#define PCIE2R0_BRCMCAP_BPADDR_OFFSET		48
+#define PCIE2R0_BRCMCAP_BPDATA_OFFSET		52
+#define PCIE2R0_BRCMCAP_CLKCTLSTS_OFFSET	56
+
+/* definition of configuration space registers of PCIe gen2
+ * http://hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/CurrentPcieGen2ProgramGuide/pcie_ep.htm
+ */
+#define PCIECFGREG_STATUS_CMD		0x4
+#define PCIECFGREG_PM_CSR		0x4C
+#define PCIECFGREG_MSI_CAP		0x58
+#define PCIECFGREG_MSI_ADDR_L		0x5C
+#define PCIECFGREG_MSI_ADDR_H		0x60
+#define PCIECFGREG_MSI_DATA		0x64
+#define PCIECFGREG_LINK_STATUS_CTRL	0xBC
+#define PCIECFGREG_LINK_STATUS_CTRL2	0xDC
+#define PCIECFGREG_RBAR_CTRL		0x228
+#define PCIECFGREG_PML1_SUB_CTRL1	0x248
+#define PCIECFGREG_REG_BAR2_CONFIG	0x4E0
+#define PCIECFGREG_REG_BAR3_CONFIG	0x4F4
+#define PCIECFGREG_PDL_CTRL1		0x1004
+#define PCIECFGREG_PDL_IDDQ		0x1814
+#define PCIECFGREG_REG_PHY_CTL7		0x181c
+
+/* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */
+#define PCI_PM_L1_2_ENA_MASK		0x00000001	/* PCI-PM L1.2 Enabled */
+#define PCI_PM_L1_1_ENA_MASK		0x00000002	/* PCI-PM L1.1 Enabled */
+#define ASPM_L1_2_ENA_MASK		0x00000004	/* ASPM L1.2 Enabled */
+#define ASPM_L1_1_ENA_MASK		0x00000008	/* ASPM L1.1 Enabled */
+
+/* PCIe gen2 mailbox interrupt masks */
+#define I_MB    0x3
+#define I_BIT0  0x1
+#define I_BIT1  0x2
+
+/* PCIE gen2 config regs */
+#define PCIIntstatus	0x090
+#define PCIIntmask	0x094
+#define PCISBMbx	0x98
+
+/* enumeration Core regs */
+#define PCIH2D_MailBox  0x140
+#define PCIH2D_DB1 0x144
+#define PCID2H_MailBox  0x148
+#define PCIMailBoxInt	0x48
+#define PCIMailBoxMask	0x4C
+
+#define I_F0_B0         (0x1 << 8) /* Mail box interrupt Function 0 interrupt, bit 0 */
+#define I_F0_B1         (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */
+
+#define PCIECFGREG_DEVCONTROL	0xB4
+
+/* SROM hardware region */
+#define SROM_OFFSET_BAR1_CTRL  52
+
+#define BAR1_ENC_SIZE_MASK	0x000e
+#define BAR1_ENC_SIZE_SHIFT	1
+
+#define BAR1_ENC_SIZE_1M	0
+#define BAR1_ENC_SIZE_2M	1
+#define BAR1_ENC_SIZE_4M	2
+
+#define PCIEGEN2_CAP_DEVSTSCTRL2_OFFSET		0xD4
+#define PCIEGEN2_CAP_DEVSTSCTRL2_LTRENAB	0x400
+
+/*
+ * Latency Tolerance Reporting (LTR) states
+ * Active has the least tolerant latency requirement
+ * Sleep is most tolerant
+ */
+#define LTR_ACTIVE				2
+#define LTR_ACTIVE_IDLE				1
+#define LTR_SLEEP				0
+#define LTR_FINAL_MASK				0x300
+#define LTR_FINAL_SHIFT				8
+
+/* pwrintstatus, pwrintmask regs */
+#define PCIEGEN2_PWRINT_D0_STATE_SHIFT		0
+#define PCIEGEN2_PWRINT_D1_STATE_SHIFT		1
+#define PCIEGEN2_PWRINT_D2_STATE_SHIFT		2
+#define PCIEGEN2_PWRINT_D3_STATE_SHIFT		3
+#define PCIEGEN2_PWRINT_L0_LINK_SHIFT		4
+#define PCIEGEN2_PWRINT_L0s_LINK_SHIFT		5
+#define PCIEGEN2_PWRINT_L1_LINK_SHIFT		6
+#define PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT	7
+#define PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT	8
+
+#define PCIEGEN2_PWRINT_D0_STATE_MASK		(1 << PCIEGEN2_PWRINT_D0_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D1_STATE_MASK		(1 << PCIEGEN2_PWRINT_D1_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D2_STATE_MASK		(1 << PCIEGEN2_PWRINT_D2_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D3_STATE_MASK		(1 << PCIEGEN2_PWRINT_D3_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_L0_LINK_MASK		(1 << PCIEGEN2_PWRINT_L0_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L0s_LINK_MASK		(1 << PCIEGEN2_PWRINT_L0s_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L1_LINK_MASK		(1 << PCIEGEN2_PWRINT_L1_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L2_L3_LINK_MASK		(1 << PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_OBFF_CHANGE_MASK	(1 << PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT)
+
+/* sbtopcie mail box */
+#define SBTOPCIE_MB_FUNC0_SHIFT 8
+#define SBTOPCIE_MB_FUNC1_SHIFT 10
+#define SBTOPCIE_MB_FUNC2_SHIFT 12
+#define SBTOPCIE_MB_FUNC3_SHIFT 14
+
+/* pcieiocstatus */
+#define PCIEGEN2_IOC_D0_STATE_SHIFT		8
+#define PCIEGEN2_IOC_D1_STATE_SHIFT		9
+#define PCIEGEN2_IOC_D2_STATE_SHIFT		10
+#define PCIEGEN2_IOC_D3_STATE_SHIFT		11
+#define PCIEGEN2_IOC_L0_LINK_SHIFT		12
+#define PCIEGEN2_IOC_L1_LINK_SHIFT		13
+#define PCIEGEN2_IOC_L1L2_LINK_SHIFT		14
+#define PCIEGEN2_IOC_L2_L3_LINK_SHIFT		15
+
+#define PCIEGEN2_IOC_D0_STATE_MASK		(1 << PCIEGEN2_IOC_D0_STATE_SHIFT)
+#define PCIEGEN2_IOC_D1_STATE_MASK		(1 << PCIEGEN2_IOC_D1_STATE_SHIFT)
+#define PCIEGEN2_IOC_D2_STATE_MASK		(1 << PCIEGEN2_IOC_D2_STATE_SHIFT)
+#define PCIEGEN2_IOC_D3_STATE_MASK		(1 << PCIEGEN2_IOC_D3_STATE_SHIFT)
+#define PCIEGEN2_IOC_L0_LINK_MASK		(1 << PCIEGEN2_IOC_L0_LINK_SHIFT)
+#define PCIEGEN2_IOC_L1_LINK_MASK		(1 << PCIEGEN2_IOC_L1_LINK_SHIFT)
+#define PCIEGEN2_IOC_L1L2_LINK_MASK		(1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT)
+#define PCIEGEN2_IOC_L2_L3_LINK_MASK		(1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT)
+
+/* stat_ctrl */
+#define PCIE_STAT_CTRL_RESET		0x1
+#define PCIE_STAT_CTRL_ENABLE		0x2
+#define PCIE_STAT_CTRL_INTENABLE	0x4
+#define PCIE_STAT_CTRL_INTSTATUS	0x8
+
+#ifdef BCMDRIVER
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs);
+#endif /* BCMDRIVER */
+
+#endif	/* _PCIE_CORE_H */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11.h b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b900145f26d0d34540f62c0ee72cfe752f9862a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
@@ -0,0 +1,3870 @@
+/*
+ * $Copyright Open Broadcom Corporation$
+ *
+ * Fundamental types and constants relating to 802.11
+ *
+ * $Id: 802.11.h 469158 2014-04-09 21:31:31Z $
+ */
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <proto/ethernet.h>
+#endif
+
+#include <proto/wpa.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+#define DOT11_TU_TO_US			1024	/* 802.11 Time Unit is 1024 microseconds */
+
+/* Generic 802.11 frame constants */
+#define DOT11_A3_HDR_LEN		24	/* d11 header length with A3 */
+#define DOT11_A4_HDR_LEN		30	/* d11 header length with A4 */
+#define DOT11_MAC_HDR_LEN		DOT11_A3_HDR_LEN	/* MAC header length */
+#define DOT11_FCS_LEN			4	/* d11 FCS length */
+#define DOT11_ICV_LEN			4	/* d11 ICV length */
+#define DOT11_ICV_AES_LEN		8	/* d11 ICV/AES length */
+#define DOT11_QOS_LEN			2	/* d11 QoS length */
+#define DOT11_HTC_LEN			4	/* d11 HT Control field length */
+
+#define DOT11_KEY_INDEX_SHIFT		6	/* d11 key index shift */
+#define DOT11_IV_LEN			4	/* d11 IV length */
+#define DOT11_IV_TKIP_LEN		8	/* d11 IV TKIP length */
+#define DOT11_IV_AES_OCB_LEN		4	/* d11 IV/AES/OCB length */
+#define DOT11_IV_AES_CCM_LEN		8	/* d11 IV/AES/CCM length */
+#define DOT11_IV_MAX_LEN		8	/* maximum iv len for any encryption */
+
+/* Includes MIC */
+#define DOT11_MAX_MPDU_BODY_LEN		2304	/* max MPDU body length */
+/* A4 header + QoS + CCMP + PDU + ICV + FCS = 2352 */
+#define DOT11_MAX_MPDU_LEN		(DOT11_A4_HDR_LEN + \
+					 DOT11_QOS_LEN + \
+					 DOT11_IV_AES_CCM_LEN + \
+					 DOT11_MAX_MPDU_BODY_LEN + \
+					 DOT11_ICV_LEN + \
+					 DOT11_FCS_LEN)	/* d11 max MPDU length */
+
+#define DOT11_MAX_SSID_LEN		32	/* d11 max ssid length */
+
+/* dot11RTSThreshold */
+#define DOT11_DEFAULT_RTS_LEN		2347	/* d11 default RTS length */
+#define DOT11_MAX_RTS_LEN		2347	/* d11 max RTS length */
+
+/* dot11FragmentationThreshold */
+#define DOT11_MIN_FRAG_LEN		256	/* d11 min fragmentation length */
+#define DOT11_MAX_FRAG_LEN		2346	/* Max frag is also limited by aMPDUMaxLength
+						* of the attached PHY
+						*/
+#define DOT11_DEFAULT_FRAG_LEN		2346	/* d11 default fragmentation length */
+
+/* dot11BeaconPeriod */
+#define DOT11_MIN_BEACON_PERIOD		1	/* d11 min beacon period */
+#define DOT11_MAX_BEACON_PERIOD		0xFFFF	/* d11 max beacon period */
+
+/* dot11DTIMPeriod */
+#define DOT11_MIN_DTIM_PERIOD		1	/* d11 min DTIM period */
+#define DOT11_MAX_DTIM_PERIOD		0xFF	/* d11 max DTIM period */
+
+/** 802.2 LLC/SNAP header used by 802.11 per 802.1H */
+#define DOT11_LLC_SNAP_HDR_LEN		8	/* d11 LLC/SNAP header length */
+#define DOT11_OUI_LEN			3	/* d11 OUI length */
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[DOT11_OUI_LEN];		/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	type;				/* ethertype */
+} BWL_POST_PACKED_STRUCT;
+
+/* RFC1042 header used by 802.11 per 802.1H */
+#define RFC1042_HDR_LEN	(ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)	/* RFC1042 header length */
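A minimal sketch of RFC1042 encapsulation using the structure above; hton16 is assumed to be the byte-order helper available elsewhere in this codebase, and the Bridge-Tunnel OUI would be used instead for the ethertypes 802.1H singles out.

/* Illustrative sketch: fill an RFC1042 LLC/SNAP header for an ethertype. */
static void llc_snap_fill_rfc1042(struct dot11_llc_snap_header *h, uint16 ethertype)
{
	h->dsap = 0xaa;
	h->ssap = 0xaa;
	h->ctl  = 0x03;
	h->oui[0] = 0x00;
	h->oui[1] = 0x00;
	h->oui[2] = 0x00;			/* 0x00 0x00 0xF8 for Bridge-Tunnel */
	h->type = hton16(ethertype);		/* ethertype in network order */
}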
+
+/* Generic 802.11 MAC header */
+/**
+ * N.B.: This struct reflects the full 4 address 802.11 MAC header.
+ *		 The fields are defined such that the shorter 1, 2, and 3
+ *		 address headers just use the first k fields.
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	a1;		/* address 1 */
+	struct ether_addr	a2;		/* address 2 */
+	struct ether_addr	a3;		/* address 3 */
+	uint16			seq;		/* sequence control */
+	struct ether_addr	a4;		/* address 4 */
+} BWL_POST_PACKED_STRUCT;
+
+/* Control frames */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_RTS_LEN		16		/* d11 RTS frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_CTS_LEN		10		/* d11 CTS frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_ACK_LEN		10		/* d11 ACK frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* AID */
+	struct ether_addr	bssid;		/* receiver address, STA in AP */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_PS_POLL_LEN	16		/* d11 PS poll frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	bssid;		/* transmitter address, STA in AP */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_CS_END_LEN	16		/* d11 CF-END frame length */
+
+/**
+ * RWL wifi protocol: The Vendor Specific Action frame is defined for vendor-specific signaling
+ *  category+OUI+vendor specific content ( this can be variable)
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
+	uint8	category;
+	uint8	OUI[3];
+	uint8	type;
+	uint8	subtype;
+	uint8	data[1040];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
+
+/** generic vendor specific action frame with variable length */
+BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr {
+	uint8	category;
+	uint8	OUI[3];
+	uint8	type;
+	uint8	subtype;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t;
+
+#define DOT11_ACTION_VS_HDR_LEN	6
+
+#define BCM_ACTION_OUI_BYTE0	0x00
+#define BCM_ACTION_OUI_BYTE1	0x90
+#define BCM_ACTION_OUI_BYTE2	0x4c
+
+/* BA/BAR Control parameters */
+#define DOT11_BA_CTL_POLICY_NORMAL	0x0000	/* normal ack */
+#define DOT11_BA_CTL_POLICY_NOACK	0x0001	/* no ack */
+#define DOT11_BA_CTL_POLICY_MASK	0x0001	/* ack policy mask */
+
+#define DOT11_BA_CTL_MTID		0x0002	/* multi tid BA */
+#define DOT11_BA_CTL_COMPRESSED		0x0004	/* compressed bitmap */
+
+#define DOT11_BA_CTL_NUMMSDU_MASK	0x0FC0	/* num msdu in bitmap mask */
+#define DOT11_BA_CTL_NUMMSDU_SHIFT	6	/* num msdu in bitmap shift */
+
+#define DOT11_BA_CTL_TID_MASK		0xF000	/* tid mask */
+#define DOT11_BA_CTL_TID_SHIFT		12	/* tid shift */
+
+/** control frame header (BA/BAR) */
+BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTL_HDR_LEN	16		/* control frame hdr len */
+
+/** BAR frame payload */
+BWL_PRE_PACKED_STRUCT struct dot11_bar {
+	uint16			bar_control;	/* BAR Control */
+	uint16			seqnum;		/* Starting Sequence control */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BAR_LEN		4		/* BAR frame payload length */
+
+#define DOT11_BA_BITMAP_LEN	128		/* bitmap length */
+#define DOT11_BA_CMP_BITMAP_LEN	8		/* compressed bitmap length */
+/** BA frame payload */
+BWL_PRE_PACKED_STRUCT struct dot11_ba {
+	uint16			ba_control;	/* BA Control */
+	uint16			seqnum;		/* Starting Sequence control */
+	uint8			bitmap[DOT11_BA_BITMAP_LEN];	/* Block Ack Bitmap */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BA_LEN		4		/* BA frame payload len (wo bitmap) */
+
+/** Management frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_management_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	da;		/* receiver address */
+	struct ether_addr	sa;		/* transmitter address */
+	struct ether_addr	bssid;		/* BSS ID */
+	uint16			seq;		/* sequence control */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_MGMT_HDR_LEN	24		/* d11 management header length */
+
+/* Management frame payloads */
+
+BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
+	uint32			timestamp[2];
+	uint16			beacon_interval;
+	uint16			capability;
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_BCN_PRB_LEN	12		/* 802.11 beacon/probe frame fixed length */
+#define	DOT11_BCN_PRB_FIXED_LEN	12		/* 802.11 beacon/probe frame fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_auth {
+	uint16			alg;		/* algorithm */
+	uint16			seq;		/* sequence control */
+	uint16			status;		/* status code */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_AUTH_FIXED_LEN	6		/* length of auth frame without challenge IE */
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
+	uint16			capability;	/* capability information */
+	uint16			listen;		/* listen interval */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_REQ_FIXED_LEN	4	/* length of assoc frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
+	uint16			capability;	/* capability information */
+	uint16			listen;		/* listen interval */
+	struct ether_addr	ap;		/* Current AP address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_REASSOC_REQ_FIXED_LEN	10	/* length of assoc frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
+	uint16			capability;	/* capability information */
+	uint16			status;		/* status code */
+	uint16			aid;		/* association ID */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_RESP_FIXED_LEN	6	/* length of assoc resp frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
+	uint8	category;
+	uint8	action;
+	uint8	token;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_MEASURE_LEN	3	/* d11 action measurement header length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
+	uint8	category;
+	uint8	action;
+	uint8	ch_width;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops {
+	uint8	category;
+	uint8	action;
+	uint8	control;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query {
+	uint8	category;
+	uint8	action;
+	uint16	id;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_vht_oper_mode {
+	uint8	category;
+	uint8	action;
+	uint8	mode;
+} BWL_POST_PACKED_STRUCT;
+
+#define SM_PWRSAVE_ENABLE	1
+#define SM_PWRSAVE_MODE		2
+
+/* ************* 802.11h related definitions. ************* */
+BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
+	uint8 id;
+	uint8 len;
+	uint8 power;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cnst dot11_power_cnst_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
+	int8 min;
+	int8 max;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cap dot11_power_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep {
+	uint8 id;
+	uint8 len;
+	uint8 tx_pwr;
+	uint8 margin;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_LEN	2 	/* length of IE data, not including 2 byte header */
+
+BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
+	uint8 id;
+	uint8 len;
+	uint8 first_channel;
+	uint8 num_channels;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_supp_channels dot11_supp_channels_t;
+
+/**
+ * Extension Channel Offset IE: 802.11n-D1.0 spec. added sideband
+ * offset for 40MHz operation.  The possible 3 values are:
+ * 1 = above control channel
+ * 3 = below control channel
+ * 0 = no extension channel
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_extch {
+	uint8	id;		/* IE ID, 62, DOT11_MNG_EXT_CHANNEL_OFFSET */
+	uint8	len;		/* IE length */
+	uint8	extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extch dot11_extch_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	type;           /* type indicates what follows */
+	uint8	extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
+
+#define BRCM_EXTCH_IE_LEN	5
+#define BRCM_EXTCH_IE_TYPE	53	/* 802.11n ID not yet assigned */
+#define DOT11_EXTCH_IE_LEN	1
+#define DOT11_EXT_CH_MASK	0x03	/* extension channel mask */
+#define DOT11_EXT_CH_UPPER	0x01	/* ext. ch. on upper sb */
+#define DOT11_EXT_CH_LOWER	0x03	/* ext. ch. on lower sb */
+#define DOT11_EXT_CH_NONE	0x00	/* no extension ch.  */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+	uint8	category;
+	uint8	action;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_FRMHDR_LEN	2
+
+/** CSA IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+	uint8 id;	/* id DOT11_MNG_CHANNEL_SWITCH_ID */
+	uint8 len;	/* length of IE */
+	uint8 mode;	/* mode 0 or 1 */
+	uint8 channel;	/* channel switch to */
+	uint8 count;	/* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN	3	/* length of IE data, not including 2 byte header */
+/* CSA mode - 802.11h-2003 $7.3.2.20 */
+#define DOT11_CSA_MODE_ADVISORY		0	/* no DOT11_CSA_MODE_NO_TX restriction imposed */
+#define DOT11_CSA_MODE_NO_TX		1	/* no transmission upon receiving CSA frame. */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+	uint8	category;
+	uint8	action;
+	dot11_chan_switch_ie_t chan_switch_ie;	/* for switch IE */
+	dot11_brcm_extch_ie_t extch_ie;		/* extension channel offset */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+	uint8 mode;	/* mode 0 or 1 */
+	uint8 reg;	/* regulatory class */
+	uint8 channel;	/* channel switch to */
+	uint8 count;	/* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+
+/** 11n Extended Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+	uint8 id;	/* id DOT11_MNG_EXT_CHANNEL_SWITCH_ID */
+	uint8 len;	/* length of IE */
+	struct dot11_csa_body b;	/* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN	4	/* length of extended channel switch IE body */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+	uint8	category;
+	uint8	action;
+	dot11_ext_csa_ie_t chan_switch_ie;	/* for switch IE */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+	uint8	category;
+	uint8	action;
+	struct dot11_csa_body b;	/* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+
+/**  Wide Bandwidth Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel_switch {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	uint8 channel_width;			/* new channel width */
+	uint8 center_frequency_segment_0;	/* center frequency segment 0 */
+	uint8 center_frequency_segment_1;	/* center frequency segment 1 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wide_bw_channel_switch dot11_wide_bw_chan_switch_ie_t;
+
+#define DOT11_WIDE_BW_SWITCH_IE_LEN     3       /* length of IE data, not including 2 byte header */
+
+/** Channel Switch Wrapper IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch_wrapper {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	dot11_wide_bw_chan_switch_ie_t wb_chan_switch_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch_wrapper dot11_chan_switch_wrapper_ie_t;
+
+/** VHT Transmit Power Envelope IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_vht_transmit_power_envelope {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	uint8 transmit_power_info;
+	uint8 local_max_transmit_power_20;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_vht_transmit_power_envelope dot11_vht_transmit_power_envelope_ie_t;
+
+/* vht transmit power envelope IE length depends on channel width */
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_40MHZ	1
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_80MHZ	2
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_160MHZ	3
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+	uint8	id;
+	uint8	len;
+	uint8	info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN	1	/* length of OBSS Coexistence INFO IE */
+
+#define	DOT11_OBSS_COEX_INFO_REQ		0x01
+#define	DOT11_OBSS_COEX_40MHZ_INTOLERANT	0x02
+#define	DOT11_OBSS_COEX_20MHZ_WIDTH_REQ	0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+	uint8	id;
+	uint8	len;
+	uint8	regclass;
+	uint8	chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN	1	/* fixed length of regclass */
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+	uint8 id;
+	uint8 len;
+	uint8 cap[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+
+#define DOT11_EXTCAP_LEN_MAX	8
+
+#define DOT11_EXTCAP_LEN_COEX	1
+#define DOT11_EXTCAP_LEN_BT	3
+#define DOT11_EXTCAP_LEN_IW	4
+#define DOT11_EXTCAP_LEN_SI	6
+
+#define DOT11_EXTCAP_LEN_TDLS	5
+#define DOT11_11AC_EXTCAP_LEN_TDLS	8
+
+#define DOT11_EXTCAP_LEN_FMS			2
+#define DOT11_EXTCAP_LEN_PROXY_ARP		2
+#define DOT11_EXTCAP_LEN_TFS			3
+#define DOT11_EXTCAP_LEN_WNM_SLEEP		3
+#define DOT11_EXTCAP_LEN_TIMBC			3
+#define DOT11_EXTCAP_LEN_BSSTRANS		3
+#define DOT11_EXTCAP_LEN_DMS			4
+#define DOT11_EXTCAP_LEN_WNM_NOTIFICATION	6
+#define DOT11_EXTCAP_LEN_TDLS_WBW		8
+#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION	8
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap {
+	uint8 extcap[DOT11_EXTCAP_LEN_MAX];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap dot11_extcap_t;
+
+/* TDLS Capabilities */
+#define DOT11_TDLS_CAP_TDLS			37		/* TDLS support */
+#define DOT11_TDLS_CAP_PU_BUFFER_STA	28		/* TDLS Peer U-APSD buffer STA support */
+#define DOT11_TDLS_CAP_PEER_PSM		20		/* TDLS Peer PSM support */
+#define DOT11_TDLS_CAP_CH_SW			30		/* TDLS Channel switch */
+#define DOT11_TDLS_CAP_PROH			38		/* TDLS prohibited */
+#define DOT11_TDLS_CAP_CH_SW_PROH		39		/* TDLS Channel switch prohibited */
+#define DOT11_TDLS_CAP_TDLS_WIDER_BW	61	/* TDLS Wider Band-Width */
+
+#define TDLS_CAP_MAX_BIT		39		/* TDLS max bit defined in ext cap */
+
+/* 802.11h/802.11k Measurement Request/Report IEs */
+/* Measurement Type field */
+#define DOT11_MEASURE_TYPE_BASIC 	0	/* d11 measurement basic type */
+#define DOT11_MEASURE_TYPE_CCA 		1	/* d11 measurement CCA type */
+#define DOT11_MEASURE_TYPE_RPI		2	/* d11 measurement RPI type */
+#define DOT11_MEASURE_TYPE_CHLOAD		3	/* d11 measurement Channel Load type */
+#define DOT11_MEASURE_TYPE_NOISE		4	/* d11 measurement Noise Histogram type */
+#define DOT11_MEASURE_TYPE_BEACON		5	/* d11 measurement Beacon type */
+#define DOT11_MEASURE_TYPE_FRAME	6	/* d11 measurement Frame type */
+#define DOT11_MEASURE_TYPE_STAT		7	/* d11 measurement STA Statistics type */
+#define DOT11_MEASURE_TYPE_LCI		8	/* d11 measurement LCI type */
+#define DOT11_MEASURE_TYPE_TXSTREAM		9	/* d11 measurement TX Stream type */
+#define DOT11_MEASURE_TYPE_PAUSE		255	/* d11 measurement pause type */
+
+/* Measurement Request Modes */
+#define DOT11_MEASURE_MODE_PARALLEL 	(1<<0)	/* d11 measurement parallel */
+#define DOT11_MEASURE_MODE_ENABLE 	(1<<1)	/* d11 measurement enable */
+#define DOT11_MEASURE_MODE_REQUEST	(1<<2)	/* d11 measurement request */
+#define DOT11_MEASURE_MODE_REPORT 	(1<<3)	/* d11 measurement report */
+#define DOT11_MEASURE_MODE_DUR 	(1<<4)	/* d11 measurement dur mandatory */
+/* Measurement Report Modes */
+#define DOT11_MEASURE_MODE_LATE 	(1<<0)	/* d11 measurement late */
+#define DOT11_MEASURE_MODE_INCAPABLE	(1<<1)	/* d11 measurement incapable */
+#define DOT11_MEASURE_MODE_REFUSED	(1<<2)	/* d11 measurement refuse */
+/* Basic Measurement Map bits */
+#define DOT11_MEASURE_BASIC_MAP_BSS	((uint8)(1<<0))	/* d11 measurement basic map BSS */
+#define DOT11_MEASURE_BASIC_MAP_OFDM	((uint8)(1<<1))	/* d11 measurement map OFDM */
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN	((uint8)(1<<2))	/* d11 measurement map unknown */
+#define DOT11_MEASURE_BASIC_MAP_RADAR	((uint8)(1<<3))	/* d11 measurement map radar */
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS	((uint8)(1<<4))	/* d11 measurement map unmeasured */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14	/* d11 measurement request IE length */
+/* length of Measure Request IE data not including variable len */
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3	/* d11 measurement request IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	BWL_PRE_PACKED_STRUCT union
+	{
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 channel;
+			uint8 start_time[8];
+			uint16 duration;
+			uint8 map;
+		} BWL_POST_PACKED_STRUCT basic;
+		uint8 data[1];
+	} BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+
+/* length of Measure Report IE data not including variable len */
+#define DOT11_MNG_IE_MREP_FIXED_LEN	3	/* d11 measurement response IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN	12	/* d11 measurement basic report length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+	uint8 id;
+	uint8 len;
+	uint8 count;	/* TBTTs until the beacon interval in which the quiet period starts */
+	uint8 period;	/* Beacon intervals between periodic quiet periods (0 = not periodic) */
+	uint16 duration;	/* Length of the quiet period, in TUs */
+	uint16 offset;	/* Offset in TUs from the TBTT specified by the count field */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+	uint8 channel;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+	uint8 id;
+	uint8 len;
+	uint8 eaddr[ETHER_ADDR_LEN];
+	uint8 interval;
+	chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+/* WME Elements */
+#define WME_OUI			"\x00\x50\xf2"	/* WME OUI */
+#define WME_OUI_LEN		3
+#define WME_OUI_TYPE		2	/* WME type */
+#define WME_TYPE		2	/* WME type, deprecated */
+#define WME_SUBTYPE_IE		0	/* Information Element */
+#define WME_SUBTYPE_PARAM_IE	1	/* Parameter Element */
+#define WME_SUBTYPE_TSPEC	2	/* Traffic Specification */
+#define WME_VER			1	/* WME version */
+
+/* WME Access Category Indices (ACIs) */
+#define AC_BE			0	/* Best Effort */
+#define AC_BK			1	/* Background */
+#define AC_VI			2	/* Video */
+#define AC_VO			3	/* Voice */
+#define AC_COUNT		4	/* number of ACs */
+
+typedef uint8 ac_bitmap_t;	/* AC bitmap of (1 << AC_xx) */
+
+#define AC_BITMAP_NONE		0x0	/* No ACs */
+#define AC_BITMAP_ALL		0xf	/* All ACs */
+#define AC_BITMAP_TST(ab, ac)	(((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac)	(((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
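+/* Usage sketch (illustrative only) for the AC bitmap helpers above:
+ *	ac_bitmap_t ab = AC_BITMAP_NONE;
+ *	AC_BITMAP_SET(ab, AC_VI);		ab is now 0x4
+ *	AC_BITMAP_TST(ab, AC_VI);		non-zero (set)
+ *	AC_BITMAP_RESET(ab, AC_VI);		ab back to AC_BITMAP_NONE
+ */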
+
+/** WME Information Element (IE) */
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7	/* WME IE length */
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+	uint8	ACI;
+	uint8	ECW;
+	uint16  TXOP;		/* stored in network order (ls octet first) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+/** WME Parameter Element (PE) */
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN            24          /* WME Parameter IE length */
+
+/* QoS Info field for IE as sent from AP */
+#define WME_QI_AP_APSD_MASK         0x80        /* U-APSD Supported mask */
+#define WME_QI_AP_APSD_SHIFT        7           /* U-APSD Supported shift */
+#define WME_QI_AP_COUNT_MASK        0x0f        /* Parameter set count mask */
+#define WME_QI_AP_COUNT_SHIFT       0           /* Parameter set count shift */
+
+/* QoS Info field for IE as sent from STA */
+#define WME_QI_STA_MAXSPLEN_MASK    0x60        /* Max Service Period Length mask */
+#define WME_QI_STA_MAXSPLEN_SHIFT   5           /* Max Service Period Length shift */
+#define WME_QI_STA_APSD_ALL_MASK    0xf         /* APSD all AC bits mask */
+#define WME_QI_STA_APSD_ALL_SHIFT   0           /* APSD all AC bits shift */
+#define WME_QI_STA_APSD_BE_MASK     0x8         /* APSD AC_BE mask */
+#define WME_QI_STA_APSD_BE_SHIFT    3           /* APSD AC_BE shift */
+#define WME_QI_STA_APSD_BK_MASK     0x4         /* APSD AC_BK mask */
+#define WME_QI_STA_APSD_BK_SHIFT    2           /* APSD AC_BK shift */
+#define WME_QI_STA_APSD_VI_MASK     0x2         /* APSD AC_VI mask */
+#define WME_QI_STA_APSD_VI_SHIFT    1           /* APSD AC_VI shift */
+#define WME_QI_STA_APSD_VO_MASK     0x1         /* APSD AC_VO mask */
+#define WME_QI_STA_APSD_VO_SHIFT    0           /* APSD AC_VO shift */
+
+/* ACI */
+#define EDCF_AIFSN_MIN               1           /* AIFSN minimum value */
+#define EDCF_AIFSN_MAX               15          /* AIFSN maximum value */
+#define EDCF_AIFSN_MASK              0x0f        /* AIFSN mask */
+#define EDCF_ACM_MASK                0x10        /* ACM mask */
+#define EDCF_ACI_MASK                0x60        /* ACI mask */
+#define EDCF_ACI_SHIFT               5           /* ACI shift */
+#define EDCF_AIFSN_SHIFT             12          /* 4 MSB(0xFFF) in ifs_ctl for AC idx */
+
+/* ECW */
+#define EDCF_ECW_MIN                 0           /* cwmin/cwmax exponent minimum value */
+#define EDCF_ECW_MAX                 15          /* cwmin/cwmax exponent maximum value */
+#define EDCF_ECW2CW(exp)             ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK             0x0f        /* cwmin exponent form mask */
+#define EDCF_ECWMAX_MASK             0xf0        /* cwmax exponent form mask */
+#define EDCF_ECWMAX_SHIFT            4           /* cwmax exponent form shift */
+
+/* TXOP */
+#define EDCF_TXOP_MIN                0           /* TXOP minimum value */
+#define EDCF_TXOP_MAX                65535       /* TXOP maximum value */
+#define EDCF_TXOP2USEC(txop)         ((txop) << 5)
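+/* Worked example (illustrative values) for the ECW/TXOP helpers above:
+ *	EDCF_ECW2CW(4)  == 15		exponent 4 -> contention window of 15 slots
+ *	EDCF_ECW2CW(10) == 1023
+ *	EDCF_TXOP2USEC(0x5e) == 3008	TXOP is carried in 32us units
+ */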
+
+/* Default BE ACI value for non-WME connection STA */
+#define NON_EDCF_AC_BE_ACI_STA          0x02
+
+/* Default EDCF parameters that AP advertises for STA to use; WMM draft Table 12 */
+#define EDCF_AC_BE_ACI_STA           0x03	/* STA ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_STA           0xA4	/* STA ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_STA          0x0000	/* STA TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_STA           0x27	/* STA ACI value for background AC */
+#define EDCF_AC_BK_ECW_STA           0xA4	/* STA ECW value for background AC */
+#define EDCF_AC_BK_TXOP_STA          0x0000	/* STA TXOP value for background AC */
+#define EDCF_AC_VI_ACI_STA           0x42	/* STA ACI value for video AC */
+#define EDCF_AC_VI_ECW_STA           0x43	/* STA ECW value for video AC */
+#define EDCF_AC_VI_TXOP_STA          0x005e	/* STA TXOP value for video AC */
+#define EDCF_AC_VO_ACI_STA           0x62	/* STA ACI value for audio AC */
+#define EDCF_AC_VO_ECW_STA           0x32	/* STA ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_STA          0x002f	/* STA TXOP value for audio AC */
+
+/* Default EDCF parameters that AP uses; WMM draft Table 14 */
+#define EDCF_AC_BE_ACI_AP            0x03	/* AP ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_AP            0x64	/* AP ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_AP           0x0000	/* AP TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_AP            0x27	/* AP ACI value for background AC */
+#define EDCF_AC_BK_ECW_AP            0xA4	/* AP ECW value for background AC */
+#define EDCF_AC_BK_TXOP_AP           0x0000	/* AP TXOP value for background AC */
+#define EDCF_AC_VI_ACI_AP            0x41	/* AP ACI value for video AC */
+#define EDCF_AC_VI_ECW_AP            0x43	/* AP ECW value for video AC */
+#define EDCF_AC_VI_TXOP_AP           0x005e	/* AP TXOP value for video AC */
+#define EDCF_AC_VO_ACI_AP            0x61	/* AP ACI value for audio AC */
+#define EDCF_AC_VO_ECW_AP            0x32	/* AP ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_AP           0x002f	/* AP TXOP value for audio AC */
+
+/** EDCA Parameter IE */
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN            18          /* EDCA Parameter IE length */
+
+/** QoS Capability IE */
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+	uint8 id; 			/* 11, DOT11_MNG_QBSS_LOAD_ID */
+	uint8 length;
+	uint16 station_count; 		/* total number of STAs associated */
+	uint8 channel_utilization;	/* fraction of time (normalized to 255) the QAP sensed the medium busy */
+	uint16 aac; 			/* available admission capacity */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+#define BSS_LOAD_IE_SIZE 	7	/* BSS load IE size */
+
+#define WLC_QBSS_LOAD_CHAN_FREE_MAX	0xff	/* max for channel free score */
+
+/* nom_msdu_size */
+#define FIXED_MSDU_SIZE 0x8000		/* MSDU size is fixed */
+#define MSDU_SIZE_MASK	0x7fff		/* (Nominal or fixed) MSDU size */
+
+/* surplus_bandwidth */
+/* Represented as 3 bits of integer, binary point, 13 bits fraction */
+#define	INTEGER_SHIFT	13	/* integer shift */
+#define FRACTION_MASK	0x1FFF	/* fraction mask */
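+/* Worked example (illustrative): a surplus bandwidth allowance of 0x3000 decodes
+ * as integer part (0x3000 >> INTEGER_SHIFT) == 1 and fraction
+ * (0x3000 & FRACTION_MASK) == 0x1000 == 4096/8192 == 0.5, i.e. a factor of 1.5.
+ * A nom_msdu_size of (FIXED_MSDU_SIZE | 200) declares a fixed 200-octet MSDU.
+ */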
+
+/** Management Notification Frame */
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+	uint8 category;			/* DOT11_ACTION_NOTIFICATION */
+	uint8 action;
+	uint8 token;
+	uint8 status;
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4	/* Fixed length */
+
+/** Timeout Interval IE */
+BWL_PRE_PACKED_STRUCT struct ti_ie {
+	uint8 ti_type;
+	uint32 ti_val;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ti_ie ti_ie_t;
+#define TI_TYPE_REASSOC_DEADLINE	1
+#define TI_TYPE_KEY_LIFETIME		2
+
+/* WME Action Codes */
+#define WME_ADDTS_REQUEST	0	/* WME ADDTS request */
+#define WME_ADDTS_RESPONSE	1	/* WME ADDTS response */
+#define WME_DELTS_REQUEST	2	/* WME DELTS request */
+
+/* WME Setup Response Status Codes */
+#define WME_ADMISSION_ACCEPTED		0	/* WME admission accepted */
+#define WME_INVALID_PARAMETERS		1	/* WME invalid parameters */
+#define WME_ADMISSION_REFUSED		3	/* WME admission refused */
+
+/* Macro to take a pointer to a beacon or probe response
+ * body and return the char* pointer to the SSID info element
+ */
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
+
+/* Authentication frame payload constants */
+#define DOT11_OPEN_SYSTEM	0	/* d11 open authentication */
+#define DOT11_SHARED_KEY	1	/* d11 shared authentication */
+#define DOT11_FAST_BSS		2	/* d11 fast bss authentication */
+#define DOT11_CHALLENGE_LEN	128	/* d11 challenge text length */
+
+/* Frame control macros */
+#define FC_PVER_MASK		0x3	/* PVER mask */
+#define FC_PVER_SHIFT		0	/* PVER shift */
+#define FC_TYPE_MASK		0xC	/* type mask */
+#define FC_TYPE_SHIFT		2	/* type shift */
+#define FC_SUBTYPE_MASK		0xF0	/* subtype mask */
+#define FC_SUBTYPE_SHIFT	4	/* subtype shift */
+#define FC_TODS			0x100	/* to DS */
+#define FC_TODS_SHIFT		8	/* to DS shift */
+#define FC_FROMDS		0x200	/* from DS */
+#define FC_FROMDS_SHIFT		9	/* from DS shift */
+#define FC_MOREFRAG		0x400	/* more frag. */
+#define FC_MOREFRAG_SHIFT	10	/* more frag. shift */
+#define FC_RETRY		0x800	/* retry */
+#define FC_RETRY_SHIFT		11	/* retry shift */
+#define FC_PM			0x1000	/* PM */
+#define FC_PM_SHIFT		12	/* PM shift */
+#define FC_MOREDATA		0x2000	/* more data */
+#define FC_MOREDATA_SHIFT	13	/* more data shift */
+#define FC_WEP			0x4000	/* WEP */
+#define FC_WEP_SHIFT		14	/* WEP shift */
+#define FC_ORDER		0x8000	/* order */
+#define FC_ORDER_SHIFT		15	/* order shift */
+
+/* sequence control macros */
+#define SEQNUM_SHIFT		4	/* seq. number shift */
+#define SEQNUM_MAX		0x1000	/* max seqnum + 1 */
+#define FRAGNUM_MASK		0xF	/* frag. number mask */
+
+/* Frame Control type/subtype defs */
+
+/* FC Types */
+#define FC_TYPE_MNG		0	/* management type */
+#define FC_TYPE_CTL		1	/* control type */
+#define FC_TYPE_DATA		2	/* data type */
+
+/* Management Subtypes */
+#define FC_SUBTYPE_ASSOC_REQ		0	/* assoc. request */
+#define FC_SUBTYPE_ASSOC_RESP		1	/* assoc. response */
+#define FC_SUBTYPE_REASSOC_REQ		2	/* reassoc. request */
+#define FC_SUBTYPE_REASSOC_RESP		3	/* reassoc. response */
+#define FC_SUBTYPE_PROBE_REQ		4	/* probe request */
+#define FC_SUBTYPE_PROBE_RESP		5	/* probe response */
+#define FC_SUBTYPE_BEACON		8	/* beacon */
+#define FC_SUBTYPE_ATIM			9	/* ATIM */
+#define FC_SUBTYPE_DISASSOC		10	/* disassoc. */
+#define FC_SUBTYPE_AUTH			11	/* authentication */
+#define FC_SUBTYPE_DEAUTH		12	/* de-authentication */
+#define FC_SUBTYPE_ACTION		13	/* action */
+#define FC_SUBTYPE_ACTION_NOACK		14	/* action no-ack */
+
+/* Control Subtypes */
+#define FC_SUBTYPE_CTL_WRAPPER		7	/* Control Wrapper */
+#define FC_SUBTYPE_BLOCKACK_REQ		8	/* Block Ack Req */
+#define FC_SUBTYPE_BLOCKACK		9	/* Block Ack */
+#define FC_SUBTYPE_PS_POLL		10	/* PS poll */
+#define FC_SUBTYPE_RTS			11	/* RTS */
+#define FC_SUBTYPE_CTS			12	/* CTS */
+#define FC_SUBTYPE_ACK			13	/* ACK */
+#define FC_SUBTYPE_CF_END		14	/* CF-END */
+#define FC_SUBTYPE_CF_END_ACK		15	/* CF-END ACK */
+
+/* Data Subtypes */
+#define FC_SUBTYPE_DATA			0	/* Data */
+#define FC_SUBTYPE_DATA_CF_ACK		1	/* Data + CF-ACK */
+#define FC_SUBTYPE_DATA_CF_POLL		2	/* Data + CF-Poll */
+#define FC_SUBTYPE_DATA_CF_ACK_POLL	3	/* Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_NULL			4	/* Null */
+#define FC_SUBTYPE_CF_ACK		5	/* CF-Ack */
+#define FC_SUBTYPE_CF_POLL		6	/* CF-Poll */
+#define FC_SUBTYPE_CF_ACK_POLL		7	/* CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA		8	/* QoS Data */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK	9	/* QoS Data + CF-Ack */
+#define FC_SUBTYPE_QOS_DATA_CF_POLL	10	/* QoS Data + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL	11	/* QoS Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_NULL		12	/* QoS Null */
+#define FC_SUBTYPE_QOS_CF_POLL		14	/* QoS CF-Poll */
+#define FC_SUBTYPE_QOS_CF_ACK_POLL	15	/* QoS CF-Ack + CF-Poll */
+
+/* Data Subtype Groups */
+#define FC_SUBTYPE_ANY_QOS(s)		(((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s)		(((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s)	(((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s)	(((s) & 1) != 0)
+#define FC_SUBTYPE_ANY_PSPOLL(s)	(((s) & 10) != 0)
+
+/* Type/Subtype Combos */
+#define FC_KIND_MASK		(FC_TYPE_MASK | FC_SUBTYPE_MASK)	/* FC kind mask */
+
+#define FC_KIND(t, s)	(((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))	/* FC kind */
+
+#define FC_SUBTYPE(fc)	(((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)	/* Subtype from FC */
+#define FC_TYPE(fc)	(((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)	/* Type from FC */
+
+#define FC_ASSOC_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ)	/* assoc. request */
+#define FC_ASSOC_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP)	/* assoc. response */
+#define FC_REASSOC_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ)	/* reassoc. request */
+#define FC_REASSOC_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP)	/* reassoc. response */
+#define FC_PROBE_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ)	/* probe request */
+#define FC_PROBE_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP)	/* probe response */
+#define FC_BEACON	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON)		/* beacon */
+#define FC_ATIM		FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ATIM)		/* ATIM */
+#define FC_DISASSOC	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC)	/* disassoc */
+#define FC_AUTH		FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH)		/* authentication */
+#define FC_DEAUTH	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH)		/* deauthentication */
+#define FC_ACTION	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION)		/* action */
+#define FC_ACTION_NOACK	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK)	/* action no-ack */
+
+#define FC_CTL_WRAPPER	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER)	/* Control Wrapper */
+#define FC_BLOCKACK_REQ	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ)	/* Block Ack Req */
+#define FC_BLOCKACK	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK)	/* Block Ack */
+#define FC_PS_POLL	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL)	/* PS poll */
+#define FC_RTS		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS)		/* RTS */
+#define FC_CTS		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS)		/* CTS */
+#define FC_ACK		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK)		/* ACK */
+#define FC_CF_END	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END)		/* CF-END */
+#define FC_CF_END_ACK	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK)	/* CF-END ACK */
+
+#define FC_DATA		FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA)		/* data */
+#define FC_NULL_DATA	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL)		/* null data */
+#define FC_DATA_CF_ACK	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK)	/* data CF ACK */
+#define FC_QOS_DATA	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA)	/* QoS data */
+#define FC_QOS_NULL	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL)	/* QoS null */
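+/* Frame kind matching sketch (illustrative): given a 16-bit frame control value
+ * fc, FC_TYPE(fc) and FC_SUBTYPE(fc) extract the type/subtype fields, and
+ * (fc & FC_KIND_MASK) can be compared against the FC_xxx kinds above, e.g.
+ *	if ((fc & FC_KIND_MASK) == FC_BEACON)	type 0, subtype 8
+ */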
+
+/* QoS Control Field */
+
+/* 802.1D Priority */
+#define QOS_PRIO_SHIFT		0	/* QoS priority shift */
+#define QOS_PRIO_MASK		0x0007	/* QoS priority mask */
+#define QOS_PRIO(qos)		(((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT)	/* QoS priority */
+
+/* Traffic Identifier */
+#define QOS_TID_SHIFT		0	/* QoS TID shift */
+#define QOS_TID_MASK		0x000f	/* QoS TID mask */
+#define QOS_TID(qos)		(((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT)	/* QoS TID */
+
+/* End of Service Period (U-APSD) */
+#define QOS_EOSP_SHIFT		4	/* QoS End of Service Period shift */
+#define QOS_EOSP_MASK		0x0010	/* QoS End of Service Period mask */
+#define QOS_EOSP(qos)		(((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT)	/* Qos EOSP */
+
+/* Ack Policy */
+#define QOS_ACK_NORMAL_ACK	0	/* Normal Ack */
+#define QOS_ACK_NO_ACK		1	/* No Ack (eg mcast) */
+#define QOS_ACK_NO_EXP_ACK	2	/* No Explicit Ack */
+#define QOS_ACK_BLOCK_ACK	3	/* Block Ack */
+#define QOS_ACK_SHIFT		5	/* QoS ACK shift */
+#define QOS_ACK_MASK		0x0060	/* QoS ACK mask */
+#define QOS_ACK(qos)		(((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT)	/* QoS ACK */
+
+/* A-MSDU flag */
+#define QOS_AMSDU_SHIFT		7	/* AMSDU shift */
+#define QOS_AMSDU_MASK		0x0080	/* AMSDU mask */
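+/* Worked example (illustrative): for a QoS Control field value of 0x0025,
+ * QOS_TID(0x0025) == 5, QOS_EOSP(0x0025) == 0 and QOS_ACK(0x0025) == 1
+ * (QOS_ACK_NO_ACK); bit QOS_AMSDU_MASK (0x0080) is clear, so no A-MSDU.
+ */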
+
+/* Management Frames */
+
+/* Management Frame Constants */
+
+/* Fixed fields */
+#define DOT11_MNG_AUTH_ALGO_LEN		2	/* d11 management auth. algo. length */
+#define DOT11_MNG_AUTH_SEQ_LEN		2	/* d11 management auth. seq. length */
+#define DOT11_MNG_BEACON_INT_LEN	2	/* d11 management beacon interval length */
+#define DOT11_MNG_CAP_LEN		2	/* d11 management cap. length */
+#define DOT11_MNG_AP_ADDR_LEN		6	/* d11 management AP address length */
+#define DOT11_MNG_LISTEN_INT_LEN	2	/* d11 management listen interval length */
+#define DOT11_MNG_REASON_LEN		2	/* d11 management reason length */
+#define DOT11_MNG_AID_LEN		2	/* d11 management AID length */
+#define DOT11_MNG_STATUS_LEN		2	/* d11 management status length */
+#define DOT11_MNG_TIMESTAMP_LEN		8	/* d11 management timestamp length */
+
+/* DUR/ID field in assoc resp is 0xc000 | AID */
+#define DOT11_AID_MASK			0x3fff	/* d11 AID mask */
+
+/* Reason Codes */
+#define DOT11_RC_RESERVED		0	/* d11 RC reserved */
+#define DOT11_RC_UNSPECIFIED		1	/* Unspecified reason */
+#define DOT11_RC_AUTH_INVAL		2	/* Previous authentication no longer valid */
+#define DOT11_RC_DEAUTH_LEAVING		3	/* Deauthenticated because sending station
+						 * is leaving (or has left) IBSS or ESS
+						 */
+#define DOT11_RC_INACTIVITY		4	/* Disassociated due to inactivity */
+#define DOT11_RC_BUSY			5	/* Disassociated because AP is unable to handle
+						 * all currently associated stations
+						 */
+#define DOT11_RC_INVAL_CLASS_2		6	/* Class 2 frame received from
+						 * nonauthenticated station
+						 */
+#define DOT11_RC_INVAL_CLASS_3		7	/* Class 3 frame received from
+						 *  nonassociated station
+						 */
+#define DOT11_RC_DISASSOC_LEAVING	8	/* Disassociated because sending station is
+						 * leaving (or has left) BSS
+						 */
+#define DOT11_RC_NOT_AUTH		9	/* Station requesting (re)association is not
+						 * authenticated with responding station
+						 */
+#define DOT11_RC_BAD_PC			10	/* Unacceptable power capability element */
+#define DOT11_RC_BAD_CHANNELS		11	/* Unacceptable supported channels element */
+/* 12 is unused */
+
+/* 32-39 are QSTA specific reasons added in 11e */
+#define DOT11_RC_UNSPECIFIED_QOS	32	/* unspecified QoS-related reason */
+#define DOT11_RC_INSUFFCIENT_BW		33	/* QAP lacks sufficient bandwidth */
+#define DOT11_RC_EXCESSIVE_FRAMES	34	/* excessive number of frames need ack */
+#define DOT11_RC_TX_OUTSIDE_TXOP	35	/* transmitting outside the limits of txop */
+#define DOT11_RC_LEAVING_QBSS		36	/* QSTA is leaving the QBSS (or resetting) */
+#define DOT11_RC_BAD_MECHANISM		37	/* does not want to use the mechanism */
+#define DOT11_RC_SETUP_NEEDED		38	/* mechanism needs a setup */
+#define DOT11_RC_TIMEOUT		39	/* timeout */
+
+#define DOT11_RC_MAX			23	/* Reason codes > 23 are reserved */
+
+#define DOT11_RC_TDLS_PEER_UNREACH	25
+#define DOT11_RC_TDLS_DOWN_UNSPECIFIED	26
+
+/* Status Codes */
+#define DOT11_SC_SUCCESS		0	/* Successful */
+#define DOT11_SC_FAILURE		1	/* Unspecified failure */
+#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2	/* TDLS wakeup schedule rejected but
+					 * an alternative schedule was provided
+					 */
+#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3	/* TDLS wakeup schedule rejected */
+#define DOT11_SC_TDLS_SEC_DISABLED	5	/* TDLS Security disabled */
+#define DOT11_SC_LIFETIME_REJ		6	/* Unacceptable lifetime */
+#define DOT11_SC_NOT_SAME_BSS		7	/* Not in same BSS */
+#define DOT11_SC_CAP_MISMATCH		10	/* Cannot support all requested
+						 * capabilities in the Capability
+						 * Information field
+						 */
+#define DOT11_SC_REASSOC_FAIL		11	/* Reassociation denied due to inability
+						 * to confirm that association exists
+						 */
+#define DOT11_SC_ASSOC_FAIL		12	/* Association denied due to reason
+						 * outside the scope of this standard
+						 */
+#define DOT11_SC_AUTH_MISMATCH		13	/* Responding station does not support
+						 * the specified authentication
+						 * algorithm
+						 */
+#define DOT11_SC_AUTH_SEQ		14	/* Received an Authentication frame
+						 * with authentication transaction
+						 * sequence number out of expected
+						 * sequence
+						 */
+#define DOT11_SC_AUTH_CHALLENGE_FAIL	15	/* Authentication rejected because of
+						 * challenge failure
+						 */
+#define DOT11_SC_AUTH_TIMEOUT		16	/* Authentication rejected due to timeout
+						 * waiting for next frame in sequence
+						 */
+#define DOT11_SC_ASSOC_BUSY_FAIL	17	/* Association denied because AP is
+						 * unable to handle additional
+						 * associated stations
+						 */
+#define DOT11_SC_ASSOC_RATE_MISMATCH	18	/* Association denied due to requesting
+						 * station not supporting all of the
+						 * data rates in the BSSBasicRateSet
+						 * parameter
+						 */
+#define DOT11_SC_ASSOC_SHORT_REQUIRED	19	/* Association denied due to requesting
+						 * station not supporting the Short
+						 * Preamble option
+						 */
+#define DOT11_SC_ASSOC_PBCC_REQUIRED	20	/* Association denied due to requesting
+						 * station not supporting the PBCC
+						 * Modulation option
+						 */
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED	21	/* Association denied due to requesting
+						 * station not supporting the Channel
+						 * Agility option
+						 */
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED	22	/* Association denied because Spectrum
+							 * Management capability is required.
+							 */
+#define DOT11_SC_ASSOC_BAD_POWER_CAP	23	/* Association denied because the info
+						 * in the Power Cap element is
+						 * unacceptable.
+						 */
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS	24	/* Association denied because the info
+						 * in the Supported Channel element is
+						 * unacceptable
+						 */
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED	25	/* Association denied due to requesting
+							 * station not supporting the Short Slot
+							 * Time option
+							 */
+#define DOT11_SC_ASSOC_DSSSOFDM_REQUIRED 26	/* Association denied because requesting station
+						 * does not support the DSSS-OFDM option
+						 */
+#define DOT11_SC_ASSOC_HT_REQUIRED	27	/* Association denied because the requesting
+						 * station does not support HT features
+						 */
+#define DOT11_SC_ASSOC_R0KH_UNREACHABLE	28	/* Association denied due to AP
+						 * being unable to reach the R0 Key Holder
+						 */
+#define DOT11_SC_ASSOC_TRY_LATER	30	/* Association denied temporarily, try again later
+						 */
+#define DOT11_SC_ASSOC_MFP_VIOLATION	31	/* Association denied due to Robust Management
+						 * frame policy violation
+						 */
+
+#define	DOT11_SC_DECLINED		37	/* request declined */
+#define	DOT11_SC_INVALID_PARAMS		38	/* One or more params have invalid values */
+#define DOT11_SC_INVALID_PAIRWISE_CIPHER	42 /* invalid pairwise cipher */
+#define	DOT11_SC_INVALID_AKMP		43	/* Association denied due to invalid AKMP */
+#define DOT11_SC_INVALID_RSNIE_CAP	45	/* invalid RSN IE capabilities */
+#define DOT11_SC_DLS_NOT_ALLOWED	48	/* DLS is not allowed in the BSS by policy */
+#define	DOT11_SC_INVALID_PMKID		53	/* Association denied due to invalid PMKID */
+#define	DOT11_SC_INVALID_MDID		54	/* Association denied due to invalid MDID */
+#define	DOT11_SC_INVALID_FTIE		55	/* Association denied due to invalid FTIE */
+
+#define DOT11_SC_ADV_PROTO_NOT_SUPPORTED	59	/* ad proto not supported */
+#define DOT11_SC_NO_OUTSTAND_REQ			60	/* no outstanding req */
+#define DOT11_SC_RSP_NOT_RX_FROM_SERVER		61	/* no response from server */
+#define DOT11_SC_TIMEOUT					62	/* timeout */
+#define DOT11_SC_QUERY_RSP_TOO_LARGE		63	/* query rsp too large */
+#define DOT11_SC_SERVER_UNREACHABLE			65	/* server unreachable */
+
+#define DOT11_SC_UNEXP_MSG			70	/* Unexpected message */
+#define DOT11_SC_INVALID_SNONCE		71	/* Invalid SNonce */
+#define DOT11_SC_INVALID_RSNIE		72	/* Invalid contents of RSNIE */
+#define DOT11_SC_ASSOC_VHT_REQUIRED	104	/* Association denied because the requesting
+						 * station does not support VHT features.
+						 */
+
+#define DOT11_SC_TRANSMIT_FAILURE	79	/* transmission failure */
+
+/* Info Elts, length of INFORMATION portion of Info Elts */
+#define DOT11_MNG_DS_PARAM_LEN			1	/* d11 management DS parameter length */
+#define DOT11_MNG_IBSS_PARAM_LEN		2	/* d11 management IBSS parameter length */
+
+/* TIM Info element has 3 bytes fixed info in INFORMATION field,
+ * followed by 1 to 251 bytes of Partial Virtual Bitmap
+ */
+#define DOT11_MNG_TIM_FIXED_LEN			3	/* d11 management TIM fixed length */
+#define DOT11_MNG_TIM_DTIM_COUNT		0	/* d11 management DTIM count */
+#define DOT11_MNG_TIM_DTIM_PERIOD		1	/* d11 management DTIM period */
+#define DOT11_MNG_TIM_BITMAP_CTL		2	/* d11 management TIM BITMAP control  */
+#define DOT11_MNG_TIM_PVB			3	/* d11 management TIM PVB */
+
+/* TLV defines */
+#define TLV_TAG_OFF		0	/* tag offset */
+#define TLV_LEN_OFF		1	/* length offset */
+#define TLV_HDR_LEN		2	/* header length */
+#define TLV_BODY_OFF		2	/* body offset */
+#define TLV_BODY_LEN_MAX	255	/* max body length */
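+/* TLV walking sketch (illustrative, assumes a well-formed buffer): a tagged
+ * element carries its tag at TLV_TAG_OFF and its body length at TLV_LEN_OFF,
+ * so the body starts at elt + TLV_BODY_OFF and the next element at
+ * elt + TLV_HDR_LEN + elt[TLV_LEN_OFF].
+ */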
+
+/* Management Frame Information Element IDs */
+#define DOT11_MNG_SSID_ID			0	/* d11 management SSID id */
+#define DOT11_MNG_RATES_ID			1	/* d11 management rates id */
+#define DOT11_MNG_FH_PARMS_ID			2	/* d11 management FH parameter id */
+#define DOT11_MNG_DS_PARMS_ID			3	/* d11 management DS parameter id */
+#define DOT11_MNG_CF_PARMS_ID			4	/* d11 management CF parameter id */
+#define DOT11_MNG_TIM_ID			5	/* d11 management TIM id */
+#define DOT11_MNG_IBSS_PARMS_ID			6	/* d11 management IBSS parameter id */
+#define DOT11_MNG_COUNTRY_ID			7	/* d11 management country id */
+#define DOT11_MNG_HOPPING_PARMS_ID		8	/* d11 management hopping parameter id */
+#define DOT11_MNG_HOPPING_TABLE_ID		9	/* d11 management hopping table id */
+#define DOT11_MNG_REQUEST_ID			10	/* d11 management request id */
+#define DOT11_MNG_QBSS_LOAD_ID 			11	/* d11 management QBSS Load id */
+#define DOT11_MNG_EDCA_PARAM_ID			12	/* 11E EDCA Parameter id */
+#define DOT11_MNG_TSPEC_ID			13	/* d11 management TSPEC id */
+#define DOT11_MNG_TCLAS_ID			14	/* d11 management TCLAS id */
+#define DOT11_MNG_CHALLENGE_ID			16	/* d11 management challenge id */
+#define DOT11_MNG_PWR_CONSTRAINT_ID		32	/* 11H PowerConstraint */
+#define DOT11_MNG_PWR_CAP_ID			33	/* 11H PowerCapability */
+#define DOT11_MNG_TPC_REQUEST_ID 		34	/* 11H TPC Request */
+#define DOT11_MNG_TPC_REPORT_ID			35	/* 11H TPC Report */
+#define DOT11_MNG_SUPP_CHANNELS_ID		36	/* 11H Supported Channels */
+#define DOT11_MNG_CHANNEL_SWITCH_ID		37	/* 11H ChannelSwitch Announcement */
+#define DOT11_MNG_MEASURE_REQUEST_ID		38	/* 11H MeasurementRequest */
+#define DOT11_MNG_MEASURE_REPORT_ID		39	/* 11H MeasurementReport */
+#define DOT11_MNG_QUIET_ID			40	/* 11H Quiet */
+#define DOT11_MNG_IBSS_DFS_ID			41	/* 11H IBSS_DFS */
+#define DOT11_MNG_ERP_ID			42	/* d11 management ERP id */
+#define DOT11_MNG_TS_DELAY_ID			43	/* d11 management TS Delay id */
+#define DOT11_MNG_TCLAS_PROC_ID			44	/* d11 management TCLAS processing id */
+#define	DOT11_MNG_HT_CAP			45	/* d11 mgmt HT cap id */
+#define DOT11_MNG_QOS_CAP_ID			46	/* 11E QoS Capability id */
+#define DOT11_MNG_NONERP_ID			47	/* d11 management NON-ERP id */
+#define DOT11_MNG_RSN_ID			48	/* d11 management RSN id */
+#define DOT11_MNG_EXT_RATES_ID			50	/* d11 management ext. rates id */
+#define DOT11_MNG_AP_CHREP_ID			51	/* 11k AP Channel report id */
+#define DOT11_MNG_NEIGHBOR_REP_ID		52	/* 11k & 11v Neighbor report id */
+#define DOT11_MNG_RCPI_ID			53	/* 11k RCPI */
+#define DOT11_MNG_MDIE_ID			54	/* 11r Mobility domain id */
+#define DOT11_MNG_FTIE_ID			55	/* 11r Fast Bss Transition id */
+#define DOT11_MNG_FT_TI_ID			56	/* 11r Timeout Interval id */
+#define DOT11_MNG_RDE_ID			57	/* 11r RIC Data Element id */
+#define	DOT11_MNG_REGCLASS_ID			59	/* d11 management regulatory class id */
+#define DOT11_MNG_EXT_CSA_ID			60	/* d11 Extended CSA */
+#define	DOT11_MNG_HT_ADD			61	/* d11 mgmt additional HT info */
+#define	DOT11_MNG_EXT_CHANNEL_OFFSET		62	/* d11 mgmt ext channel offset */
+#define DOT11_MNG_BSS_AVR_ACCESS_DELAY_ID	63	/* 11k bss average access delay */
+#define DOT11_MNG_ANTENNA_ID			64	/* 11k antenna id */
+#define DOT11_MNG_RSNI_ID			65	/* 11k RSNI id */
+#define DOT11_MNG_MEASUREMENT_PILOT_TX_ID	66	/* 11k measurement pilot tx info id */
+#define DOT11_MNG_BSS_AVAL_ADMISSION_CAP_ID	67	/* 11k BSS available admission capacity id */
+#define DOT11_MNG_BSS_AC_ACCESS_DELAY_ID	68	/* 11k bss AC access delay id */
+#define DOT11_MNG_WAPI_ID			68	/* d11 management WAPI id */
+#define DOT11_MNG_TIME_ADVERTISE_ID	69	/* 11p time advertisement */
+#define DOT11_MNG_RRM_CAP_ID		70	/* 11k radio measurement capability */
+#define DOT11_MNG_MULTIPLE_BSSID_ID		71	/* 11k multiple BSSID id */
+#define	DOT11_MNG_HT_BSS_COEXINFO_ID		72	/* d11 mgmt OBSS Coexistence INFO */
+#define	DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID	73	/* d11 mgmt OBSS Intolerant Channel list */
+#define	DOT11_MNG_HT_OBSS_ID			74	/* d11 mgmt OBSS HT info */
+#define DOT11_MNG_MMIE_ID			76	/* d11 mgmt MIC IE */
+#define DOT11_MNG_FMS_DESCR_ID			86	/* 11v FMS descriptor */
+#define DOT11_MNG_FMS_REQ_ID			87	/* 11v FMS request id */
+#define DOT11_MNG_FMS_RESP_ID			88	/* 11v FMS response id */
+#define DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID	90	/* 11v bss max idle id */
+#define DOT11_MNG_TFS_REQUEST_ID		91	/* 11v tfs request id */
+#define DOT11_MNG_TFS_RESPONSE_ID		92	/* 11v tfs response id */
+#define DOT11_MNG_WNM_SLEEP_MODE_ID		93	/* 11v wnm-sleep mode id */
+#define DOT11_MNG_TIMBC_REQ_ID			94	/* 11v TIM broadcast request id */
+#define DOT11_MNG_TIMBC_RESP_ID			95	/* 11v TIM broadcast response id */
+#define DOT11_MNG_CHANNEL_USAGE			97	/* 11v channel usage */
+#define DOT11_MNG_TIME_ZONE_ID			98	/* 11v time zone */
+#define DOT11_MNG_DMS_REQUEST_ID		99	/* 11v dms request id */
+#define DOT11_MNG_DMS_RESPONSE_ID		100	/* 11v dms response id */
+#define DOT11_MNG_LINK_IDENTIFIER_ID		101	/* 11z TDLS Link Identifier IE */
+#define DOT11_MNG_WAKEUP_SCHEDULE_ID		102	/* 11z TDLS Wakeup Schedule IE */
+#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID	104	/* 11z TDLS Channel Switch Timing IE */
+#define DOT11_MNG_PTI_CONTROL_ID		105	/* 11z TDLS PTI Control IE */
+#define DOT11_MNG_PU_BUFFER_STATUS_ID	106	/* 11z TDLS PU Buffer Status IE */
+#define DOT11_MNG_INTERWORKING_ID		107	/* 11u interworking */
+#define DOT11_MNG_ADVERTISEMENT_ID		108	/* 11u advertisement protocol */
+#define DOT11_MNG_EXP_BW_REQ_ID			109	/* 11u expedited bandwidth request */
+#define DOT11_MNG_QOS_MAP_ID			110	/* 11u QoS map set */
+#define DOT11_MNG_ROAM_CONSORT_ID		111	/* 11u roaming consortium */
+#define DOT11_MNG_EMERGCY_ALERT_ID		112	/* 11u emergency alert identifier */
+#define	DOT11_MNG_EXT_CAP_ID			127	/* d11 mgmt ext capability */
+#define	DOT11_MNG_VHT_CAP_ID			191	/* d11 mgmt VHT cap id */
+#define	DOT11_MNG_VHT_OPERATION_ID		192	/* d11 mgmt VHT op id */
+#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID		194	/* Wide BW Channel Switch IE */
+#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID	195	/* VHT transmit Power Envelope IE */
+#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID		196	/* Channel Switch Wrapper IE */
+#define DOT11_MNG_AID_ID					197	/* Association ID  IE */
+#define	DOT11_MNG_OPER_MODE_NOTIF_ID	199	/* d11 mgmt VHT oper mode notif */
+
+
+#define DOT11_MNG_WPA_ID			221	/* d11 management WPA id */
+#define DOT11_MNG_PROPR_ID			221
+/* should start using this one instead of above two */
+#define DOT11_MNG_VS_ID				221	/* d11 management Vendor Specific IE */
+
+/* Rate Defines */
+
+/* Valid rates for the Supported Rates and Extended Supported Rates IEs.
+ * Encoding is the rate in 500kbps units, rounding up for fractional values.
+ * 802.11-2012, section 6.5.5.2, DATA_RATE parameter enumerates all the values.
+ * The rate values cover DSSS, HR/DSSS, ERP, and OFDM phy rates.
+ * The defines below do not cover the rates specific to 10MHz, {3, 4.5, 27},
+ * and 5MHz, {1.5, 2.25, 3, 4.5, 13.5}, which are not supported by Broadcom devices.
+ */
+
+#define DOT11_RATE_1M   2       /* 1  Mbps in 500kbps units */
+#define DOT11_RATE_2M   4       /* 2  Mbps in 500kbps units */
+#define DOT11_RATE_5M5  11      /* 5.5 Mbps in 500kbps units */
+#define DOT11_RATE_11M  22      /* 11 Mbps in 500kbps units */
+#define DOT11_RATE_6M   12      /* 6  Mbps in 500kbps units */
+#define DOT11_RATE_9M   18      /* 9  Mbps in 500kbps units */
+#define DOT11_RATE_12M  24      /* 12 Mbps in 500kbps units */
+#define DOT11_RATE_18M  36      /* 18 Mbps in 500kbps units */
+#define DOT11_RATE_24M  48      /* 24 Mbps in 500kbps units */
+#define DOT11_RATE_36M  72      /* 36 Mbps in 500kbps units */
+#define DOT11_RATE_48M  96      /* 48 Mbps in 500kbps units */
+#define DOT11_RATE_54M  108     /* 54 Mbps in 500kbps units */
+#define DOT11_RATE_MAX  108     /* highest rate (54 Mbps) in 500kbps units */
+
+/* Supported Rates and Extended Supported Rates IEs
+ * Each supported rates octet is defined with the MSB indicating a Basic Rate
+ * and bits 0-6 carrying the rate value
+ */
+#define DOT11_RATE_BASIC                0x80 /* flag for a Basic Rate */
+#define DOT11_RATE_MASK                 0x7F /* mask for numeric part of rate */
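+/* Worked example (illustrative): a supported-rates octet of 0x96 has
+ * DOT11_RATE_BASIC set and (0x96 & DOT11_RATE_MASK) == 22 == DOT11_RATE_11M,
+ * i.e. 11 Mbps advertised as a basic rate.
+ */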
+
+/* BSS Membership Selector parameters
+ * 802.11-2012 and 802.11ac_D4.0 sec 8.4.2.3
+ * These selector values are advertised in Supported Rates and Extended Supported Rates IEs
+ * in the supported rates list with the Basic rate bit set.
+ * Constants below include the basic bit.
+ */
+#define DOT11_BSS_MEMBERSHIP_HT         0xFF  /* Basic 0x80 + 127, HT Required to join */
+#define DOT11_BSS_MEMBERSHIP_VHT        0xFE  /* Basic 0x80 + 126, VHT Required to join */
+
+/* ERP info element bit values */
+#define DOT11_MNG_ERP_LEN			1	/* ERP is currently 1 byte long */
+#define DOT11_MNG_NONERP_PRESENT		0x01	/* NonERP (802.11b) STAs are present
+							 * in the BSS
+							 */
+#define DOT11_MNG_USE_PROTECTION		0x02	/* Use protection mechanisms for
+							 * ERP-OFDM frames
+							 */
+#define DOT11_MNG_BARKER_PREAMBLE		0x04	/* Short Preambles: 0 == allowed,
+							 * 1 == not allowed
+							 */
+/* TS Delay element offset & size */
+#define DOT11_MGN_TS_DELAY_LEN		4	/* length of TS DELAY IE */
+#define TS_DELAY_FIELD_SIZE			4	/* TS DELAY field size */
+
+/* Capability Information Field */
+#define DOT11_CAP_ESS				0x0001	/* d11 cap. ESS */
+#define DOT11_CAP_IBSS				0x0002	/* d11 cap. IBSS */
+#define DOT11_CAP_POLLABLE			0x0004	/* d11 cap. pollable */
+#define DOT11_CAP_POLL_RQ			0x0008	/* d11 cap. poll request */
+#define DOT11_CAP_PRIVACY			0x0010	/* d11 cap. privacy */
+#define DOT11_CAP_SHORT				0x0020	/* d11 cap. short */
+#define DOT11_CAP_PBCC				0x0040	/* d11 cap. PBCC */
+#define DOT11_CAP_AGILITY			0x0080	/* d11 cap. agility */
+#define DOT11_CAP_SPECTRUM			0x0100	/* d11 cap. spectrum */
+#define DOT11_CAP_QOS				0x0200	/* d11 cap. qos */
+#define DOT11_CAP_SHORTSLOT			0x0400	/* d11 cap. shortslot */
+#define DOT11_CAP_APSD				0x0800	/* d11 cap. apsd */
+#define DOT11_CAP_RRM				0x1000	/* d11 cap. 11k radio measurement */
+#define DOT11_CAP_CCK_OFDM			0x2000	/* d11 cap. CCK/OFDM */
+#define DOT11_CAP_DELAY_BA			0x4000	/* d11 cap. delayed block ack */
+#define DOT11_CAP_IMMEDIATE_BA			0x8000	/* d11 cap. immediate block ack */
+
+/* Extended capabilities IE bitfields */
+/* 20/40 BSS Coexistence Management support bit position */
+#define DOT11_EXT_CAP_OBSS_COEX_MGMT		0
+/* Extended Channel Switching support bit position */
+#define DOT11_EXT_CAP_EXT_CHAN_SWITCHING	2
+/* scheduled PSMP support bit position */
+#define DOT11_EXT_CAP_SPSMP			6
+/*  Flexible Multicast Service */
+#define DOT11_EXT_CAP_FMS			11
+/* proxy ARP service support bit position */
+#define DOT11_EXT_CAP_PROXY_ARP			12
+/* Traffic Filter Service */
+#define DOT11_EXT_CAP_TFS			16
+/* WNM-Sleep Mode */
+#define DOT11_EXT_CAP_WNM_SLEEP			17
+/* TIM Broadcast service */
+#define DOT11_EXT_CAP_TIMBC			18
+/* BSS Transition Management support bit position */
+#define DOT11_EXT_CAP_BSSTRANS_MGMT		19
+/* Direct Multicast Service */
+#define DOT11_EXT_CAP_DMS			26
+/* Interworking support bit position */
+#define DOT11_EXT_CAP_IW			31
+/* QoS map support bit position */
+#define DOT11_EXT_CAP_QOS_MAP		32
+/* service Interval granularity bit position and mask */
+#define DOT11_EXT_CAP_SI			41
+#define DOT11_EXT_CAP_SI_MASK			0x0E
+/* WNM notification */
+#define DOT11_EXT_CAP_WNM_NOTIF			46
+/* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */
+#define DOT11_EXT_CAP_OPER_MODE_NOTIF		62
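+/* Worked example (illustrative): testing an extended capability bit in the
+ * variable-length cap[] array of dot11_extcap_ie_t. BSS Transition Management
+ * support (bit 19, DOT11_EXT_CAP_BSSTRANS_MGMT) lives in octet 19/8 == 2,
+ * mask 1 << (19 % 8):
+ *	supported = (ie->len > 2) && (ie->cap[2] & (1 << 3));
+ */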
+
+/* VHT Operating mode bit fields -  (11ac D3.0 - 8.4.1.50) */
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_MASK 0x3
+#define DOT11_OPER_MODE_RXNSS_SHIFT 4
+#define DOT11_OPER_MODE_RXNSS_MASK 0x70
+#define DOT11_OPER_MODE_RXNSS_TYPE_SHIFT 7
+#define DOT11_OPER_MODE_RXNSS_TYPE_MASK 0x80
+
+#define DOT11_OPER_MODE(type, nss, chanw) (\
+	((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+		 DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+	(((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+	((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+		 DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH(mode) \
+	(((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)\
+		>> DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT)
+#define DOT11_OPER_MODE_RXNSS(mode) \
+	((((mode) & DOT11_OPER_MODE_RXNSS_MASK)		\
+		>> DOT11_OPER_MODE_RXNSS_SHIFT) + 1)
+#define DOT11_OPER_MODE_RXNSS_TYPE(mode) \
+	(((mode) & DOT11_OPER_MODE_RXNSS_TYPE_MASK)\
+		>> DOT11_OPER_MODE_RXNSS_TYPE_SHIFT)
+
+#define DOT11_OPER_MODE_20MHZ 0
+#define DOT11_OPER_MODE_40MHZ 1
+#define DOT11_OPER_MODE_80MHZ 2
+#define DOT11_OPER_MODE_160MHZ 3
+#define DOT11_OPER_MODE_8080MHZ 3
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_20MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_20MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_40MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_40MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_80MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_160MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_160MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_8080MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_8080MHZ)
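+/* Worked example (illustrative): DOT11_OPER_MODE(0, 2, DOT11_OPER_MODE_80MHZ)
+ * builds 0x12; decoding it back gives DOT11_OPER_MODE_RXNSS(0x12) == 2 (Rx NSS)
+ * and DOT11_OPER_MODE_CHANNEL_WIDTH(0x12) == DOT11_OPER_MODE_80MHZ.
+ */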
+
+/* Operating mode information element 802.11ac D3.0 - 8.4.2.168 */
+BWL_PRE_PACKED_STRUCT struct dot11_oper_mode_notif_ie {
+	uint8 mode;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t;
+
+#define DOT11_OPER_MODE_NOTIF_IE_LEN 1
+
+/* Extended Capability Information Field */
+#define DOT11_OBSS_COEX_MNG_SUPPORT	0x01	/* 20/40 BSS Coexistence Management support */
+
+/*
+ * Action Frame Constants
+ */
+#define DOT11_ACTION_HDR_LEN		2	/* action frame category + action field */
+#define DOT11_ACTION_CAT_OFF		0	/* category offset */
+#define DOT11_ACTION_ACT_OFF		1	/* action offset */
+
+/* Action Category field (sec 8.4.1.11) */
+#define DOT11_ACTION_CAT_ERR_MASK	0x80	/* category error mask */
+#define DOT11_ACTION_CAT_MASK		0x7F	/* category mask */
+#define DOT11_ACTION_CAT_SPECT_MNG	0	/* category spectrum management */
+#define DOT11_ACTION_CAT_QOS		1	/* category QoS */
+#define DOT11_ACTION_CAT_DLS		2	/* category DLS */
+#define DOT11_ACTION_CAT_BLOCKACK	3	/* category block ack */
+#define DOT11_ACTION_CAT_PUBLIC		4	/* category public */
+#define DOT11_ACTION_CAT_RRM		5	/* category radio measurements */
+#define DOT11_ACTION_CAT_FBT	6	/* category fast bss transition */
+#define DOT11_ACTION_CAT_HT		7	/* category for HT */
+#define	DOT11_ACTION_CAT_SA_QUERY	8	/* security association query */
+#define	DOT11_ACTION_CAT_PDPA		9	/* protected dual of public action */
+#define DOT11_ACTION_CAT_WNM		10	/* category for WNM */
+#define DOT11_ACTION_CAT_UWNM		11	/* category for Unprotected WNM */
+#define DOT11_ACTION_NOTIFICATION	17
+#define DOT11_ACTION_CAT_VHT		21	/* VHT action */
+#define DOT11_ACTION_CAT_VSP		126	/* protected vendor specific */
+#define DOT11_ACTION_CAT_VS		127	/* category Vendor Specific */
+
+/* Spectrum Management Action IDs (sec 7.4.1) */
+#define DOT11_SM_ACTION_M_REQ		0	/* d11 action measurement request */
+#define DOT11_SM_ACTION_M_REP		1	/* d11 action measurement response */
+#define DOT11_SM_ACTION_TPC_REQ		2	/* d11 action TPC request */
+#define DOT11_SM_ACTION_TPC_REP		3	/* d11 action TPC response */
+#define DOT11_SM_ACTION_CHANNEL_SWITCH	4	/* d11 action channel switch */
+#define DOT11_SM_ACTION_EXT_CSA		5	/* d11 extended CSA for 11n */
+
+/* QoS action ids */
+#define DOT11_QOS_ACTION_ADDTS_REQ	0	/* d11 action ADDTS request */
+#define DOT11_QOS_ACTION_ADDTS_RESP	1	/* d11 action ADDTS response */
+#define DOT11_QOS_ACTION_DELTS		2	/* d11 action DELTS */
+#define DOT11_QOS_ACTION_SCHEDULE	3	/* d11 action schedule */
+#define DOT11_QOS_ACTION_QOS_MAP	4	/* d11 action QOS map */
+
+/* HT action ids */
+#define DOT11_ACTION_ID_HT_CH_WIDTH	0	/* notify channel width action id */
+#define DOT11_ACTION_ID_HT_MIMO_PS	1	/* mimo ps action id */
+
+/* Public action ids */
+#define DOT11_PUB_ACTION_BSS_COEX_MNG	0	/* 20/40 Coexistence Management action id */
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH	4	/* d11 action channel switch */
+#define DOT11_PUB_ACTION_GAS_CB_REQ	12	/* GAS Comeback Request */
+
+/* Block Ack action types */
+#define DOT11_BA_ACTION_ADDBA_REQ	0	/* ADDBA Req action frame type */
+#define DOT11_BA_ACTION_ADDBA_RESP	1	/* ADDBA Resp action frame type */
+#define DOT11_BA_ACTION_DELBA		2	/* DELBA action frame type */
+
+/* ADDBA action parameters */
+#define DOT11_ADDBA_PARAM_AMSDU_SUP	0x0001	/* AMSDU supported under BA */
+#define DOT11_ADDBA_PARAM_POLICY_MASK	0x0002	/* policy mask(ack vs delayed) */
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT	1	/* policy shift */
+#define DOT11_ADDBA_PARAM_TID_MASK	0x003c	/* tid mask */
+#define DOT11_ADDBA_PARAM_TID_SHIFT	2	/* tid shift */
+#define DOT11_ADDBA_PARAM_BSIZE_MASK	0xffc0	/* buffer size mask */
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT	6	/* buffer size shift */
+
+#define DOT11_ADDBA_POLICY_DELAYED	0	/* delayed BA policy */
+#define DOT11_ADDBA_POLICY_IMMEDIATE	1	/* immediate BA policy */
+
+/* Fast Transition action types */
+#define DOT11_FT_ACTION_FT_RESERVED		0
+#define DOT11_FT_ACTION_FT_REQ			1	/* FBT request - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_RES			2	/* FBT response - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_CON			3	/* FBT confirm - for OTDS with RRP */
+#define DOT11_FT_ACTION_FT_ACK			4	/* FBT ack */
+
+/* DLS action types */
+#define DOT11_DLS_ACTION_REQ			0	/* DLS Request */
+#define DOT11_DLS_ACTION_RESP			1	/* DLS Response */
+#define DOT11_DLS_ACTION_TD			2	/* DLS Teardown */
+
+/* Wireless Network Management (WNM) action types */
+#define DOT11_WNM_ACTION_EVENT_REQ		0
+#define DOT11_WNM_ACTION_EVENT_REP		1
+#define DOT11_WNM_ACTION_DIAG_REQ		2
+#define DOT11_WNM_ACTION_DIAG_REP		3
+#define DOT11_WNM_ACTION_LOC_CFG_REQ		4
+#define DOT11_WNM_ACTION_LOC_RFG_RESP		5
+#define DOT11_WNM_ACTION_BSSTRANS_QUERY		6
+#define DOT11_WNM_ACTION_BSSTRANS_REQ		7
+#define DOT11_WNM_ACTION_BSSTRANS_RESP		8
+#define DOT11_WNM_ACTION_FMS_REQ		9
+#define DOT11_WNM_ACTION_FMS_RESP		10
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ	11
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP	12
+#define DOT11_WNM_ACTION_TFS_REQ		13
+#define DOT11_WNM_ACTION_TFS_RESP		14
+#define DOT11_WNM_ACTION_TFS_NOTIFY_REQ		15
+#define DOT11_WNM_ACTION_WNM_SLEEP_REQ		16
+#define DOT11_WNM_ACTION_WNM_SLEEP_RESP		17
+#define DOT11_WNM_ACTION_TIMBC_REQ		18
+#define DOT11_WNM_ACTION_TIMBC_RESP		19
+#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD	20
+#define DOT11_WNM_ACTION_CHAN_USAGE_REQ		21
+#define DOT11_WNM_ACTION_CHAN_USAGE_RESP	22
+#define DOT11_WNM_ACTION_DMS_REQ		23
+#define DOT11_WNM_ACTION_DMS_RESP		24
+#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ	25
+#define DOT11_WNM_ACTION_NOTFCTN_REQ		26
+#define DOT11_WNM_ACTION_NOTFCTN_RESP		27
+#define DOT11_WNM_ACTION_TFS_NOTIFY_RESP	28
+
+/* Unprotected Wireless Network Management (WNM) action types */
+#define DOT11_UWNM_ACTION_TIM			0
+#define DOT11_UWNM_ACTION_TIMING_MEASUREMENT	1
+
+#define DOT11_MNG_COUNTRY_ID_LEN 3
+
+/* VHT category action types - 802.11ac D3.0 - 8.5.23.1 */
+#define DOT11_VHT_ACTION_CBF				0	/* Compressed Beamforming */
+#define DOT11_VHT_ACTION_GID_MGMT			1	/* Group ID Management */
+#define DOT11_VHT_ACTION_OPER_MODE_NOTIF	2	/* Operating mode notif'n */
+
+/** DLS Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_req {
+	uint8 category;			/* category of action frame (2) */
+	uint8 action;				/* DLS action: req (0) */
+	struct ether_addr	da;		/* destination address */
+	struct ether_addr	sa;		/* source address */
+	uint16 cap;				/* capability */
+	uint16 timeout;			/* timeout value */
+	uint8 data[1];				/* IEs: supported rates, extended supported rates, HT cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_req dot11_dls_req_t;
+#define DOT11_DLS_REQ_LEN 18	/* Fixed length */
+
+/** DLS response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_resp {
+	uint8 category;			/* category of action frame (2) */
+	uint8 action;				/* DLS action: resp (1) */
+	uint16 status;				/* status code field */
+	struct ether_addr	da;		/* destination address */
+	struct ether_addr	sa;		/* source address */
+	uint8 data[1];				/* optional: capability, rate ... */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_resp dot11_dls_resp_t;
+#define DOT11_DLS_RESP_LEN 16	/* Fixed length */
+
+
+/* ************* 802.11v related definitions. ************* */
+
+/** BSS Management Transition Query frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_query {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_query (6) */
+	uint8 token;			/* dialog token */
+	uint8 reason;			/* transition query reason */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_query dot11_bsstrans_query_t;
+#define DOT11_BSSTRANS_QUERY_LEN 4	/* Fixed length */
+
+/** BSS Management Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_req (7) */
+	uint8 token;			/* dialog token */
+	uint8 reqmode;			/* transition request mode */
+	uint16 disassoc_tmr;		/* disassociation timer */
+	uint8 validity_intrvl;		/* validity interval */
+	uint8 data[1];			/* optional: BSS term duration, ... */
+						/* ...session info URL, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_req dot11_bsstrans_req_t;
+#define DOT11_BSSTRANS_REQ_LEN 7	/* Fixed length */
+
+/* BSS Mgmt Transition Request Mode Field - 802.11v */
+#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL		0x01
+#define DOT11_BSSTRANS_REQMODE_ABRIDGED			0x02
+#define DOT11_BSSTRANS_REQMODE_DISASSOC_IMMINENT	0x04
+#define DOT11_BSSTRANS_REQMODE_BSS_TERM_INCL		0x08
+#define DOT11_BSSTRANS_REQMODE_ESS_DISASSOC_IMNT	0x10
+
+/** BSS Management transition response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_resp (8) */
+	uint8 token;			/* dialog token */
+	uint8 status;			/* transition status */
+	uint8 term_delay;		/* BSS termination delay */
+	uint8 data[1];			/* optional: BSSID target, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_resp dot11_bsstrans_resp_t;
+#define DOT11_BSSTRANS_RESP_LEN 5	/* Fixed length */
+
+/* BSS Mgmt Transition Response Status Field */
+#define DOT11_BSSTRANS_RESP_STATUS_ACCEPT			0
+#define DOT11_BSSTRANS_RESP_STATUS_REJECT			1
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_BCN		2
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_CAP		3
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_UNDESIRED		4
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_DELAY_REQ		5
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_BSS_LIST_PROVIDED	6
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_NO_SUITABLE_BSS		7
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_LEAVING_ESS		8
+
+
+/** BSS Max Idle Period element */
+BWL_PRE_PACKED_STRUCT struct dot11_bss_max_idle_period_ie {
+	uint8 id;				/* 90, DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID */
+	uint8 len;
+	uint16 max_idle_period;			/* in unit of 1000 TUs */
+	uint8 idle_opt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_max_idle_period_ie dot11_bss_max_idle_period_ie_t;
+#define DOT11_BSS_MAX_IDLE_PERIOD_IE_LEN	3	/* bss max idle period IE size */
+#define DOT11_BSS_MAX_IDLE_PERIOD_OPT_PROTECTED	1	/* BSS max idle option */
+
+/** TIM Broadcast request element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req_ie {
+	uint8 id;				/* 94, DOT11_MNG_TIMBC_REQ_ID */
+	uint8 len;
+	uint8 interval;				/* in unit of beacon interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req_ie dot11_timbc_req_ie_t;
+#define DOT11_TIMBC_REQ_IE_LEN		1	/* Fixed length */
+
+/** TIM Broadcast request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: DOT11_WNM_ACTION_TIMBC_REQ(18) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* TIM broadcast request element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req dot11_timbc_req_t;
+#define DOT11_TIMBC_REQ_LEN		3	/* Fixed length */
+
+/** TIM Broadcast response element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp_ie {
+	uint8 id;				/* 95, DOT11_MNG_TIM_BROADCAST_RESP_ID */
+	uint8 len;
+	uint8 status;				/* status of add request */
+	uint8 interval;				/* in unit of beacon interval */
+	int32 offset;				/* in unit of ms */
+	uint16 high_rate;			/* in unit of 0.5 Mb/s */
+	uint16 low_rate;			/* in unit of 0.5 Mb/s */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp_ie dot11_timbc_resp_ie_t;
+#define DOT11_TIMBC_DENY_RESP_IE_LEN	1	/* Deny. Fixed length */
+#define DOT11_TIMBC_ACCEPT_RESP_IE_LEN	10	/* Accept. Fixed length */
+
+#define DOT11_TIMBC_STATUS_ACCEPT		0
+#define DOT11_TIMBC_STATUS_ACCEPT_TSTAMP	1
+#define DOT11_TIMBC_STATUS_DENY			2
+#define DOT11_TIMBC_STATUS_OVERRIDDEN		3
+#define DOT11_TIMBC_STATUS_RESERVED		4
+
+/** TIM Broadcast response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* action: DOT11_WNM_ACTION_TIMBC_RESP(19) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* TIM broadcast response element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp dot11_timbc_resp_t;
+#define DOT11_TIMBC_RESP_LEN	3	/* Fixed length */
+
+/** TIM element */
+BWL_PRE_PACKED_STRUCT struct dot11_tim_ie {
+	uint8 id;			/* 5, DOT11_MNG_TIM_ID	 */
+	uint8 len;			/* 4 - 255 */
+	uint8 dtim_count;		/* DTIM decrementing counter */
+	uint8 dtim_period;		/* DTIM period */
+	uint8 bitmap_control;		/* AID 0 + bitmap offset */
+	uint8 pvb[1];			/* Partial Virtual Bitmap, variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tim_ie dot11_tim_ie_t;
+#define DOT11_TIM_IE_FIXED_LEN	3	/* Fixed length, without id and len */
+#define DOT11_TIM_IE_FIXED_TOTAL_LEN	5	/* Fixed length, with id and len */
+
+/** TIM Broadcast frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc {
+	uint8 category;			/* category of action frame (11) */
+	uint8 action;			/* action: TIM (0) */
+	uint8 check_beacon;		/* need to check-beacon */
+	uint8 tsf[8];			/* Time Synchronization Function */
+	dot11_tim_ie_t tim_ie;		/* TIM element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc dot11_timbc_t;
+#define DOT11_TIMBC_HDR_LEN	(sizeof(dot11_timbc_t) - sizeof(dot11_tim_ie_t))
+#define DOT11_TIMBC_FIXED_LEN	(sizeof(dot11_timbc_t) - 1)	/* Fixed length */
+#define DOT11_TIMBC_LEN			11	/* Fixed length */
+
+/** TCLAS frame classifier type */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_hdr {
+	uint8 type;
+	uint8 mask;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_hdr dot11_tclas_fc_hdr_t;
+#define DOT11_TCLAS_FC_HDR_LEN		2	/* Fixed length */
+
+#define DOT11_TCLAS_MASK_0		0x1
+#define DOT11_TCLAS_MASK_1		0x2
+#define DOT11_TCLAS_MASK_2		0x4
+#define DOT11_TCLAS_MASK_3		0x8
+#define DOT11_TCLAS_MASK_4		0x10
+#define DOT11_TCLAS_MASK_5		0x20
+#define DOT11_TCLAS_MASK_6		0x40
+#define DOT11_TCLAS_MASK_7		0x80
+
+#define DOT11_TCLAS_FC_0_ETH		0
+#define DOT11_TCLAS_FC_1_IP		1
+#define DOT11_TCLAS_FC_2_8021Q		2
+#define DOT11_TCLAS_FC_3_OFFSET		3
+#define DOT11_TCLAS_FC_4_IP_HIGHER	4
+#define DOT11_TCLAS_FC_5_8021D		5
+
+/** TCLAS frame classifier type 0 parameters for Ethernet */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_0_eth {
+	uint8 type;
+	uint8 mask;
+	uint8 sa[ETHER_ADDR_LEN];
+	uint8 da[ETHER_ADDR_LEN];
+	uint16 eth_type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_0_eth dot11_tclas_fc_0_eth_t;
+#define DOT11_TCLAS_FC_0_ETH_LEN	16
+
+/** TCLAS frame classifier type 1 parameters for IPV4 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_1_ipv4 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint32 src_ip;
+	uint32 dst_ip;
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 protocol;
+	uint8 reserved;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_1_ipv4_t;
+#define DOT11_TCLAS_FC_1_IPV4_LEN	18
+
+/** TCLAS frame classifier type 2 parameters for 802.1Q */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_2_8021q {
+	uint8 type;
+	uint8 mask;
+	uint16 tci;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_2_8021q dot11_tclas_fc_2_8021q_t;
+#define DOT11_TCLAS_FC_2_8021Q_LEN	4
+
+/** TCLAS frame classifier type 3 parameters for filter offset */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_3_filter {
+	uint8 type;
+	uint8 mask;
+	uint16 offset;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_3_filter dot11_tclas_fc_3_filter_t;
+#define DOT11_TCLAS_FC_3_FILTER_LEN	4
+
+/** TCLAS frame classifier type 4 parameters for IPV4 are the same as for TCLAS type 1 */
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_4_ipv4_t;
+#define DOT11_TCLAS_FC_4_IPV4_LEN	DOT11_TCLAS_FC_1_IPV4_LEN
+
+/** TCLAS frame classifier type 4 parameters for IPV6 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_4_ipv6 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint8 saddr[16];
+	uint8 daddr[16];
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 nexthdr;
+	uint8 flow_lbl[3];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_4_ipv6 dot11_tclas_fc_4_ipv6_t;
+#define DOT11_TCLAS_FC_4_IPV6_LEN	44
+
+/** TCLAS frame classifier type 5 parameters for 802.1D */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_5_8021d {
+	uint8 type;
+	uint8 mask;
+	uint8 pcp;
+	uint8 cfi;
+	uint16 vid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_5_8021d dot11_tclas_fc_5_8021d_t;
+#define DOT11_TCLAS_FC_5_8021D_LEN	6
+
+/** TCLAS frame classifier type parameters */
+BWL_PRE_PACKED_STRUCT union dot11_tclas_fc {
+	uint8 data[1];
+	dot11_tclas_fc_hdr_t hdr;
+	dot11_tclas_fc_0_eth_t t0_eth;
+	dot11_tclas_fc_1_ipv4_t	t1_ipv4;
+	dot11_tclas_fc_2_8021q_t t2_8021q;
+	dot11_tclas_fc_3_filter_t t3_filter;
+	dot11_tclas_fc_4_ipv4_t	t4_ipv4;
+	dot11_tclas_fc_4_ipv6_t	t4_ipv6;
+	dot11_tclas_fc_5_8021d_t t5_8021d;
+} BWL_POST_PACKED_STRUCT;
+typedef union dot11_tclas_fc dot11_tclas_fc_t;
+
+#define DOT11_TCLAS_FC_MIN_LEN		4	/* Classifier Type 2 has the min size */
+#define DOT11_TCLAS_FC_MAX_LEN		254
+
+/** TCLAS element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_ie {
+	uint8 id;				/* 14, DOT11_MNG_TCLAS_ID */
+	uint8 len;
+	uint8 user_priority;
+	dot11_tclas_fc_t fc;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_ie dot11_tclas_ie_t;
+#define DOT11_TCLAS_IE_LEN		3	/* Fixed length, include id and len */
+
+/** TCLAS processing element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_proc_ie {
+	uint8 id;				/* 44, DOT11_MNG_TCLAS_PROC_ID */
+	uint8 len;
+	uint8 process;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_proc_ie dot11_tclas_proc_ie_t;
+#define DOT11_TCLAS_PROC_IE_LEN		3	/* Fixed length, include id and len */
+
+#define DOT11_TCLAS_PROC_MATCHALL	0	/* All higher-layer classifiers must match */
+#define DOT11_TCLAS_PROC_MATCHONE	1	/* At least one classifier must match */
+#define DOT11_TCLAS_PROC_NONMATCH	2	/* No match to any higher-layer classifier */
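+
+/*
+ * Illustrative sketch (not part of the original header): filling a type 1
+ * IPV4 classifier that matches on destination port and protocol only.  The
+ * mask-bit-to-parameter mapping follows the field order of the structure,
+ * and hton16() is an assumption about the byte-order helper used by the
+ * caller; adjust to the local conventions.
+ *
+ *	dot11_tclas_fc_1_ipv4_t fc;
+ *
+ *	memset(&fc, 0, sizeof(fc));
+ *	fc.type = DOT11_TCLAS_FC_1_IP;
+ *	fc.mask = DOT11_TCLAS_MASK_4 | DOT11_TCLAS_MASK_6;	// dst_port + protocol
+ *	fc.version = 4;
+ *	fc.dst_port = hton16(80);
+ *	fc.protocol = 6;					// TCP
+ */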
+
+
+/* TSPEC element defined in 802.11 std section 8.4.2.32 - Not supported */
+#define DOT11_TSPEC_IE_LEN		57	/* Fixed length */
+
+/** TFS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req_ie {
+	uint8 id;				/* 91, DOT11_MNG_TFS_REQUEST_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 actcode;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req_ie dot11_tfs_req_ie_t;
+#define DOT11_TFS_REQ_IE_LEN		2	/* Fixed length, without id and len */
+
+/** TFS request action codes (bitfield) */
+#define DOT11_TFS_ACTCODE_DELETE	1
+#define DOT11_TFS_ACTCODE_NOTIFY	2
+
+/** TFS request subelement IDs */
+#define DOT11_TFS_REQ_TFS_SE_ID		1
+#define DOT11_TFS_REQ_VENDOR_SE_ID	221
+
+/** TFS subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 data[1];				/* TCLAS element(s) + optional TCLAS proc */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_se dot11_tfs_se_t;
+
+
+/** TFS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp_ie {
+	uint8 id;				/* 92, DOT11_MNG_TFS_RESPONSE_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp_ie dot11_tfs_resp_ie_t;
+#define DOT11_TFS_RESP_IE_LEN		1	/* Fixed length, without id and len */
+
+/** TFS response subelement IDs (same subelements as in the TFS request, but with different IDs) */
+#define DOT11_TFS_RESP_TFS_STATUS_SE_ID		1
+#define DOT11_TFS_RESP_TFS_SE_ID		2
+#define DOT11_TFS_RESP_VENDOR_SE_ID		221
+
+/** TFS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_status_se {
+	uint8 sub_id;				/* 92, DOT11_MNG_TFS_RESPONSE_ID */
+	uint8 len;
+	uint8 resp_st;
+	uint8 data[1];				/* Potential dot11_tfs_se_t included */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_status_se dot11_tfs_status_se_t;
+#define DOT11_TFS_STATUS_SE_LEN			1	/* Fixed length, without id and len */
+
+/* The following definitions should be merged into the FMS_TFS macros below */
+/* TFS Response status codes. Identical to the FMS Element status codes, without N/A */
+#define DOT11_TFS_STATUS_ACCEPT			0
+#define DOT11_TFS_STATUS_DENY_FORMAT		1
+#define DOT11_TFS_STATUS_DENY_RESOURCE		2
+#define DOT11_TFS_STATUS_DENY_POLICY		4
+#define DOT11_TFS_STATUS_DENY_UNSPECIFIED	5
+#define DOT11_TFS_STATUS_ALTPREF_POLICY		7
+#define DOT11_TFS_STATUS_ALTPREF_TCLAS_UNSUPP	14
+
+/* FMS Element Status and TFS Response Status Definition */
+#define DOT11_FMS_TFS_STATUS_ACCEPT		0
+#define DOT11_FMS_TFS_STATUS_DENY_FORMAT	1
+#define DOT11_FMS_TFS_STATUS_DENY_RESOURCE	2
+#define DOT11_FMS_TFS_STATUS_DENY_MULTIPLE_DI	3
+#define DOT11_FMS_TFS_STATUS_DENY_POLICY	4
+#define DOT11_FMS_TFS_STATUS_DENY_UNSPECIFIED	5
+#define DOT11_FMS_TFS_STATUS_ALT_DIFF_DI	6
+#define DOT11_FMS_TFS_STATUS_ALT_POLICY		7
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_DI	8
+#define DOT11_FMS_TFS_STATUS_ALT_MCRATE		9
+#define DOT11_FMS_TFS_STATUS_TERM_POLICY	10
+#define DOT11_FMS_TFS_STATUS_TERM_RESOURCE	11
+#define DOT11_FMS_TFS_STATUS_TERM_HIGHER_PRIO	12
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_MDI	13
+#define DOT11_FMS_TFS_STATUS_ALT_TCLAS_UNSUPP	14
+
+/** TFS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS request (13) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req dot11_tfs_req_t;
+#define DOT11_TFS_REQ_LEN		3	/* Fixed length */
+
+/** TFS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS response (14) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp dot11_tfs_resp_t;
+#define DOT11_TFS_RESP_LEN		3	/* Fixed length */
+
+/** TFS Management Notify frame request header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS notify request (15) */
+	uint8 tfs_id_cnt;			/* TFS IDs count */
+	uint8 tfs_id[1];			/* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_req dot11_tfs_notify_req_t;
+#define DOT11_TFS_NOTIFY_REQ_LEN	3	/* Fixed length */
+
+/** TFS Management Notify frame response header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS notify response (28) */
+	uint8 tfs_id_cnt;			/* TFS IDs count */
+	uint8 tfs_id[1];			/* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_resp dot11_tfs_notify_resp_t;
+#define DOT11_TFS_NOTIFY_RESP_LEN	3	/* Fixed length */
+
+
+/** WNM-Sleep Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: wnm-sleep request (16) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_req dot11_wnm_sleep_req_t;
+#define DOT11_WNM_SLEEP_REQ_LEN		3	/* Fixed length */
+
+/** WNM-Sleep Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: wnm-sleep response (17) */
+	uint8 token;				/* dialog token */
+	uint16 key_len;				/* key data length */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_resp dot11_wnm_sleep_resp_t;
+#define DOT11_WNM_SLEEP_RESP_LEN	5	/* Fixed length */
+
+#define DOT11_WNM_SLEEP_SUBELEM_ID_GTK	0
+#define DOT11_WNM_SLEEP_SUBELEM_ID_IGTK	1
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_gtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_info;
+	uint8 key_length;
+	uint8 rsc[8];
+	uint8 key[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_gtk dot11_wnm_sleep_subelem_gtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_FIXED_LEN	11	/* without sub_id, len, and key */
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_MAX_LEN	43	/* without sub_id and len */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_igtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_id;
+	uint8 pn[6];
+	uint8 key[16];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_igtk dot11_wnm_sleep_subelem_igtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_IGTK_LEN 24	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_ie {
+	uint8 id;				/* 93, DOT11_MNG_WNM_SLEEP_MODE_ID */
+	uint8 len;
+	uint8 act_type;
+	uint8 resp_status;
+	uint16 interval;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_ie dot11_wnm_sleep_ie_t;
+#define DOT11_WNM_SLEEP_IE_LEN		4	/* Fixed length */
+
+#define DOT11_WNM_SLEEP_ACT_TYPE_ENTER	0
+#define DOT11_WNM_SLEEP_ACT_TYPE_EXIT	1
+
+#define DOT11_WNM_SLEEP_RESP_ACCEPT	0
+#define DOT11_WNM_SLEEP_RESP_UPDATE	1
+#define DOT11_WNM_SLEEP_RESP_DENY	2
+#define DOT11_WNM_SLEEP_RESP_DENY_TEMP	3
+#define DOT11_WNM_SLEEP_RESP_DENY_KEY	4
+#define DOT11_WNM_SLEEP_RESP_DENY_INUSE	5
+#define DOT11_WNM_SLEEP_RESP_LAST	6
+
+/** DMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: dms request (23) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req dot11_dms_req_t;
+#define DOT11_DMS_REQ_LEN		3	/* Fixed length */
+
+/** DMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: dms response (24) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp dot11_dms_resp_t;
+#define DOT11_DMS_RESP_LEN		3	/* Fixed length */
+
+/** DMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_ie {
+	uint8 id;				/* 99, DOT11_MNG_DMS_REQUEST_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_ie dot11_dms_req_ie_t;
+#define DOT11_DMS_REQ_IE_LEN		2	/* Fixed length */
+
+/** DMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_ie {
+	uint8 id;				/* 100, DOT11_MNG_DMS_RESPONSE_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_ie dot11_dms_resp_ie_t;
+#define DOT11_DMS_RESP_IE_LEN		2	/* Fixed length */
+
+/** DMS request descriptor */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_desc {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_desc dot11_dms_req_desc_t;
+#define DOT11_DMS_REQ_DESC_LEN		3	/* Fixed length */
+
+#define DOT11_DMS_REQ_TYPE_ADD		0
+#define DOT11_DMS_REQ_TYPE_REMOVE	1
+#define DOT11_DMS_REQ_TYPE_CHANGE	2
+
+/** DMS response status */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_st {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint16 lsc;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_st dot11_dms_resp_st_t;
+#define DOT11_DMS_RESP_STATUS_LEN	5	/* Fixed length */
+
+#define DOT11_DMS_RESP_TYPE_ACCEPT	0
+#define DOT11_DMS_RESP_TYPE_DENY	1
+#define DOT11_DMS_RESP_TYPE_TERM	2
+
+#define DOT11_DMS_RESP_LSC_UNSUPPORTED	0xFFFF
+
+/** FMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: fms request (9) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req dot11_fms_req_t;
+#define DOT11_FMS_REQ_LEN		3	/* Fixed length */
+
+/** FMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: fms response (10) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp dot11_fms_resp_t;
+#define DOT11_FMS_RESP_LEN		3	/* Fixed length */
+
+/** FMS Descriptor element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_desc {
+	uint8 id;
+	uint8 len;
+	uint8 num_fms_cnt;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_desc dot11_fms_desc_t;
+#define DOT11_FMS_DESC_LEN		1	/* Fixed length */
+
+#define DOT11_FMS_CNTR_MAX		0x8
+#define DOT11_FMS_CNTR_ID_MASK		0x7
+#define DOT11_FMS_CNTR_ID_SHIFT		0x0
+#define DOT11_FMS_CNTR_COUNT_MASK	0xf1
+#define DOT11_FMS_CNTR_SHIFT		0x3
+
+/** FMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token;			/* token used to identify fms stream set */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req_ie dot11_fms_req_ie_t;
+#define DOT11_FMS_REQ_IE_FIX_LEN		1	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rate_id_field {
+	uint8 mask;
+	uint8 mcs_idx;
+	uint16 rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rate_id_field dot11_rate_id_field_t;
+#define DOT11_RATE_ID_FIELD_MCS_SEL_MASK	0x7
+#define DOT11_RATE_ID_FIELD_MCS_SEL_OFFSET	0
+#define DOT11_RATE_ID_FIELD_RATETYPE_MASK	0x18
+#define DOT11_RATE_ID_FIELD_RATETYPE_OFFSET	3
+#define DOT11_RATE_ID_FIELD_LEN		sizeof(dot11_rate_id_field_t)
+
+/** FMS request subelements */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 interval;
+	uint8 max_interval;
+	dot11_rate_id_field_t rate;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_se dot11_fms_se_t;
+#define DOT11_FMS_REQ_SE_LEN		6	/* Fixed length */
+
+#define DOT11_FMS_REQ_SE_ID_FMS		1	/* FMS subelement */
+#define DOT11_FMS_REQ_SE_ID_VS		221	/* Vendor Specific subelement */
+
+/** FMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp_ie dot11_fms_resp_ie_t;
+#define DOT11_FMS_RESP_IE_FIX_LEN		1	/* Fixed length */
+
+/* FMS status subelements */
+#define DOT11_FMS_STATUS_SE_ID_FMS	1	/* FMS Status */
+#define DOT11_FMS_STATUS_SE_ID_TCLAS	2	/* TCLAS Status */
+#define DOT11_FMS_STATUS_SE_ID_VS	221	/* Vendor Specific subelement */
+
+/** FMS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 status;
+	uint8 interval;
+	uint8 max_interval;
+	uint8 fmsid;
+	uint8 counter;
+	dot11_rate_id_field_t rate;
+	uint8 mcast_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_status_se dot11_fms_status_se_t;
+#define DOT11_FMS_STATUS_SE_LEN		15	/* Fixed length */
+
+/** TCLAS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 fmsid;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_status_se dot11_tclas_status_se_t;
+#define DOT11_TCLAS_STATUS_SE_LEN		1	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: addba req */
+	uint8 token;				/* identifier */
+	uint16 addba_param_set;		/* parameter set */
+	uint16 timeout;				/* timeout in seconds */
+	uint16 start_seqnum;		/* starting sequence number */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN		9	/* length of addba req frame */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: addba resp */
+	uint8 token;				/* identifier */
+	uint16 status;				/* status of add request */
+	uint16 addba_param_set;			/* negotiated parameter set */
+	uint16 timeout;				/* negotiated timeout in seconds */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN		9	/* length of addba resp frame */
+
+/* DELBA action parameters */
+#define DOT11_DELBA_PARAM_INIT_MASK	0x0800	/* initiator mask */
+#define DOT11_DELBA_PARAM_INIT_SHIFT	11	/* initiator shift */
+#define DOT11_DELBA_PARAM_TID_MASK	0xf000	/* tid mask */
+#define DOT11_DELBA_PARAM_TID_SHIFT	12	/* tid shift */
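+
+/*
+ * Illustrative sketch: extracting the TID and initiator flag from a received
+ * DELBA parameter set (assumes 'params' has already been converted to host
+ * byte order, e.g. with ltoh16_ua()).
+ *
+ *	uint8 tid  = (params & DOT11_DELBA_PARAM_TID_MASK)  >> DOT11_DELBA_PARAM_TID_SHIFT;
+ *	uint8 init = (params & DOT11_DELBA_PARAM_INIT_MASK) >> DOT11_DELBA_PARAM_INIT_SHIFT;
+ */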
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: delba */
+	uint16 delba_param_set;			/* parameter set */
+	uint16 reason;				/* reason for delba */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN			6	/* length of delba frame */
+
+/* SA Query action field value */
+#define SA_QUERY_REQUEST		0
+#define SA_QUERY_RESPONSE		1
+
+/* ************* 802.11r related definitions. ************* */
+
+/** Over-the-DS Fast Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_req {
+	uint8 category;			/* category of action frame (6) */
+	uint8 action;			/* action: ft req */
+	uint8 sta_addr[ETHER_ADDR_LEN];
+	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_req dot11_ft_req_t;
+#define DOT11_FT_REQ_FIXED_LEN 14
+
+/** Over-the-DS Fast Transition Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_res {
+	uint8 category;			/* category of action frame (6) */
+	uint8 action;			/* action: ft resp */
+	uint8 sta_addr[ETHER_ADDR_LEN];
+	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+	uint16 status;			/* status code */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_res dot11_ft_res_t;
+#define DOT11_FT_RES_FIXED_LEN 16
+
+/** RDE RIC Data Element. */
+BWL_PRE_PACKED_STRUCT struct dot11_rde_ie {
+	uint8 id;			/* 11r, DOT11_MNG_RDE_ID */
+	uint8 length;
+	uint8 rde_id;			/* RDE identifier. */
+	uint8 rd_count;			/* Resource Descriptor Count. */
+	uint16 status;			/* Status Code. */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rde_ie dot11_rde_ie_t;
+
+/* 11r - Size of the RDE (RIC Data Element) IE, including TLV header. */
+#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t)
+
+
+/* ************* 802.11k related definitions. ************* */
+
+/* Radio measurements enabled capability ie */
+#define DOT11_RRM_CAP_LEN		5	/* length of rrm cap bitmap */
+#define RCPI_IE_LEN 1
+#define RSNI_IE_LEN 1
+BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
+	uint8 cap[DOT11_RRM_CAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;
+
+/* Bitmap definitions for cap ie */
+#define DOT11_RRM_CAP_LINK		0
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT	1
+#define DOT11_RRM_CAP_PARALLEL		2
+#define DOT11_RRM_CAP_REPEATED		3
+#define DOT11_RRM_CAP_BCN_PASSIVE	4
+#define DOT11_RRM_CAP_BCN_ACTIVE	5
+#define DOT11_RRM_CAP_BCN_TABLE		6
+#define DOT11_RRM_CAP_BCN_REP_COND	7
+#define DOT11_RRM_CAP_FM		8
+#define DOT11_RRM_CAP_CLM		9
+#define DOT11_RRM_CAP_NHM		10
+#define DOT11_RRM_CAP_SM		11
+#define DOT11_RRM_CAP_LCIM		12
+#define DOT11_RRM_CAP_LCIA		13
+#define DOT11_RRM_CAP_TSCM		14
+#define DOT11_RRM_CAP_TTSCM		15
+#define DOT11_RRM_CAP_AP_CHANREP	16
+#define DOT11_RRM_CAP_RMMIB		17
+/* bit18-bit26, not used for RRM_IOVAR */
+#define DOT11_RRM_CAP_MPTI		27
+#define DOT11_RRM_CAP_NBRTSFO		28
+#define DOT11_RRM_CAP_RCPI		29
+#define DOT11_RRM_CAP_RSNI		30
+#define DOT11_RRM_CAP_BSSAAD		31
+#define DOT11_RRM_CAP_BSSAAC		32
+#define DOT11_RRM_CAP_AI		33
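+
+/*
+ * Illustrative sketch: the values above are bit positions into the 5-octet
+ * cap[] array of dot11_rrm_cap_ie_t, least-significant bit first within each
+ * octet (an assumption consistent with the usual IE bitfield layout).
+ *
+ *	static bool dot11_rrm_cap_isset(const dot11_rrm_cap_ie_t *ie, uint8 bit)
+ *	{
+ *		return (ie->cap[bit / 8] >> (bit % 8)) & 1;
+ *	}
+ */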
+
+/* Operating Class (formerly "Regulatory Class") definitions */
+#define DOT11_OP_CLASS_NONE			255
+
+BWL_PRE_PACKED_STRUCT struct do11_ap_chrep {
+	uint8 id;
+	uint8 len;
+	uint8 reg;
+	uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct do11_ap_chrep dot11_ap_chrep_t;
+
+/* Radio Measurements action ids */
+#define DOT11_RM_ACTION_RM_REQ		0	/* Radio measurement request */
+#define DOT11_RM_ACTION_RM_REP		1	/* Radio measurement report */
+#define DOT11_RM_ACTION_LM_REQ		2	/* Link measurement request */
+#define DOT11_RM_ACTION_LM_REP		3	/* Link measurement report */
+#define DOT11_RM_ACTION_NR_REQ		4	/* Neighbor report request */
+#define DOT11_RM_ACTION_NR_REP		5	/* Neighbor report response */
+
+/** Generic radio measurement action frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_rm_action {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_action dot11_rm_action_t;
+#define DOT11_RM_ACTION_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint16 reps;				/* no. of repetitions */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq dot11_rmreq_t;
+#define DOT11_RMREQ_LEN	5
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_ie {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_ie dot11_rm_ie_t;
+#define DOT11_RM_IE_LEN	5
+
+/* Definitions for "mode" bits in rm req */
+#define DOT11_RMREQ_MODE_PARALLEL	1
+#define DOT11_RMREQ_MODE_ENABLE		2
+#define DOT11_RMREQ_MODE_REQUEST	4
+#define DOT11_RMREQ_MODE_REPORT		8
+#define DOT11_RMREQ_MODE_DURMAND	0x10	/* Duration Mandatory */
+
+/* Definitions for "mode" bits in rm rep */
+#define DOT11_RMREP_MODE_LATE		1
+#define DOT11_RMREP_MODE_INCAPABLE	2
+#define DOT11_RMREP_MODE_REFUSED	4
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+	uint8 bcn_mode;
+	struct ether_addr	bssid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t;
+#define DOT11_RMREQ_BCN_LEN	18
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 frame_info;
+	uint8 rcpi;
+	uint8 rsni;
+	struct ether_addr	bssid;
+	uint8 antenna_id;
+	uint32 parent_tsf;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t;
+#define DOT11_RMREP_BCN_LEN	26
+
+/* Beacon request measurement mode */
+#define DOT11_RMREQ_BCN_PASSIVE	0
+#define DOT11_RMREQ_BCN_ACTIVE	1
+#define DOT11_RMREQ_BCN_TABLE	2
+
+/* Sub-element IDs for Beacon Request */
+#define DOT11_RMREQ_BCN_SSID_ID 0
+#define DOT11_RMREQ_BCN_REPINFO_ID  1
+#define DOT11_RMREQ_BCN_REPDET_ID   2
+#define DOT11_RMREQ_BCN_REQUEST_ID  10
+#define DOT11_RMREQ_BCN_APCHREP_ID  DOT11_MNG_AP_CHREP_ID
+
+/* Reporting Detail element definition */
+#define DOT11_RMREQ_BCN_REPDET_FIXED	0	/* Fixed length fields only */
+#define DOT11_RMREQ_BCN_REPDET_REQUEST	1	/* + requested information elems */
+#define DOT11_RMREQ_BCN_REPDET_ALL	2	/* All fields */
+
+/* Sub-element IDs for Beacon Report */
+#define DOT11_RMREP_BCN_FRM_BODY	1
+
+/* Sub-element IDs for Frame Report */
+#define DOT11_RMREP_FRAME_COUNT_REPORT 1
+
+/** Channel load request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_chanload dot11_rmreq_chanload_t;
+#define DOT11_RMREQ_CHANLOAD_LEN	11
+
+/** Channel load report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_chanload {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 channel_load;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_chanload dot11_rmrep_chanload_t;
+#define DOT11_RMREP_CHANLOAD_LEN	13
+
+/** Noise histogram request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_noise {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_noise dot11_rmreq_noise_t;
+#define DOT11_RMREQ_NOISE_LEN 11
+
+/** Noise histogram report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_noise {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 antid;
+	uint8 anpi;
+	uint8 ipi0_dens;
+	uint8 ipi1_dens;
+	uint8 ipi2_dens;
+	uint8 ipi3_dens;
+	uint8 ipi4_dens;
+	uint8 ipi5_dens;
+	uint8 ipi6_dens;
+	uint8 ipi7_dens;
+	uint8 ipi8_dens;
+	uint8 ipi9_dens;
+	uint8 ipi10_dens;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_noise dot11_rmrep_noise_t;
+#define DOT11_RMREP_NOISE_LEN 25
+
+/** Frame request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_frame {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+	uint8 req_type;
+	struct ether_addr	ta;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_frame dot11_rmreq_frame_t;
+#define DOT11_RMREQ_FRAME_LEN 18
+
+/** Frame report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frame {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_frame dot11_rmrep_frame_t;
+#define DOT11_RMREP_FRAME_LEN 12
+
+/** Frame report entry */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frmentry {
+	struct ether_addr	ta;
+	struct ether_addr	bssid;
+	uint8 phy_type;
+	uint8 avg_rcpi;
+	uint8 last_rsni;
+	uint8 last_rcpi;
+	uint8 ant_id;
+	uint16 frame_cnt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_frmentry dot11_rmrep_frmentry_t;
+#define DOT11_RMREP_FRMENTRY_LEN 19
+
+/** STA statistics request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_stat {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	struct ether_addr	peer;
+	uint16 interval;
+	uint16 duration;
+	uint8 group_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_stat dot11_rmreq_stat_t;
+#define DOT11_RMREQ_STAT_LEN 16
+
+/** STA statistics report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_stat {
+	uint16 duration;
+	uint8 group_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_stat dot11_rmrep_stat_t;
+
+/** Transmit stream/category measurement request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint16 interval;
+	uint16 duration;
+	struct ether_addr	peer;
+	uint8 traffic_id;
+	uint8 bin0_range;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_tx_stream dot11_rmreq_tx_stream_t;
+
+/** Transmit stream/category measurement report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream {
+	uint32 starttime[2];
+	uint16 duration;
+	struct ether_addr	peer;
+	uint8 traffic_id;
+	uint8 reason;
+	uint32 txmsdu_cnt;
+	uint32 msdu_discarded_cnt;
+	uint32 msdufailed_cnt;
+	uint32 msduretry_cnt;
+	uint32 cfpolls_lost_cnt;
+	uint32 avrqueue_delay;
+	uint32 avrtx_delay;
+	uint8 bin0_range;
+	uint32 bin0;
+	uint32 bin1;
+	uint32 bin2;
+	uint32 bin3;
+	uint32 bin4;
+	uint32 bin5;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t;
+
+/** Measurement pause request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint16 pause_time;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t;
+
+
+/* Neighbor Report subelements ID (11k & 11v) */
+#define DOT11_NGBR_TSF_INFO_SE_ID	1
+#define DOT11_NGBR_CCS_SE_ID		2
+#define DOT11_NGBR_BSSTRANS_PREF_SE_ID	3
+#define DOT11_NGBR_BSS_TERM_DUR_SE_ID	4
+#define DOT11_NGBR_BEARING_SE_ID	5
+
+/** Neighbor Report, BSS Transition Candidate Preference subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 preference;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ngbr_bsstrans_pref_se dot11_ngbr_bsstrans_pref_se_t;
+#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN	1
+
+/** Neighbor Report, BSS Termination Duration subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bss_term_dur_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 tsf[8];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ngbr_bss_term_dur_se dot11_ngbr_bss_term_dur_se_t;
+#define DOT11_NGBR_BSS_TERM_DUR_SE_LEN	10
+
+/* Neighbor Report BSSID Information Field */
+#define DOT11_NGBR_BI_REACHABILTY_UNKN	0x0002
+#define DOT11_NGBR_BI_REACHABILTY	0x0003
+#define DOT11_NGBR_BI_SEC		0x0004
+#define DOT11_NGBR_BI_KEY_SCOPE		0x0008
+#define DOT11_NGBR_BI_CAP		0x03f0
+#define DOT11_NGBR_BI_CAP_SPEC_MGMT	0x0010
+#define DOT11_NGBR_BI_CAP_QOS		0x0020
+#define DOT11_NGBR_BI_CAP_APSD		0x0040
+#define DOT11_NGBR_BI_CAP_RDIO_MSMT	0x0080
+#define DOT11_NGBR_BI_CAP_DEL_BA	0x0100
+#define DOT11_NGBR_BI_CAP_IMM_BA	0x0200
+#define DOT11_NGBR_BI_MOBILITY		0x0400
+#define DOT11_NGBR_BI_HT		0x0800
+
+/** Neighbor Report element (11k & 11v) */
+BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie {
+	uint8 id;
+	uint8 len;
+	struct ether_addr bssid;
+	uint32 bssid_info;
+	uint8 reg;		/* Operating class */
+	uint8 channel;
+	uint8 phytype;
+	uint8 data[1]; 		/* Variable size subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t;
+#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN	13
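+
+/*
+ * Illustrative sketch: walking the variable-length subelements that follow
+ * the fixed part of a Neighbor Report element.  'ie' is assumed to be a
+ * dot11_neighbor_rep_ie_t whose 'len' has already been validated to be at
+ * least DOT11_NEIGHBOR_REP_IE_FIXED_LEN.
+ *
+ *	const uint8 *p = ie->data;
+ *	const uint8 *end = (const uint8 *)&ie->len + 1 + ie->len;
+ *
+ *	while (p + 2 <= end && p + 2 + p[1] <= end) {
+ *		uint8 se_id = p[0], se_len = p[1];
+ *		// se_id is e.g. DOT11_NGBR_BSSTRANS_PREF_SE_ID
+ *		p += 2 + se_len;
+ *	}
+ */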
+
+
+/* MLME Enumerations */
+#define DOT11_BSSTYPE_INFRASTRUCTURE		0	/* d11 infrastructure */
+#define DOT11_BSSTYPE_INDEPENDENT		1	/* d11 independent */
+#define DOT11_BSSTYPE_ANY			2	/* d11 any BSS type */
+#define DOT11_SCANTYPE_ACTIVE			0	/* d11 scan active */
+#define DOT11_SCANTYPE_PASSIVE			1	/* d11 scan passive */
+
+/** Link Measurement */
+BWL_PRE_PACKED_STRUCT struct dot11_lmreq {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint8 txpwr;				/* Transmit Power Used */
+	uint8 maxtxpwr;				/* Max Transmit Power */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmreq dot11_lmreq_t;
+#define DOT11_LMREQ_LEN	5
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmrep {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	dot11_tpc_rep_t tpc;			/* TPC element */
+	uint8 rxant;				/* Receive Antenna ID */
+	uint8 txant;				/* Transmit Antenna ID */
+	uint8 rcpi;				/* RCPI */
+	uint8 rsni;				/* RSNI */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmrep dot11_lmrep_t;
+#define DOT11_LMREP_LEN	11
+
+/* 802.11 BRCM "Compromise" Pre N constants */
+#define PREN_PREAMBLE		24	/* green field preamble time */
+#define PREN_MM_EXT		12	/* extra mixed mode preamble time */
+#define PREN_PREAMBLE_EXT	4	/* extra preamble (multiply by unique_streams-1) */
+
+/* 802.11N PHY constants */
+#define RIFS_11N_TIME		2	/* NPHY RIFS time */
+
+/* 802.11 HT PLCP format 802.11n-2009, sec 20.3.9.4.3
+ * HT-SIG is composed of two 24 bit parts, HT-SIG1 and HT-SIG2
+ */
+/* HT-SIG1 */
+#define HT_SIG1_MCS_MASK        0x00007F
+#define HT_SIG1_CBW             0x000080
+#define HT_SIG1_HT_LENGTH       0xFFFF00
+
+/* HT-SIG2 */
+#define HT_SIG2_SMOOTHING       0x000001
+#define HT_SIG2_NOT_SOUNDING    0x000002
+#define HT_SIG2_RESERVED        0x000004
+#define HT_SIG2_AGGREGATION     0x000008
+#define HT_SIG2_STBC_MASK       0x000030
+#define HT_SIG2_STBC_SHIFT      4
+#define HT_SIG2_FEC_CODING      0x000040
+#define HT_SIG2_SHORT_GI        0x000080
+#define HT_SIG2_ESS_MASK        0x000300
+#define HT_SIG2_ESS_SHIFT       8
+#define HT_SIG2_CRC             0x03FC00
+#define HT_SIG2_TAIL            0x1C0000
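+
+/*
+ * Illustrative sketch: unpacking the 24-bit HT-SIG1 word.  The 8-bit shift
+ * for the length field follows directly from the 0xFFFF00 mask above.
+ *
+ *	uint8  mcs    = ht_sig1 & HT_SIG1_MCS_MASK;
+ *	bool   cbw40  = (ht_sig1 & HT_SIG1_CBW) != 0;
+ *	uint16 ht_len = (ht_sig1 & HT_SIG1_HT_LENGTH) >> 8;
+ */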
+
+/* HT Timing-related parameters (802.11-2012, sec 20.3.6) */
+#define HT_T_LEG_PREAMBLE      16
+#define HT_T_L_SIG              4
+#define HT_T_SIG                8
+#define HT_T_LTF1               4
+#define HT_T_GF_LTF1            8
+#define HT_T_LTFs               4
+#define HT_T_STF                4
+#define HT_T_GF_STF             8
+#define HT_T_SYML               4
+
+#define HT_N_SERVICE           16       /* bits in SERVICE field */
+#define HT_N_TAIL               6       /* tail bits per BCC encoder */
+
+/* 802.11 A PHY constants */
+#define APHY_SLOT_TIME          9       /* APHY slot time */
+#define APHY_SIFS_TIME          16      /* APHY SIFS time */
+#define APHY_DIFS_TIME          (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME))  /* APHY DIFS time */
+#define APHY_PREAMBLE_TIME      16      /* APHY preamble time */
+#define APHY_SIGNAL_TIME        4       /* APHY signal time */
+#define APHY_SYMBOL_TIME        4       /* APHY symbol time */
+#define APHY_SERVICE_NBITS      16      /* APHY service nbits */
+#define APHY_TAIL_NBITS         6       /* APHY tail nbits */
+#define APHY_CWMIN              15      /* APHY cwmin */
+
+/* 802.11 B PHY constants */
+#define BPHY_SLOT_TIME          20      /* BPHY slot time */
+#define BPHY_SIFS_TIME          10      /* BPHY SIFS time */
+#define BPHY_DIFS_TIME          50      /* BPHY DIFS time */
+#define BPHY_PLCP_TIME          192     /* BPHY PLCP time */
+#define BPHY_PLCP_SHORT_TIME    96      /* BPHY PLCP short time */
+#define BPHY_CWMIN              31      /* BPHY cwmin */
+
+/* 802.11 G constants */
+#define DOT11_OFDM_SIGNAL_EXTENSION	6	/* d11 OFDM signal extension */
+
+#define PHY_CWMAX		1023	/* PHY cwmax */
+
+#define	DOT11_MAXNUMFRAGS	16	/* max # fragments per MSDU */
+
+/* 802.11 VHT constants */
+
+typedef int vht_group_id_t;
+
+/* for VHT-A1 */
+/* SIG-A1 reserved bits */
+#define VHT_SIGA1_CONST_MASK            0x800004
+
+#define VHT_SIGA1_BW_MASK               0x000003
+#define VHT_SIGA1_20MHZ_VAL             0x000000
+#define VHT_SIGA1_40MHZ_VAL             0x000001
+#define VHT_SIGA1_80MHZ_VAL             0x000002
+#define VHT_SIGA1_160MHZ_VAL            0x000003
+
+#define VHT_SIGA1_STBC                  0x000008
+
+#define VHT_SIGA1_GID_MASK              0x0003f0
+#define VHT_SIGA1_GID_SHIFT             4
+#define VHT_SIGA1_GID_TO_AP             0x00
+#define VHT_SIGA1_GID_NOT_TO_AP         0x3f
+#define VHT_SIGA1_GID_MAX_GID           0x3f
+
+#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00
+#define VHT_SIGA1_NSTS_SHIFT            10
+
+#define VHT_SIGA1_PARTIAL_AID_MASK      0x3fe000
+#define VHT_SIGA1_PARTIAL_AID_SHIFT     13
+
+#define VHT_SIGA1_TXOP_PS_NOT_ALLOWED   0x400000
+
+/* for VHT-A2 */
+#define VHT_SIGA2_GI_NONE               0x000000
+#define VHT_SIGA2_GI_SHORT              0x000001
+#define VHT_SIGA2_GI_W_MOD10            0x000002
+#define VHT_SIGA2_CODING_LDPC           0x000004
+#define VHT_SIGA2_LDPC_EXTRA_OFDM_SYM   0x000008
+#define VHT_SIGA2_BEAMFORM_ENABLE       0x000100
+#define VHT_SIGA2_MCS_SHIFT             4
+
+#define VHT_SIGA2_B9_RESERVED           0x000200
+#define VHT_SIGA2_TAIL_MASK             0xfc0000
+#define VHT_SIGA2_TAIL_VALUE            0x000000
+
+/* VHT Timing-related parameters (802.11ac D4.0, sec 22.3.6) */
+#define VHT_T_LEG_PREAMBLE      16
+#define VHT_T_L_SIG              4
+#define VHT_T_SIG_A              8
+#define VHT_T_LTF                4
+#define VHT_T_STF                4
+#define VHT_T_SIG_B              4
+#define VHT_T_SYML               4
+
+#define VHT_N_SERVICE           16	/* bits in SERVICE field */
+#define VHT_N_TAIL               6	/* tail bits per BCC encoder */
+
+
+/** dot11Counters Table - 802.11 spec., Annex D */
+typedef struct d11cnt {
+	uint32		txfrag;		/* dot11TransmittedFragmentCount */
+	uint32		txmulti;	/* dot11MulticastTransmittedFrameCount */
+	uint32		txfail;		/* dot11FailedCount */
+	uint32		txretry;	/* dot11RetryCount */
+	uint32		txretrie;	/* dot11MultipleRetryCount */
+	uint32		rxdup;		/* dot11FrameduplicateCount */
+	uint32		txrts;		/* dot11RTSSuccessCount */
+	uint32		txnocts;	/* dot11RTSFailureCount */
+	uint32		txnoack;	/* dot11ACKFailureCount */
+	uint32		rxfrag;		/* dot11ReceivedFragmentCount */
+	uint32		rxmulti;	/* dot11MulticastReceivedFrameCount */
+	uint32		rxcrc;		/* dot11FCSErrorCount */
+	uint32		txfrmsnt;	/* dot11TransmittedFrameCount */
+	uint32		rxundec;	/* dot11WEPUndecryptableCount */
+} d11cnt_t;
+
+#define BRCM_PROP_OUI		"\x00\x90\x4C"
+
+
+/* Action frame type for RWL */
+#define RWL_WIFI_DEFAULT		0
+#define RWL_WIFI_FIND_MY_PEER		9 /* Used while finding server */
+#define RWL_WIFI_FOUND_PEER		10 /* Server response to the client  */
+#define RWL_ACTION_WIFI_FRAG_TYPE	85 /* Fragment indicator for receiver */
+
+#define PROXD_AF_TYPE			11 /* Wifi proximity action frame type */
+#define BRCM_RELMACST_AF_TYPE	        12 /* RMC action frame type */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/*
+ * These BRCM_PROP_OUI types are intended for use in events to embed additional
+ * data and would not be expected to appear on the air; using an IE format
+ * allows the extra event data to be carried and parsed with the normal IE
+ * machinery, which keeps the parsing flexible.
+ */
+#define BRCM_EVT_WL_BSS_INFO	64
+
+/**
+ * Following is the generic structure for brcm_prop_ie (uses BRCM_PROP_OUI).
+ * DPT uses this format with type set to DPT_IE_TYPE
+ */
+BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s {
+	uint8 id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8 len;		/* IE length */
+	uint8 oui[3];		/* Proprietary OUI, BRCM_PROP_OUI */
+	uint8 type;		/* type of this IE */
+	uint16 cap;		/* DPT capabilities */
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_prop_ie_s brcm_prop_ie_t;
+
+#define BRCM_PROP_IE_LEN	6	/* len of fixed part of brcm_prop ie */
+
+#define DPT_IE_TYPE             2
+
+
+#define BRCM_SYSCAP_IE_TYPE	3
+#define WET_TUNNEL_IE_TYPE	3
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* brcm syscap_ie cap */
+#define BRCM_SYSCAP_WET_TUNNEL	0x0100	/* Device with WET_TUNNEL support */
+
+#define BRCM_OUI		"\x00\x10\x18"	/* Broadcom OUI */
+
+/** BRCM info element */
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	ver;		/* type/ver of this IE */
+	uint8	assoc;		/* # of assoc STAs */
+	uint8	flags;		/* misc flags */
+	uint8	flags1;		/* misc flags */
+	uint16	amsdu_mtu_pref;	/* preferred A-MSDU MTU */
+} BWL_POST_PACKED_STRUCT;
+typedef	struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN		11	/* BRCM IE length */
+#define BRCM_IE_VER		2	/* BRCM IE version */
+#define BRCM_IE_LEGACY_AES_VER	1	/* BRCM IE legacy AES version */
+
+/* brcm_ie flags */
+#define	BRF_ABCAP		0x1	/* afterburner is obsolete,  defined for backward compat */
+#define	BRF_ABRQRD		0x2	/* afterburner is obsolete,  defined for backward compat */
+#define	BRF_LZWDS		0x4	/* lazy wds enabled */
+#define	BRF_BLOCKACK		0x8	/* BlockACK capable */
+#define BRF_ABCOUNTER_MASK	0xf0	/* afterburner is obsolete,  defined for backward compat */
+#define BRF_PROP_11N_MCS	0x10	/* re-use afterburner bit */
+
+/**
+ * Support for Broadcom proprietary HT MCS rates. Re-uses afterburner bits since afterburner is not
+ * used anymore. Checks for BRF_ABCAP to stay compliant with 'old' images in the field.
+ */
+#define GET_BRF_PROP_11N_MCS(brcm_ie) \
+	(!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS))
+
+/* brcm_ie flags1 */
+#define	BRF1_AMSDU		0x1	/* A-MSDU capable */
+#define BRF1_WMEPS		0x4	/* AP is capable of handling WME + PS w/o APSD */
+#define BRF1_PSOFIX		0x8	/* AP has fixed PS mode out-of-order packets */
+#define	BRF1_RX_LARGE_AGG	0x10	/* device can rx large aggregates */
+#define BRF1_RFAWARE_DCS	0x20    /* RFAWARE dynamic channel selection (DCS) */
+#define BRF1_SOFTAP		0x40    /* Configure as Broadcom SOFTAP */
+#define BRF1_DWDS		0x80    /* DWDS capable */
+
+/** Vendor IE structure */
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+	uchar id;
+	uchar len;
+	uchar oui [3];
+	uchar data [1]; 	/* Variable size data */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN		2	/* id + len field */
+#define VNDR_IE_MIN_LEN		3	/* size of the oui field */
+#define VNDR_IE_FIXED_LEN	(VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN)
+
+#define VNDR_IE_MAX_LEN		255	/* vendor IE max length, without ID and len */
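+
+/*
+ * Illustrative sketch: checking whether a vendor IE carries the Broadcom OUI.
+ * The literal 3 is the OUI length; memcmp() is assumed to be available in the
+ * including translation unit.
+ *
+ *	static bool vndr_ie_is_brcm(const vndr_ie_t *ie)
+ *	{
+ *		return ie->len >= VNDR_IE_MIN_LEN && memcmp(ie->oui, BRCM_OUI, 3) == 0;
+ *	}
+ */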
+
+/** BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */
+BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie {
+	uchar id;
+	uchar len;
+	uchar oui[3];
+	uint8	type;           /* type indicates what follows */
+	struct ether_addr ea;   /* Device Primary MAC Address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct member_of_brcm_prop_ie member_of_brcm_prop_ie_t;
+
+#define MEMBER_OF_BRCM_PROP_IE_LEN		10	/* IE max length */
+#define MEMBER_OF_BRCM_PROP_IE_HDRLEN	        (sizeof(member_of_brcm_prop_ie_t))
+#define MEMBER_OF_BRCM_PROP_IE_TYPE		54
+
+/** BRCM Reliable Multicast IE */
+BWL_PRE_PACKED_STRUCT struct relmcast_brcm_prop_ie {
+	uint8 id;
+	uint8 len;
+	uint8 oui[3];
+	uint8 type;           /* type indicates what follows */
+	struct ether_addr ea;   /* The ack sender's MAC address */
+	struct ether_addr mcast_ea;  /* The multicast MAC address */
+	uint8 updtmo; /* interval (seconds) for the client to send a null packet reporting its RSSI */
+} BWL_POST_PACKED_STRUCT;
+typedef struct relmcast_brcm_prop_ie relmcast_brcm_prop_ie_t;
+
+/* IE length: sizeof(relmcast_brcm_prop_ie_t) minus the id and len fields */
+#define RELMCAST_BRCM_PROP_IE_LEN	(sizeof(relmcast_brcm_prop_ie_t)-(2*sizeof(uint8)))
+
+#define RELMCAST_BRCM_PROP_IE_TYPE	55
+
+/* ************* HT definitions. ************* */
+#define MCSSET_LEN	16	/* 16 bytes, giving a 128-bit bitmap of MCS indexes */
+#define MAX_MCS_NUM	(128)	/* max mcs number = 128 */
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+	uint16	cap;
+	uint8	params;
+	uint8	supp_mcs[MCSSET_LEN];
+	uint16	ext_htcap;
+	uint32	txbf_cap;
+	uint8	as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ht_cap_ie {
+	uint8	id;
+	uint8	len;
+	ht_cap_ie_t ht_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ht_cap_ie dot11_ht_cap_ie_t;
+
+/* CAP IE: the HT 1.0 spec simply stole an 802.11 IE; we use our prop. IE until this is resolved */
+/* the capability IE is primarily used to convey this nodes abilities */
+BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	type;           /* type indicates what follows */
+	ht_cap_ie_t cap_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
+
+#define HT_PROP_IE_OVERHEAD	4	/* overhead bytes for prop oui ie */
+#define HT_CAP_IE_LEN		26	/* HT capability len (based on .11n d2.0) */
+#define HT_CAP_IE_TYPE		51
+
+#define HT_CAP_LDPC_CODING	0x0001	/* Support for rx of LDPC coded pkts */
+#define HT_CAP_40MHZ		0x0002  /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define HT_CAP_MIMO_PS_MASK	0x000C  /* Mimo PS mask */
+#define HT_CAP_MIMO_PS_SHIFT	0x0002	/* Mimo PS shift */
+#define HT_CAP_MIMO_PS_OFF	0x0003	/* Mimo PS, no restriction */
+#define HT_CAP_MIMO_PS_RTS	0x0001	/* Mimo PS, send RTS/CTS around MIMO frames */
+#define HT_CAP_MIMO_PS_ON	0x0000	/* Mimo PS, MIMO disallowed */
+#define HT_CAP_GF		0x0010	/* Greenfield preamble support */
+#define HT_CAP_SHORT_GI_20	0x0020	/* 20MHZ short guard interval support */
+#define HT_CAP_SHORT_GI_40	0x0040	/* 40Mhz short guard interval support */
+#define HT_CAP_TX_STBC		0x0080	/* Tx STBC support */
+#define HT_CAP_RX_STBC_MASK	0x0300	/* Rx STBC mask */
+#define HT_CAP_RX_STBC_SHIFT	8	/* Rx STBC shift */
+#define HT_CAP_DELAYED_BA	0x0400	/* delayed BA support */
+#define HT_CAP_MAX_AMSDU	0x0800	/* Max AMSDU size in bytes, 0=3839, 1=7935 */
+
+#define HT_CAP_DSSS_CCK	0x1000	/* DSSS/CCK supported by the BSS */
+#define HT_CAP_PSMP		0x2000	/* Power Save Multi Poll support */
+#define HT_CAP_40MHZ_INTOLERANT 0x4000	/* 40MHz Intolerant */
+#define HT_CAP_LSIG_TXOP	0x8000	/* L-SIG TXOP protection support */
+
+#define HT_CAP_RX_STBC_NO		0x0	/* no rx STBC support */
+#define HT_CAP_RX_STBC_ONE_STREAM	0x1	/* rx STBC support of 1 spatial stream */
+#define HT_CAP_RX_STBC_TWO_STREAM	0x2	/* rx STBC support of 1-2 spatial streams */
+#define HT_CAP_RX_STBC_THREE_STREAM	0x3	/* rx STBC support of 1-3 spatial streams */
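+
+/*
+ * Illustrative sketch: decoding a few HT capability bits from a received
+ * ht_cap_ie_t (assumes ltoh16_ua() for the unaligned little-endian field).
+ *
+ *	uint16 cap = ltoh16_ua(&ht_cap->cap);
+ *	bool  bw40    = (cap & HT_CAP_40MHZ) != 0;
+ *	bool  sgi40   = (cap & HT_CAP_SHORT_GI_40) != 0;
+ *	uint8 rx_stbc = (cap & HT_CAP_RX_STBC_MASK) >> HT_CAP_RX_STBC_SHIFT;
+ */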
+
+
+#define HT_CAP_TXBF_CAP_IMPLICIT_TXBF_RX	0x1
+#define HT_CAP_TXBF_CAP_NDP_RX			0x8
+#define HT_CAP_TXBF_CAP_NDP_TX			0x10
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI		0x100
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_STEERING	0x200
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_STEERING	0x400
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_MASK	0x1800
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_SHIFT	11
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_MASK	0x6000
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_SHIFT	13
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_MASK	0x18000
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_SHIFT	15
+#define HT_CAP_TXBF_CAP_CSI_BFR_ANT_SHIFT	19
+#define HT_CAP_TXBF_CAP_NC_BFR_ANT_SHIFT	21
+#define HT_CAP_TXBF_CAP_C_BFR_ANT_SHIFT		23
+#define HT_CAP_TXBF_CAP_C_BFR_ANT_MASK		0x1800000
+
+#define HT_CAP_TXBF_CAP_CHAN_ESTIM_SHIFT	27
+#define HT_CAP_TXBF_CAP_CHAN_ESTIM_MASK		0x18000000
+
+#define HT_CAP_TXBF_FB_TYPE_NONE 	0
+#define HT_CAP_TXBF_FB_TYPE_DELAYED 	1
+#define HT_CAP_TXBF_FB_TYPE_IMMEDIATE 	2
+#define HT_CAP_TXBF_FB_TYPE_BOTH 	3
+
+#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_MASK	0x400
+#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_SHIFT	10
+#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_MASK 0x18000
+#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_SHIFT 15
+
+#define VHT_MAX_MPDU		11454	/* max mpdu size for now (bytes) */
+#define VHT_MPDU_MSDU_DELTA	56		/* Difference in spec - vht mpdu, amsdu len */
+/* Max AMSDU len - per spec */
+#define VHT_MAX_AMSDU		(VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA)
+
+#define HT_MAX_AMSDU		7935	/* max amsdu size (bytes) per the HT spec */
+#define HT_MIN_AMSDU		3835	/* min amsdu size (bytes) per the HT spec */
+
+#define HT_PARAMS_RX_FACTOR_MASK	0x03	/* ampdu rcv factor mask */
+#define HT_PARAMS_DENSITY_MASK		0x1C	/* ampdu density mask */
+#define HT_PARAMS_DENSITY_SHIFT	2	/* ampdu density shift */
+
+/* HT/AMPDU specific define */
+#define AMPDU_MAX_MPDU_DENSITY  7       /* max mpdu density; in 1/4 usec units */
+#define AMPDU_DENSITY_NONE      0       /* No density requirement */
+#define AMPDU_DENSITY_1over4_US 1       /* 1/4 us density */
+#define AMPDU_DENSITY_1over2_US 2       /* 1/2 us density */
+#define AMPDU_DENSITY_1_US      3       /*   1 us density */
+#define AMPDU_DENSITY_2_US      4       /*   2 us density */
+#define AMPDU_DENSITY_4_US      5       /*   4 us density */
+#define AMPDU_DENSITY_8_US      6       /*   8 us density */
+#define AMPDU_DENSITY_16_US     7       /*  16 us density */
+#define AMPDU_RX_FACTOR_8K      0       /* max rcv ampdu len (8kb) */
+#define AMPDU_RX_FACTOR_16K     1       /* max rcv ampdu len (16kb) */
+#define AMPDU_RX_FACTOR_32K     2       /* max rcv ampdu len (32kb) */
+#define AMPDU_RX_FACTOR_64K     3       /* max rcv ampdu len (64kb) */
+
+/* AMPDU RX factors for VHT rates */
+#define AMPDU_RX_FACTOR_128K    4       /* max rcv ampdu len (128kb) */
+#define AMPDU_RX_FACTOR_256K    5       /* max rcv ampdu len (256kb) */
+#define AMPDU_RX_FACTOR_512K    6       /* max rcv ampdu len (512kb) */
+#define AMPDU_RX_FACTOR_1024K   7       /* max rcv ampdu len (1024kb) */
+
+#define AMPDU_RX_FACTOR_BASE    8*1024  /* ampdu factor base for rx len */
+#define AMPDU_RX_FACTOR_BASE_PWR	13	/* ampdu factor base for rx len in power of 2 */
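+
+/*
+ * Illustrative sketch: the advertised RX factor encodes the maximum A-MPDU
+ * length as 2^(13 + factor) - 1 bytes, so with AMPDU_RX_FACTOR_BASE_PWR:
+ *
+ *	uint32 max_rx_ampdu = (1u << (AMPDU_RX_FACTOR_BASE_PWR + rx_factor)) - 1;
+ *
+ * e.g. AMPDU_RX_FACTOR_64K (3) gives 65535 bytes.
+ */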
+
+#define AMPDU_DELIMITER_LEN	4	/* length of ampdu delimiter */
+#define AMPDU_DELIMITER_LEN_MAX	63	/* max length of ampdu delimiter(enforced in HW) */
+
+#define HT_CAP_EXT_PCO			0x0001
+#define HT_CAP_EXT_PCO_TTIME_MASK	0x0006
+#define HT_CAP_EXT_PCO_TTIME_SHIFT	1
+#define HT_CAP_EXT_MCS_FEEDBACK_MASK	0x0300
+#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT	8
+#define HT_CAP_EXT_HTC			0x0400
+#define HT_CAP_EXT_RD_RESP		0x0800
+
+/** 'ht_add' is called 'HT Operation' information element in the 802.11 standard */
+BWL_PRE_PACKED_STRUCT struct ht_add_ie {
+	uint8	ctl_ch;			/* control channel number */
+	uint8	byte1;			/* ext ch,rec. ch. width, RIFS support */
+	uint16	opmode;			/* operation mode */
+	uint16	misc_bits;		/* misc bits */
+	uint8	basic_mcs[MCSSET_LEN];  /* required MCS set */
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_add_ie ht_add_ie_t;
+
+/* ADD IE: the HT 1.0 spec simply stole an 802.11 IE; we use our prop. IE until this is resolved */
+/* the additional IE is primarily used to convey the current BSS configuration */
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	type;		/* indicates what follows */
+	ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN	22
+#define HT_ADD_IE_TYPE	52
+
+/* byte1 defn's */
+#define HT_BW_ANY		0x04	/* set, STA can use 20 or 40MHz */
+#define HT_RIFS_PERMITTED     	0x08	/* RIFS allowed */
+
+/* opmode defn's */
+#define HT_OPMODE_MASK	        0x0003	/* protection mode mask */
+#define HT_OPMODE_SHIFT		0	/* protection mode shift */
+#define HT_OPMODE_PURE		0x0000	/* protection mode PURE */
+#define HT_OPMODE_OPTIONAL	0x0001	/* protection mode optional */
+#define HT_OPMODE_HT20IN40	0x0002	/* protection mode 20MHz HT in 40MHz BSS */
+#define HT_OPMODE_MIXED	0x0003	/* protection mode Mixed Mode */
+#define HT_OPMODE_NONGF	0x0004	/* protection mode non-GF */
+#define DOT11N_TXBURST		0x0008	/* Tx burst limit */
+#define DOT11N_OBSS_NONHT	0x0010	/* OBSS Non-HT STA present */
+
+/* misc_bits defn's */
+#define HT_BASIC_STBC_MCS	0x007f	/* basic STBC MCS */
+#define HT_DUAL_STBC_PROT	0x0080	/* Dual STBC Protection */
+#define HT_SECOND_BCN		0x0100	/* Secondary beacon support */
+#define HT_LSIG_TXOP		0x0200	/* L-SIG TXOP Protection full support */
+#define HT_PCO_ACTIVE		0x0400	/* PCO active */
+#define HT_PCO_PHASE		0x0800	/* PCO phase */
+#define HT_DUALCTS_PROTECTION	0x0080	/* DUAL CTS protection needed */
+
+/* Tx Burst Limits */
+#define DOT11N_2G_TXBURST_LIMIT	6160	/* 2G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+#define DOT11N_5G_TXBURST_LIMIT	3080	/* 5G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+
+/* Macros for opmode */
+#define GET_HT_OPMODE(add_ie)		((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					>> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_MIXED)	/* mixed mode present */
+#define HT_HT20_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_HT20IN40)	/* 20MHz HT present */
+#define HT_OPTIONAL_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_OPTIONAL)	/* Optional protection present */
+#define HT_USE_PROTECTION(add_ie)	(HT_HT20_PRESENT((add_ie)) || \
+					HT_MIXEDMODE_PRESENT((add_ie))) /* use protection */
+#define HT_NONGF_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+					== HT_OPMODE_NONGF)	/* non-GF present */
+#define DOT11N_TXBURST_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+					== DOT11N_TXBURST)	/* Tx Burst present */
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+					== DOT11N_OBSS_NONHT)	/* OBSS Non-HT present */
+
+BWL_PRE_PACKED_STRUCT struct obss_params {
+	uint16	passive_dwell;
+	uint16	active_dwell;
+	uint16	bss_widthscan_interval;
+	uint16	passive_total;
+	uint16	active_total;
+	uint16	chanwidth_transition_dly;
+	uint16	activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+	uint8	id;
+	uint8	len;
+	obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN	sizeof(obss_params_t)	/* HT OBSS len (based on 802.11n d3.0) */
+
+/* HT control field */
+#define HT_CTRL_LA_TRQ		0x00000002	/* sounding request */
+#define HT_CTRL_LA_MAI		0x0000003C	/* MCS request or antenna selection indication */
+#define HT_CTRL_LA_MAI_SHIFT	2
+#define HT_CTRL_LA_MAI_MRQ	0x00000004	/* MCS request */
+#define HT_CTRL_LA_MAI_MSI	0x00000038	/* MCS request sequence identifier */
+#define HT_CTRL_LA_MFSI		0x000001C0	/* MFB sequence identifier */
+#define HT_CTRL_LA_MFSI_SHIFT	6
+#define HT_CTRL_LA_MFB_ASELC	0x0000FE00	/* MCS feedback, antenna selection command/data */
+#define HT_CTRL_LA_MFB_ASELC_SH	9
+#define HT_CTRL_LA_ASELC_CMD	0x00000C00	/* ASEL command */
+#define HT_CTRL_LA_ASELC_DATA	0x0000F000	/* ASEL data */
+#define HT_CTRL_CAL_POS		0x00030000	/* Calibration position */
+#define HT_CTRL_CAL_SEQ		0x000C0000	/* Calibration sequence */
+#define HT_CTRL_CSI_STEERING	0x00C00000	/* CSI/Steering */
+#define HT_CTRL_CSI_STEER_SHIFT	22
+#define HT_CTRL_CSI_STEER_NFB	0		/* no feedback required */
+#define HT_CTRL_CSI_STEER_CSI	1		/* CSI, H matrix */
+#define HT_CTRL_CSI_STEER_NCOM	2		/* non-compressed beamforming */
+#define HT_CTRL_CSI_STEER_COM	3		/* compressed beamforming */
+#define HT_CTRL_NDP_ANNOUNCE	0x01000000	/* NDP announcement */
+#define HT_CTRL_AC_CONSTRAINT	0x40000000	/* AC Constraint */
+#define HT_CTRL_RDG_MOREPPDU	0x80000000	/* RDG/More PPDU */
+
+/* ************* VHT definitions. ************* */
+
+/**
+ * VHT Capabilites IE (sec 8.4.2.160)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_cap_ie {
+	uint32  vht_cap_info;
+	/* supported MCS set - 64 bit field */
+	uint16	rx_mcs_map;
+	uint16  rx_max_rate;
+	uint16  tx_mcs_map;
+	uint16	tx_max_rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_cap_ie vht_cap_ie_t;
+
+/* 4B cap_info + 8B supp_mcs */
+#define VHT_CAP_IE_LEN 12
+
+/* VHT Capabilities Info field - 32bit - in VHT Cap IE */
+#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK          0x00000003
+#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK       0x0000000c
+#define VHT_CAP_INFO_LDPC                       0x00000010
+#define VHT_CAP_INFO_SGI_80MHZ                  0x00000020
+#define VHT_CAP_INFO_SGI_160MHZ                 0x00000040
+#define VHT_CAP_INFO_TX_STBC                    0x00000080
+#define VHT_CAP_INFO_RX_STBC_MASK               0x00000700
+#define VHT_CAP_INFO_RX_STBC_SHIFT              8
+#define VHT_CAP_INFO_SU_BEAMFMR                 0x00000800
+#define VHT_CAP_INFO_SU_BEAMFMEE                0x00001000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK         0x0000e000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT        13
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK      0x00070000
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT     16
+#define VHT_CAP_INFO_MU_BEAMFMR                 0x00080000
+#define VHT_CAP_INFO_MU_BEAMFMEE                0x00100000
+#define VHT_CAP_INFO_TXOPPS                     0x00200000
+#define VHT_CAP_INFO_HTCVHT                     0x00400000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK      0x03800000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT     23
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK        0x0c000000
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT       26
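+
+/*
+ * Illustrative sketch: decoding fields of the 32-bit VHT Capabilities Info
+ * word (assumes ltoh32_ua() for the unaligned little-endian field).
+ *
+ *	uint32 ci = ltoh32_ua(&vht_cap->vht_cap_info);
+ *	bool  su_bfee   = (ci & VHT_CAP_INFO_SU_BEAMFMEE) != 0;
+ *	uint8 rx_stbc   = (ci & VHT_CAP_INFO_RX_STBC_MASK) >> VHT_CAP_INFO_RX_STBC_SHIFT;
+ *	uint8 ampdu_exp = (ci & VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK) >>
+ *	                  VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT;
+ */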
+
+/* VHT Supported MCS Set - 64-bit - in VHT Cap IE */
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK   0x1fff
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT  0
+
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK   0x1fff
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT  0
+
+#define VHT_CAP_MCS_MAP_0_7                     0
+#define VHT_CAP_MCS_MAP_0_8                     1
+#define VHT_CAP_MCS_MAP_0_9                     2
+#define VHT_CAP_MCS_MAP_NONE                    3
+#define VHT_CAP_MCS_MAP_S                       2 /* num bits for 1-stream */
+#define VHT_CAP_MCS_MAP_M                       0x3 /* mask for 1-stream */
+/* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */
+#define VHT_CAP_MCS_MAP_NONE_ALL                0xffff
+/* mcsmap with MCS0-9 for Nss = 3 */
+#define VHT_CAP_MCS_MAP_0_9_NSS3 \
+	        ((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \
+	         (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \
+	         (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(3)))
+
+#define VHT_CAP_MCS_MAP_NSS_MAX                 8
+
+/* get mcsmap with given mcs for given nss streams */
+#define VHT_CAP_MCS_MAP_CREATE(mcsmap, nss, mcs) \
+	do { \
+		int i; \
+		for (i = 1; i <= nss; i++) { \
+			VHT_MCS_MAP_SET_MCS_PER_SS(i, mcs, mcsmap); \
+		} \
+	} while (0)
+
+/* Map the mcs code to mcs bit map */
+#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \
+	((mcs_code == VHT_CAP_MCS_MAP_0_7) ? 0xff : \
+	 (mcs_code == VHT_CAP_MCS_MAP_0_8) ? 0x1ff : \
+	 (mcs_code == VHT_CAP_MCS_MAP_0_9) ? 0x3ff : 0)
+
+/* Map the mcs bit map to mcs code */
+#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \
+	((mcs_map == 0xff)  ? VHT_CAP_MCS_MAP_0_7 : \
+	 (mcs_map == 0x1ff) ? VHT_CAP_MCS_MAP_0_8 : \
+	 (mcs_map == 0x3ff) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE)
+
+/** VHT Capabilities Supported Channel Width */
+typedef enum vht_cap_chan_width {
+	VHT_CAP_CHAN_WIDTH_SUPPORT_MANDATORY = 0x00,
+	VHT_CAP_CHAN_WIDTH_SUPPORT_160       = 0x04,
+	VHT_CAP_CHAN_WIDTH_SUPPORT_160_8080  = 0x08
+} vht_cap_chan_width_t;
+
+/** VHT Capabilities Supported max MPDU LEN (sec 8.4.2.160.2) */
+typedef enum vht_cap_max_mpdu_len {
+	VHT_CAP_MPDU_MAX_4K     = 0x00,
+	VHT_CAP_MPDU_MAX_8K     = 0x01,
+	VHT_CAP_MPDU_MAX_11K    = 0x02
+} vht_cap_max_mpdu_len_t;
+
+/* Maximum MPDU Length byte counts for the VHT Capabilities advertised limits */
+#define VHT_MPDU_LIMIT_4K        3895
+#define VHT_MPDU_LIMIT_8K        7991
+#define VHT_MPDU_LIMIT_11K      11454
+
+
+/**
+ * VHT Operation IE (sec 8.4.2.161)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_op_ie {
+	uint8	chan_width;
+	uint8	chan1;
+	uint8	chan2;
+	uint16	supp_mcs;  /*  same def as above in vht cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_op_ie vht_op_ie_t;
+
+/* 3B VHT Op info + 2B Basic MCS */
+#define VHT_OP_IE_LEN 5
+
+typedef enum vht_op_chan_width {
+	VHT_OP_CHAN_WIDTH_20_40	= 0,
+	VHT_OP_CHAN_WIDTH_80	= 1,
+	VHT_OP_CHAN_WIDTH_160	= 2,
+	VHT_OP_CHAN_WIDTH_80_80	= 3
+} vht_op_chan_width_t;
+
+/* AID length */
+#define AID_IE_LEN		2
+/**
+ * BRCM VHT features IE header
+ * The header is the fixed part of the IE.
+ * On the 5GHz band this is the entire IE; on 2.4GHz the VHT IEs as defined
+ * in the 802.11ac specification follow.
+ *
+ * VHT features rates bitmap:
+ * Bit0:	5G MCS 0-9 support, BW 160MHz
+ * Bit1:	5G MCS 0-9 support, BW 80MHz
+ * Bit2:	5G MCS 0-9 support, BW 20MHz
+ * Bit3:	2.4G MCS 0-9 support, BW 20MHz
+ * Bits 4-7:	Reserved for future use
+ */
+#define VHT_FEATURES_IE_TYPE	0x4
+BWL_PRE_PACKED_STRUCT struct vht_features_ie_hdr {
+	uint8 oui[3];
+	uint8 type;		/* type of this IE = 4 */
+	uint8 rate_mask;	/* VHT rate mask */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_features_ie_hdr vht_features_ie_hdr_t;
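+
+/* Illustrative sketch (not part of the original header): testing one bit of the
+ * rate_mask bitmap described above. The helper function named below is
+ * hypothetical and only shows how the bitmap could be consumed.
+ *
+ *	vht_features_ie_hdr_t *ie;		// points at a received BRCM VHT features IE body
+ *	if (ie->rate_mask & 0x02)		// Bit1: 5G MCS 0-9 support, BW 80MHz
+ *		enable_5g_80mhz_vht_rates();	// hypothetical helper
+ */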
+
+/* Defs for rx & tx basic MCS maps - each spatial stream number has 2 bits of info */
+#define VHT_MCS_MAP_GET_SS_IDX(nss) (((nss)-1) * VHT_CAP_MCS_MAP_S)
+#define VHT_MCS_MAP_GET_MCS_PER_SS(nss, mcsMap) \
+	(((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(nss)) & VHT_CAP_MCS_MAP_M)
+#define VHT_MCS_MAP_SET_MCS_PER_SS(nss, numMcs, mcsMap) \
+	do { \
+	 (mcsMap) &= (~(VHT_CAP_MCS_MAP_M << VHT_MCS_MAP_GET_SS_IDX(nss))); \
+	 (mcsMap) |= (((numMcs) & VHT_CAP_MCS_MAP_M) << VHT_MCS_MAP_GET_SS_IDX(nss)); \
+	} while (0)
+#define VHT_MCS_SS_SUPPORTED(nss, mcsMap) \
+		 (VHT_MCS_MAP_GET_MCS_PER_SS((nss), (mcsMap)) != VHT_CAP_MCS_MAP_NONE)
+
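+/* Illustrative sketch (not part of the original header): building and querying a
+ * per-spatial-stream MCS map with the macros above.
+ *
+ *	uint16 mcsmap = VHT_CAP_MCS_MAP_NONE_ALL;			// all streams disabled
+ *	VHT_MCS_MAP_SET_MCS_PER_SS(1, VHT_CAP_MCS_MAP_0_9, mcsmap);	// SS1: MCS 0-9
+ *	VHT_MCS_MAP_SET_MCS_PER_SS(2, VHT_CAP_MCS_MAP_0_7, mcsmap);	// SS2: MCS 0-7
+ *	if (VHT_MCS_SS_SUPPORTED(2, mcsmap))
+ *		mcs_code = VHT_MCS_MAP_GET_MCS_PER_SS(2, mcsmap);	// yields VHT_CAP_MCS_MAP_0_7
+ */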
+
+/* ************* WPA definitions. ************* */
+#define WPA_OUI			"\x00\x50\xF2"	/* WPA OUI */
+#define WPA_OUI_LEN		3		/* WPA OUI length */
+#define WPA_OUI_TYPE		1
+#define WPA_VERSION		1		/* WPA version */
+#define WPA2_OUI		"\x00\x0F\xAC"	/* WPA2 OUI */
+#define WPA2_OUI_LEN		3		/* WPA2 OUI length */
+#define WPA2_VERSION		1		/* WPA2 version */
+#define WPA2_VERSION_LEN	2		/* WPA2 version length */
+
+/* ************* WPS definitions. ************* */
+#define WPS_OUI			"\x00\x50\xF2"	/* WPS OUI */
+#define WPS_OUI_LEN		3		/* WPS OUI length */
+#define WPS_OUI_TYPE		4
+
+/* ************* WFA definitions. ************* */
+
+#ifdef P2P_IE_OVRD
+#define WFA_OUI			MAC_OUI
+#else
+#define WFA_OUI			"\x50\x6F\x9A"	/* WFA OUI */
+#endif /* P2P_IE_OVRD */
+#define WFA_OUI_LEN		3		/* WFA OUI length */
+#ifdef P2P_IE_OVRD
+#define WFA_OUI_TYPE_P2P	MAC_OUI_TYPE_P2P
+#else
+#define WFA_OUI_TYPE_TPC	8
+#define WFA_OUI_TYPE_P2P	9
+#endif
+
+#define WFA_OUI_TYPE_TPC	8
+#ifdef WLTDLS
+#define WFA_OUI_TYPE_TPQ	4	/* WFD Tunneled Probe ReQuest */
+#define WFA_OUI_TYPE_TPS	5	/* WFD Tunneled Probe ReSponse */
+#define WFA_OUI_TYPE_WFD	10
+#endif /* WLTDLS */
+#define WFA_OUI_TYPE_HS20	0x10
+#define WFA_OUI_TYPE_OSEN	0x12
+#define WFA_OUI_TYPE_NAN	0x13
+
+/* RSN authenticated key management suite */
+#define RSN_AKM_NONE		0	/* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED	1	/* Over 802.1x */
+#define RSN_AKM_PSK		2	/* Pre-shared Key */
+#define RSN_AKM_FBT_1X		3	/* Fast Bss transition using 802.1X */
+#define RSN_AKM_FBT_PSK		4	/* Fast Bss transition using Pre-shared Key */
+#define RSN_AKM_MFP_1X		5	/* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_MFP_PSK		6	/* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_TPK			7	/* TPK(TDLS Peer Key) handshake */
+
+/* OSEN authenticated key management suite */
+#define OSEN_AKM_UNSPECIFIED	RSN_AKM_UNSPECIFIED	/* Over 802.1x */
+
+/* Key related defines */
+#define DOT11_MAX_DEFAULT_KEYS	4	/* number of default keys */
+#define DOT11_MAX_IGTK_KEYS		2
+#define DOT11_MAX_KEY_SIZE	32	/* max size of any key */
+#define DOT11_MAX_IV_SIZE	16	/* max size of any IV */
+#define DOT11_EXT_IV_FLAG	(1<<5)	/* flag to indicate IV is > 4 bytes */
+#define DOT11_WPA_KEY_RSC_LEN   8       /* WPA RSC key len */
+
+#define WEP1_KEY_SIZE		5	/* max size of any WEP key */
+#define WEP1_KEY_HEX_SIZE	10	/* size of WEP key in hex. */
+#define WEP128_KEY_SIZE		13	/* max size of any WEP key */
+#define WEP128_KEY_HEX_SIZE	26	/* size of WEP key in hex. */
+#define TKIP_MIC_SIZE		8	/* size of TKIP MIC */
+#define TKIP_EOM_SIZE		7	/* max size of TKIP EOM */
+#define TKIP_EOM_FLAG		0x5a	/* TKIP EOM flag byte */
+#define TKIP_KEY_SIZE		32	/* size of any TKIP key, includes MIC keys */
+#define TKIP_TK_SIZE		16
+#define TKIP_MIC_KEY_SIZE	8
+#define TKIP_MIC_AUTH_TX	16	/* offset to Authenticator MIC TX key */
+#define TKIP_MIC_AUTH_RX	24	/* offset to Authenticator MIC RX key */
+#define TKIP_MIC_SUP_RX		TKIP_MIC_AUTH_TX	/* offset to Supplicant MIC RX key */
+#define TKIP_MIC_SUP_TX		TKIP_MIC_AUTH_RX	/* offset to Supplicant MIC TX key */
+#define AES_KEY_SIZE		16	/* size of AES key */
+#define AES_MIC_SIZE		8	/* size of AES MIC */
+#define BIP_KEY_SIZE		16	/* size of BIP key */
+#define BIP_MIC_SIZE		8   /* size of BIP MIC */
+
+#define AES_GCM_MIC_SIZE	16	/* size of MIC for 128-bit GCM - .11adD9 */
+
+#define AES256_KEY_SIZE		32	/* size of AES 256 key - .11acD5 */
+#define AES256_MIC_SIZE		16	/* size of MIC for 256 bit keys, incl BIP */
+
+/* WCN */
+#define WCN_OUI			"\x00\x50\xf2"	/* WCN OUI */
+#define WCN_TYPE		4	/* WCN type */
+
+#ifdef BCMWAPI_WPI
+#define SMS4_KEY_LEN		16
+#define SMS4_WPI_CBC_MAC_LEN	16
+#endif
+
+/* 802.11r protocol definitions */
+
+/** Mobility Domain IE */
+BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
+	uint8 id;
+	uint8 len;
+	uint16 mdid;		/* Mobility Domain Id */
+	uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mdid_ie dot11_mdid_ie_t;
+
+#define FBT_MDID_CAP_OVERDS	0x01	/* Fast Bss transition over the DS support */
+#define FBT_MDID_CAP_RRP	0x02	/* Resource request protocol support */
+
+/** Fast Bss Transition IE */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
+	uint8 id;
+	uint8 len;
+	uint16 mic_control;		/* Mic Control */
+	uint8 mic[16];
+	uint8 anonce[32];
+	uint8 snonce[32];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_ie dot11_ft_ie_t;
+
+#define TIE_TYPE_RESERVED		0
+#define TIE_TYPE_REASSOC_DEADLINE	1
+#define TIE_TYPE_KEY_LIEFTIME		2
+#define TIE_TYPE_ASSOC_COMEBACK		3
+BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie {
+	uint8 id;
+	uint8 len;
+	uint8 type;		/* timeout interval type */
+	uint32 value;		/* timeout interval value */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timeout_ie dot11_timeout_ie_t;
+
+/** GTK ie */
+BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie {
+	uint8 id;
+	uint8 len;
+	uint16 key_info;
+	uint8 key_len;
+	uint8 rsc[8];
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_gtk_ie dot11_gtk_ie_t;
+
+/** Management MIC ie */
+BWL_PRE_PACKED_STRUCT struct mmic_ie {
+	uint8   id;					/* IE ID: DOT11_MNG_MMIE_ID */
+	uint8   len;				/* IE length */
+	uint16  key_id;				/* key id */
+	uint8   ipn[6];				/* ipn */
+	uint8   mic[16];			/* mic */
+} BWL_POST_PACKED_STRUCT;
+typedef struct mmic_ie mmic_ie_t;
+
+#define BSSID_INVALID           "\x00\x00\x00\x00\x00\x00"
+#define BSSID_BROADCAST         "\xFF\xFF\xFF\xFF\xFF\xFF"
+
+#ifdef BCMWAPI_WAI
+#define WAPI_IE_MIN_LEN 	20	/* WAPI IE min length */
+#define WAPI_VERSION		1	/* WAPI version */
+#define WAPI_VERSION_LEN	2	/* WAPI version length */
+#define WAPI_OUI		"\x00\x14\x72"	/* WAPI OUI */
+#define WAPI_OUI_LEN		DOT11_OUI_LEN	/* WAPI OUI length */
+#endif /* BCMWAPI_WAI */
+
+/* ************* WMM Parameter definitions. ************* */
+#define WMM_OUI			"\x00\x50\xF2"	/* WMM OUI */
+#define WMM_OUI_LEN		3		/* WMM OUI length */
+#define WMM_OUI_TYPE	2		/* WMM OUI type */
+#define WMM_VERSION		1
+#define WMM_VERSION_LEN	1
+
+/* WMM OUI subtype */
+#define WMM_OUI_SUBTYPE_PARAMETER	1
+#define WMM_PARAMETER_IE_LEN		24
+
+/** Link Identifier Element */
+BWL_PRE_PACKED_STRUCT struct link_id_ie {
+	uint8 id;
+	uint8 len;
+	struct ether_addr	bssid;
+	struct ether_addr	tdls_init_mac;
+	struct ether_addr	tdls_resp_mac;
+} BWL_POST_PACKED_STRUCT;
+typedef struct link_id_ie link_id_ie_t;
+#define TDLS_LINK_ID_IE_LEN		18
+
+/** Link Wakeup Schedule Element */
+BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie {
+	uint8 id;
+	uint8 len;
+	uint32 offset;			/* in ms between TSF0 and start of 1st Awake Window */
+	uint32 interval;		/* in ms between the start of 2 Awake Windows */
+	uint32 awake_win_slots;	/* in backoff slots, duration of Awake Window */
+	uint32 max_wake_win;	/* in ms, max duration of Awake Window */
+	uint16 idle_cnt;		/* number of consecutive Awake Windows */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wakeup_sch_ie wakeup_sch_ie_t;
+#define TDLS_WAKEUP_SCH_IE_LEN		18
+
+/** Channel Switch Timing Element */
+BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie {
+	uint8 id;
+	uint8 len;
+	uint16 switch_time;		/* in ms, time to switch channels */
+	uint16 switch_timeout;	/* in ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct channel_switch_timing_ie channel_switch_timing_ie_t;
+#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN		4
+
+/** PTI Control Element */
+BWL_PRE_PACKED_STRUCT struct pti_control_ie {
+	uint8 id;
+	uint8 len;
+	uint8 tid;
+	uint16 seq_control;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pti_control_ie pti_control_ie_t;
+#define TDLS_PTI_CONTROL_IE_LEN		3
+
+/** PU Buffer Status Element */
+BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie {
+	uint8 id;
+	uint8 len;
+	uint8 status;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pu_buffer_status_ie pu_buffer_status_ie_t;
+#define TDLS_PU_BUFFER_STATUS_IE_LEN	1
+#define TDLS_PU_BUFFER_STATUS_AC_BK		1
+#define TDLS_PU_BUFFER_STATUS_AC_BE		2
+#define TDLS_PU_BUFFER_STATUS_AC_VI		4
+#define TDLS_PU_BUFFER_STATUS_AC_VO		8
+
+/* TDLS Action Field Values */
+#define TDLS_SETUP_REQ				0
+#define TDLS_SETUP_RESP				1
+#define TDLS_SETUP_CONFIRM			2
+#define TDLS_TEARDOWN				3
+#define TDLS_PEER_TRAFFIC_IND			4
+#define TDLS_CHANNEL_SWITCH_REQ			5
+#define TDLS_CHANNEL_SWITCH_RESP		6
+#define TDLS_PEER_PSM_REQ			7
+#define TDLS_PEER_PSM_RESP			8
+#define TDLS_PEER_TRAFFIC_RESP			9
+#define TDLS_DISCOVERY_REQ			10
+
+/* 802.11z TDLS Public Action Frame action field */
+#define TDLS_DISCOVERY_RESP			14
+
+/* 802.11u GAS action frames */
+#define GAS_REQUEST_ACTION_FRAME				10
+#define GAS_RESPONSE_ACTION_FRAME				11
+#define GAS_COMEBACK_REQUEST_ACTION_FRAME		12
+#define GAS_COMEBACK_RESPONSE_ACTION_FRAME		13
+
+/* 802.11u interworking access network options */
+#define IW_ANT_MASK				0x0f
+#define IW_INTERNET_MASK		0x10
+#define IW_ASRA_MASK			0x20
+#define IW_ESR_MASK				0x40
+#define IW_UESA_MASK			0x80
+
+/* 802.11u interworking access network type */
+#define IW_ANT_PRIVATE_NETWORK					0
+#define IW_ANT_PRIVATE_NETWORK_WITH_GUEST		1
+#define IW_ANT_CHARGEABLE_PUBLIC_NETWORK		2
+#define IW_ANT_FREE_PUBLIC_NETWORK				3
+#define IW_ANT_PERSONAL_DEVICE_NETWORK			4
+#define IW_ANT_EMERGENCY_SERVICES_NETWORK		5
+#define IW_ANT_TEST_NETWORK						14
+#define IW_ANT_WILDCARD_NETWORK					15
+
+/* 802.11u advertisement protocol */
+#define ADVP_ANQP_PROTOCOL_ID	0
+
+/* 802.11u advertisement protocol masks */
+#define ADVP_QRL_MASK					0x7f
+#define ADVP_PAME_BI_MASK				0x80
+
+/* 802.11u advertisement protocol values */
+#define ADVP_QRL_REQUEST				0x00
+#define ADVP_QRL_RESPONSE				0x7f
+#define ADVP_PAME_BI_DEPENDENT			0x00
+#define ADVP_PAME_BI_INDEPENDENT		ADVP_PAME_BI_MASK
+
+/* 802.11u ANQP information ID */
+#define ANQP_ID_QUERY_LIST							256
+#define ANQP_ID_CAPABILITY_LIST						257
+#define ANQP_ID_VENUE_NAME_INFO						258
+#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO			259
+#define ANQP_ID_NETWORK_AUTHENTICATION_TYPE_INFO	260
+#define ANQP_ID_ROAMING_CONSORTIUM_LIST				261
+#define ANQP_ID_IP_ADDRESS_TYPE_AVAILABILITY_INFO	262
+#define ANQP_ID_NAI_REALM_LIST						263
+#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO			264
+#define ANQP_ID_AP_GEOSPATIAL_LOCATION				265
+#define ANQP_ID_AP_CIVIC_LOCATION					266
+#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI			267
+#define ANQP_ID_DOMAIN_NAME_LIST					268
+#define ANQP_ID_EMERGENCY_ALERT_ID_URI				269
+#define ANQP_ID_EMERGENCY_NAI						271
+#define ANQP_ID_VENDOR_SPECIFIC_LIST				56797
+
+/* 802.11u ANQP OUI */
+#define ANQP_OUI_SUBTYPE	9
+
+/* 802.11u venue name */
+#define VENUE_LANGUAGE_CODE_SIZE		3
+#define VENUE_NAME_SIZE					255
+
+/* 802.11u venue groups */
+#define VENUE_UNSPECIFIED				0
+#define VENUE_ASSEMBLY					1
+#define VENUE_BUSINESS					2
+#define VENUE_EDUCATIONAL				3
+#define VENUE_FACTORY					4
+#define VENUE_INSTITUTIONAL				5
+#define VENUE_MERCANTILE				6
+#define VENUE_RESIDENTIAL				7
+#define VENUE_STORAGE					8
+#define VENUE_UTILITY					9
+#define VENUE_VEHICULAR					10
+#define VENUE_OUTDOOR					11
+
+/* 802.11u network authentication type indicator */
+#define NATI_UNSPECIFIED							-1
+#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS			0
+#define NATI_ONLINE_ENROLLMENT_SUPPORTED			1
+#define NATI_HTTP_HTTPS_REDIRECTION					2
+#define NATI_DNS_REDIRECTION						3
+
+/* 802.11u IP address type availability - IPv6 */
+#define IPA_IPV6_SHIFT						0
+#define IPA_IPV6_MASK						(0x03 << IPA_IPV6_SHIFT)
+#define	IPA_IPV6_NOT_AVAILABLE				0x00
+#define IPA_IPV6_AVAILABLE					0x01
+#define IPA_IPV6_UNKNOWN_AVAILABILITY		0x02
+
+/* 802.11u IP address type availability - IPv4 */
+#define IPA_IPV4_SHIFT						2
+#define IPA_IPV4_MASK						(0x3f << IPA_IPV4_SHIFT)
+#define	IPA_IPV4_NOT_AVAILABLE				0x00
+#define IPA_IPV4_PUBLIC						0x01
+#define IPA_IPV4_PORT_RESTRICT				0x02
+#define IPA_IPV4_SINGLE_NAT					0x03
+#define IPA_IPV4_DOUBLE_NAT					0x04
+#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT	0x05
+#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT	0x06
+#define IPA_IPV4_UNKNOWN_AVAILABILITY		0x07
+
+/* 802.11u NAI realm encoding */
+#define REALM_ENCODING_RFC4282	0
+#define REALM_ENCODING_UTF8		1
+
+/* 802.11u IANA EAP method type numbers */
+#define REALM_EAP_TLS					13
+#define REALM_EAP_LEAP					17
+#define REALM_EAP_SIM					18
+#define REALM_EAP_TTLS					21
+#define REALM_EAP_AKA					23
+#define REALM_EAP_PEAP					25
+#define REALM_EAP_FAST					43
+#define REALM_EAP_PSK					47
+#define REALM_EAP_AKAP					50
+#define REALM_EAP_EXPANDED				254
+
+/* 802.11u authentication ID */
+#define REALM_EXPANDED_EAP						1
+#define REALM_NON_EAP_INNER_AUTHENTICATION		2
+#define REALM_INNER_AUTHENTICATION_EAP			3
+#define REALM_EXPANDED_INNER_EAP				4
+#define REALM_CREDENTIAL						5
+#define REALM_TUNNELED_EAP_CREDENTIAL			6
+#define REALM_VENDOR_SPECIFIC_EAP				221
+
+/* 802.11u non-EAP inner authentication type */
+#define REALM_RESERVED_AUTH			0
+#define REALM_PAP					1
+#define REALM_CHAP					2
+#define REALM_MSCHAP				3
+#define REALM_MSCHAPV2				4
+
+/* 802.11u credential type */
+#define REALM_SIM					1
+#define REALM_USIM					2
+#define REALM_NFC					3
+#define REALM_HARDWARE_TOKEN		4
+#define REALM_SOFTOKEN				5
+#define REALM_CERTIFICATE			6
+#define REALM_USERNAME_PASSWORD		7
+#define REALM_SERVER_SIDE			8
+#define REALM_RESERVED_CRED			9
+#define REALM_VENDOR_SPECIFIC_CRED	10
+
+/* 802.11u 3GPP PLMN */
+#define G3PP_GUD_VERSION		0
+#define G3PP_PLMN_LIST_IE		0
+
+/** hotspot2.0 indication element (vendor specific) */
+BWL_PRE_PACKED_STRUCT struct hs20_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 config;
+} BWL_POST_PACKED_STRUCT;
+typedef struct hs20_ie hs20_ie_t;
+#define HS20_IE_LEN 5	/* HS20 IE length */
+
+/** IEEE 802.11 Annex E */
+typedef enum {
+	DOT11_2GHZ_20MHZ_CLASS_12		= 81,	/* Ch 1-11			 */
+	DOT11_5GHZ_20MHZ_CLASS_1		= 115,	/* Ch 36-48			 */
+	DOT11_5GHZ_20MHZ_CLASS_2_DFS	= 118,	/* Ch 52-64			 */
+	DOT11_5GHZ_20MHZ_CLASS_3		= 124,	/* Ch 149-161		 */
+	DOT11_5GHZ_20MHZ_CLASS_4_DFS	= 121,	/* Ch 100-140		 */
+	DOT11_5GHZ_20MHZ_CLASS_5		= 125,	/* Ch 149-165		 */
+	DOT11_5GHZ_40MHZ_CLASS_22		= 116,	/* Ch 36-44,   lower */
+	DOT11_5GHZ_40MHZ_CLASS_23_DFS 	= 119,	/* Ch 52-60,   lower */
+	DOT11_5GHZ_40MHZ_CLASS_24_DFS	= 122,	/* Ch 100-132, lower */
+	DOT11_5GHZ_40MHZ_CLASS_25		= 126,	/* Ch 149-157, lower */
+	DOT11_5GHZ_40MHZ_CLASS_27		= 117,	/* Ch 40-48,   upper */
+	DOT11_5GHZ_40MHZ_CLASS_28_DFS	= 120,	/* Ch 56-64,   upper */
+	DOT11_5GHZ_40MHZ_CLASS_29_DFS	= 123,	/* Ch 104-136, upper */
+	DOT11_5GHZ_40MHZ_CLASS_30		= 127,	/* Ch 153-161, upper */
+	DOT11_2GHZ_40MHZ_CLASS_32		= 83,	/* Ch 1-7,     lower */
+	DOT11_2GHZ_40MHZ_CLASS_33		= 84,	/* Ch 5-11,    upper */
+} dot11_op_class_t;
+
+/* QoS map */
+#define QOS_MAP_FIXED_LENGTH	(8 * 2)	/* DSCP ranges fixed with 8 entries */
+
+/* BCM proprietary IE type for AIBSS */
+#define BCM_AIBSS_IE_TYPE 56
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a53542d90188eb6cb325b43153243e76c9fd273
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
@@ -0,0 +1,27 @@
+/*
+ * BT-AMP (Bluetooth Alternate MAC and PHY) 802.11 PAL (Protocol Adaptation Layer)
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: 802.11_bta.h 382882 2013-02-04 23:24:31Z $
+*/
+
+#ifndef _802_11_BTA_H_
+#define _802_11_BTA_H_
+
+#define BT_SIG_SNAP_MPROT		"\xAA\xAA\x03\x00\x19\x58"
+
+/* BT-AMP 802.11 PAL Protocols */
+#define BTA_PROT_L2CAP				1
+#define	BTA_PROT_ACTIVITY_REPORT		2
+#define BTA_PROT_SECURITY			3
+#define BTA_PROT_LINK_SUPERVISION_REQUEST	4
+#define BTA_PROT_LINK_SUPERVISION_REPLY		5
+
+/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */
+#define BTA_TYPE_ID_MAC_ADDRESS			1
+#define BTA_TYPE_ID_PREFERRED_CHANNELS		2
+#define BTA_TYPE_ID_CONNECTED_CHANNELS		3
+#define BTA_TYPE_ID_CAPABILITIES		4
+#define BTA_TYPE_ID_VERSION			5
+#endif /* _802_11_BTA_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11e.h b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
new file mode 100644
index 0000000000000000000000000000000000000000..914d2781a4e36b0ec0c521fc28a6fefe64d7fd2d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
@@ -0,0 +1,120 @@
+/*
+ * 802.11e protocol header file
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: 802.11e.h 382883 2013-02-04 23:26:09Z $
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN           2           /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF          2           /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET	0		/* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET		1		/* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET		2		/* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET		3		/* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+	uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+	uint8 oui[DOT11_OUI_LEN];	/* WME_OUI */
+	uint8 type;					/* WME_TYPE */
+	uint8 subtype;				/* WME_SUBTYPE_TSPEC */
+	uint8 version;				/* WME_VERSION */
+	tsinfo_t tsinfo;			/* TS Info bit field */
+	uint16 nom_msdu_size;		/* (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size;		/* Maximum MSDU Size (bytes) */
+	uint32 min_srv_interval;	/* Minimum Service Interval (us) */
+	uint32 max_srv_interval;	/* Maximum Service Interval (us) */
+	uint32 inactivity_interval;	/* Inactivity Interval (us) */
+	uint32 suspension_interval; /* Suspension Interval (us) */
+	uint32 srv_start_time;		/* Service Start Time (us) */
+	uint32 min_data_rate;		/* Minimum Data Rate (bps) */
+	uint32 mean_data_rate;		/* Mean Data Rate (bps) */
+	uint32 peak_data_rate;		/* Peak Data Rate (bps) */
+	uint32 max_burst_size;		/* Maximum Burst Size (bytes) */
+	uint32 delay_bound;			/* Delay Bound (us) */
+	uint32 min_phy_rate;		/* Minimum PHY Rate (bps) */
+	uint16 surplus_bw;			/* Surplus Bandwidth Allowance (range 1.0-8.0) */
+	uint16 medium_time;			/* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN	(sizeof(tspec_t))		/* not including 2-bytes of header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT		1	/* TS info. TID shift */
+#define TS_INFO_TID_MASK		(0xf << TS_INFO_TID_SHIFT)	/* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT	7	/* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK	(0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT	5	/* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK	(0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT		2		/* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK		(1 << TS_INFO_PSB_SHIFT)	/* TS info. PSB mask */
+#define TS_INFO_UPLINK			(0 << TS_INFO_DIRECTION_SHIFT)	/* TS info. uplink */
+#define TS_INFO_DOWNLINK		(1 << TS_INFO_DIRECTION_SHIFT)	/* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL	(3 << TS_INFO_DIRECTION_SHIFT)	/* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT	3	/* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK	(0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt)	((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt)	((((pt).octets[0]) & \
+	TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt)	((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt)	((((pt).octets[1]) & \
+	TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id)	((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+	((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio)	((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \
+	((prio) << TS_INFO_USER_PRIO_SHIFT))
+
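+/* Illustrative sketch (not part of the original header): reading and updating a
+ * tsinfo_t with the TSINFO accessors above. Note that the SET macros return the
+ * updated octet; the caller writes it back.
+ *
+ *	tsinfo_t ts;				// filled from a received TSPEC
+ *	uint8 tid = WLC_CAC_GET_TID(ts);
+ *	uint8 dir = WLC_CAC_GET_DIR(ts);	// 0 = uplink, 1 = downlink, 3 = bidirectional
+ *	ts.octets[0] = (uint8)WLC_CAC_SET_TID(ts, 6);
+ */
+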
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN		5	/* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF		3	/* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT		1000	/* default ADDTS response timeout in ms */
+						/* DEFVAL dot11ADDTSResponseTimeout = 1s */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED	0	/* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM	1	/* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW	3	/* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE	47	/* ADDTS refused but could retry later */
+#ifdef BCMCCX
+#define CCX_STATUS_ASSOC_DENIED_UNKNOWN    0xc8	/* unspecified QoS related failure */
+#define CCX_STATUS_ASSOC_DENIED_AP_POLICY  0xc9	/* TSPEC refused due to AP policy */
+#define CCX_STATUS_ASSOC_DENIED_NO_BW	   0xca	/* Assoc denied due to AP insufficient BW */
+#define CCX_STATUS_ASSOC_DENIED_BAD_PARAM  0xcb	/* one or more TSPEC with invalid parameter */
+#endif	/* BCMCCX */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS		36	/* STA leave QBSS */
+#define DOT11E_STATUS_END_TS				37	/* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS			38	/* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT		39	/* STA ADDTS request timeout */
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.1d.h b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7eceb093bb3fdab5def80a1ba774a89e6c1f7d0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
@@ -0,0 +1,32 @@
+/*
+ * $Copyright Open Broadcom Corporation$
+ *
+ * Fundamental types and constants relating to 802.1D
+ *
+ * $Id: 802.1d.h 382882 2013-02-04 23:24:31Z $
+ */
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+/* 802.1D priority defines */
+#define	PRIO_8021D_NONE		2	/* None = - */
+#define	PRIO_8021D_BK		1	/* BK - Background */
+#define	PRIO_8021D_BE		0	/* BE - Best-effort */
+#define	PRIO_8021D_EE		3	/* EE - Excellent-effort */
+#define	PRIO_8021D_CL		4	/* CL - Controlled Load */
+#define	PRIO_8021D_VI		5	/* Vi - Video */
+#define	PRIO_8021D_VO		6	/* Vo - Voice */
+#define	PRIO_8021D_NC		7	/* NC - Network Control */
+#define	MAXPRIO			7	/* 0-7 */
+#define NUMPRIO			(MAXPRIO + 1)
+
+#define ALLPRIO		-1	/* All priorities */
+
+/* Converts prio to precedence since the numerical values of
+ * PRIO_8021D_BE and PRIO_8021D_NONE are swapped.
+ */
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? (((prio) ^ 2)) : (prio))
+
+#endif /* _802_1_D_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.3.h b/drivers/net/wireless/bcmdhd/include/proto/802.3.h
new file mode 100644
index 0000000000000000000000000000000000000000..9901a23be127ef5b39954432e5b7a3de83341861
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.3.h
@@ -0,0 +1,34 @@
+/*
+ * $Copyright Open Broadcom Corporation$
+ *
+ * Fundamental constants relating to 802.3
+ *
+ * $Id: 802.3.h 417943 2013-08-13 07:54:04Z $
+ */
+
+#ifndef _802_3_h_
+#define _802_3_h_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define SNAP_HDR_LEN	6	/* 802.3 SNAP header length */
+#define DOT3_OUI_LEN	3	/* 802.3 oui length */
+
+BWL_PRE_PACKED_STRUCT struct dot3_mac_llc_snap_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];	/* dest mac */
+	uint8	ether_shost[ETHER_ADDR_LEN];	/* src mac */
+	uint16	length;				/* frame length incl header */
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[DOT3_OUI_LEN];		/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	type;				/* ethertype */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* #ifndef _802_3_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h
new file mode 100644
index 0000000000000000000000000000000000000000..5a7695e4987cb259d4ceda746f49bc4a90ddee96
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2014, Broadcom Corporation
+ * All Rights Reserved.
+ * 
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * Fundamental constants relating to DHCP Protocol
+ *
+ * $Id: bcmdhcp.h 382883 2013-02-04 23:26:09Z $
+ */
+
+#ifndef _bcmdhcp_h_
+#define _bcmdhcp_h_
+
+/* DHCP params */
+#define DHCP_TYPE_OFFSET	0	/* DHCP type (request|reply) offset */
+#define DHCP_TID_OFFSET		4	/* DHCP transaction id offset */
+#define DHCP_FLAGS_OFFSET	10	/* DHCP flags offset */
+#define DHCP_CIADDR_OFFSET	12	/* DHCP client IP address offset */
+#define DHCP_YIADDR_OFFSET	16	/* DHCP your IP address offset */
+#define DHCP_GIADDR_OFFSET	24	/* DHCP relay agent IP address offset */
+#define DHCP_CHADDR_OFFSET	28	/* DHCP client h/w address offset */
+#define DHCP_OPT_OFFSET		236	/* DHCP options offset */
+
+#define DHCP_OPT_MSGTYPE	53	/* DHCP message type */
+#define DHCP_OPT_MSGTYPE_REQ	3
+#define DHCP_OPT_MSGTYPE_ACK	5	/* DHCP message type - ACK */
+
+#define DHCP_OPT_CODE_OFFSET	0	/* Option identifier */
+#define DHCP_OPT_LEN_OFFSET	1	/* Option data length */
+#define DHCP_OPT_DATA_OFFSET	2	/* Option data */
+
+#define DHCP_OPT_CODE_CLIENTID	61	/* Option identifier */
+
+#define DHCP_TYPE_REQUEST	1	/* DHCP request (discover|request) */
+#define DHCP_TYPE_REPLY		2	/* DHCP reply (offer|ack) */
+
+#define DHCP_PORT_SERVER	67	/* DHCP server UDP port */
+#define DHCP_PORT_CLIENT	68	/* DHCP client UDP port */
+
+#define DHCP_FLAG_BCAST	0x8000	/* DHCP broadcast flag */
+
+#define DHCP_FLAGS_LEN	2	/* DHCP flags field length */
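+
+/* Illustrative sketch (not part of the original header): walking the DHCPv4
+ * options with the offsets above to find the message type. 'dhcp' and 'len' are
+ * assumed to describe the DHCP payload (starting at the 'op' field), and the
+ * options region is assumed to begin after the 4-byte magic cookie.
+ *
+ *	uint8 *opt = dhcp + DHCP_OPT_OFFSET + 4;
+ *	while (opt + DHCP_OPT_DATA_OFFSET <= dhcp + len && opt[DHCP_OPT_CODE_OFFSET] != 255) {
+ *		if (opt[DHCP_OPT_CODE_OFFSET] == 0) {		// pad option, 1 byte long
+ *			opt++;
+ *			continue;
+ *		}
+ *		if (opt[DHCP_OPT_CODE_OFFSET] == DHCP_OPT_MSGTYPE &&
+ *		    opt[DHCP_OPT_DATA_OFFSET] == DHCP_OPT_MSGTYPE_ACK)
+ *			break;					// found a DHCP ACK
+ *		opt += DHCP_OPT_DATA_OFFSET + opt[DHCP_OPT_LEN_OFFSET];
+ *	}
+ */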
+
+#define DHCP6_TYPE_SOLICIT	1	/* DHCP6 solicit */
+#define DHCP6_TYPE_ADVERTISE	2	/* DHCP6 advertise */
+#define DHCP6_TYPE_REQUEST	3	/* DHCP6 request */
+#define DHCP6_TYPE_CONFIRM	4	/* DHCP6 confirm */
+#define DHCP6_TYPE_RENEW	5	/* DHCP6 renew */
+#define DHCP6_TYPE_REBIND	6	/* DHCP6 rebind */
+#define DHCP6_TYPE_REPLY	7	/* DHCP6 reply */
+#define DHCP6_TYPE_RELEASE	8	/* DHCP6 release */
+#define DHCP6_TYPE_DECLINE	9	/* DHCP6 decline */
+#define DHCP6_TYPE_RECONFIGURE	10	/* DHCP6 reconfigure */
+#define DHCP6_TYPE_INFOREQ	11	/* DHCP6 information request */
+#define DHCP6_TYPE_RELAYFWD	12	/* DHCP6 relay forward */
+#define DHCP6_TYPE_RELAYREPLY	13	/* DHCP6 relay reply */
+
+#define DHCP6_TYPE_OFFSET	0	/* DHCP6 type offset */
+
+#define	DHCP6_MSG_OPT_OFFSET	4	/* Offset of options in client server messages */
+#define	DHCP6_RELAY_OPT_OFFSET	34	/* Offset of options in relay messages */
+
+#define	DHCP6_OPT_CODE_OFFSET	0	/* Option identifier */
+#define	DHCP6_OPT_LEN_OFFSET	2	/* Option data length */
+#define	DHCP6_OPT_DATA_OFFSET	4	/* Option data */
+
+#define	DHCP6_OPT_CODE_CLIENTID	1	/* DHCP6 CLIENTID option */
+#define	DHCP6_OPT_CODE_SERVERID	2	/* DHCP6 SERVERID option */
+
+#define DHCP6_PORT_SERVER	547	/* DHCP6 server UDP port */
+#define DHCP6_PORT_CLIENT	546	/* DHCP6 client UDP port */
+
+#endif	/* #ifndef _bcmdhcp_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
new file mode 100644
index 0000000000000000000000000000000000000000..165efabe90912245e7d79b973934065534bf8135
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
@@ -0,0 +1,94 @@
+/*
+ * Broadcom Ethernet-type protocol definitions
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmeth.h 445746 2013-12-30 12:57:26Z $
+ */
+
+/*
+ * Broadcom Ethernet protocol defines
+ */
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* ETHER_TYPE_BRCM is defined in ethernet.h */
+
+/*
+ * Following the 2-byte BRCM ether_type is a 16-bit BRCM subtype field
+ * in one of two formats: (only subtypes 32768-65535 are in use now)
+ *
+ * subtypes 0-32767:
+ *     8 bit subtype (0-127)
+ *     8 bit length in bytes (0-255)
+ *
+ * subtypes 32768-65535:
+ *     16 bit big-endian subtype
+ *     16 bit big-endian length in bytes (0-65535)
+ *
+ * length is the number of additional bytes beyond the 4 or 6 byte header
+ *
+ * Reserved values:
+ * 0 reserved
+ * 5-15 reserved for iLine protocol assignments
+ * 17-126 reserved, assignable
+ * 127 reserved
+ * 32768 reserved
+ * 32769-65534 reserved, assignable
+ * 65535 reserved
+ */
+
+/*
+ * While adding the subtypes and their specific processing code, make sure that
+ * bcmeth_bcm_hdr_t is the first data structure in the user-specific data structure definition.
+ */
+
+#define	BCMILCP_SUBTYPE_RATE		1
+#define	BCMILCP_SUBTYPE_LINK		2
+#define	BCMILCP_SUBTYPE_CSA		3
+#define	BCMILCP_SUBTYPE_LARQ		4
+#define BCMILCP_SUBTYPE_VENDOR		5
+#define	BCMILCP_SUBTYPE_FLH		17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG	32769
+#define BCMILCP_SUBTYPE_CERT		32770
+#define BCMILCP_SUBTYPE_SES		32771
+
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED		0
+#define BCMILCP_BCM_SUBTYPE_EVENT		1
+#define BCMILCP_BCM_SUBTYPE_SES			2
+/*
+ * The EAPOL type is not used anymore. Instead EAPOL messages are now embedded
+ * within BCMILCP_BCM_SUBTYPE_EVENT type messages
+ */
+/* #define BCMILCP_BCM_SUBTYPE_EAPOL		3 */
+#define BCMILCP_BCM_SUBTYPE_DPT                 4
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH	8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION		0
+
+/* These fields are stored in network order */
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+	uint16	subtype;	/* Vendor specific..32769 */
+	uint16	length;
+	uint8	version;	/* Version is 0 */
+	uint8	oui[3];		/* Broadcom OUI */
+	/* user specific Data */
+	uint16	usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
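+
+/* Illustrative sketch (not part of the original header): reading the long-form
+ * (subtype >= 32769) header fields, which are stored in network order. ntoh16()
+ * is assumed to be available (e.g. from bcmendian.h).
+ *
+ *	bcmeth_hdr_t *bh;				// follows the BRCM ether_type in the frame
+ *	if (ntoh16(bh->subtype) == BCMILCP_SUBTYPE_VENDOR_LONG)
+ *		usr_subtype = ntoh16(bh->usr_subtype);	// e.g. BCMILCP_BCM_SUBTYPE_EVENT
+ */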
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/*  _BCMETH_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
new file mode 100644
index 0000000000000000000000000000000000000000..c1d19fd9ae7aa9e72c192888c41189000ef116af
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
@@ -0,0 +1,503 @@
+/*
+ * Broadcom Event  protocol definitions
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * Dependencies: proto/bcmeth.h
+ *
+ * $Id: bcmevent.h 490387 2014-07-10 15:12:52Z $
+ *
+ */
+
+/*
+ * Broadcom Ethernet Events protocol defines
+ *
+ */
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+/* #include <ethernet.h> -- TODO: req., excluded due to overwhelming coupling (break up ethernet.h) */
+#include <proto/bcmeth.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION		2	/* wl_event_msg_t struct version */
+#define BCM_MSG_IFNAME_MAX		16	/* max length of interface name */
+
+/* flags */
+#define WLC_EVENT_MSG_LINK		0x01	/* link is up */
+#define WLC_EVENT_MSG_FLUSHTXQ		0x02	/* flush tx queue on MIC error */
+#define WLC_EVENT_MSG_GROUP		0x04	/* group MIC error */
+#define WLC_EVENT_MSG_UNKBSS		0x08	/* unknown source bsscfg */
+#define WLC_EVENT_MSG_UNKIF		0x10	/* unknown source OS i/f */
+
+/* these fields are stored in network order */
+
+/* version 1 */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			/* see flags below */
+	uint32	event_type;		/* Message (see below) */
+	uint32	status;			/* Status code (see below) */
+	uint32	reason;			/* Reason code (if applicable) */
+	uint32	auth_type;		/* WLC_E_AUTH */
+	uint32	datalen;		/* data buf */
+	struct ether_addr	addr;	/* Station address (if applicable) */
+	char	ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
+} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t;
+
+/* the current version */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			/* see flags below */
+	uint32	event_type;		/* Message (see below) */
+	uint32	status;			/* Status code (see below) */
+	uint32	reason;			/* Reason code (if applicable) */
+	uint32	auth_type;		/* WLC_E_AUTH */
+	uint32	datalen;		/* data buf */
+	struct ether_addr	addr;	/* Station address (if applicable) */
+	char	ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
+	uint8	ifidx;			/* destination OS i/f index */
+	uint8	bsscfgidx;		/* source bsscfg index */
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+/* used by driver msgs */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+	struct ether_header eth;
+	bcmeth_hdr_t		bcm_hdr;
+	wl_event_msg_t		event;
+	/* data portion follows */
+} BWL_POST_PACKED_STRUCT bcm_event_t;
+
+#define BCM_MSG_LEN	(sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
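+
+/* Illustrative sketch (not part of the original header): locating the event data
+ * that follows a received bcm_event_t. The event fields are in network order;
+ * ntoh32() and 'pktdata' are assumptions, not defined in this header.
+ *
+ *	bcm_event_t *be = (bcm_event_t *)pktdata;
+ *	uint32 type = ntoh32(be->event.event_type);	// e.g. WLC_E_ESCAN_RESULT
+ *	uint32 datalen = ntoh32(be->event.datalen);
+ *	uint8 *data = (uint8 *)(be + 1);		// data portion follows the struct
+ */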
+
+/* Event messages */
+#define WLC_E_SET_SSID		0	/* indicates status of set SSID */
+#define WLC_E_JOIN		1	/* differentiates join IBSS from found (WLC_E_START) IBSS */
+#define WLC_E_START		2	/* STA founded an IBSS or AP started a BSS */
+#define WLC_E_AUTH		3	/* 802.11 AUTH request */
+#define WLC_E_AUTH_IND		4	/* 802.11 AUTH indication */
+#define WLC_E_DEAUTH		5	/* 802.11 DEAUTH request */
+#define WLC_E_DEAUTH_IND	6	/* 802.11 DEAUTH indication */
+#define WLC_E_ASSOC		7	/* 802.11 ASSOC request */
+#define WLC_E_ASSOC_IND		8	/* 802.11 ASSOC indication */
+#define WLC_E_REASSOC		9	/* 802.11 REASSOC request */
+#define WLC_E_REASSOC_IND	10	/* 802.11 REASSOC indication */
+#define WLC_E_DISASSOC		11	/* 802.11 DISASSOC request */
+#define WLC_E_DISASSOC_IND	12	/* 802.11 DISASSOC indication */
+#define WLC_E_QUIET_START	13	/* 802.11h Quiet period started */
+#define WLC_E_QUIET_END		14	/* 802.11h Quiet period ended */
+#define WLC_E_BEACON_RX		15	/* BEACONS received/lost indication */
+#define WLC_E_LINK		16	/* generic link indication */
+#define WLC_E_MIC_ERROR		17	/* TKIP MIC error occurred */
+#define WLC_E_NDIS_LINK		18	/* NDIS style link indication */
+#define WLC_E_ROAM		19	/* roam attempt occurred: indicate status & reason */
+#define WLC_E_TXFAIL		20	/* change in dot11FailedCount (txfail) */
+#define WLC_E_PMKID_CACHE	21	/* WPA2 pmkid cache indication */
+#define WLC_E_RETROGRADE_TSF	22	/* current AP's TSF value went backward */
+#define WLC_E_PRUNE		23	/* AP was pruned from join list for reason */
+#define WLC_E_AUTOAUTH		24	/* report AutoAuth table entry match for join attempt */
+#define WLC_E_EAPOL_MSG		25	/* Event encapsulating an EAPOL message */
+#define WLC_E_SCAN_COMPLETE	26	/* Scan results are ready or scan was aborted */
+#define WLC_E_ADDTS_IND		27	/* indicate to host addts fail/success */
+#define WLC_E_DELTS_IND		28	/* indicate to host delts fail/success */
+#define WLC_E_BCNSENT_IND	29	/* indicate to host of beacon transmit */
+#define WLC_E_BCNRX_MSG		30	/* Send the received beacon up to the host */
+#define WLC_E_BCNLOST_MSG	31	/* indicate to host loss of beacon */
+#define WLC_E_ROAM_PREP		32	/* before attempting to roam */
+#define WLC_E_PFN_NET_FOUND	33	/* PFN network found event */
+#define WLC_E_PFN_NET_LOST	34	/* PFN network lost event */
+#define WLC_E_RESET_COMPLETE	35
+#define WLC_E_JOIN_START	36
+#define WLC_E_ROAM_START	37
+#define WLC_E_ASSOC_START	38
+#define WLC_E_IBSS_ASSOC	39
+#define WLC_E_RADIO		40
+#define WLC_E_PSM_WATCHDOG	41	/* PSM microcode watchdog fired */
+#if defined(BCMCCX) && defined(CCX_SDK)
+#define WLC_E_CCX_ASSOC_START	42	/* CCX association start */
+#define WLC_E_CCX_ASSOC_ABORT	43	/* CCX association abort */
+#endif /* BCMCCX && CCX_SDK */
+#define WLC_E_PROBREQ_MSG       44      /* probe request received */
+#define WLC_E_SCAN_CONFIRM_IND  45
+#define WLC_E_PSK_SUP		46	/* WPA Handshake fail */
+#define WLC_E_COUNTRY_CODE_CHANGED	47
+#define	WLC_E_EXCEEDED_MEDIUM_TIME	48	/* WMMAC exceeded medium time */
+#define WLC_E_ICV_ERROR		49	/* WEP ICV error occurred */
+#define WLC_E_UNICAST_DECODE_ERROR	50	/* Unsupported unicast encrypted frame */
+#define WLC_E_MULTICAST_DECODE_ERROR	51	/* Unsupported multicast encrypted frame */
+#define WLC_E_TRACE		52
+#ifdef WLBTAMP
+#define WLC_E_BTA_HCI_EVENT	53	/* BT-AMP HCI event */
+#endif
+#define WLC_E_IF		54	/* I/F change (for dongle host notification) */
+#define WLC_E_P2P_DISC_LISTEN_COMPLETE	55	/* listen state expires */
+#define WLC_E_RSSI		56	/* indicate RSSI change based on configured levels */
+#define WLC_E_PFN_SCAN_COMPLETE	57	/* PFN completed scan of network list */
+/* PFN best network batching event, re-use obsolete WLC_E_PFN_SCAN_COMPLETE */
+#define WLC_E_PFN_BEST_BATCHING	57
+#define WLC_E_EXTLOG_MSG	58
+#define WLC_E_ACTION_FRAME      59	/* Action frame Rx */
+#define WLC_E_ACTION_FRAME_COMPLETE	60	/* Action frame Tx complete */
+#define WLC_E_PRE_ASSOC_IND	61	/* assoc request received */
+#define WLC_E_PRE_REASSOC_IND	62	/* re-assoc request received */
+#define WLC_E_CHANNEL_ADOPTED	63
+#define WLC_E_AP_STARTED	64	/* AP started */
+#define WLC_E_DFS_AP_STOP	65	/* AP stopped due to DFS */
+#define WLC_E_DFS_AP_RESUME	66	/* AP resumed due to DFS */
+#define WLC_E_WAI_STA_EVENT	67	/* WAI stations event */
+#define WLC_E_WAI_MSG 		68	/* event encapsulating an WAI message */
+#define WLC_E_ESCAN_RESULT 	69	/* escan result event */
+#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 	70	/* action frame off channel complete */
+#define WLC_E_PROBRESP_MSG	71	/* probe response received */
+#define WLC_E_P2P_PROBREQ_MSG	72	/* P2P Probe request received */
+#define WLC_E_DCS_REQUEST	73
+#define WLC_E_FIFO_CREDIT_MAP	74	/* credits for D11 FIFOs. [AC0,AC1,AC2,AC3,BC_MC,ATIM] */
+#define WLC_E_ACTION_FRAME_RX	75	/* Received action frame event WITH
+					 * wl_event_rx_frame_data_t header
+					 */
+#define WLC_E_WAKE_EVENT	76	/* Wake Event timer fired, used for wake WLAN test mode */
+#define WLC_E_RM_COMPLETE	77	/* Radio measurement complete */
+#define WLC_E_HTSFSYNC		78	/* Synchronize TSF with the host */
+#define WLC_E_OVERLAY_REQ	79	/* request an overlay IOCTL/iovar from the host */
+#define WLC_E_CSA_COMPLETE_IND		80	/* 802.11 CHANNEL SWITCH ACTION completed */
+#define WLC_E_EXCESS_PM_WAKE_EVENT	81	/* excess PM Wake Event to inform host  */
+#define WLC_E_PFN_SCAN_NONE		82	/* no PFN networks around */
+/* PFN BSSID network found event, conflict/share with  WLC_E_PFN_SCAN_NONE */
+#define WLC_E_PFN_BSSID_NET_FOUND	82
+#define WLC_E_PFN_SCAN_ALLGONE		83	/* last found PFN network gets lost */
+/* PFN BSSID network lost event, conflict/share with WLC_E_PFN_SCAN_ALLGONE */
+#define WLC_E_PFN_BSSID_NET_LOST	83
+#define WLC_E_GTK_PLUMBED		84
+#define WLC_E_ASSOC_IND_NDIS		85	/* 802.11 ASSOC indication for NDIS only */
+#define WLC_E_REASSOC_IND_NDIS		86	/* 802.11 REASSOC indication for NDIS only */
+#define WLC_E_ASSOC_REQ_IE		87
+#define WLC_E_ASSOC_RESP_IE		88
+#define WLC_E_ASSOC_RECREATED		89	/* association recreated on resume */
+#define WLC_E_ACTION_FRAME_RX_NDIS	90	/* rx action frame event for NDIS only */
+#define WLC_E_AUTH_REQ			91	/* authentication request received */
+#define WLC_E_TDLS_PEER_EVENT		92	/* discovered peer, connected/disconnected peer */
+#define WLC_E_SPEEDY_RECREATE_FAIL	93	/* fast assoc recreation failed */
+#define WLC_E_NATIVE			94	/* port-specific event and payload (e.g. NDIS) */
+#define WLC_E_PKTDELAY_IND		95	/* event for tx pkt delay suddenly jumping */
+#define WLC_E_PSTA_PRIMARY_INTF_IND	99	/* psta primary interface indication */
+#define WLC_E_NAN			100     /* NAN event */
+#define WLC_E_BEACON_FRAME_RX		101
+#define WLC_E_SERVICE_FOUND		102	/* desired service found */
+#define WLC_E_GAS_FRAGMENT_RX		103	/* GAS fragment received */
+#define WLC_E_GAS_COMPLETE		104	/* GAS sessions all complete */
+#define WLC_E_P2PO_ADD_DEVICE		105	/* New device found by p2p offload */
+#define WLC_E_P2PO_DEL_DEVICE		106	/* device has been removed by p2p offload */
+#define WLC_E_WNM_STA_SLEEP		107	/* WNM event to notify STA enter sleep mode */
+#define WLC_E_TXFAIL_THRESH		108	/* Indication of MAC tx failures (exhaustion of
+						 * 802.11 retries) exceeding threshold(s)
+						 */
+#define WLC_E_PROXD			109	/* Proximity Detection event */
+#define WLC_E_IBSS_COALESCE		110	/* IBSS Coalescing */
+#define WLC_E_AIBSS_TXFAIL		110	/* TXFAIL event for AIBSS, reusing event 110 */
+#define WLC_E_BSS_LOAD			114	/* Inform host of beacon bss load */
+#define WLC_E_CSA_START_IND		121
+#define WLC_E_CSA_DONE_IND		122
+#define WLC_E_CSA_FAILURE_IND		123
+#define WLC_E_CCA_CHAN_QUAL		124	/* CCA based channel quality report */
+#define WLC_E_BSSID		125	/* to report change in BSSID while roaming */
+#define WLC_E_TX_STAT_ERROR		126	/* tx error indication */
+#define WLC_E_BCMC_CREDIT_SUPPORT	127	/* credit check for BCMC supported */
+#define WLC_E_BT_WIFI_HANDOVER_REQ	130	/* Handover Request Initiated */
+#define WLC_E_SPW_TXINHIBIT		131     /* Southpaw TxInhibit notification */
+#define WLC_E_FBT_AUTH_REQ_IND		132	/* FBT Authentication Request Indication */
+#define WLC_E_RSSI_LQM			133	/* Enhancement addition for WLC_E_RSSI */
+#define WLC_E_PFN_GSCAN_FULL_RESULT		134 /* Full probe/beacon (IEs etc) results */
+#define WLC_E_PFN_SWC		135 /* Significant change in rssi of bssids being tracked */
+#define WLC_E_RMC_EVENT			139	/* RMC event */
+#define WLC_E_LAST			140	/* highest val + 1 for range checking */
+
+#if (WLC_E_LAST > 140)
+#error "WLC_E_LAST: Invalid value for last event; must be <= 140."
+#endif /* WLC_E_LAST */
+
+/* define an API for getting the string name of an event */
+extern const char *bcmevent_get_name(uint event_type);
+
+
+
+/* Event status codes */
+#define WLC_E_STATUS_SUCCESS		0	/* operation was successful */
+#define WLC_E_STATUS_FAIL		1	/* operation failed */
+#define WLC_E_STATUS_TIMEOUT		2	/* operation timed out */
+#define WLC_E_STATUS_NO_NETWORKS	3	/* failed due to no matching network found */
+#define WLC_E_STATUS_ABORT		4	/* operation was aborted */
+#define WLC_E_STATUS_NO_ACK		5	/* protocol failure: packet not ack'd */
+#define WLC_E_STATUS_UNSOLICITED	6	/* AUTH or ASSOC packet was unsolicited */
+#define WLC_E_STATUS_ATTEMPT		7	/* attempt to assoc to an auto auth configuration */
+#define WLC_E_STATUS_PARTIAL		8	/* scan results are incomplete */
+#define WLC_E_STATUS_NEWSCAN		9	/* scan aborted by another scan */
+#define WLC_E_STATUS_NEWASSOC		10	/* scan aborted due to assoc in progress */
+#define WLC_E_STATUS_11HQUIET		11	/* 802.11h quiet period started */
+#define WLC_E_STATUS_SUPPRESS		12	/* user disabled scanning (WLC_SET_SCANSUPPRESS) */
+#define WLC_E_STATUS_NOCHANS		13	/* no allowable channels to scan */
+#ifdef BCMCCX
+#define WLC_E_STATUS_CCXFASTRM		14	/* scan aborted due to CCX fast roam */
+#endif /* BCMCCX */
+#define WLC_E_STATUS_CS_ABORT		15	/* abort channel select */
+#define WLC_E_STATUS_ERROR		16	/* request failed due to error */
+#define WLC_E_STATUS_INVALID 0xff  /* Invalid status code to init variables. */
+
+
+/* roam reason codes */
+#define WLC_E_REASON_INITIAL_ASSOC	0	/* initial assoc */
+#define WLC_E_REASON_LOW_RSSI		1	/* roamed due to low RSSI */
+#define WLC_E_REASON_DEAUTH		2	/* roamed due to DEAUTH indication */
+#define WLC_E_REASON_DISASSOC		3	/* roamed due to DISASSOC indication */
+#define WLC_E_REASON_BCNS_LOST		4	/* roamed due to lost beacons */
+
+/* Roam codes used primarily by CCX */
+#define WLC_E_REASON_FAST_ROAM_FAILED	5	/* roamed due to fast roam failure */
+#define WLC_E_REASON_DIRECTED_ROAM	6	/* roamed due to request by AP */
+#define WLC_E_REASON_TSPEC_REJECTED	7	/* roamed due to TSPEC rejection */
+#define WLC_E_REASON_BETTER_AP		8	/* roamed due to finding better AP */
+#define WLC_E_REASON_MINTXRATE		9	/* roamed because at mintxrate for too long */
+#define WLC_E_REASON_TXFAIL		10	/* We can hear AP, but AP can't hear us */
+/* retained for precommit auto-merging errors; remove once all branches are synced */
+#define WLC_E_REASON_REQUESTED_ROAM	11
+#define WLC_E_REASON_BSSTRANS_REQ	11	/* roamed due to BSS Transition request by AP */
+
+/* prune reason codes */
+#define WLC_E_PRUNE_ENCR_MISMATCH	1	/* encryption mismatch */
+#define WLC_E_PRUNE_BCAST_BSSID		2	/* AP uses a broadcast BSSID */
+#define WLC_E_PRUNE_MAC_DENY		3	/* STA's MAC addr is in AP's MAC deny list */
+#define WLC_E_PRUNE_MAC_NA		4	/* STA's MAC addr is not in AP's MAC allow list */
+#define WLC_E_PRUNE_REG_PASSV		5	/* AP not allowed due to regulatory restriction */
+#define WLC_E_PRUNE_SPCT_MGMT		6	/* AP does not support STA locale spectrum mgmt */
+#define WLC_E_PRUNE_RADAR		7	/* AP is on a radar channel of STA locale */
+#define WLC_E_RSN_MISMATCH		8	/* STA does not support AP's RSN */
+#define WLC_E_PRUNE_NO_COMMON_RATES	9	/* No rates in common with AP */
+#define WLC_E_PRUNE_BASIC_RATES		10	/* STA does not support all basic rates of BSS */
+#ifdef BCMCCX
+#define WLC_E_PRUNE_CCXFAST_PREVAP	11	/* CCX FAST ROAM: prune previous AP */
+#endif /* def BCMCCX */
+#define WLC_E_PRUNE_CIPHER_NA		12	/* BSS's cipher not supported */
+#define WLC_E_PRUNE_KNOWN_STA		13	/* AP is already known to us as a STA */
+#ifdef BCMCCX
+#define WLC_E_PRUNE_CCXFAST_DROAM	14	/* CCX FAST ROAM: prune unqualified AP */
+#endif /* def BCMCCX */
+#define WLC_E_PRUNE_WDS_PEER		15	/* AP is already known to us as a WDS peer */
+#define WLC_E_PRUNE_QBSS_LOAD		16	/* QBSS LOAD - AAC is too low */
+#define WLC_E_PRUNE_HOME_AP		17	/* prune home AP */
+#ifdef BCMCCX
+#define WLC_E_PRUNE_AP_BLOCKED		18	/* prune blocked AP */
+#define WLC_E_PRUNE_NO_DIAG_SUPPORT	19	/* prune due to diagnostic mode not supported */
+#endif /* BCMCCX */
+
+/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */
+#define WLC_E_SUP_OTHER			0	/* Other reason */
+#define WLC_E_SUP_DECRYPT_KEY_DATA	1	/* Decryption of key data failed */
+#define WLC_E_SUP_BAD_UCAST_WEP128	2	/* Illegal use of ucast WEP128 */
+#define WLC_E_SUP_BAD_UCAST_WEP40	3	/* Illegal use of ucast WEP40 */
+#define WLC_E_SUP_UNSUP_KEY_LEN		4	/* Unsupported key length */
+#define WLC_E_SUP_PW_KEY_CIPHER		5	/* Unicast cipher mismatch in pairwise key */
+#define WLC_E_SUP_MSG3_TOO_MANY_IE	6	/* WPA IE contains > 1 RSN IE in key msg 3 */
+#define WLC_E_SUP_MSG3_IE_MISMATCH	7	/* WPA IE mismatch in key message 3 */
+#define WLC_E_SUP_NO_INSTALL_FLAG	8	/* INSTALL flag unset in 4-way msg */
+#define WLC_E_SUP_MSG3_NO_GTK		9	/* encapsulated GTK missing from msg 3 */
+#define WLC_E_SUP_GRP_KEY_CIPHER	10	/* Multicast cipher mismatch in group key */
+#define WLC_E_SUP_GRP_MSG1_NO_GTK	11	/* encapsulated GTK missing from group msg 1 */
+#define WLC_E_SUP_GTK_DECRYPT_FAIL	12	/* GTK decrypt failure */
+#define WLC_E_SUP_SEND_FAIL		13	/* message send failure */
+#define WLC_E_SUP_DEAUTH		14	/* received FC_DEAUTH */
+#define WLC_E_SUP_WPA_PSK_TMO		15	/* WPA PSK 4-way handshake timeout */
+
+/* Event data for events that include frames received over the air */
+/* WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data {
+	uint16	version;
+	uint16	channel;	/* Matches chanspec_t format from bcmwifi_channels.h */
+	int32	rssi;
+	uint32	mactime;
+	uint32	rate;
+} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t;
+
+#define BCM_RX_FRAME_DATA_VERSION 1
+
+/* WLC_E_IF event data */
+typedef struct wl_event_data_if {
+	uint8 ifidx;		/* RTE virtual device index (for dongle) */
+	uint8 opcode;		/* see I/F opcode */
+	uint8 reserved;		/* bit mask (WLC_E_IF_FLAGS_XXX ) */
+	uint8 bssidx;		/* bsscfg index */
+	uint8 role;		/* see I/F role */
+} wl_event_data_if_t;
+
+/* opcode in WLC_E_IF event */
+#define WLC_E_IF_ADD		1	/* bsscfg add */
+#define WLC_E_IF_DEL		2	/* bsscfg delete */
+#define WLC_E_IF_CHANGE		3	/* bsscfg role change */
+
+/* I/F role code in WLC_E_IF event */
+#define WLC_E_IF_ROLE_STA		0	/* Infra STA */
+#define WLC_E_IF_ROLE_AP		1	/* Access Point */
+#define WLC_E_IF_ROLE_WDS		2	/* WDS link */
+#define WLC_E_IF_ROLE_P2P_GO		3	/* P2P Group Owner */
+#define WLC_E_IF_ROLE_P2P_CLIENT	4	/* P2P Client */
+#ifdef WLBTAMP
+#define WLC_E_IF_ROLE_BTA_CREATOR	5	/* BT-AMP Creator */
+#define WLC_E_IF_ROLE_BTA_ACCEPTOR	6	/* BT-AMP Acceptor */
+#endif
+
+/* WLC_E_RSSI event data */
+typedef struct wl_event_data_rssi {
+	int32 rssi;
+	int32 snr;
+	int32 noise;
+} wl_event_data_rssi_t;
+
+/* WLC_E_IF flag */
+#define WLC_E_IF_FLAGS_BSSCFG_NOIF	0x1	/* no host I/F creation needed */
+
+/* Reason codes for LINK */
+#define WLC_E_LINK_BCN_LOSS	1	/* Link down because of beacon loss */
+#define WLC_E_LINK_DISASSOC	2	/* Link down because of disassoc */
+#define WLC_E_LINK_ASSOC_REC	3	/* Link down because assoc recreate failed */
+#define WLC_E_LINK_BSSCFG_DIS	4	/* Link down due to bsscfg down */
+
+/* reason codes for WLC_E_OVERLAY_REQ event */
+#define WLC_E_OVL_DOWNLOAD		0	/* overlay download request */
+#define WLC_E_OVL_UPDATE_IND	1	/* device indication of host overlay update */
+
+/* reason codes for WLC_E_TDLS_PEER_EVENT event */
+#define WLC_E_TDLS_PEER_DISCOVERED		0	/* peer is ready to establish TDLS */
+#define WLC_E_TDLS_PEER_CONNECTED		1
+#define WLC_E_TDLS_PEER_DISCONNECTED	2
+
+/* reason codes for WLC_E_RMC_EVENT event */
+#define WLC_E_REASON_RMC_NONE		0
+#define WLC_E_REASON_RMC_AR_LOST		1
+#define WLC_E_REASON_RMC_AR_NO_ACK		2
+
+
+/* GAS event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas {
+	uint16	channel;		/* channel of GAS protocol */
+	uint8	dialog_token;	/* GAS dialog token */
+	uint8	fragment_id;	/* fragment id */
+	uint16	status_code;	/* status code on GAS completion */
+	uint16 	data_len;		/* length of data to follow */
+	uint8	data[1];		/* variable length specified by data_len */
+} BWL_POST_PACKED_STRUCT wl_event_gas_t;
+
+/* service discovery TLV */
+typedef BWL_PRE_PACKED_STRUCT struct wl_sd_tlv {
+	uint16	length;			/* length of response_data */
+	uint8	protocol;		/* service protocol type */
+	uint8	transaction_id;		/* service transaction id */
+	uint8	status_code;		/* status code */
+	uint8	data[1];		/* response data */
+} BWL_POST_PACKED_STRUCT wl_sd_tlv_t;
+
+/* service discovery event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd {
+	uint16	channel;		/* channel */
+	uint8	count;			/* number of tlvs */
+	wl_sd_tlv_t	tlv[1];		/* service discovery TLV */
+} BWL_POST_PACKED_STRUCT wl_event_sd_t;
+
+/* Reason codes for WLC_E_PROXD */
+#define WLC_E_PROXD_FOUND		1	/* Found a proximity device */
+#define WLC_E_PROXD_GONE		2	/* Lost a proximity device */
+#define WLC_E_PROXD_START		3	/* used by: target  */
+#define WLC_E_PROXD_STOP		4	/* used by: target   */
+#define WLC_E_PROXD_COMPLETED		5	/* used by: initiator completed */
+#define WLC_E_PROXD_ERROR		6	/* used by both initiator and target */
+#define WLC_E_PROXD_COLLECT_START	7	/* used by: target & initiator */
+#define WLC_E_PROXD_COLLECT_STOP	8	/* used by: target */
+#define WLC_E_PROXD_COLLECT_COMPLETED	9	/* used by: initiator completed */
+#define WLC_E_PROXD_COLLECT_ERROR	10	/* used by both initiator and target */
+#define WLC_E_PROXD_NAN_EVENT		11	/* used by both initiator and target */
+
+/*  proxd_event data */
+typedef struct ftm_sample {
+	uint32 value;	/* RTT in ns */
+	int8 rssi;	/* RSSI */
+} ftm_sample_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data {
+	uint16 ver;			/* version */
+	uint16 mode;			/* mode: target/initiator */
+	uint16 method;			/* method: rssi/TOF/AOA */
+	uint8  err_code;		/* error classification */
+	uint8  TOF_type;		/* one way or two way TOF */
+	uint8  OFDM_frame_type;		/* legacy or VHT */
+	uint8  bandwidth;		/* bandwidth: 20, 40, or 80 MHz */
+	struct ether_addr peer_mac;	/* peer MAC address (e.g. for the target: the initiator's) */
+	uint32 distance;		/* distance to the target, in meters */
+	uint32 meanrtt;			/* mean delta */
+	uint32 modertt;			/* Mode delta */
+	uint32 medianrtt;		/* median RTT */
+	uint32 sdrtt;			/* Standard deviation of RTT */
+	int    gdcalcresult;		/* software or hardware calculation; somewhat redundant, but if */
+					/* the frame type is VHT, it should be done by hardware */
+	int16  avg_rssi;		/* avg rssi across the ftm frames */
+	int16  validfrmcnt;		/* Firmware's valid frame counts */
+	char  *peer_router_info;	/* Peer router information if available in TLV, */
+					/* We will add this field later  */
+	int32 var1;			/* average of group delay */
+	int32 var2;			/* average of threshold crossing */
+	int32 var3;			/* difference between group delay and threshold crossing */
+					/* raw Fine Time Measurements (ftm) data */
+	uint16 ftm_unit;		/* ftm count resolution in picoseconds, 6250ps by default */
+	uint16 ftm_cnt;			/* number of RTD measurements/length of the ftm buffer */
+	ftm_sample_t ftm_buff[1];	/* 1 ... ftm_cnt  */
+} BWL_POST_PACKED_STRUCT wl_proxd_event_data_t;
+
+
+/* Video Traffic Interference Monitor Event */
+#define INTFER_EVENT_VERSION		1
+#define INTFER_STREAM_TYPE_NONTCP	1
+#define INTFER_STREAM_TYPE_TCP		2
+#define WLINTFER_STATS_NSMPLS		4
+typedef struct wl_intfer_event {
+	uint16 version;			/* version */
+	uint16 status;			/* status */
+	uint8 txfail_histo[WLINTFER_STATS_NSMPLS]; /* txfail histo */
+} wl_intfer_event_t;
+
+/* WLC_E_PSTA_PRIMARY_INTF_IND event data */
+typedef struct wl_psta_primary_intf_event {
+	struct ether_addr prim_ea;	/* primary intf ether addr */
+} wl_psta_primary_intf_event_t;
+
+
+/*  **********  NAN protocol events/subevents  ********** */
+#define NAN_EVENT_BUFFER_SIZE 512 /* max size */
+/* nan application events to the host driver */
+enum nan_app_events {
+	WL_NAN_EVENT_START = 1,     /* NAN cluster started */
+	WL_NAN_EVENT_JOIN = 2,      /* Joined to a NAN cluster */
+	WL_NAN_EVENT_ROLE = 3,      /* Role or State changed */
+	WL_NAN_EVENT_SCAN_COMPLETE = 4,
+	WL_NAN_EVENT_DISCOVERY_RESULT = 5,
+	WL_NAN_EVENT_REPLIED = 6,
+	WL_NAN_EVENT_TERMINATED = 7,	/* the instance ID will be present in the ev data */
+	WL_NAN_EVENT_RECEIVE = 8,
+	WL_NAN_EVENT_STATUS_CHG = 9,  /* generated on any change in nan_mac status */
+	WL_NAN_EVENT_MERGE = 10,      /* Merged to a NAN cluster */
+	WL_NAN_EVENT_STOP = 11,       /* NAN stopped */
+	WL_NAN_EVENT_INVALID = 12,	/* delimiter for max value */
+};
+#define IS_NAN_EVT_ON(var, evt) (((var) & (1 << ((evt) - 1))) != 0)
+/*  ******************* end of NAN section *************** */
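As a quick sanity check of the bit layout, the sketch below mirrors IS_NAN_EVT_ON against a hypothetical 32-bit host-side event mask (not part of the patch; standard C types used):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mask in which bit (event - 1) enables that NAN event. */
#define NAN_EVT_BIT(evt)	(1u << ((evt) - 1))

int main(void)
{
	uint32_t mask = NAN_EVT_BIT(5) | NAN_EVT_BIT(8);	/* DISCOVERY_RESULT, RECEIVE */

	printf("discovery result on: %d\n", (mask & NAN_EVT_BIT(5)) != 0);
	printf("terminated on:       %d\n", (mask & NAN_EVT_BIT(7)) != 0);
	return 0;
}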
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _BCMEVENT_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmip.h b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
new file mode 100644
index 0000000000000000000000000000000000000000..e427bdc3b9133381438cf3712281314d7539b9bd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
@@ -0,0 +1,227 @@
+/*
+ * $Copyright Open Broadcom Corporation$
+ *
+ * Fundamental constants relating to IP Protocol
+ *
+ * $Id: bcmip.h 458522 2014-02-27 02:26:15Z $
+ */
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* IPV4 and IPV6 common */
+#define IP_VER_OFFSET		0x0	/* offset to version field */
+#define IP_VER_MASK		0xf0	/* version mask */
+#define IP_VER_SHIFT		4	/* version shift */
+#define IP_VER_4		4	/* version number for IPV4 */
+#define IP_VER_6		6	/* version number for IPV6 */
+
+#define IP_VER(ip_body) \
+	((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP		0x1	/* ICMP protocol */
+#define IP_PROT_IGMP		0x2	/* IGMP protocol */
+#define IP_PROT_TCP		0x6	/* TCP protocol */
+#define IP_PROT_UDP		0x11	/* UDP protocol type */
+#define IP_PROT_ICMP6		0x3a	/* ICMPv6 protocol type */
+
+/* IPV4 field offsets */
+#define IPV4_VER_HL_OFFSET      0       /* version and ihl byte offset */
+#define IPV4_TOS_OFFSET         1       /* type of service offset */
+#define IPV4_PKTLEN_OFFSET      2       /* packet length offset */
+#define IPV4_PKTFLAG_OFFSET     6       /* more-frag,dont-frag flag offset */
+#define IPV4_PROT_OFFSET        9       /* protocol type offset */
+#define IPV4_CHKSUM_OFFSET      10      /* IP header checksum offset */
+#define IPV4_SRC_IP_OFFSET      12      /* src IP addr offset */
+#define IPV4_DEST_IP_OFFSET     16      /* dest IP addr offset */
+#define IPV4_OPTIONS_OFFSET     20      /* IP options offset */
+#define IPV4_MIN_HEADER_LEN     20      /* Minimum size for an IP header (no options) */
+
+/* IPV4 field decodes */
+#define IPV4_VER_MASK		0xf0	/* IPV4 version mask */
+#define IPV4_VER_SHIFT		4	/* IPV4 version shift */
+
+#define IPV4_HLEN_MASK		0x0f	/* IPV4 header length mask */
+#define IPV4_HLEN(ipv4_body)	(4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
+
+#define IPV4_ADDR_LEN		4	/* IPV4 address length */
+
+#define IPV4_ADDR_NULL(a)	((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+				  ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a)	((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+				  ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define	IPV4_TOS_DSCP_MASK	0xfc	/* DiffServ codepoint mask */
+#define	IPV4_TOS_DSCP_SHIFT	2	/* DiffServ codepoint shift */
+
+#define	IPV4_TOS(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define	IPV4_TOS_PREC_MASK	0xe0	/* Historical precedence mask */
+#define	IPV4_TOS_PREC_SHIFT	5	/* Historical precedence shift */
+
+#define IPV4_TOS_LOWDELAY	0x10	/* Lowest delay requested */
+#define IPV4_TOS_THROUGHPUT	0x8	/* Best throughput requested */
+#define IPV4_TOS_RELIABILITY	0x4	/* Most reliable delivery requested */
+
+#define IPV4_TOS_ROUTINE        0
+#define IPV4_TOS_PRIORITY       1
+#define IPV4_TOS_IMMEDIATE      2
+#define IPV4_TOS_FLASH          3
+#define IPV4_TOS_FLASHOVERRIDE  4
+#define IPV4_TOS_CRITICAL       5
+#define IPV4_TOS_INETWORK_CTRL  6
+#define IPV4_TOS_NETWORK_CTRL   7
+
+#define IPV4_PROT(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_FRAG_RESV		0x8000	/* Reserved */
+#define IPV4_FRAG_DONT		0x4000	/* Don't fragment */
+#define IPV4_FRAG_MORE		0x2000	/* More fragments */
+#define IPV4_FRAG_OFFSET_MASK	0x1fff	/* Fragment offset */
+
+#define IPV4_ADDR_STR_LEN	16	/* Max IP address length in string format */
+
+/* IPV4 packet formats */
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+	uint8	addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+	uint8	version_ihl;		/* Version and Internet Header Length */
+	uint8	tos;			/* Type Of Service */
+	uint16	tot_len;		/* Number of bytes in packet (max 65535) */
+	uint16	id;
+	uint16	frag;			/* 3 flag bits and fragment offset */
+	uint8	ttl;			/* Time To Live */
+	uint8	prot;			/* Protocol */
+	uint16	hdr_chksum;		/* IP header checksum */
+	uint8	src_ip[IPV4_ADDR_LEN];	/* Source IP Address */
+	uint8	dst_ip[IPV4_ADDR_LEN];	/* Destination IP Address */
+} BWL_POST_PACKED_STRUCT;
+
+/* IPV6 field offsets */
+#define IPV6_PAYLOAD_LEN_OFFSET	4	/* payload length offset */
+#define IPV6_NEXT_HDR_OFFSET	6	/* next header/protocol offset */
+#define IPV6_HOP_LIMIT_OFFSET	7	/* hop limit offset */
+#define IPV6_SRC_IP_OFFSET	8	/* src IP addr offset */
+#define IPV6_DEST_IP_OFFSET	24	/* dst IP addr offset */
+
+/* IPV6 field decodes */
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+	 ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+	 (((uint8 *)(ipv6_body))[2] << 8) | \
+	 (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+	((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+	 ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+	(((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body)	IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN		16	/* IPV6 address length */
+
+/* IPV4 TOS or IPV6 Traffic Classifier or 0 */
+#define IP_TOS46(ip_body) \
+	(IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+	 IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+
+#define IP_DSCP46(ip_body) (IP_TOS46(ip_body) >> IPV4_TOS_DSCP_SHIFT)
+
+/* IPV4 or IPV6 Protocol Classifier or 0 */
+#define IP_PROT46(ip_body) \
+	(IP_VER(ip_body) == IP_VER_4 ? IPV4_PROT(ip_body) : \
+	 IP_VER(ip_body) == IP_VER_6 ? IPV6_PROT(ip_body) : 0)
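The version-agnostic classifiers above boil down to peeking at the version nibble, then reading either the IPv4 TOS byte or the IPv6 traffic class. A standalone sketch of that logic (offsets copied from the definitions above, not the driver macros themselves):

#include <stdint.h>

/* ip points at the first byte of an IPv4 or IPv6 header; returns the DSCP
 * value, or 0 for an unknown version.
 */
static uint8_t ip_dscp46(const uint8_t *ip)
{
	uint8_t ver = (ip[0] & 0xf0) >> 4;	/* IP_VER_MASK / IP_VER_SHIFT */
	uint8_t tos;

	if (ver == 4)
		tos = ip[1];			/* IPV4_TOS_OFFSET */
	else if (ver == 6)
		tos = (uint8_t)(((ip[0] & 0x0f) << 4) | ((ip[1] & 0xf0) >> 4));
	else
		return 0;
	return tos >> 2;			/* IPV4_TOS_DSCP_SHIFT */
}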
+
+/* IPV6 extension headers (options) */
+#define IPV6_EXTHDR_HOP		0
+#define IPV6_EXTHDR_ROUTING	43
+#define IPV6_EXTHDR_FRAGMENT	44
+#define IPV6_EXTHDR_AUTH	51
+#define IPV6_EXTHDR_NONE	59
+#define IPV6_EXTHDR_DEST	60
+
+#define IPV6_EXTHDR(prot)	(((prot) == IPV6_EXTHDR_HOP) || \
+	                         ((prot) == IPV6_EXTHDR_ROUTING) || \
+	                         ((prot) == IPV6_EXTHDR_FRAGMENT) || \
+	                         ((prot) == IPV6_EXTHDR_AUTH) || \
+	                         ((prot) == IPV6_EXTHDR_NONE) || \
+	                         ((prot) == IPV6_EXTHDR_DEST))
+
+#define IPV6_MIN_HLEN 		40
+
+#define IPV6_EXTHDR_LEN(eh)	((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3)
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr {
+	uint8	nexthdr;
+	uint8	hdrlen;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag {
+	uint8	nexthdr;
+	uint8	rsvd;
+	uint16	frag_off;
+	uint32	ident;
+} BWL_POST_PACKED_STRUCT;
+
+static INLINE int32
+ipv6_exthdr_len(uint8 *h, uint8 *proto)
+{
+	uint16 len = 0, hlen;
+	struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h;
+
+	while (IPV6_EXTHDR(eh->nexthdr)) {
+		if (eh->nexthdr == IPV6_EXTHDR_NONE)
+			return -1;
+		else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT)
+			hlen = 8;
+		else if (eh->nexthdr == IPV6_EXTHDR_AUTH)
+			hlen = (eh->hdrlen + 2) << 2;
+		else
+			hlen = IPV6_EXTHDR_LEN(eh);
+
+		len += hlen;
+		eh = (struct ipv6_exthdr *)(h + len);
+	}
+
+	*proto = eh->nexthdr;
+	return len;
+}
+
+#define IPV4_ISMULTI(a) (((a) & 0xf0000000) == 0xe0000000)
+
+#define IPV4_MCAST_TO_ETHER_MCAST(ipv4, ether) \
+{ \
+	ether[0] = 0x01; \
+	ether[1] = 0x00; \
+	ether[2] = 0x5E; \
+	ether[3] = (ipv4 & 0x7f0000) >> 16; \
+	ether[4] = (ipv4 & 0xff00) >> 8; \
+	ether[5] = (ipv4 & 0xff); \
+}
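The mapping performed by IPV4_MCAST_TO_ETHER_MCAST folds the low 23 bits of a 224.0.0.0/4 group address into the 01:00:5e Ethernet multicast prefix; a small standalone sketch (host-order address assumed, illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t group = 0xe0000001u;	/* 224.0.0.1, assumed host byte order */
	uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0, 0, 0 };

	if ((group & 0xf0000000u) == 0xe0000000u) {	/* IPV4_ISMULTI */
		mac[3] = (group & 0x7f0000u) >> 16;	/* low 23 bits of the group */
		mac[4] = (group & 0xff00u) >> 8;
		mac[5] = group & 0xffu;
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}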
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define IPV4_ADDR_STR "%d.%d.%d.%d"
+#define IPV4_ADDR_TO_STR(addr)	((uint32)addr & 0xff000000) >> 24, \
+								((uint32)addr & 0x00ff0000) >> 16, \
+								((uint32)addr & 0x0000ff00) >> 8, \
+								((uint32)addr & 0x000000ff)
+
+#endif	/* _bcmip_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h
new file mode 100644
index 0000000000000000000000000000000000000000..fff148525632433df747fc4260a5fc81a9218b14
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h
@@ -0,0 +1,142 @@
+/*
+ * $Copyright Open Broadcom Corporation$
+ *
+ * Fundamental constants relating to Neighbor Discovery Protocol
+ *
+ * $Id: bcmipv6.h 439574 2013-11-27 06:37:37Z $
+ */
+
+#ifndef _bcmipv6_h_
+#define _bcmipv6_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Extension headers */
+#define IPV6_EXT_HOP	0
+#define IPV6_EXT_ROUTE	43
+#define IPV6_EXT_FRAG	44
+#define IPV6_EXT_DEST	60
+#define IPV6_EXT_ESEC	50
+#define IPV6_EXT_AUTH	51
+
+/* Minimum size (extension header "word" length) */
+#define IPV6_EXT_WORD	8
+
+/* Offsets for most extension headers */
+#define IPV6_EXT_NEXTHDR	0
+#define IPV6_EXT_HDRLEN		1
+
+/* Constants specific to fragmentation header */
+#define IPV6_FRAG_MORE_MASK	0x0001
+#define IPV6_FRAG_MORE_SHIFT	0
+#define IPV6_FRAG_OFFS_MASK	0xfff8
+#define IPV6_FRAG_OFFS_SHIFT	3
+
+/* For icmpv6 */
+#define ICMPV6_HEADER_TYPE	0x3A
+#define ICMPV6_PKT_TYPE_RA	134
+#define ICMPV6_PKT_TYPE_NS	135
+#define ICMPV6_PKT_TYPE_NA	136
+
+#define ICMPV6_ND_OPT_TYPE_TARGET_MAC	2
+#define ICMPV6_ND_OPT_TYPE_SRC_MAC		1
+
+#define ICMPV6_ND_OPT_LEN_LINKADDR		1
+
+#define IPV6_VERSION 	6
+#define IPV6_HOP_LIMIT 	255
+
+#define IPV6_ADDR_NULL(a)	((a[0] | a[1] | a[2] | a[3] | a[4] | \
+							 a[5] | a[6] | a[7] | a[8] | a[9] | \
+							 a[10] | a[11] | a[12] | a[13] | \
+							 a[14] | a[15]) == 0)
+
+#define IPV6_ADDR_LOCAL(a)	(((a[0] == 0xfe) && (a[1] & 0x80))? TRUE: FALSE)
+
+/* IPV6 address */
+BWL_PRE_PACKED_STRUCT struct ipv6_addr {
+		uint8		addr[16];
+} BWL_POST_PACKED_STRUCT;
+
+
+/* ICMPV6 Header */
+BWL_PRE_PACKED_STRUCT struct icmp6_hdr {
+	uint8	icmp6_type;
+	uint8	icmp6_code;
+	uint16	icmp6_cksum;
+	BWL_PRE_PACKED_STRUCT union {
+		uint32 reserved;
+		BWL_PRE_PACKED_STRUCT struct nd_advt {
+			uint32	reserved1:5,
+				override:1,
+				solicited:1,
+				router:1,
+				reserved2:24;
+		} BWL_POST_PACKED_STRUCT nd_advt;
+	} BWL_POST_PACKED_STRUCT opt;
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Header Format */
+BWL_PRE_PACKED_STRUCT struct ipv6_hdr {
+	uint8	priority:4,
+		version:4;
+	uint8	flow_lbl[3];
+	uint16	payload_len;
+	uint8	nexthdr;
+	uint8 	hop_limit;
+	struct	ipv6_addr	saddr;
+	struct	ipv6_addr	daddr;
+} BWL_POST_PACKED_STRUCT;
+
+/* Neighbor Advertisement/Solicitation Packet Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg {
+	struct icmp6_hdr	icmph;
+	struct ipv6_addr target;
+} BWL_POST_PACKED_STRUCT;
+
+
+/* Neighbor Solicitation/Advertisement Option Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg_opt {
+	uint8 type;
+	uint8 len;
+	uint8 mac_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Fragmentation Header */
+BWL_PRE_PACKED_STRUCT struct ipv6_frag {
+	uint8	nexthdr;
+	uint8	reserved;
+	uint16	frag_offset;
+	uint32	ident;
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+static const struct ipv6_addr all_node_ipv6_maddr = {
+									{ 0xff, 0x2, 0, 0,
+									0, 0, 0, 0,
+									0, 0, 0, 0,
+									0, 0, 0, 1
+									}};
+
+#define IPV6_ISMULTI(a) (a[0] == 0xff)
+
+#define IPV6_MCAST_TO_ETHER_MCAST(ipv6, ether) \
+{ \
+	ether[0] = 0x33; \
+	ether[1] = 0x33; \
+	ether[2] = ipv6[12]; \
+	ether[3] = ipv6[13]; \
+	ether[4] = ipv6[14]; \
+	ether[5] = ipv6[15]; \
+}
+
+#endif	/* !defined(_bcmipv6_h_) */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h
new file mode 100644
index 0000000000000000000000000000000000000000..09cf24044e3ce9deddddee3d27c223119c9fe20a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h
@@ -0,0 +1,72 @@
+/*
+ * Fundamental constants relating to TCP Protocol
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmtcp.h 458522 2014-02-27 02:26:15Z $
+ */
+
+#ifndef _bcmtcp_h_
+#define _bcmtcp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+#define TCP_SRC_PORT_OFFSET	0	/* TCP source port offset */
+#define TCP_DEST_PORT_OFFSET	2	/* TCP dest port offset */
+#define TCP_SEQ_NUM_OFFSET	4	/* TCP sequence number offset */
+#define TCP_ACK_NUM_OFFSET	8	/* TCP acknowledgement number offset */
+#define TCP_HLEN_OFFSET		12	/* HLEN and reserved bits offset */
+#define TCP_FLAGS_OFFSET	13	/* FLAGS and reserved bits offset */
+#define TCP_CHKSUM_OFFSET	16	/* TCP body checksum offset */
+
+#define TCP_PORT_LEN		2	/* TCP port field length */
+
+/* 8bit TCP flag field */
+#define TCP_FLAG_URG            0x20
+#define TCP_FLAG_ACK            0x10
+#define TCP_FLAG_PSH            0x08
+#define TCP_FLAG_RST            0x04
+#define TCP_FLAG_SYN            0x02
+#define TCP_FLAG_FIN            0x01
+
+#define TCP_HLEN_MASK           0xf000
+#define TCP_HLEN_SHIFT          12
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmtcp_hdr
+{
+	uint16	src_port;	/* Source Port Address */
+	uint16	dst_port;	/* Destination Port Address */
+	uint32	seq_num;	/* TCP Sequence Number */
+	uint32	ack_num;	/* TCP Acknowledgement Number */
+	uint16	hdrlen_rsvd_flags;	/* Header length, reserved bits and flags */
+	uint16	tcpwin;		/* TCP window */
+	uint16	chksum;		/* Segment checksum with pseudoheader */
+	uint16	urg_ptr;	/* Points to seq-num of byte following urg data */
+} BWL_POST_PACKED_STRUCT;
+
+#define TCP_MIN_HEADER_LEN 20
+
+#define TCP_HDRLEN_MASK 0xf0
+#define TCP_HDRLEN_SHIFT 4
+#define TCP_HDRLEN(hdrlen) (((hdrlen) & TCP_HDRLEN_MASK) >> TCP_HDRLEN_SHIFT)
+
+#define TCP_FLAGS_MASK  0x1f
+#define TCP_FLAGS(hdrlen) ((hdrlen) & TCP_FLAGS_MASK)
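A minimal sketch of how the TCP_HDRLEN/TCP_FLAGS style of access looks against a raw segment, using the byte offsets defined above (illustrative only; "seg" points at the first byte of the TCP header):

#include <stdint.h>

/* Data offset is the upper nibble of byte 12 (TCP_HLEN_OFFSET), counted in
 * 32-bit words; flag bits live in byte 13 (TCP_FLAGS_OFFSET).
 */
static uint16_t tcp_header_bytes(const uint8_t *seg)
{
	return (uint16_t)(((seg[12] & 0xf0) >> 4) * 4);
}

static int tcp_is_syn(const uint8_t *seg)
{
	return (seg[13] & 0x02) != 0;	/* TCP_FLAG_SYN */
}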
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/* To address round up by 32bit. */
+#define IS_TCPSEQ_GE(a, b) ((a - b) < NBITVAL(31))		/* a >= b */
+#define IS_TCPSEQ_LE(a, b) ((b - a) < NBITVAL(31))		/* a <= b */
+#define IS_TCPSEQ_GT(a, b) !IS_TCPSEQ_LE(a, b)		/* a > b */
+#define IS_TCPSEQ_LT(a, b) !IS_TCPSEQ_GE(a, b)		/* a < b */
+
+#endif	/* #ifndef _bcmtcp_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h
new file mode 100644
index 0000000000000000000000000000000000000000..32407f3a33059258766c4639045de4ea1e5b99aa
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014, Broadcom Corporation
+ * All Rights Reserved.
+ * 
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * Fundamental constants relating to UDP Protocol
+ *
+ * $Id: bcmudp.h 382882 2013-02-04 23:24:31Z $
+ */
+
+#ifndef _bcmudp_h_
+#define _bcmudp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* UDP header */
+#define UDP_DEST_PORT_OFFSET	2	/* UDP dest port offset */
+#define UDP_LEN_OFFSET		4	/* UDP length offset */
+#define UDP_CHKSUM_OFFSET	6	/* UDP body checksum offset */
+
+#define UDP_HDR_LEN	8	/* UDP header length */
+#define UDP_PORT_LEN	2	/* UDP port length */
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmudp_hdr
+{
+	uint16	src_port;	/* Source Port Address */
+	uint16	dst_port;	/* Destination Port Address */
+	uint16	len;		/* Number of bytes in datagram including header */
+	uint16	chksum;		/* entire datagram checksum with pseudoheader */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* #ifndef _bcmudp_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc91f8421cc369394074ce08812e7741a2e13a3c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
@@ -0,0 +1,423 @@
+/*
+ * BT-AMP (Bluetooth Alternate MAC and PHY) HCI (Host/Controller Interface)
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bt_amp_hci.h 382882 2013-02-04 23:24:31Z $
+ */
+
+#ifndef _bt_amp_hci_h
+#define _bt_amp_hci_h
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* AMP HCI CMD packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_cmd {
+	uint16 opcode;
+	uint8 plen;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_cmd_t;
+
+#define HCI_CMD_PREAMBLE_SIZE		OFFSETOF(amp_hci_cmd_t, parms)
+#define HCI_CMD_DATA_SIZE		255
+
+/* AMP HCI CMD opcode layout */
+#define HCI_CMD_OPCODE(ogf, ocf)	((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF))
+#define HCI_CMD_OGF(opcode)		((uint8)(((opcode) >> 10) & 0x3F))
+#define HCI_CMD_OCF(opcode)		((opcode) & 0x03FF)
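The opcode helpers above pack a 6-bit opcode group (OGF) and a 10-bit opcode command (OCF) into one 16-bit word; a standalone round-trip sketch (illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* HCI_Create_Physical_Link is OGF 0x01, OCF 0x0035 in the list below. */
	uint16_t opcode = (uint16_t)(((0x01 & 0x3f) << 10) | (0x0035 & 0x03ff));

	printf("opcode=0x%04x ogf=0x%02x ocf=0x%03x\n",
	       opcode, (opcode >> 10) & 0x3f, opcode & 0x03ff);
	return 0;
}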
+
+/* AMP HCI command opcodes */
+#define HCI_Read_Failed_Contact_Counter		HCI_CMD_OPCODE(0x05, 0x0001)
+#define HCI_Reset_Failed_Contact_Counter	HCI_CMD_OPCODE(0x05, 0x0002)
+#define HCI_Read_Link_Quality			HCI_CMD_OPCODE(0x05, 0x0003)
+#define HCI_Read_Local_AMP_Info			HCI_CMD_OPCODE(0x05, 0x0009)
+#define HCI_Read_Local_AMP_ASSOC		HCI_CMD_OPCODE(0x05, 0x000A)
+#define HCI_Write_Remote_AMP_ASSOC		HCI_CMD_OPCODE(0x05, 0x000B)
+#define HCI_Create_Physical_Link		HCI_CMD_OPCODE(0x01, 0x0035)
+#define HCI_Accept_Physical_Link_Request	HCI_CMD_OPCODE(0x01, 0x0036)
+#define HCI_Disconnect_Physical_Link		HCI_CMD_OPCODE(0x01, 0x0037)
+#define HCI_Create_Logical_Link			HCI_CMD_OPCODE(0x01, 0x0038)
+#define HCI_Accept_Logical_Link			HCI_CMD_OPCODE(0x01, 0x0039)
+#define HCI_Disconnect_Logical_Link		HCI_CMD_OPCODE(0x01, 0x003A)
+#define HCI_Logical_Link_Cancel			HCI_CMD_OPCODE(0x01, 0x003B)
+#define HCI_Flow_Spec_Modify			HCI_CMD_OPCODE(0x01, 0x003C)
+#define HCI_Write_Flow_Control_Mode		HCI_CMD_OPCODE(0x01, 0x0067)
+#define HCI_Read_Best_Effort_Flush_Timeout	HCI_CMD_OPCODE(0x01, 0x0069)
+#define HCI_Write_Best_Effort_Flush_Timeout	HCI_CMD_OPCODE(0x01, 0x006A)
+#define HCI_Short_Range_Mode			HCI_CMD_OPCODE(0x01, 0x006B)
+#define HCI_Reset				HCI_CMD_OPCODE(0x03, 0x0003)
+#define HCI_Read_Connection_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0015)
+#define HCI_Write_Connection_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0016)
+#define HCI_Read_Link_Supervision_Timeout	HCI_CMD_OPCODE(0x03, 0x0036)
+#define HCI_Write_Link_Supervision_Timeout	HCI_CMD_OPCODE(0x03, 0x0037)
+#define HCI_Enhanced_Flush			HCI_CMD_OPCODE(0x03, 0x005F)
+#define HCI_Read_Logical_Link_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0061)
+#define HCI_Write_Logical_Link_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0062)
+#define HCI_Set_Event_Mask_Page_2		HCI_CMD_OPCODE(0x03, 0x0063)
+#define HCI_Read_Location_Data_Command		HCI_CMD_OPCODE(0x03, 0x0064)
+#define HCI_Write_Location_Data_Command		HCI_CMD_OPCODE(0x03, 0x0065)
+#define HCI_Read_Local_Version_Info		HCI_CMD_OPCODE(0x04, 0x0001)
+#define HCI_Read_Local_Supported_Commands	HCI_CMD_OPCODE(0x04, 0x0002)
+#define HCI_Read_Buffer_Size			HCI_CMD_OPCODE(0x04, 0x0005)
+#define HCI_Read_Data_Block_Size		HCI_CMD_OPCODE(0x04, 0x000A)
+
+/* AMP HCI command parameters */
+typedef BWL_PRE_PACKED_STRUCT struct read_local_cmd_parms {
+	uint8 plh;
+	uint8 offset[2];			/* length so far */
+	uint8 max_remote[2];
+} BWL_POST_PACKED_STRUCT read_local_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_cmd_parms {
+	uint8 plh;
+	uint8 offset[2];
+	uint8 len[2];
+	uint8 frag[1];
+} BWL_POST_PACKED_STRUCT write_remote_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_cmd_parms {
+	uint8 plh;
+	uint8 key_length;
+	uint8 key_type;
+	uint8 key[1];
+} BWL_POST_PACKED_STRUCT phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_cmd_parms {
+	uint8 plh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cmd_parms {
+	uint8 plh;
+	uint8 txflow[16];
+	uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT log_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ext_flow_spec {
+	uint8 id;
+	uint8 service_type;
+	uint8 max_sdu[2];
+	uint8 sdu_ia_time[4];
+	uint8 access_latency[4];
+	uint8 flush_timeout[4];
+} BWL_POST_PACKED_STRUCT ext_flow_spec_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_cmd_parms {
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_cmd_parms {
+	uint8 llh[2];
+	uint8 txflow[16];
+	uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT flow_spec_mod_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct plh_pad {
+	uint8 plh;
+	uint8 pad;
+} BWL_POST_PACKED_STRUCT plh_pad_t;
+
+typedef BWL_PRE_PACKED_STRUCT union hci_handle {
+	uint16 bredr;
+	plh_pad_t amp;
+} BWL_POST_PACKED_STRUCT hci_handle_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ls_to_cmd_parms {
+	hci_handle_t handle;
+	uint8 timeout[2];
+} BWL_POST_PACKED_STRUCT ls_to_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_cmd_parms {
+	uint8 llh[2];
+	uint8 befto[4];
+} BWL_POST_PACKED_STRUCT befto_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_cmd_parms {
+	uint8 plh;
+	uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_cmd_parms {
+	uint8 ld_aware;
+	uint8 ld[2];
+	uint8 ld_opts;
+	uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_cmd_parms {
+	uint8 llh[2];
+	uint8 packet_type;
+} BWL_POST_PACKED_STRUCT eflush_cmd_parms_t;
+
+/* Generic AMP extended flow spec service types */
+#define EFS_SVCTYPE_NO_TRAFFIC		0
+#define EFS_SVCTYPE_BEST_EFFORT		1
+#define EFS_SVCTYPE_GUARANTEED		2
+
+/* AMP HCI event packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_event {
+	uint8 ecode;
+	uint8 plen;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_event_t;
+
+#define HCI_EVT_PREAMBLE_SIZE			OFFSETOF(amp_hci_event_t, parms)
+
+/* AMP HCI event codes */
+#define HCI_Command_Complete			0x0E
+#define HCI_Command_Status			0x0F
+#define HCI_Flush_Occurred			0x11
+#define HCI_Enhanced_Flush_Complete		0x39
+#define HCI_Physical_Link_Complete		0x40
+#define HCI_Channel_Select			0x41
+#define HCI_Disconnect_Physical_Link_Complete	0x42
+#define HCI_Logical_Link_Complete		0x45
+#define HCI_Disconnect_Logical_Link_Complete	0x46
+#define HCI_Flow_Spec_Modify_Complete		0x47
+#define HCI_Number_of_Completed_Data_Blocks	0x48
+#define HCI_Short_Range_Mode_Change_Complete	0x4C
+#define HCI_Status_Change_Event			0x4D
+#define HCI_Vendor_Specific			0xFF
+
+/* AMP HCI event mask bit positions */
+#define HCI_Physical_Link_Complete_Event_Mask			0x0001
+#define HCI_Channel_Select_Event_Mask				0x0002
+#define HCI_Disconnect_Physical_Link_Complete_Event_Mask	0x0004
+#define HCI_Logical_Link_Complete_Event_Mask			0x0020
+#define HCI_Disconnect_Logical_Link_Complete_Event_Mask		0x0040
+#define HCI_Flow_Spec_Modify_Complete_Event_Mask		0x0080
+#define HCI_Number_of_Completed_Data_Blocks_Event_Mask		0x0100
+#define HCI_Short_Range_Mode_Change_Complete_Event_Mask		0x1000
+#define HCI_Status_Change_Event_Mask				0x2000
+#define HCI_All_Event_Mask					0x31e7
+/* AMP HCI event parameters */
+typedef BWL_PRE_PACKED_STRUCT struct cmd_status_parms {
+	uint8 status;
+	uint8 cmdpkts;
+	uint16 opcode;
+} BWL_POST_PACKED_STRUCT cmd_status_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct cmd_complete_parms {
+	uint8 cmdpkts;
+	uint16 opcode;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT cmd_complete_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flush_occurred_evt_parms {
+	uint16 handle;
+} BWL_POST_PACKED_STRUCT flush_occurred_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_evt_parms {
+	uint8 status;
+	uint8 plh;
+} BWL_POST_PACKED_STRUCT write_remote_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint16 len;
+	uint8 frag[1];
+} BWL_POST_PACKED_STRUCT read_local_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_info_evt_parms {
+	uint8 status;
+	uint8 AMP_status;
+	uint32 bandwidth;
+	uint32 gbandwidth;
+	uint32 latency;
+	uint32 PDU_size;
+	uint8 ctrl_type;
+	uint16 PAL_cap;
+	uint16 AMP_ASSOC_len;
+	uint32 max_flush_timeout;
+	uint32 be_flush_timeout;
+} BWL_POST_PACKED_STRUCT read_local_info_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_evt_parms {
+	uint8 status;
+	uint16 llh;
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct disc_log_link_evt_parms {
+	uint8 status;
+	uint16 llh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT disc_log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_evt_parms {
+	uint8 status;
+	uint16 llh;
+} BWL_POST_PACKED_STRUCT flow_spec_mod_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_evt_parms {
+	uint8 status;
+	uint8 plh;
+} BWL_POST_PACKED_STRUCT phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_ls_to_evt_parms {
+	uint8 status;
+	hci_handle_t handle;
+	uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_ls_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_lla_ca_to_evt_parms {
+	uint8 status;
+	uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_lla_ca_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_data_block_size_evt_parms {
+	uint8 status;
+	uint16 ACL_pkt_len;
+	uint16 data_block_len;
+	uint16 data_block_num;
+} BWL_POST_PACKED_STRUCT read_data_block_size_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct data_blocks {
+	uint16 handle;
+	uint16 pkts;
+	uint16 blocks;
+} BWL_POST_PACKED_STRUCT data_blocks_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct num_completed_data_blocks_evt_parms {
+	uint16 num_blocks;
+	uint8 num_handles;
+	data_blocks_t completed[1];
+} BWL_POST_PACKED_STRUCT num_completed_data_blocks_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_evt_parms {
+	uint8 status;
+	uint32 befto;
+} BWL_POST_PACKED_STRUCT befto_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_evt_parms {
+	uint8 status;
+	uint8 llh[2];
+	uint16 counter;
+} BWL_POST_PACKED_STRUCT contact_counter_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_reset_evt_parms {
+	uint8 status;
+	uint8 llh[2];
+} BWL_POST_PACKED_STRUCT contact_counter_reset_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_linkq_evt_parms {
+	uint8 status;
+	hci_handle_t handle;
+	uint8 link_quality;
+} BWL_POST_PACKED_STRUCT read_linkq_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_evt_parms {
+	uint8 status;
+	uint8 ld_aware;
+	uint8 ld[2];
+	uint8 ld_opts;
+	uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_complete_evt_parms {
+	uint16 handle;
+} BWL_POST_PACKED_STRUCT eflush_complete_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct vendor_specific_evt_parms {
+	uint8 len;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT vendor_specific_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct local_version_info_evt_parms {
+	uint8 status;
+	uint8 hci_version;
+	uint16 hci_revision;
+	uint8 pal_version;
+	uint16 mfg_name;
+	uint16 pal_subversion;
+} BWL_POST_PACKED_STRUCT local_version_info_evt_parms_t;
+
+#define MAX_SUPPORTED_CMD_BYTE	64
+typedef BWL_PRE_PACKED_STRUCT struct local_supported_cmd_evt_parms {
+	uint8 status;
+	uint8 cmd[MAX_SUPPORTED_CMD_BYTE];
+} BWL_POST_PACKED_STRUCT local_supported_cmd_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct status_change_evt_parms {
+	uint8 status;
+	uint8 amp_status;
+} BWL_POST_PACKED_STRUCT status_change_evt_parms_t;
+
+/* AMP HCI error codes */
+#define HCI_SUCCESS				0x00
+#define HCI_ERR_ILLEGAL_COMMAND			0x01
+#define HCI_ERR_NO_CONNECTION			0x02
+#define HCI_ERR_MEMORY_FULL			0x07
+#define HCI_ERR_CONNECTION_TIMEOUT		0x08
+#define HCI_ERR_MAX_NUM_OF_CONNECTIONS		0x09
+#define HCI_ERR_CONNECTION_EXISTS		0x0B
+#define HCI_ERR_CONNECTION_DISALLOWED		0x0C
+#define HCI_ERR_CONNECTION_ACCEPT_TIMEOUT	0x10
+#define HCI_ERR_UNSUPPORTED_VALUE		0x11
+#define HCI_ERR_ILLEGAL_PARAMETER_FMT		0x12
+#define HCI_ERR_CONN_TERM_BY_LOCAL_HOST		0x16
+#define HCI_ERR_UNSPECIFIED			0x1F
+#define HCI_ERR_UNIT_KEY_USED			0x26
+#define HCI_ERR_QOS_REJECTED			0x2D
+#define HCI_ERR_PARAM_OUT_OF_RANGE		0x30
+#define HCI_ERR_NO_SUITABLE_CHANNEL		0x39
+#define HCI_ERR_CHANNEL_MOVE			0xFF
+
+/* AMP HCI ACL Data packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_ACL_data {
+	uint16	handle;			/* 12-bit connection handle + 2-bit PB and 2-bit BC flags */
+	uint16	dlen;			/* data total length */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_ACL_data_t;
+
+#define HCI_ACL_DATA_PREAMBLE_SIZE	OFFSETOF(amp_hci_ACL_data_t, data)
+
+#define HCI_ACL_DATA_BC_FLAGS		(0x0 << 14)
+#define HCI_ACL_DATA_PB_FLAGS		(0x3 << 12)
+
+#define HCI_ACL_DATA_HANDLE(handle)	((handle) & 0x0fff)
+#define HCI_ACL_DATA_FLAGS(handle)	((handle) >> 12)
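A small sketch of splitting the packed ACL handle word, mirroring HCI_ACL_DATA_HANDLE/HCI_ACL_DATA_FLAGS (illustrative only, standard C types):

#include <stdint.h>

static void acl_split_handle(uint16_t raw, uint16_t *conn_handle, uint8_t *flags)
{
	*conn_handle = raw & 0x0fff;	/* 12-bit connection handle */
	*flags = (uint8_t)(raw >> 12);	/* 2-bit PB + 2-bit BC flags */
}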
+
+/* AMP Activity Report packet formats */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report {
+	uint8	ScheduleKnown;
+	uint8	NumReports;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report_triple {
+	uint32	StartTime;
+	uint32	Duration;
+	uint32	Periodicity;
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_triple_t;
+
+#define HCI_AR_SCHEDULE_KNOWN		0x01
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _bt_amp_hci_h */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/eapol.h b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
new file mode 100644
index 0000000000000000000000000000000000000000..d3bff33ab8b7b8f8530e0b8ae3ed44d0e0e711c0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
@@ -0,0 +1,194 @@
+/*
+ * 802.1x EAPOL definitions
+ *
+ * See
+ * IEEE Std 802.1X-2001
+ * IEEE 802.1X RADIUS Usage Guidelines
+ *
+ * Copyright Open Broadcom Corporation
+ *
+ * $Id: eapol.h 452703 2014-01-31 20:33:06Z $
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#include <bcmcrypto/aeskeywrap.h>
+
+/* EAPOL for 802.3/Ethernet */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ether_header eth;	/* 802.3/Ethernet header */
+	unsigned char version;		/* EAPOL protocol version */
+	unsigned char type;		/* EAPOL type */
+	unsigned short length;		/* Length of body */
+	unsigned char body[1];		/* Body (optional) */
+} BWL_POST_PACKED_STRUCT eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
+
+typedef struct {
+	unsigned char version;		/* EAPOL protocol version */
+	unsigned char type;		/* EAPOL type */
+	unsigned short length;		/* Length of body */
+} eapol_hdr_t;
+
+#define EAPOL_HDR_LEN 4
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION	2
+#define WPA_EAPOL_VERSION	1
+#define LEAP_EAPOL_VERSION	1
+#define SES_EAPOL_VERSION	1
+
+/* EAPOL types */
+#define EAP_PACKET		0
+#define EAPOL_START		1
+#define EAPOL_LOGOFF		2
+#define EAPOL_KEY		3
+#define EAPOL_ASF		4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY		1
+#define EAPOL_WPA2_KEY		2	/* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY		254	/* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN	8
+#define EAPOL_KEY_IV_LEN	16
+#define EAPOL_KEY_SIG_LEN	16
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;			/* Key Descriptor Type */
+	unsigned short length;			/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char iv[EAPOL_KEY_IV_LEN];		/* Key IV */
+	unsigned char index;				/* Key Flags & Index */
+	unsigned char signature[EAPOL_KEY_SIG_LEN];	/* Key Signature */
+	unsigned char key[1];				/* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 	44
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK	0x80
+#define EAPOL_KEY_BROADCAST	0
+#define EAPOL_KEY_UNICAST	0x80
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK	0x7f
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_WPA_KEY_REPLAY_LEN	8
+#define EAPOL_WPA_KEY_NONCE_LEN		32
+#define EAPOL_WPA_KEY_IV_LEN		16
+#define EAPOL_WPA_KEY_RSC_LEN		8
+#define EAPOL_WPA_KEY_ID_LEN		8
+#define EAPOL_WPA_KEY_MIC_LEN		16
+#define EAPOL_WPA_KEY_DATA_LEN		(EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE		32
+
+/* WPA EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;		/* Key Descriptor Type */
+	unsigned short key_info;	/* Key Information (unaligned) */
+	unsigned short key_len;		/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN];	/* Nonce */
+	unsigned char iv[EAPOL_WPA_KEY_IV_LEN];		/* Key IV */
+	unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN];	/* Key RSC */
+	unsigned char id[EAPOL_WPA_KEY_ID_LEN];		/* WPA:Key ID, 802.11i/WPA2: Reserved */
+	unsigned char mic[EAPOL_WPA_KEY_MIC_LEN];	/* Key MIC */
+	unsigned short data_len;			/* Key Data Length */
+	unsigned char data[EAPOL_WPA_KEY_DATA_LEN];	/* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+
+#define EAPOL_WPA_KEY_LEN 		95
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_OSEN	0x0
+#define WPA_KEY_DESC_V1		0x01
+#define WPA_KEY_DESC_V2		0x02
+#define WPA_KEY_DESC_V3		0x03
+#define WPA_KEY_PAIRWISE	0x08
+#define WPA_KEY_INSTALL		0x40
+#define WPA_KEY_ACK		0x80
+#define WPA_KEY_MIC		0x100
+#define WPA_KEY_SECURE		0x200
+#define WPA_KEY_ERROR		0x400
+#define WPA_KEY_REQ		0x800
+
+#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0		0x00
+#define WPA_KEY_INDEX_1		0x10
+#define WPA_KEY_INDEX_2		0x20
+#define WPA_KEY_INDEX_3		0x30
+#define WPA_KEY_INDEX_MASK	0x30
+#define WPA_KEY_INDEX_SHIFT	0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA	0x1000
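As an illustration of how the KEY_INFO bits are typically consumed, the hedged sketch below classifies message 1 of the 4-way handshake (pairwise + ACK, MIC not yet set). The key_info field is carried big-endian on the wire, so it is converted to host order first; this is an example, not driver code:

#include <stdint.h>

static int eapol_is_4way_msg1(const uint8_t *key_info_be)
{
	uint16_t ki = (uint16_t)((key_info_be[0] << 8) | key_info_be[1]);

	return (ki & 0x08) != 0 &&	/* WPA_KEY_PAIRWISE */
	       (ki & 0x80) != 0 &&	/* WPA_KEY_ACK */
	       (ki & 0x100) == 0;	/* WPA_KEY_MIC not set */
}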
+
+/* Key Data encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 type;
+	uint8 length;
+	uint8 oui[3];
+	uint8 subtype;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 	6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK	1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY	2
+#define WPA2_KEY_DATA_SUBTYPE_MAC	3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID	4
+#define WPA2_KEY_DATA_SUBTYPE_IGTK	9
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	flags;
+	uint8	reserved;
+	uint8	gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 	2
+
+#define WPA2_GTK_INDEX_MASK	0x03
+#define WPA2_GTK_INDEX_SHIFT	0x00
+
+#define WPA2_GTK_TRANSMIT	0x04
+
+/* IGTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16	key_id;
+	uint8	ipn[6];
+	uint8	key[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_igtk_encap_t;
+
+#define EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN 	8
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	reserved[2];
+	uint8	mac[ETHER_ADDR_LEN];
+	uint8	stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD	0xdd
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/ethernet.h b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
new file mode 100644
index 0000000000000000000000000000000000000000..a166c5f84c0e8e9eaa561a6aa59a21e9eba9cffb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
@@ -0,0 +1,210 @@
+/*
+ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: ethernet.h 473238 2014-04-28 19:14:56Z $
+ */
+
+#ifndef _NET_ETHERNET_H_	/* use native BSD ethernet.h when available */
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/*
+ * The number of bytes in an ethernet (MAC) address.
+ */
+#define	ETHER_ADDR_LEN		6
+
+/*
+ * The number of bytes in the type field.
+ */
+#define	ETHER_TYPE_LEN		2
+
+/*
+ * The number of bytes in the trailing CRC field.
+ */
+#define	ETHER_CRC_LEN		4
+
+/*
+ * The length of the combined header.
+ */
+#define	ETHER_HDR_LEN		(ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+/*
+ * The minimum packet length.
+ */
+#define	ETHER_MIN_LEN		64
+
+/*
+ * The minimum packet user data length.
+ */
+#define	ETHER_MIN_DATA		46
+
+/*
+ * The maximum packet length.
+ */
+#define	ETHER_MAX_LEN		1518
+
+/*
+ * The maximum packet user data length.
+ */
+#define	ETHER_MAX_DATA		1500
+
+/* ether types */
+#define ETHER_TYPE_MIN		0x0600		/* Anything less than MIN is a length */
+#define	ETHER_TYPE_IP		0x0800		/* IP */
+#define ETHER_TYPE_ARP		0x0806		/* ARP */
+#define ETHER_TYPE_8021Q	0x8100		/* 802.1Q */
+#define	ETHER_TYPE_IPV6		0x86dd		/* IPv6 */
+#define	ETHER_TYPE_BRCM		0x886c		/* Broadcom Corp. */
+#define	ETHER_TYPE_802_1X	0x888e		/* 802.1x */
+#ifdef PLC
+#define	ETHER_TYPE_88E1		0x88e1		/* GIGLE */
+#define	ETHER_TYPE_8912		0x8912		/* GIGLE */
+#define ETHER_TYPE_GIGLED	0xffff		/* GIGLE */
+#endif /* PLC */
+#define	ETHER_TYPE_802_1X_PREAUTH 0x88c7	/* 802.1x preauthentication */
+#define ETHER_TYPE_WAI		0x88b4		/* WAI */
+#define ETHER_TYPE_89_0D	0x890d		/* 89-0d frame for TDLS */
+
+#define ETHER_TYPE_PPP_SES	0x8864		/* PPPoE Session */
+
+#define ETHER_TYPE_IAPP_L2_UPDATE	0x6	/* IAPP L2 update frame */
+
+/* Broadcom subtype follows ethertype;  First 2 bytes are reserved; Next 2 are subtype; */
+#define	ETHER_BRCM_SUBTYPE_LEN	4	/* Broadcom 4 byte subtype */
+
+/* ether header */
+#define ETHER_DEST_OFFSET	(0 * ETHER_ADDR_LEN)	/* dest address offset */
+#define ETHER_SRC_OFFSET	(1 * ETHER_ADDR_LEN)	/* src address offset */
+#define ETHER_TYPE_OFFSET	(2 * ETHER_ADDR_LEN)	/* ether type offset */
+
+/*
+ * A macro to validate an Ethernet frame length.
+ */
+#define	ETHER_IS_VALID_LEN(foo)	\
+	((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) {		\
+		((uint8 *)ea)[0] = 0x01;			\
+		((uint8 *)ea)[1] = 0x00;			\
+		((uint8 *)ea)[2] = 0x5e;			\
+		((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f;	\
+		((uint8 *)ea)[4] = ((mgrp_ip) >>  8) & 0xff;	\
+		((uint8 *)ea)[5] = ((mgrp_ip) >>  0) & 0xff;	\
+}
+
+#ifndef __INCif_etherh /* Quick and ugly hack for VxWorks */
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+BWL_PRE_PACKED_STRUCT struct ether_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
+BWL_PRE_PACKED_STRUCT struct	ether_addr {
+	uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif	/* !__INCif_etherh Quick and ugly hack for VxWorks */
+
+/*
+ * Takes a pointer; set, test, clear, or toggle the locally administered
+ * address bit in the 48-bit Ethernet address.
+ */
+#define ETHER_SET_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) 	(((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+/* Takes a pointer, marks unicast address bit in the MAC address */
+#define ETHER_SET_UNICAST(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+/*
+ * Takes a pointer, returns true if a 48-bit multicast address
+ * (including broadcast, since it is all ones)
+ */
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+
+/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */
+#define eacmp(a, b)	((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \
+	                 (((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \
+	                 (((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2]))
+
+#define	ether_cmp(a, b)	eacmp(a, b)
+
+/* copy an ethernet address - assumes the pointers can be referenced as shorts */
+#define eacopy(s, d) \
+do { \
+	((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \
+	((uint16 *)(d))[1] = ((const uint16 *)(s))[1]; \
+	((uint16 *)(d))[2] = ((const uint16 *)(s))[2]; \
+} while (0)
+
+#define	ether_copy(s, d) eacopy(s, d)
+
+/* Copy an ethernet address in reverse order */
+#define	ether_rcopy(s, d) \
+do { \
+	((uint16 *)(d))[2] = ((uint16 *)(s))[2]; \
+	((uint16 *)(d))[1] = ((uint16 *)(s))[1]; \
+	((uint16 *)(d))[0] = ((uint16 *)(s))[0]; \
+} while (0)
+
+/* Copy 14B ethernet header: 32bit aligned source and destination. */
+#define ehcopy32(s, d) \
+do { \
+	((uint32 *)(d))[0] = ((const uint32 *)(s))[0]; \
+	((uint32 *)(d))[1] = ((const uint32 *)(s))[1]; \
+	((uint32 *)(d))[2] = ((const uint32 *)(s))[2]; \
+	((uint16 *)(d))[6] = ((const uint16 *)(s))[6]; \
+} while (0)
+
+
+static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+static const struct ether_addr ether_ipv6_mcast = {{0x33, 0x33, 0x00, 0x00, 0x00, 0x01}};
+
+#define ETHER_ISBCAST(ea)	((((const uint8 *)(ea))[0] &		\
+	                          ((const uint8 *)(ea))[1] &		\
+				  ((const uint8 *)(ea))[2] &		\
+				  ((const uint8 *)(ea))[3] &		\
+				  ((const uint8 *)(ea))[4] &		\
+				  ((const uint8 *)(ea))[5]) == 0xff)
+#define ETHER_ISNULLADDR(ea)	((((const uint8 *)(ea))[0] |		\
+				  ((const uint8 *)(ea))[1] |		\
+				  ((const uint8 *)(ea))[2] |		\
+				  ((const uint8 *)(ea))[3] |		\
+				  ((const uint8 *)(ea))[4] |		\
+				  ((const uint8 *)(ea))[5]) == 0)
+
+#define ETHER_ISNULLDEST(da)	((((const uint16 *)(da))[0] |           \
+				  ((const uint16 *)(da))[1] |           \
+				  ((const uint16 *)(da))[2]) == 0)
+#define ETHER_ISNULLSRC(sa)	ETHER_ISNULLDEST(sa)
+
+#define ETHER_MOVE_HDR(d, s) \
+do { \
+	struct ether_header t; \
+	t = *(struct ether_header *)(s); \
+	*(struct ether_header *)(d) = t; \
+} while (0)
+
+#define  ETHER_ISUCAST(ea) ((((uint8 *)(ea))[0] & 0x01) == 0)
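The address tests above reduce to simple byte checks; a standalone sketch against plain 6-byte arrays (illustrative only, mirroring ETHER_ISMULTI and ETHER_ISBCAST):

#include <stdint.h>
#include <string.h>

static int ether_is_multicast(const uint8_t *ea)
{
	return ea[0] & 1;		/* group bit of the first octet */
}

static int ether_is_broadcast(const uint8_t *ea)
{
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(ea, bcast, 6) == 0;
}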
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _NET_ETHERNET_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/p2p.h b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
new file mode 100644
index 0000000000000000000000000000000000000000..f50d5ed7eaa58d67f83d14bd5152f369ee4a3650
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
@@ -0,0 +1,692 @@
+/*
+ * $Copyright Open Broadcom Corporation$
+ *
+ * Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
+ *
+ * $Id: p2p.h 457033 2014-02-20 19:39:45Z $
+ */
+
+#ifndef _P2P_H_
+#define _P2P_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <wlioctl.h>
+#include <proto/802.11.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WiFi P2P OUI values */
+#define P2P_OUI			WFA_OUI			/* WiFi P2P OUI */
+#define P2P_VER			WFA_OUI_TYPE_P2P	/* P2P version: 9=WiFi P2P v1.0 */
+
+#define P2P_IE_ID		0xdd			/* P2P IE element ID */
+
+/* WiFi P2P IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie {
+	uint8	id;		/* IE ID: 0xDD */
+	uint8	len;		/* IE length */
+	uint8	OUI[3];		/* WiFi P2P specific OUI: P2P_OUI */
+	uint8	oui_type;	/* Identifies P2P version: P2P_VER */
+	uint8	subelts[1];	/* variable length subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ie wifi_p2p_ie_t;
+
+#define P2P_IE_FIXED_LEN	6
+
+#define P2P_ATTR_ID_OFF		0
+#define P2P_ATTR_LEN_OFF	1
+#define P2P_ATTR_DATA_OFF	3
+
+#define P2P_ATTR_ID_LEN		1	/* ID field length */
+#define P2P_ATTR_LEN_LEN	2	/* length field length */
+#define P2P_ATTR_HDR_LEN	3	/* ID + 2-byte length field (per spec 1.02) */
+
+#define P2P_WFDS_HASH_LEN		6
+#define P2P_WFDS_MAX_SVC_NAME_LEN	32
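The framing constants above describe how attributes are laid out after the fixed P2P IE header: a 1-byte ID, a 2-byte length, then the body. A hedged lookup sketch follows (it assumes the 2-byte length is little-endian, as in the WFA P2P spec; p2p_find_attr and its parameters are illustrative names, not driver API):

#include <stdint.h>
#include <stddef.h>

static const uint8_t *p2p_find_attr(const uint8_t *subelts, size_t subelts_len,
                                    uint8_t want_id, uint16_t *attr_len)
{
	size_t off = 0;

	while (off + 3 <= subelts_len) {	/* P2P_ATTR_HDR_LEN */
		uint8_t id = subelts[off];
		uint16_t len = (uint16_t)(subelts[off + 1] | (subelts[off + 2] << 8));

		if (off + 3 + len > subelts_len)
			break;			/* truncated attribute */
		if (id == want_id) {
			*attr_len = len;
			return subelts + off + 3;
		}
		off += 3 + (size_t)len;
	}
	return NULL;
}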
+
+/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */
+#define P2P_SEID_STATUS			0	/* Status */
+#define P2P_SEID_MINOR_RC		1	/* Minor Reason Code */
+#define P2P_SEID_P2P_INFO		2	/* P2P Capability (capabilities info) */
+#define P2P_SEID_DEV_ID			3	/* P2P Device ID */
+#define P2P_SEID_INTENT			4	/* Group Owner Intent */
+#define P2P_SEID_CFG_TIMEOUT		5	/* Configuration Timeout */
+#define P2P_SEID_CHANNEL		6	/* Listen channel */
+#define P2P_SEID_GRP_BSSID		7	/* P2P Group BSSID */
+#define P2P_SEID_XT_TIMING		8	/* Extended Listen Timing */
+#define P2P_SEID_INTINTADDR		9	/* Intended P2P Interface Address */
+#define P2P_SEID_P2P_MGBTY		10	/* P2P Manageability */
+#define P2P_SEID_CHAN_LIST		11	/* Channel List */
+#define P2P_SEID_ABSENCE		12	/* Notice of Absence */
+#define P2P_SEID_DEV_INFO		13	/* Device Info */
+#define P2P_SEID_GROUP_INFO		14	/* Group Info */
+#define P2P_SEID_GROUP_ID		15	/* Group ID */
+#define P2P_SEID_P2P_IF			16	/* P2P Interface */
+#define P2P_SEID_OP_CHANNEL		17	/* Operating Channel */
+#define P2P_SEID_INVITE_FLAGS		18	/* Invitation Flags */
+#define P2P_SEID_SERVICE_HASH		21	/* Service hash */
+#define P2P_SEID_SESSION		22	/* Session information */
+#define P2P_SEID_CONNECT_CAP		23	/* Connection capability */
+#define P2P_SEID_ADVERTISE_ID		24	/* Advertisement ID */
+#define P2P_SEID_ADVERTISE_SERVICE	25	/* Advertised service */
+#define P2P_SEID_SESSION_ID		26	/* Session ID */
+#define P2P_SEID_FEATURE_CAP		27	/* Feature capability */
+#define	P2P_SEID_PERSISTENT_GROUP	28	/* Persistent group */
+#define P2P_SEID_SESSION_INFO_RESP	29	/* Session Information Response */
+#define P2P_SEID_VNDR			221	/* Vendor-specific subelement */
+
+#define P2P_SE_VS_ID_SERVICES	0x1b
+
+
+/* WiFi P2P IE subelement: P2P Capability (capabilities info) */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_INFO */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	dev;		/* Device Capability Bitmap */
+	uint8	group;		/* Group Capability Bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t;
+
+/* P2P Capability subelement's Device Capability Bitmap bit values */
+#define P2P_CAPSE_DEV_SERVICE_DIS	0x1 /* Service Discovery */
+#define P2P_CAPSE_DEV_CLIENT_DIS	0x2 /* Client Discoverability */
+#define P2P_CAPSE_DEV_CONCURRENT	0x4 /* Concurrent Operation */
+#define P2P_CAPSE_DEV_INFRA_MAN		0x8 /* P2P Infrastructure Managed */
+#define P2P_CAPSE_DEV_LIMIT			0x10 /* P2P Device Limit */
+#define P2P_CAPSE_INVITE_PROC		0x20 /* P2P Invitation Procedure */
+
+/* P2P Capability subelement's Group Capability Bitmap bit values */
+#define P2P_CAPSE_GRP_OWNER			0x1 /* P2P Group Owner */
+#define P2P_CAPSE_PERSIST_GRP		0x2 /* Persistent P2P Group */
+#define P2P_CAPSE_GRP_LIMIT			0x4 /* P2P Group Limit */
+#define P2P_CAPSE_GRP_INTRA_BSS		0x8 /* Intra-BSS Distribution */
+#define P2P_CAPSE_GRP_X_CONNECT		0x10 /* Cross Connection */
+#define P2P_CAPSE_GRP_PERSISTENT	0x20 /* Persistent Reconnect */
+#define P2P_CAPSE_GRP_FORMATION		0x40 /* Group Formation */
+
+
+/* WiFi P2P IE subelement: Group Owner Intent */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INTENT */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	intent;		/* Intent Value 0...15 (0=legacy 15=master only) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t;
+
+/* WiFi P2P IE subelement: Configuration Timeout */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CFG_TIMEOUT */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	go_tmo;		/* GO config timeout in units of 10 ms */
+	uint8	client_tmo;	/* Client config timeout in units of 10 ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t;
+
+/* WiFi P2P IE subelement: Listen Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	op_class;	/* Operating Class */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t;
+
+/* WiFi P2P IE subelement: P2P Group BSSID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_GRP_BSSID */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P group bssid */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t;
+
+/* WiFi P2P IE subelement: P2P Group ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_GROUP_ID */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P device address */
+	uint8	ssid[1];	/* SSID / device ID, variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t;
+
+/* WiFi P2P IE subelement: P2P Interface */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_IF */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P device address */
+	uint8	ifaddrs;	/* P2P Interface Address count */
+	uint8	ifaddr[1][6];	/* P2P Interface Address list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t;
+
+/* WiFi P2P IE subelement: Status */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_STATUS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	status;		/* Status Code: P2P_STATSE_* */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t;
+
+/* Status subelement Status Code definitions */
+#define P2P_STATSE_SUCCESS			0
+				/* Success */
+#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL	1
+				/* Failed, information currently unavailable */
+#define P2P_STATSE_PASSED_UP			P2P_STATSE_FAIL_INFO_CURR_UNAVAIL
+				/* Old name for above in P2P spec 1.08 and older */
+#define P2P_STATSE_FAIL_INCOMPAT_PARAMS		2
+				/* Failed, incompatible parameters */
+#define P2P_STATSE_FAIL_LIMIT_REACHED		3
+				/* Failed, limit reached */
+#define P2P_STATSE_FAIL_INVALID_PARAMS		4
+				/* Failed, invalid parameters */
+#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM		5
+				/* Failed, unable to accommodate request */
+#define P2P_STATSE_FAIL_PROTO_ERROR		6
+				/* Failed, previous protocol error or disruptive behaviour */
+#define P2P_STATSE_FAIL_NO_COMMON_CHAN		7
+				/* Failed, no common channels */
+#define P2P_STATSE_FAIL_UNKNOWN_GROUP		8
+				/* Failed, unknown P2P Group */
+#define P2P_STATSE_FAIL_INTENT			9
+				/* Failed, both peers indicated Intent 15 in GO Negotiation */
+#define P2P_STATSE_FAIL_INCOMPAT_PROVIS		10
+				/* Failed, incompatible provisioning method */
+#define P2P_STATSE_FAIL_USER_REJECT		11
+				/* Failed, rejected by user */
+#define P2P_STATSE_SUCCESS_USER_ACCEPT		12
+				/* Success, accepted by user */
+
+/* WiFi P2P IE attribute: Extended Listen Timing */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s {
+	uint8	eltId;		/* ID: P2P_SEID_XT_TIMING */
+	uint8	len[2];		/* length not including eltId, len fields */
+	uint8	avail[2];	/* availability period */
+	uint8	interval[2];	/* availability interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t;
+
+#define P2P_EXT_MIN	10	/* minimum 10ms */
+
+/* WiFi P2P IE subelement: Intended P2P Interface Address */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INTINTADDR */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* intended P2P interface MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t;
+
+/* WiFi P2P IE subelement: Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_STATUS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	band;		/* Regulatory Class (band) */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
+
+
+/* Channel Entry structure within the Channel List SE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
+	uint8	band;						/* Regulatory Class (band) */
+	uint8	num_channels;				/* # of channels in the channel list */
+	uint8	channels[WL_NUMCHANNELS];	/* Channel List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t;
+#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2
+
+/* WiFi P2P IE subelement: Channel List */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHAN_LIST */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	num_entries;	/* # of channel entries */
+	wifi_p2p_chanlist_entry_t	entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES];
+						/* Channel Entry List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t;
+
+/* WiFi Primary Device Type structure */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s {
+	uint16	cat_id;		/* Category ID */
+	uint8	OUI[3];		/* WFA OUI: 0x0050F2 */
+	uint8	oui_type;	/* WPS_OUI_TYPE */
+	uint16	sub_cat_id;	/* Sub Category ID */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t;
+
+/* WiFi P2P Device Info Sub Element Primary Device Type Sub Category
+ * maximum values for each category
+ */
+#define P2P_DISE_SUBCATEGORY_MINVAL		1
+#define P2P_DISE_CATEGORY_COMPUTER		1
+#define P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL		8
+#define P2P_DISE_CATEGORY_INPUT_DEVICE		2
+#define P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL	9
+#define P2P_DISE_CATEGORY_PRINTER		3
+#define P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL		5
+#define P2P_DISE_CATEGORY_CAMERA		4
+#define P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL		4
+#define P2P_DISE_CATEGORY_STORAGE		5
+#define P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL		1
+#define P2P_DISE_CATEGORY_NETWORK_INFRA		6
+#define P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL	4
+#define P2P_DISE_CATEGORY_DISPLAY		7
+#define P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL		4
+#define P2P_DISE_CATEGORY_MULTIMEDIA		8
+#define P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL		6
+#define P2P_DISE_CATEGORY_GAMING		9
+#define P2P_DISE_SUBCATEGORY_GAMING_MAXVAL		5
+#define P2P_DISE_CATEGORY_TELEPHONE		10
+#define P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL		5
+#define P2P_DISE_CATEGORY_AUDIO			11
+#define P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL		6
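+
+/* Usage sketch (illustrative only, compiled out): the category/sub-category
+ * limits above are meant to be used as a validation table; a checker such as
+ * the hypothetical helper below rejects out-of-range sub-categories.
+ */
+#if 0
+static int
+p2p_devtype_subcat_valid(uint16 cat_id, uint16 sub_cat_id)
+{
+	uint16 maxval;
+
+	switch (cat_id) {
+	case P2P_DISE_CATEGORY_COMPUTER:	maxval = P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL; break;
+	case P2P_DISE_CATEGORY_INPUT_DEVICE:	maxval = P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL; break;
+	case P2P_DISE_CATEGORY_PRINTER:		maxval = P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL; break;
+	case P2P_DISE_CATEGORY_CAMERA:		maxval = P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL; break;
+	case P2P_DISE_CATEGORY_STORAGE:		maxval = P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL; break;
+	case P2P_DISE_CATEGORY_NETWORK_INFRA:	maxval = P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL; break;
+	case P2P_DISE_CATEGORY_DISPLAY:		maxval = P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL; break;
+	case P2P_DISE_CATEGORY_MULTIMEDIA:	maxval = P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL; break;
+	case P2P_DISE_CATEGORY_GAMING:		maxval = P2P_DISE_SUBCATEGORY_GAMING_MAXVAL; break;
+	case P2P_DISE_CATEGORY_TELEPHONE:	maxval = P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL; break;
+	case P2P_DISE_CATEGORY_AUDIO:		maxval = P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL; break;
+	default:				return 0;	/* unknown category */
+	}
+	return (sub_cat_id >= P2P_DISE_SUBCATEGORY_MINVAL) && (sub_cat_id <= maxval);
+}
+#endif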
+
+/* WiFi P2P IE's Device Info subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_DEVINFO */
+	uint8	len[2];			/* SE length not including eltId, len fields */
+	uint8	mac[6];			/* P2P Device MAC address */
+	uint16	wps_cfg_meths;		/* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+	uint8	pri_devtype[8];		/* Primary Device Type */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t;
+
+#define P2P_DEV_TYPE_LEN	8
+
+/* WiFi P2P IE's Group Info subelement Client Info Descriptor */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s {
+	uint8	len;
+	uint8	devaddr[ETHER_ADDR_LEN];	/* P2P Device Address */
+	uint8	ifaddr[ETHER_ADDR_LEN];		/* P2P Interface Address */
+	uint8	devcap;				/* Device Capability */
+	uint8	cfg_meths[2];			/* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+	uint8	pridt[P2P_DEV_TYPE_LEN];	/* Primary Device Type */
+	uint8	secdts;				/* Number of Secondary Device Types */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t;
+
+/* WiFi P2P IE's Device ID subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s {
+	uint8	eltId;
+	uint8	len[2];
+	struct ether_addr	addr;			/* P2P Device MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t;
+
+/* WiFi P2P IE subelement: P2P Manageability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_MGBTY */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mg_bitmap;	/* manageability bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t;
+/* mg_bitmap field bit values */
+#define P2P_MGBTSE_P2PDEVMGMT_FLAG   0x1 /* AP supports Managed P2P Device */
+
+/* WiFi P2P IE subelement: Group Info */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_GROUP_INFO */
+	uint8	len[2];			/* SE length not including eltId, len fields */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t;
+
+/* WiFi IE subelement: Operating Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_OP_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	op_class;	/* Operating Class */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t;
+
+/* WiFi IE subelement: INVITATION FLAGS */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INVITE_FLAGS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	flags;		/* Flags */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t;
+
+/* WiFi P2P IE subelement: Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_hash_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_SERVICE_HASH */
+	uint8	len[2];			/* SE length not including eltId, len fields
+					 * in multiples of 6 bytes
+					*/
+	uint8	hash[1];		/* Variable length - SHA256 hash of
+					 * service names (there may be more than one hash)
+					*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_hash_se_s wifi_p2p_serv_hash_se_t;
+
+/* WiFi P2P IE subelement: Service Instance Data */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_inst_data_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_SESSION */
+	uint8	len[2];			/* SE length not including eltId, len */
+	uint8	ssn_info[1];		/* Variable length - Session information as specified by
+					 * the service layer, type matches serv. name
+					*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_inst_data_se_s wifi_p2p_serv_inst_data_se_t;
+
+
+/* WiFi P2P IE subelement: Connection capability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_conn_cap_data_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_CONNECT_CAP */
+	uint8	len[2];			/* SE length not including eltId, len */
+	uint8	conn_cap;		/* 1byte capability as specified by the
+					 * service layer, valid bitmask/values
+					*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_conn_cap_data_se_s wifi_p2p_conn_cap_data_se_t;
+
+
+/* WiFi P2P IE subelement: Advertisement ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_id_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_ADVERTISE_ID */
+	uint8	len[2];			/* SE length not including eltId, len fixed 4 Bytes */
+	uint8	advt_id[4];		/* 4byte Advertisement ID of the peer device sent in
+					 * PROV Disc in Network byte order
+					*/
+	uint8	advt_mac[6];			/* P2P device address of the service advertiser */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_id_se_s wifi_p2p_advt_id_se_t;
+
+
+/* Advertised service info entry, carried inside the Advertise Service subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_adv_serv_info_s {
+	uint8	advt_id[4];		/* SE Advertise ID for the service */
+	uint16	nw_cfg_method;	/* SE Network Config method for the service */
+	uint8	serv_name_len;	/* SE length of the service name */
+	uint8	serv_name[1];	/* Variable length service name field */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_adv_serv_info_s wifi_p2p_adv_serv_info_t;
+
+
+/* WiFi P2P IE subelement: Advertise Service */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_serv_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_ADVERTISE_SERVICE */
+	uint8	len[2];			/* SE length not including eltId, len fields; total length
+					 * of the wifi_p2p_adv_serv_info_t entries that follow
+					*/
+	wifi_p2p_adv_serv_info_t	p_advt_serv_info[1]; /* Variable length
+								of multiple instances
+								of the advertise service info
+								*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_serv_se_s wifi_p2p_advt_serv_se_t;
+
+
+/* WiFi P2P IE subelement: Session ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ssn_id_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_SESSION_ID */
+	uint8	len[2];			/* SE length not including eltId, len fixed 4 Bytes */
+	uint8	ssn_id[4];		/* 4byte Session ID of the peer device sent in
+							 * PROV Disc in Network byte order
+							 */
+	uint8	ssn_mac[6];		/* P2P device address of the seeker - session mac */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ssn_id_se_s wifi_p2p_ssn_id_se_t;
+
+
+#define P2P_ADVT_SERV_SE_FIXED_LEN	3	/* Includes only the element ID and len */
+#define P2P_ADVT_SERV_INFO_FIXED_LEN	7	/* Per ADV Service Instance advt_id +
+						 * nw_config_method + serv_name_len
+						 */
+
+/* WiFi P2P Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame {
+	uint8	category;	/* P2P_AF_CATEGORY */
+	uint8	OUI[3];		/* OUI - P2P_OUI */
+	uint8	type;		/* OUI Type - P2P_VER */
+	uint8	subtype;	/* OUI Subtype - P2P_AF_* */
+	uint8	dialog_token;	/* nonzero, identifies req/resp transaction */
+	uint8	elts[1];	/* Variable length information elements.  Max size =
+				 * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t;
+#define P2P_AF_CATEGORY		0x7f
+
+#define P2P_AF_FIXED_LEN	7
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE	0	/* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ		1	/* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP		2	/* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ		3	/* GO Discoverability Request */
+
+
+/* WiFi P2P Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame {
+	uint8	category;	/* P2P_PUB_AF_CATEGORY */
+	uint8	action;		/* P2P_PUB_AF_ACTION */
+	uint8	oui[3];		/* P2P_OUI */
+	uint8	oui_type;	/* OUI type - P2P_VER */
+	uint8	subtype;	/* OUI subtype - P2P_TYPE_* */
+	uint8	dialog_token;	/* nonzero, identifies req/rsp transaction */
+	uint8	elts[1];	/* Variable length information elements.  Max size =
+				 * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t;
+#define P2P_PUB_AF_FIXED_LEN	8
+#define P2P_PUB_AF_CATEGORY	0x04
+#define P2P_PUB_AF_ACTION	0x09
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ		0	/* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP		1	/* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF	2	/* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ	3	/* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP	4	/* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ	5	/* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP	6	/* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ	7	/* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP	8	/* Provision Discovery Response */
+#define P2P_PAF_SUBTYPE_INVALID	255	/* Invalid Subtype */
+
+/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */
+#define P2P_TYPE_MNREQ		P2P_PAF_GON_REQ
+#define P2P_TYPE_MNRSP		P2P_PAF_GON_RSP
+#define P2P_TYPE_MNCONF		P2P_PAF_GON_CONF
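+
+/* Usage sketch (illustrative only, compiled out): classify a received public
+ * action frame body against the fixed header defined above.  P2P_OUI and
+ * P2P_VER come from earlier in this header; the helper name and the use of
+ * memcmp() are assumptions for the sketch.
+ */
+#if 0
+static uint8
+p2p_pub_af_subtype(const uint8 *body, uint len)
+{
+	const wifi_p2p_pub_act_frame_t *af = (const wifi_p2p_pub_act_frame_t *)body;
+
+	if (len < P2P_PUB_AF_FIXED_LEN)
+		return P2P_PAF_SUBTYPE_INVALID;
+	if (af->category != P2P_PUB_AF_CATEGORY || af->action != P2P_PUB_AF_ACTION)
+		return P2P_PAF_SUBTYPE_INVALID;
+	if (memcmp(af->oui, P2P_OUI, 3) != 0 || af->oui_type != P2P_VER)
+		return P2P_PAF_SUBTYPE_INVALID;
+	return af->subtype;	/* one of the P2P_PAF_* values above */
+}
+#endif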
+
+/* WiFi P2P IE subelement: Notice of Absence */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc {
+	uint8	cnt_type;	/* Count/Type */
+	uint32	duration;	/* Duration */
+	uint32	interval;	/* Interval */
+	uint32	start;		/* Start Time */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t;
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se {
+	uint8	eltId;		/* Subelement ID */
+	uint8	len[2];		/* Length */
+	uint8	index;		/* Index */
+	uint8	ops_ctw_parms;	/* CTWindow and OppPS Parameters */
+	wifi_p2p_noa_desc_t	desc[1];	/* Notice of Absence Descriptor(s) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t;
+
+#define P2P_NOA_SE_FIXED_LEN	5
+
+#define P2P_NOA_SE_MAX_DESC	2	/* max NoA descriptors in presence request */
+
+/* cnt_type field values */
+#define P2P_NOA_DESC_CNT_RESERVED	0	/* reserved and should not be used */
+#define P2P_NOA_DESC_CNT_REPEAT		255	/* continuous schedule */
+#define P2P_NOA_DESC_TYPE_PREFERRED	1	/* preferred values */
+#define P2P_NOA_DESC_TYPE_ACCEPTABLE	2	/* acceptable limits */
+
+/* ctw_ops_parms field values */
+#define P2P_NOA_CTW_MASK	0x7f
+#define P2P_NOA_OPS_MASK	0x80
+#define P2P_NOA_OPS_SHIFT	7
+
+#define P2P_CTW_MIN	10	/* minimum 10TU */
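+
+/* Usage sketch (illustrative only, compiled out): CTWindow and the OppPS flag
+ * share the ops_ctw_parms octet and are unpacked with the masks above.  The
+ * helper name is hypothetical.
+ */
+#if 0
+static void
+p2p_noa_parse_ctw_ops(const wifi_p2p_noa_se_t *se, uint8 *ctw, uint8 *opps_on)
+{
+	*ctw = se->ops_ctw_parms & P2P_NOA_CTW_MASK;			/* CTWindow, in TUs */
+	*opps_on = (se->ops_ctw_parms & P2P_NOA_OPS_MASK) >> P2P_NOA_OPS_SHIFT;
+	/* when OppPS is set, P2P_CTW_MIN is the smallest meaningful CTWindow */
+}
+#endif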
+
+/*
+ * P2P Service Discovery related
+ */
+#define	P2PSD_ACTION_CATEGORY		0x04
+				/* Public action frame */
+#define	P2PSD_ACTION_ID_GAS_IREQ	0x0a
+				/* Action value for GAS Initial Request AF */
+#define	P2PSD_ACTION_ID_GAS_IRESP	0x0b
+				/* Action value for GAS Initial Response AF */
+#define	P2PSD_ACTION_ID_GAS_CREQ	0x0c
+				/* Action value for GAS Comeback Request AF */
+#define	P2PSD_ACTION_ID_GAS_CRESP	0x0d
+				/* Action value for GAS Comeback Response AF */
+#define P2PSD_AD_EID				0x6c
+				/* Advertisement Protocol IE ID */
+#define P2PSD_ADP_TUPLE_QLMT_PAMEBI	0x00
+				/* Query Response Length Limit 7 bits plus PAME-BI 1 bit */
+#define P2PSD_ADP_PROTO_ID			0x00
+				/* Advertisement Protocol ID. Always 0 for P2P SD */
+#define P2PSD_GAS_OUI				P2P_OUI
+				/* WFA OUI */
+#define P2PSD_GAS_OUI_SUBTYPE		P2P_VER
+				/* OUI Subtype for GAS IE */
+#define P2PSD_GAS_NQP_INFOID		0xDDDD
+				/* NQP Query Info ID: 56797 */
+#define P2PSD_GAS_COMEBACKDEALY		0x00
+				/* Not used in the Native GAS protocol */
+
+/* Service Protocol Type */
+typedef enum p2psd_svc_protype {
+	SVC_RPOTYPE_ALL = 0,
+	SVC_RPOTYPE_BONJOUR = 1,
+	SVC_RPOTYPE_UPNP = 2,
+	SVC_RPOTYPE_WSD = 3,
+	SVC_RPOTYPE_WFDS = 11,
+	SVC_RPOTYPE_VENDOR = 255
+} p2psd_svc_protype_t;
+
+/* Service Discovery response status code */
+typedef enum {
+	P2PSD_RESP_STATUS_SUCCESS = 0,
+	P2PSD_RESP_STATUS_PROTYPE_NA = 1,
+	P2PSD_RESP_STATUS_DATA_NA = 2,
+	P2PSD_RESP_STATUS_BAD_REQUEST = 3
+} p2psd_resp_status_t;
+
+/* Advertisement Protocol IE tuple field */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl {
+	uint8	llm_pamebi;	/* Query Response Length Limit (bits 0-6, set to 0) plus
+				* Pre-Associated Message Exchange BSSID Independent (bit 7, set to 0)
+				*/
+	uint8	adp_id;		/* Advertisement Protocol ID: 0 for NQP Native Query Protocol */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t;
+
+/* Advertisement Protocol IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie {
+	uint8	id;		/* IE ID: 0x6c - 108 */
+	uint8	len;	/* IE length */
+	wifi_p2psd_adp_tpl_t adp_tpl;  /* Advertisement Protocol Tuple field. Only one
+				* tuple is defined for P2P Service Discovery
+				*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t;
+
+/* NQP Vendor-specific Content */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc {
+	uint8	oui_subtype;	/* OUI Subtype: 0x09 */
+	uint16	svc_updi;		/* Service Update Indicator */
+	uint8	svc_tlvs[1];	/* wifi_p2psd_qreq_tlv_t type for service request,
+				* wifi_p2psd_qresp_tlv_t type for service response
+				*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t;
+
+/* Service Request TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv {
+	uint16	len;			/* Length: 5 plus size of Query Data */
+	uint8	svc_prot;		/* Service Protocol Type */
+	uint8	svc_tscid;		/* Service Transaction ID */
+	uint8	query_data[1];	/* Query Data, passed in from the layer above */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t;
+
+/* Query Request Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame {
+	uint16	info_id;	/* Info ID: 0xDDDD */
+	uint16	len;		/* Length of service request TLV, 5 plus the size of request data */
+	uint8	oui[3];		/* WFA OUI: 0x0050F2 */
+	uint8	qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t;
+
+/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame {
+	wifi_p2psd_adp_ie_t		adp_ie;		/* Advertisement Protocol IE */
+	uint16					qreq_len;	/* Query Request Length */
+	uint8	qreq_frm[1];	/* Query Request Frame wifi_p2psd_qreq_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t;
+
+/* Service Response TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv {
+	uint16	len;				/* Length: 5 plus size of Query Data */
+	uint8	svc_prot;			/* Service Protocol Type */
+	uint8	svc_tscid;			/* Service Transaction ID */
+	uint8	status;				/* Value defined in Table 57 of P2P spec. */
+	uint8	query_data[1];		/* Response Data, passed in from the layer above */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t;
+
+/* Query Response Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame {
+	uint16	info_id;	/* Info ID: 0xDDDD */
+	uint16	len;		/* Length of service response TLV, 6 plus the size of resp data */
+	uint8	oui[3];		/* WFA OUI: 0x0050F2 */
+	uint8	qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t;
+
+/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame {
+	uint16	status;			/* Value defined in Table 7-23 of IEEE P802.11u */
+	uint16	cb_delay;		/* GAS Comeback Delay */
+	wifi_p2psd_adp_ie_t	adp_ie;		/* Advertisement Protocol IE */
+	uint16		qresp_len;	/* Query Response Length */
+	uint8	qresp_frm[1];	/* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t;
+
+/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame {
+	uint16	status;			/* Value defined in Table 7-23 of IEEE P802.11u */
+	uint8	fragment_id;	/* Fragmentation ID */
+	uint16	cb_delay;		/* GAS Comeback Delay */
+	wifi_p2psd_adp_ie_t	adp_ie;		/* Advertisement Protocol IE */
+	uint16	qresp_len;		/* Query Response Length */
+	uint8	qresp_frm[1];	/* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t;
+
+/* Wi-Fi GAS Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame {
+	uint8	category;		/* 0x04 Public Action Frame */
+	uint8	action;			/* GAS action: one of P2PSD_ACTION_ID_GAS_* */
+	uint8	dialog_token;	/* nonzero, identifies req/rsp transaction */
+	uint8	query_data[1];	/* Query Data. wifi_p2psd_gas_ireq_frame_t
+					 * or wifi_p2psd_gas_iresp_frame_t format
+					 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t;
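+
+/* Usage sketch (illustrative only, compiled out): a GAS Initial Request body
+ * nests wifi_p2psd_adp_ie_t + query length + wifi_p2psd_qreq_frame_t inside
+ * the "elts" of the public action frame.  The snippet below only fills the
+ * fixed Advertisement Protocol IE; the surrounding buffer handling is left
+ * out and the helper name is hypothetical.
+ */
+#if 0
+static void
+p2psd_fill_adp_ie(wifi_p2psd_adp_ie_t *ie)
+{
+	ie->id = P2PSD_AD_EID;				/* 0x6c */
+	ie->len = sizeof(wifi_p2psd_adp_tpl_t);		/* one tuple only for P2P SD */
+	ie->adp_tpl.llm_pamebi = P2PSD_ADP_TUPLE_QLMT_PAMEBI;
+	ie->adp_tpl.adp_id = P2PSD_ADP_PROTO_ID;	/* NQP */
+}
+#endif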
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _P2P_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/sdspi.h b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
new file mode 100644
index 0000000000000000000000000000000000000000..647a217bbbcd83895ddf2e6870110d99a3dac87d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
@@ -0,0 +1,57 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * $ Copyright Open Broadcom Corporation $
+ *
+ * $Id: sdspi.h 382882 2013-02-04 23:24:31Z $
+ */
+#ifndef	_SD_SPI_H
+#define	_SD_SPI_H
+
+#define SPI_START_M		BITFIELD_MASK(1)	/* Bit [31] 	- Start Bit */
+#define SPI_START_S		31
+#define SPI_DIR_M		BITFIELD_MASK(1)	/* Bit [30] 	- Direction */
+#define SPI_DIR_S		30
+#define SPI_CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [29:24] - Command number */
+#define SPI_CMD_INDEX_S		24
+#define SPI_RW_M		BITFIELD_MASK(1)	/* Bit [23] 	- Read=0, Write=1 */
+#define SPI_RW_S		23
+#define SPI_FUNC_M		BITFIELD_MASK(3)	/* Bits [22:20]	- Function Number */
+#define SPI_FUNC_S		20
+#define SPI_RAW_M		BITFIELD_MASK(1)	/* Bit [19] 	- Read After Wr */
+#define SPI_RAW_S		19
+#define SPI_STUFF_M		BITFIELD_MASK(1)	/* Bit [18] 	- Stuff bit */
+#define SPI_STUFF_S		18
+#define SPI_BLKMODE_M		BITFIELD_MASK(1)	/* Bit [19] 	- Blockmode 1=blk */
+#define SPI_BLKMODE_S		19
+#define SPI_OPCODE_M		BITFIELD_MASK(1)	/* Bit [18] 	- OP Code */
+#define SPI_OPCODE_S		18
+#define SPI_ADDR_M		BITFIELD_MASK(17)	/* Bits [17:1] 	- Address */
+#define SPI_ADDR_S		1
+#define SPI_STUFF0_M		BITFIELD_MASK(1)	/* Bit [0] 	- Stuff bit */
+#define SPI_STUFF0_S		0
+
+#define SPI_RSP_START_M		BITFIELD_MASK(1)	/* Bit [7] 	- Start Bit (always 0) */
+#define SPI_RSP_START_S		7
+#define SPI_RSP_PARAM_ERR_M	BITFIELD_MASK(1)	/* Bit [6] 	- Parameter Error */
+#define SPI_RSP_PARAM_ERR_S	6
+#define SPI_RSP_RFU5_M		BITFIELD_MASK(1)	/* Bit [5] 	- RFU (Always 0) */
+#define SPI_RSP_RFU5_S		5
+#define SPI_RSP_FUNC_ERR_M	BITFIELD_MASK(1)	/* Bit [4] 	- Function number error */
+#define SPI_RSP_FUNC_ERR_S	4
+#define SPI_RSP_CRC_ERR_M	BITFIELD_MASK(1)	/* Bit [3] 	- COM CRC Error */
+#define SPI_RSP_CRC_ERR_S	3
+#define SPI_RSP_ILL_CMD_M	BITFIELD_MASK(1)	/* Bit [2] 	- Illegal Command error */
+#define SPI_RSP_ILL_CMD_S	2
+#define SPI_RSP_RFU1_M		BITFIELD_MASK(1)	/* Bit [1] 	- RFU (Always 0) */
+#define SPI_RSP_RFU1_S		1
+#define SPI_RSP_IDLE_M		BITFIELD_MASK(1)	/* Bit [0] 	- In idle state */
+#define SPI_RSP_IDLE_S		0
+
+/* SD-SPI Protocol Definitions */
+#define SDSPI_COMMAND_LEN	6	/* Number of bytes in an SD command */
+#define SDSPI_START_BLOCK	0xFE	/* SD Start Block Token */
+#define SDSPI_IDLE_PAD		0xFF	/* SD-SPI idle value for MOSI */
+#define SDSPI_START_BIT_MASK	0x80
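+
+/* Usage sketch (illustrative only, compiled out): the *_M/*_S pairs above pack
+ * the leading 32 bits of a 48-bit SD-SPI command token (shown here for a
+ * CMD53-style access).  BITFIELD_MASK() is assumed to come from bcmdefs.h and
+ * the helper name is hypothetical.
+ */
+#if 0
+static uint32
+sdspi_pack_cmd53_hdr(uint32 rw, uint32 func, uint32 blkmode, uint32 opcode, uint32 addr)
+{
+	uint32 w = 0;
+
+	/* start bit (31) stays 0; direction bit (30) is 1 for host-to-card */
+	w |= (1 & SPI_DIR_M) << SPI_DIR_S;
+	w |= (53 & SPI_CMD_INDEX_M) << SPI_CMD_INDEX_S;
+	w |= (rw & SPI_RW_M) << SPI_RW_S;
+	w |= (func & SPI_FUNC_M) << SPI_FUNC_S;
+	w |= (blkmode & SPI_BLKMODE_M) << SPI_BLKMODE_S;
+	w |= (opcode & SPI_OPCODE_M) << SPI_OPCODE_S;
+	w |= (addr & SPI_ADDR_M) << SPI_ADDR_S;
+	return w;
+}
+#endif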
+
+#endif /* _SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/vlan.h b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca1f461f5b91e1f6c0e62d665d105b2c35661981
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
@@ -0,0 +1,77 @@
+/*
+ * 802.1Q VLAN protocol definitions
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: vlan.h 382883 2013-02-04 23:26:09Z $
+ */
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#ifndef	 VLAN_VID_MASK
+#define VLAN_VID_MASK		0xfff	/* low 12 bits are vlan id */
+#endif
+
+#define	VLAN_CFI_SHIFT		12	/* canonical format indicator bit */
+#define VLAN_PRI_SHIFT		13	/* user priority */
+
+#define VLAN_PRI_MASK		7	/* 3 bits of priority */
+
+#define	VLAN_TPID_OFFSET	12	/* offset of tag protocol id field */
+#define	VLAN_TCI_OFFSET		14	/* offset of tag ctrl info field */
+
+#define	VLAN_TAG_LEN		4
+#define	VLAN_TAG_OFFSET		(2 * ETHER_ADDR_LEN)	/* offset in Ethernet II packet only */
+
+#define VLAN_TPID		0x8100	/* VLAN ethertype/Tag Protocol ID */
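+
+/* Usage sketch (illustrative only, compiled out): split a host-order TCI
+ * (the vlan_tag field below, after byte-order conversion) into its priority,
+ * CFI and VID parts using the shifts/masks above.  Struct and helper names
+ * are hypothetical.
+ */
+#if 0
+struct vlan_tci_fields {
+	uint16 pri;	/* user priority, 0-7 */
+	uint16 cfi;	/* canonical format indicator */
+	uint16 vid;	/* VLAN id, 0-4095 */
+};
+
+static void
+vlan_tci_split(uint16 tci, struct vlan_tci_fields *f)
+{
+	f->pri = (tci >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+	f->cfi = (tci >> VLAN_CFI_SHIFT) & 1;
+	f->vid = tci & VLAN_VID_MASK;
+}
+#endif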
+
+struct vlan_header {
+	uint16	vlan_type;		/* 0x8100 */
+	uint16	vlan_tag;		/* priority, cfi and vid */
+};
+
+struct ethervlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	vlan_type;		/* 0x8100 */
+	uint16	vlan_tag;		/* priority, cfi and vid */
+	uint16	ether_type;
+};
+
+struct dot3_mac_llc_snapvlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];	/* dest mac */
+	uint8	ether_shost[ETHER_ADDR_LEN];	/* src mac */
+	uint16	length;				/* frame length incl header */
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[3];				/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	vlan_type;			/* 0x8100 */
+	uint16	vlan_tag;			/* priority, cfi and vid */
+	uint16	ether_type;			/* ethertype */
+};
+
+#define	ETHERVLAN_HDR_LEN	(ETHER_HDR_LEN + VLAN_TAG_LEN)
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define ETHERVLAN_MOVE_HDR(d, s) \
+do { \
+	struct ethervlan_header t; \
+	t = *(struct ethervlan_header *)(s); \
+	*(struct ethervlan_header *)(d) = t; \
+} while (0)
+
+#endif /* _vlan_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/wpa.h b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a27aedbf21f3096b4cbf4c9b4060e95d9dc9bb7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
@@ -0,0 +1,198 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wpa.h 450928 2014-01-23 14:13:38Z $
+ */
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Reason Codes */
+
+/* 13 through 23 taken from IEEE Std 802.11i-2004 */
+#define DOT11_RC_INVALID_WPA_IE		13	/* Invalid info. element */
+#define DOT11_RC_MIC_FAILURE		14	/* Michael failure */
+#define DOT11_RC_4WH_TIMEOUT		15	/* 4-way handshake timeout */
+#define DOT11_RC_GTK_UPDATE_TIMEOUT	16	/* Group key update timeout */
+#define DOT11_RC_WPA_IE_MISMATCH	17	/* WPA IE in 4-way handshake differs from
+						 * (re-)assoc. request/probe response
+						 */
+#define DOT11_RC_INVALID_MC_CIPHER	18	/* Invalid multicast cipher */
+#define DOT11_RC_INVALID_UC_CIPHER	19	/* Invalid unicast cipher */
+#define DOT11_RC_INVALID_AKMP		20	/* Invalid authenticated key management protocol */
+#define DOT11_RC_BAD_WPA_VERSION	21	/* Unsupported WPA version */
+#define DOT11_RC_INVALID_WPA_CAP	22	/* Invalid WPA IE capabilities */
+#define DOT11_RC_8021X_AUTH_FAIL	23	/* 802.1X authentication failure */
+
+#define WPA2_PMKID_LEN	16
+
+/* WPA IE fixed portion */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 tag;	/* TAG */
+	uint8 length;	/* TAG length */
+	uint8 oui[3];	/* IE OUI */
+	uint8 oui_type;	/* OUI type */
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	/* IE version */
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN	4
+#define WPA_IE_FIXED_LEN	8
+#define WPA_IE_TAG_FIXED_LEN	6
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 tag;	/* TAG */
+	uint8 length;	/* TAG length */
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	/* IE version */
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN	4
+#define WPA_RSN_IE_TAG_FIXED_LEN	2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+#define WFA_OSEN_IE_FIXED_LEN	6
+
+/* WPA suite/multicast suite */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 oui[3];
+	uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN	4
+
+/* WPA unicast suite list/key management suite list */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN	2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
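+
+/* Usage sketch (illustrative only, compiled out): the count fields above are
+ * kept as explicit low/high octets so the structures stay packed and
+ * endian-neutral; the 2-octet counts in the WPA/RSN IE are little-endian on
+ * the air, so a reader reassembles them like this.  Helper name is
+ * hypothetical.
+ */
+#if 0
+static uint16
+wpa_suite_count(const wpa_suite_ucast_t *ucast)
+{
+	return (uint16)(ucast->count.low | (ucast->count.high << 8));
+}
+#endif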
+
+/* WPA cipher suites */
+#define WPA_CIPHER_NONE		0	/* None */
+#define WPA_CIPHER_WEP_40	1	/* WEP (40-bit) */
+#define WPA_CIPHER_TKIP		2	/* TKIP: default for WPA */
+#define WPA_CIPHER_AES_OCB	3	/* AES (OCB) */
+#define WPA_CIPHER_AES_CCM	4	/* AES (CCM) */
+#define WPA_CIPHER_WEP_104	5	/* WEP (104-bit) */
+#define WPA_CIPHER_BIP		6	/* BIP (Broadcast Integrity Protocol) */
+#define WPA_CIPHER_TPK		7	/* Group addressed traffic not allowed */
+#ifdef BCMCCX
+#define WPA_CIPHER_CKIP		8	/* KP with no MIC */
+#define WPA_CIPHER_CKIP_MMH	9	/* KP with MIC ("CKIP/MMH", "CKIP+CMIC") */
+#define WPA_CIPHER_WEP_MMH	10	/* MIC with no KP ("WEP/MMH", "CMIC") */
+
+#define IS_CCX_CIPHER(cipher)	((cipher) == WPA_CIPHER_CKIP || \
+				 (cipher) == WPA_CIPHER_CKIP_MMH || \
+				 (cipher) == WPA_CIPHER_WEP_MMH)
+#endif
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CIPHER_NONE	WPA_CIPHER_NONE
+#define WAPI_CIPHER_SMS4	11
+
+#define WAPI_CSE_WPI_SMS4	1
+#endif /* BCMWAPI_WAI */
+
+#define IS_WPA_CIPHER(cipher)	((cipher) == WPA_CIPHER_NONE || \
+				 (cipher) == WPA_CIPHER_WEP_40 || \
+				 (cipher) == WPA_CIPHER_WEP_104 || \
+				 (cipher) == WPA_CIPHER_TKIP || \
+				 (cipher) == WPA_CIPHER_AES_OCB || \
+				 (cipher) == WPA_CIPHER_AES_CCM || \
+				 (cipher) == WPA_CIPHER_TPK)
+
+#ifdef BCMWAPI_WAI
+#define IS_WAPI_CIPHER(cipher)	((cipher) == WAPI_CIPHER_NONE || \
+				 (cipher) == WAPI_CSE_WPI_SMS4)
+
+/* convert WAPI_CSE_WPI_XXX to WAPI_CIPHER_XXX */
+#define WAPI_CSE_WPI_2_CIPHER(cse) ((cse) == WAPI_CSE_WPI_SMS4 ? \
+				WAPI_CIPHER_SMS4 : WAPI_CIPHER_NONE)
+
+#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \
+				WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE)
+#endif /* BCMWAPI_WAI */
+
+/* WPA TKIP countermeasures parameters */
+#define WPA_TKIP_CM_DETECT	60	/* multiple MIC failure window (seconds) */
+#define WPA_TKIP_CM_BLOCK	60	/* countermeasures active window (seconds) */
+
+/* RSN IE defines */
+#define RSN_CAP_LEN		2	/* Length of RSN capabilities field (2 octets) */
+
+/* RSN Capabilities defined in 802.11i */
+#define RSN_CAP_PREAUTH			0x0001
+#define RSN_CAP_NOPAIRWISE		0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK	0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT	2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK	0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT	4
+#define RSN_CAP_1_REPLAY_CNTR		0
+#define RSN_CAP_2_REPLAY_CNTRS		1
+#define RSN_CAP_4_REPLAY_CNTRS		2
+#define RSN_CAP_16_REPLAY_CNTRS		3
+#define RSN_CAP_MFPR			0x0040
+#define RSN_CAP_MFPC			0x0080
+#define RSN_CAP_SPPC			0x0400
+#define RSN_CAP_SPPR			0x0800
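+
+/* Usage sketch (illustrative only, compiled out): the 2-bit PTK replay counter
+ * field of the RSN capabilities encodes the number of counters; the
+ * hypothetical helper below turns it back into a count.
+ */
+#if 0
+static uint8
+rsn_cap_ptk_replay_counters(uint16 rsn_cap)
+{
+	switch ((rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK) >> RSN_CAP_PTK_REPLAY_CNTR_SHIFT) {
+	case RSN_CAP_1_REPLAY_CNTR:	return 1;
+	case RSN_CAP_2_REPLAY_CNTRS:	return 2;
+	case RSN_CAP_4_REPLAY_CNTRS:	return 4;
+	case RSN_CAP_16_REPLAY_CNTRS:	return 16;
+	}
+	return 1;	/* not reachable: the field is only two bits wide */
+}
+#endif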
+
+/* WPA capabilities defined in 802.11i */
+#define WPA_CAP_4_REPLAY_CNTRS		RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS		RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT	RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK	RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+/* WPA capabilities defined in 802.11zD9.0 */
+#define WPA_CAP_PEER_KEY_ENABLE		(0x1 << 1)	/* bit 9 */
+
+/* WPA Specific defines */
+#define WPA_CAP_LEN	RSN_CAP_LEN	/* Length of RSN capabilities in RSN IE (2 octets) */
+#define WPA_PMKID_CNT_LEN	2 	/* Length of RSN PMKID count (2 octets) */
+
+#define	WPA_CAP_WPA2_PREAUTH		RSN_CAP_PREAUTH
+
+#define WPA2_PMKID_COUNT_LEN	2
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CAP_PREAUTH		RSN_CAP_PREAUTH
+
+/* Other WAI definition */
+#define WAPI_WAI_REQUEST		0x00F1
+#define WAPI_UNICAST_REKEY		0x00F2
+#define WAPI_STA_AGING			0x00F3
+#define WAPI_MUTIL_REKEY		0x00F4
+#define WAPI_STA_STATS			0x00F5
+
+#define WAPI_USK_REKEY_COUNT		0x4000000 /* 0xA00000 */
+#define WAPI_MSK_REKEY_COUNT		0x4000000 /* 0xA00000 */
+#endif /* BCMWAPI_WAI */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _proto_wpa_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/net/wireless/bcmdhd/include/sbchipc.h
new file mode 100644
index 0000000000000000000000000000000000000000..597bc752c050dc19a6f4a945980e8377315b026c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbchipc.h
@@ -0,0 +1,3628 @@
+/*
+ * SiliconBackplane Chipcommon core hardware definitions.
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
+ * GPIO interface, extbus, and support for serial and parallel flashes.
+ *
+ * $Id: sbchipc.h 474281 2014-04-30 18:24:55Z $
+ *
+ * $Copyright Open Broadcom Corporation$
+ */
+
+#ifndef	_SBCHIPC_H
+#define	_SBCHIPC_H
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+/**
+ * In chipcommon rev 49 the pmu registers have been moved from chipc to the pmu core if the
+ * 'AOBPresent' bit of 'CoreCapabilitiesExt' is set. If this field is set, the traditional chipc to
+ * [pmu|gci|sreng] register interface is deprecated and removed. These register blocks would instead
+ * be assigned their respective chipc-specific address space and connected to the Always On
+ * Backplane via the APB interface.
+ */
+typedef volatile struct {
+	uint32  PAD[384];
+	uint32	pmucontrol;		/* 0x600 */
+	uint32	pmucapabilities;
+	uint32	pmustatus;
+	uint32	res_state;
+	uint32	res_pending;
+	uint32	pmutimer;
+	uint32	min_res_mask;
+	uint32	max_res_mask;
+	uint32	res_table_sel;
+	uint32	res_dep_mask;
+	uint32	res_updn_timer;
+	uint32	res_timer;
+	uint32	clkstretch;
+	uint32	pmuwatchdog;
+	uint32	gpiosel;		/* 0x638, rev >= 1 */
+	uint32	gpioenable;		/* 0x63c, rev >= 1 */
+	uint32	res_req_timer_sel;
+	uint32	res_req_timer;
+	uint32	res_req_mask;
+	uint32	PAD;
+	uint32	chipcontrol_addr;	/* 0x650 */
+	uint32	chipcontrol_data;	/* 0x654 */
+	uint32	regcontrol_addr;
+	uint32	regcontrol_data;
+	uint32	pllcontrol_addr;
+	uint32	pllcontrol_data;
+	uint32	pmustrapopt;		/* 0x668, corerev >= 28 */
+	uint32	pmu_xtalfreq;		/* 0x66C, pmurev >= 10 */
+	uint32  retention_ctl;		/* 0x670 */
+	uint32  PAD[3];
+	uint32  retention_grpidx;	/* 0x680 */
+	uint32  retention_grpctl;	/* 0x684 */
+	uint32  PAD[20];
+	uint32	pmucontrol_ext;		/* 0x6d8 */
+	uint32	slowclkperiod;		/* 0x6dc */
+	uint32	PAD[8];
+	uint32	pmuintmask0;		/* 0x700 */
+	uint32	pmuintmask1;		/* 0x704 */
+	uint32  PAD[14];
+	uint32  pmuintstatus;		/* 0x740 */
+} pmuregs_t;
+
+typedef struct eci_prerev35 {
+	uint32	eci_output;
+	uint32	eci_control;
+	uint32	eci_inputlo;
+	uint32	eci_inputmi;
+	uint32	eci_inputhi;
+	uint32	eci_inputintpolaritylo;
+	uint32	eci_inputintpolaritymi;
+	uint32	eci_inputintpolarityhi;
+	uint32	eci_intmasklo;
+	uint32	eci_intmaskmi;
+	uint32	eci_intmaskhi;
+	uint32	eci_eventlo;
+	uint32	eci_eventmi;
+	uint32	eci_eventhi;
+	uint32	eci_eventmasklo;
+	uint32	eci_eventmaskmi;
+	uint32	eci_eventmaskhi;
+	uint32	PAD[3];
+} eci_prerev35_t;
+
+typedef struct eci_rev35 {
+	uint32	eci_outputlo;
+	uint32	eci_outputhi;
+	uint32	eci_controllo;
+	uint32	eci_controlhi;
+	uint32	eci_inputlo;
+	uint32	eci_inputhi;
+	uint32	eci_inputintpolaritylo;
+	uint32	eci_inputintpolarityhi;
+	uint32	eci_intmasklo;
+	uint32	eci_intmaskhi;
+	uint32	eci_eventlo;
+	uint32	eci_eventhi;
+	uint32	eci_eventmasklo;
+	uint32	eci_eventmaskhi;
+	uint32	eci_auxtx;
+	uint32	eci_auxrx;
+	uint32	eci_datatag;
+	uint32	eci_uartescvalue;
+	uint32	eci_autobaudctr;
+	uint32	eci_uartfifolevel;
+} eci_rev35_t;
+
+typedef struct flash_config {
+	uint32	PAD[19];
+	/* Flash struct configuration registers (0x18c) for BCM4706 (corerev = 31) */
+	uint32 flashstrconfig;
+} flash_config_t;
+
+typedef volatile struct {
+	uint32	chipid;			/* 0x0 */
+	uint32	capabilities;
+	uint32	corecontrol;		/* corerev >= 1 */
+	uint32	bist;
+
+	/* OTP */
+	uint32	otpstatus;		/* 0x10, corerev >= 10 */
+	uint32	otpcontrol;
+	uint32	otpprog;
+	uint32	otplayout;		/* corerev >= 23 */
+
+	/* Interrupt control */
+	uint32	intstatus;		/* 0x20 */
+	uint32	intmask;
+
+	/* Chip specific regs */
+	uint32	chipcontrol;		/* 0x28, rev >= 11 */
+	uint32	chipstatus;		/* 0x2c, rev >= 11 */
+
+	/* Jtag Master */
+	uint32	jtagcmd;		/* 0x30, rev >= 10 */
+	uint32	jtagir;
+	uint32	jtagdr;
+	uint32	jtagctrl;
+
+	/* serial flash interface registers */
+	uint32	flashcontrol;		/* 0x40 */
+	uint32	flashaddress;
+	uint32	flashdata;
+	uint32	otplayoutextension;	/* rev >= 35 */
+
+	/* Silicon backplane configuration broadcast control */
+	uint32	broadcastaddress;	/* 0x50 */
+	uint32	broadcastdata;
+
+	/* gpio - cleared only by power-on-reset */
+	uint32	gpiopullup;		/* 0x58, corerev >= 20 */
+	uint32	gpiopulldown;		/* 0x5c, corerev >= 20 */
+	uint32	gpioin;			/* 0x60 */
+	uint32	gpioout;		/* 0x64 */
+	uint32	gpioouten;		/* 0x68 */
+	uint32	gpiocontrol;		/* 0x6C */
+	uint32	gpiointpolarity;	/* 0x70 */
+	uint32	gpiointmask;		/* 0x74 */
+
+	/* GPIO events corerev >= 11 */
+	uint32	gpioevent;
+	uint32	gpioeventintmask;
+
+	/* Watchdog timer */
+	uint32	watchdog;		/* 0x80 */
+
+	/* GPIO events corerev >= 11 */
+	uint32	gpioeventintpolarity;
+
+	/* GPIO based LED powersave registers corerev >= 16 */
+	uint32  gpiotimerval;		/* 0x88 */
+	uint32  gpiotimeroutmask;
+
+	/* clock control */
+	uint32	clockcontrol_n;		/* 0x90 */
+	uint32	clockcontrol_sb;	/* aka m0 */
+	uint32	clockcontrol_pci;	/* aka m1 */
+	uint32	clockcontrol_m2;	/* mii/uart/mipsref */
+	uint32	clockcontrol_m3;	/* cpu */
+	uint32	clkdiv;			/* corerev >= 3 */
+	uint32	gpiodebugsel;		/* corerev >= 28 */
+	uint32	capabilities_ext;	/* 0xac */
+
+	/* pll delay registers (corerev >= 4) */
+	uint32	pll_on_delay;		/* 0xb0 */
+	uint32	fref_sel_delay;
+	uint32	slow_clk_ctl;		/* 5 < corerev < 10 */
+	uint32	PAD;
+
+	/* Instaclock registers (corerev >= 10) */
+	uint32	system_clk_ctl;		/* 0xc0 */
+	uint32	clkstatestretch;
+	uint32	PAD[2];
+
+	/* Indirect backplane access (corerev >= 22) */
+	uint32	bp_addrlow;		/* 0xd0 */
+	uint32	bp_addrhigh;
+	uint32	bp_data;
+	uint32	PAD;
+	uint32	bp_indaccess;
+	/* SPI registers, corerev >= 37 */
+	uint32	gsioctrl;
+	uint32	gsioaddress;
+	uint32	gsiodata;
+
+	/* More clock dividers (corerev >= 32) */
+	uint32	clkdiv2;
+	/* FAB ID (corerev >= 40) */
+	uint32	otpcontrol1;
+	uint32	fabid;			/* 0xf8 */
+
+	/* In AI chips, pointer to erom */
+	uint32	eromptr;		/* 0xfc */
+
+	/* ExtBus control registers (corerev >= 3) */
+	uint32	pcmcia_config;		/* 0x100 */
+	uint32	pcmcia_memwait;
+	uint32	pcmcia_attrwait;
+	uint32	pcmcia_iowait;
+	uint32	ide_config;
+	uint32	ide_memwait;
+	uint32	ide_attrwait;
+	uint32	ide_iowait;
+	uint32	prog_config;
+	uint32	prog_waitcount;
+	uint32	flash_config;
+	uint32	flash_waitcount;
+	uint32  SECI_config;		/* 0x130 SECI configuration */
+	uint32	SECI_status;
+	uint32	SECI_statusmask;
+	uint32	SECI_rxnibchanged;
+
+	uint32	PAD[20];
+
+	/* SROM interface (corerev >= 32) */
+	uint32	sromcontrol;		/* 0x190 */
+	uint32	sromaddress;
+	uint32	sromdata;
+	uint32	PAD[1];				/* 0x19C */
+	/* NAND flash registers for BCM4706 (corerev = 31) */
+	uint32  nflashctrl;         /* 0x1a0 */
+	uint32  nflashconf;
+	uint32  nflashcoladdr;
+	uint32  nflashrowaddr;
+	uint32  nflashdata;
+	uint32  nflashwaitcnt0;		/* 0x1b4 */
+	uint32  PAD[2];
+
+	uint32  seci_uart_data;		/* 0x1C0 */
+	uint32  seci_uart_bauddiv;
+	uint32  seci_uart_fcr;
+	uint32  seci_uart_lcr;
+	uint32  seci_uart_mcr;
+	uint32  seci_uart_lsr;
+	uint32  seci_uart_msr;
+	uint32  seci_uart_baudadj;
+	/* Clock control and hardware workarounds (corerev >= 20) */
+	uint32	clk_ctl_st;		/* 0x1e0 */
+	uint32	hw_war;
+	uint32	PAD[70];
+
+	/* UARTs */
+	uint8	uart0data;		/* 0x300 */
+	uint8	uart0imr;
+	uint8	uart0fcr;
+	uint8	uart0lcr;
+	uint8	uart0mcr;
+	uint8	uart0lsr;
+	uint8	uart0msr;
+	uint8	uart0scratch;
+	uint8	PAD[248];		/* corerev >= 1 */
+
+	uint8	uart1data;		/* 0x400 */
+	uint8	uart1imr;
+	uint8	uart1fcr;
+	uint8	uart1lcr;
+	uint8	uart1mcr;
+	uint8	uart1lsr;
+	uint8	uart1msr;
+	uint8	uart1scratch;		/* 0x407 */
+	uint32	PAD[62];
+
+	/* save/restore, corerev >= 48 */
+	uint32	sr_capability;		/* 0x500 */
+	uint32	sr_control0;		/* 0x504 */
+	uint32	sr_control1;		/* 0x508 */
+	uint32  gpio_control;		/* 0x50C */
+	uint32	PAD[60];
+
+	/* PMU registers (corerev >= 20) */
+	/* Note: all timers driven by ILP clock are updated asynchronously to HT/ALP.
+	 * The CPU must read them twice, compare, and retry if different.
+	 */
+	uint32	pmucontrol;		/* 0x600 */
+	uint32	pmucapabilities;
+	uint32	pmustatus;
+	uint32	res_state;
+	uint32	res_pending;
+	uint32	pmutimer;
+	uint32	min_res_mask;
+	uint32	max_res_mask;
+	uint32	res_table_sel;
+	uint32	res_dep_mask;
+	uint32	res_updn_timer;
+	uint32	res_timer;
+	uint32	clkstretch;
+	uint32	pmuwatchdog;
+	uint32	gpiosel;		/* 0x638, rev >= 1 */
+	uint32	gpioenable;		/* 0x63c, rev >= 1 */
+	uint32	res_req_timer_sel;
+	uint32	res_req_timer;
+	uint32	res_req_mask;
+	uint32	PAD;
+	uint32	chipcontrol_addr;	/* 0x650 */
+	uint32	chipcontrol_data;	/* 0x654 */
+	uint32	regcontrol_addr;
+	uint32	regcontrol_data;
+	uint32	pllcontrol_addr;
+	uint32	pllcontrol_data;
+	uint32	pmustrapopt;		/* 0x668, corerev >= 28 */
+	uint32	pmu_xtalfreq;		/* 0x66C, pmurev >= 10 */
+	uint32  retention_ctl;		/* 0x670 */
+	uint32  PAD[3];
+	uint32  retention_grpidx;	/* 0x680 */
+	uint32  retention_grpctl;	/* 0x684 */
+	uint32  PAD[20];
+	uint32	pmucontrol_ext;		/* 0x6d8 */
+	uint32	slowclkperiod;		/* 0x6dc */
+	uint32	PAD[8];
+	uint32	pmuintmask0;		/* 0x700 */
+	uint32	pmuintmask1;		/* 0x704 */
+	uint32  PAD[14];
+	uint32  pmuintstatus;		/* 0x740 */
+	uint32	PAD[47];
+	uint16	sromotp[512];		/* 0x800 */
+#ifdef NFLASH_SUPPORT
+	/* Nand flash MLC controller registers (corerev >= 38) */
+	uint32	nand_revision;		/* 0xC00 */
+	uint32	nand_cmd_start;
+	uint32	nand_cmd_addr_x;
+	uint32	nand_cmd_addr;
+	uint32	nand_cmd_end_addr;
+	uint32	nand_cs_nand_select;
+	uint32	nand_cs_nand_xor;
+	uint32	PAD;
+	uint32	nand_spare_rd0;
+	uint32	nand_spare_rd4;
+	uint32	nand_spare_rd8;
+	uint32	nand_spare_rd12;
+	uint32	nand_spare_wr0;
+	uint32	nand_spare_wr4;
+	uint32	nand_spare_wr8;
+	uint32	nand_spare_wr12;
+	uint32	nand_acc_control;
+	uint32	PAD;
+	uint32	nand_config;
+	uint32	PAD;
+	uint32	nand_timing_1;
+	uint32	nand_timing_2;
+	uint32	nand_semaphore;
+	uint32	PAD;
+	uint32	nand_devid;
+	uint32	nand_devid_x;
+	uint32	nand_block_lock_status;
+	uint32	nand_intfc_status;
+	uint32	nand_ecc_corr_addr_x;
+	uint32	nand_ecc_corr_addr;
+	uint32	nand_ecc_unc_addr_x;
+	uint32	nand_ecc_unc_addr;
+	uint32	nand_read_error_count;
+	uint32	nand_corr_stat_threshold;
+	uint32	PAD[2];
+	uint32	nand_read_addr_x;
+	uint32	nand_read_addr;
+	uint32	nand_page_program_addr_x;
+	uint32	nand_page_program_addr;
+	uint32	nand_copy_back_addr_x;
+	uint32	nand_copy_back_addr;
+	uint32	nand_block_erase_addr_x;
+	uint32	nand_block_erase_addr;
+	uint32	nand_inv_read_addr_x;
+	uint32	nand_inv_read_addr;
+	uint32	PAD[2];
+	uint32	nand_blk_wr_protect;
+	uint32	PAD[3];
+	uint32	nand_acc_control_cs1;
+	uint32	nand_config_cs1;
+	uint32	nand_timing_1_cs1;
+	uint32	nand_timing_2_cs1;
+	uint32	PAD[20];
+	uint32	nand_spare_rd16;
+	uint32	nand_spare_rd20;
+	uint32	nand_spare_rd24;
+	uint32	nand_spare_rd28;
+	uint32	nand_cache_addr;
+	uint32	nand_cache_data;
+	uint32	nand_ctrl_config;
+	uint32	nand_ctrl_status;
+#endif /* NFLASH_SUPPORT */
+	uint32  gci_corecaps0; /* GCI starting at 0xC00 */
+	uint32  gci_corecaps1;
+	uint32  gci_corecaps2;
+	uint32  gci_corectrl;
+	uint32  gci_corestat; /* 0xC10 */
+	uint32  gci_intstat; /* 0xC14 */
+	uint32  gci_intmask; /* 0xC18 */
+	uint32  gci_wakemask; /* 0xC1C */
+	uint32  gci_levelintstat; /* 0xC20 */
+	uint32  gci_eventintstat; /* 0xC24 */
+	uint32  PAD[6];
+	uint32  gci_indirect_addr; /* 0xC40 */
+	uint32  gci_gpioctl; /* 0xC44 */
+	uint32	gci_gpiostatus;
+	uint32  gci_gpiomask; /* 0xC4C */
+	uint32  PAD;
+	uint32  gci_miscctl; /* 0xC54 */
+	uint32	gci_gpiointmask;
+	uint32	gci_gpiowakemask;
+	uint32  gci_input[32]; /* C60 */
+	uint32  gci_event[32]; /* CE0 */
+	uint32  gci_output[4]; /* D60 */
+	uint32  gci_control_0; /* 0xD70 */
+	uint32  gci_control_1; /* 0xD74 */
+	uint32  gci_intpolreg; /* 0xD78 */
+	uint32  gci_levelintmask; /* 0xD7C */
+	uint32  gci_eventintmask; /* 0xD80 */
+	uint32  PAD[3];
+	uint32  gci_inbandlevelintmask; /* 0xD90 */
+	uint32  gci_inbandeventintmask; /* 0xD94 */
+	uint32  PAD[2];
+	uint32  gci_seciauxtx; /* 0xDA0 */
+	uint32  gci_seciauxrx; /* 0xDA4 */
+	uint32  gci_secitx_datatag; /* 0xDA8 */
+	uint32  gci_secirx_datatag; /* 0xDAC */
+	uint32  gci_secitx_datamask; /* 0xDB0 */
+	uint32  gci_seciusef0tx_reg; /* 0xDB4 */
+	uint32  gci_secif0tx_offset; /* 0xDB8 */
+	uint32  gci_secif0rx_offset; /* 0xDBC */
+	uint32  gci_secif1tx_offset; /* 0xDC0 */
+	uint32	gci_rxfifo_common_ctrl; /* 0xDC4 */
+	uint32	gci_rxfifoctrl; /* 0xDC8 */
+	uint32	gci_uartreadid; /* DCC */
+	uint32  gci_uartescval; /* DD0 */
+	uint32	PAD;
+	uint32	gci_secififolevel; /* DD8 */
+	uint32	gci_seciuartdata; /* DDC */
+	uint32  gci_secibauddiv; /* DE0 */
+	uint32  gci_secifcr; /* DE4 */
+	uint32  gci_secilcr; /* DE8 */
+	uint32  gci_secimcr; /* DEC */
+	uint32	gci_secilsr; /* DF0 */
+	uint32	gci_secimsr; /* DF4 */
+	uint32  gci_baudadj; /* DF8 */
+	uint32  PAD;
+	uint32  gci_chipctrl; /* 0xE00 */
+	uint32  gci_chipsts; /* 0xE04 */
+	uint32	gci_gpioout; /* 0xE08 */
+	uint32	gci_gpioout_read; /* 0xE0C */
+	uint32	gci_mpwaketx; /* 0xE10 */
+	uint32	gci_mpwakedetect; /* 0xE14 */
+	uint32	gci_seciin_ctrl; /* 0xE18 */
+	uint32	gci_seciout_ctrl; /* 0xE1C */
+	uint32	gci_seciin_auxfifo_en; /* 0xE20 */
+	uint32	gci_seciout_txen_txbr; /* 0xE24 */
+	uint32	gci_seciin_rxbrstatus; /* 0xE28 */
+	uint32	gci_seciin_rxerrstatus; /* 0xE2C */
+	uint32	gci_seciin_fcstatus; /* 0xE30 */
+	uint32	gci_seciout_txstatus; /* 0xE34 */
+	uint32	gci_seciout_txbrstatus; /* 0xE38 */
+} chipcregs_t;
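+
+/* Usage sketch (illustrative only, compiled out): the chipcontrol, regcontrol
+ * and pllcontrol register pairs are indirect - software writes an index into
+ * the *_addr register and then accesses *_data.  Real driver code goes
+ * through the si/osl R_REG/W_REG wrappers; the raw volatile accesses below
+ * only illustrate the sequence, and the helper name is hypothetical.
+ */
+#if 0
+static uint32
+chipc_chipcontrol_rmw(chipcregs_t *cc, uint32 reg, uint32 mask, uint32 val)
+{
+	uint32 w;
+
+	cc->chipcontrol_addr = reg;			/* select chipcontrol register 'reg' */
+	w = (cc->chipcontrol_data & ~mask) | (val & mask);
+	cc->chipcontrol_data = w;			/* read-modify-write the selected register */
+	return w;
+}
+#endif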
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+
+#define	CC_CHIPID		0
+#define	CC_CAPABILITIES		4
+#define	CC_CHIPST		0x2c
+#define	CC_EROMPTR		0xfc
+
+#define	CC_OTPST		0x10
+#define	CC_INTSTATUS		0x20
+#define	CC_INTMASK		0x24
+#define	CC_JTAGCMD		0x30
+#define	CC_JTAGIR		0x34
+#define	CC_JTAGDR		0x38
+#define	CC_JTAGCTRL		0x3c
+#define	CC_GPIOPU		0x58
+#define	CC_GPIOPD		0x5c
+#define	CC_GPIOIN		0x60
+#define	CC_GPIOOUT		0x64
+#define	CC_GPIOOUTEN		0x68
+#define	CC_GPIOCTRL		0x6c
+#define	CC_GPIOPOL		0x70
+#define	CC_GPIOINTM		0x74
+#define	CC_GPIOEVENT		0x78
+#define	CC_GPIOEVENTMASK	0x7c
+#define	CC_WATCHDOG		0x80
+#define	CC_GPIOEVENTPOL		0x84
+#define	CC_CLKC_N		0x90
+#define	CC_CLKC_M0		0x94
+#define	CC_CLKC_M1		0x98
+#define	CC_CLKC_M2		0x9c
+#define	CC_CLKC_M3		0xa0
+#define	CC_CLKDIV		0xa4
+#define	CC_SYS_CLK_CTL		0xc0
+#define	CC_CLK_CTL_ST		SI_CLK_CTL_ST
+#define	PMU_CTL			0x600
+#define	PMU_CAP			0x604
+#define	PMU_ST			0x608
+#define PMU_RES_STATE		0x60c
+#define PMU_RES_PENDING		0x610
+#define PMU_TIMER		0x614
+#define	PMU_MIN_RES_MASK	0x618
+#define	PMU_MAX_RES_MASK	0x61c
+#define CC_CHIPCTL_ADDR         0x650
+#define CC_CHIPCTL_DATA         0x654
+#define PMU_REG_CONTROL_ADDR	0x658
+#define PMU_REG_CONTROL_DATA	0x65C
+#define PMU_PLL_CONTROL_ADDR 	0x660
+#define PMU_PLL_CONTROL_DATA 	0x664
+#define CC_SROM_CTRL		0x190
+#define	CC_SROM_OTP		0x800		/* SROM/OTP address space */
+#define CC_GCI_INDIRECT_ADDR_REG	0xC40
+#define CC_GCI_CHIP_CTRL_REG	0xE00
+#define CC_GCI_CC_OFFSET_2	2
+#define CC_GCI_CC_OFFSET_5	5
+#define CC_SWD_CTRL		0x380
+#define CC_SWD_REQACK		0x384
+#define CC_SWD_DATA		0x388
+
+
+#define CHIPCTRLREG0 0x0
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define CHIPCTRLREG6 0x6
+#define REGCTRLREG4 0x4
+#define REGCTRLREG5 0x5
+#define REGCTRLREG6 0x6
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define PMU_RES_DEP_MASK 0x624
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+#define EXT_LPO_AVAIL 0x100
+#define LPO_SEL					(1 << 0)
+#define CC_EXT_LPO_PU 0x200000
+#define GC_EXT_LPO_PU 0x2
+#define CC_INT_LPO_PU 0x100000
+#define GC_INT_LPO_PU 0x1
+#define EXT_LPO_SEL 0x8
+#define INT_LPO_SEL 0x4
+#define ENABLE_FINE_CBUCK_CTRL 			(1 << 30)
+#define REGCTRL5_PWM_AUTO_CTRL_MASK 		0x007e0000
+#define REGCTRL5_PWM_AUTO_CTRL_SHIFT		17
+#define REGCTRL6_PWM_AUTO_CTRL_MASK 		0x3fff0000
+#define REGCTRL6_PWM_AUTO_CTRL_SHIFT		16
+
+#ifdef SR_DEBUG
+#define SUBCORE_POWER_ON 0x0001
+#define PHY_POWER_ON 0x0010
+#define VDDM_POWER_ON 0x0100
+#define MEMLPLDO_POWER_ON 0x1000
+#define SUBCORE_POWER_ON_CHK 0x00040000
+#define PHY_POWER_ON_CHK 0x00080000
+#define VDDM_POWER_ON_CHK 0x00100000
+#define MEMLPLDO_POWER_ON_CHK 0x00200000
+#endif /* SR_DEBUG */
+
+#ifdef NFLASH_SUPPORT
+/* NAND flash support */
+#define CC_NAND_REVISION	0xC00
+#define CC_NAND_CMD_START	0xC04
+#define CC_NAND_CMD_ADDR	0xC0C
+#define CC_NAND_SPARE_RD_0	0xC20
+#define CC_NAND_SPARE_RD_4	0xC24
+#define CC_NAND_SPARE_RD_8	0xC28
+#define CC_NAND_SPARE_RD_C	0xC2C
+#define CC_NAND_CONFIG		0xC48
+#define CC_NAND_DEVID		0xC60
+#define CC_NAND_DEVID_EXT	0xC64
+#define CC_NAND_INTFC_STATUS	0xC6C
+#endif /* NFLASH_SUPPORT */
+
+/* chipid */
+#define	CID_ID_MASK		0x0000ffff	/* Chip Id mask */
+#define	CID_REV_MASK		0x000f0000	/* Chip Revision mask */
+#define	CID_REV_SHIFT		16		/* Chip Revision shift */
+#define	CID_PKG_MASK		0x00f00000	/* Package Option mask */
+#define	CID_PKG_SHIFT		20		/* Package Option shift */
+#define	CID_CC_MASK		0x0f000000	/* CoreCount (corerev >= 4) */
+#define CID_CC_SHIFT		24
+#define	CID_TYPE_MASK		0xf0000000	/* Chip Type */
+#define CID_TYPE_SHIFT		28
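+
+/* Usage sketch (illustrative only, compiled out): decompose a chipid register
+ * value with the masks/shifts above.  Struct and helper names are
+ * hypothetical.
+ */
+#if 0
+struct cc_chipid_fields {
+	uint32 chip;	/* chip id */
+	uint32 rev;	/* chip revision */
+	uint32 pkg;	/* package option */
+};
+
+static void
+cc_parse_chipid(uint32 chipid, struct cc_chipid_fields *out)
+{
+	out->chip = chipid & CID_ID_MASK;
+	out->rev = (chipid & CID_REV_MASK) >> CID_REV_SHIFT;
+	out->pkg = (chipid & CID_PKG_MASK) >> CID_PKG_SHIFT;
+}
+#endif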
+
+/* capabilities */
+#define	CC_CAP_UARTS_MASK	0x00000003	/* Number of UARTs */
+#define CC_CAP_MIPSEB		0x00000004	/* MIPS is in big-endian mode */
+#define CC_CAP_UCLKSEL		0x00000018	/* UARTs clock select */
+#define CC_CAP_UINTCLK		0x00000008	/* UARTs are driven by internal divided clock */
+#define CC_CAP_UARTGPIO		0x00000020	/* UARTs own GPIOs 15:12 */
+#define CC_CAP_EXTBUS_MASK	0x000000c0	/* External bus mask */
+#define CC_CAP_EXTBUS_NONE	0x00000000	/* No ExtBus present */
+#define CC_CAP_EXTBUS_FULL	0x00000040	/* ExtBus: PCMCIA, IDE & Prog */
+#define CC_CAP_EXTBUS_PROG	0x00000080	/* ExtBus: ProgIf only */
+#define	CC_CAP_FLASH_MASK	0x00000700	/* Type of flash */
+#define	CC_CAP_PLL_MASK		0x00038000	/* Type of PLL */
+#define CC_CAP_PWR_CTL		0x00040000	/* Power control */
+#define CC_CAP_OTPSIZE		0x00380000	/* OTP Size (0 = none) */
+#define CC_CAP_OTPSIZE_SHIFT	19		/* OTP Size shift */
+#define CC_CAP_OTPSIZE_BASE	5		/* OTP Size base */
+#define CC_CAP_JTAGP		0x00400000	/* JTAG Master Present */
+#define CC_CAP_ROM		0x00800000	/* Internal boot rom active */
+#define CC_CAP_BKPLN64		0x08000000	/* 64-bit backplane */
+#define	CC_CAP_PMU		0x10000000	/* PMU Present, rev >= 20 */
+#define	CC_CAP_ECI		0x20000000	/* ECI Present, rev >= 21 */
+#define	CC_CAP_SROM		0x40000000	/* Srom Present, rev >= 32 */
+#define	CC_CAP_NFLASH		0x80000000	/* Nand flash present, rev >= 35 */
+
+#define	CC_CAP2_SECI		0x00000001	/* SECI Present, rev >= 36 */
+#define	CC_CAP2_GSIO		0x00000002	/* GSIO (spi/i2c) present, rev >= 37 */
+
+/* capabilities extension */
+#define CC_CAP_EXT_SECI_PRESENT	0x00000001    /* SECI present */
+#define CC_CAP_EXT_GSIO_PRESENT	0x00000002    /* GSIO present */
+#define CC_CAP_EXT_GCI_PRESENT  0x00000004    /* GCI present */
+#define CC_CAP_EXT_AOB_PRESENT  0x00000040    /* AOB present */
+
+/* WL Channel Info to BT via GCI - bits 40 - 47 */
+#define GCI_WL_CHN_INFO_MASK 	(0xFF00)
+/* PLL type */
+#define PLL_NONE		0x00000000
+#define PLL_TYPE1		0x00010000	/* 48MHz base, 3 dividers */
+#define PLL_TYPE2		0x00020000	/* 48MHz, 4 dividers */
+#define PLL_TYPE3		0x00030000	/* 25MHz, 2 dividers */
+#define PLL_TYPE4		0x00008000	/* 48MHz, 4 dividers */
+#define PLL_TYPE5		0x00018000	/* 25MHz, 4 dividers */
+#define PLL_TYPE6		0x00028000	/* 100/200 or 120/240 only */
+#define PLL_TYPE7		0x00038000	/* 25MHz, 4 dividers */
+
+/* ILP clock */
+#define	ILP_CLOCK		32000
+
+/* ALP clock on pre-PMU chips */
+#define	ALP_CLOCK		20000000
+
+#ifdef CFG_SIM
+#define NS_ALP_CLOCK		84922
+#define NS_SLOW_ALP_CLOCK	84922
+#define NS_CPU_CLOCK		534500
+#define NS_SLOW_CPU_CLOCK	534500
+#define NS_SI_CLOCK		271750
+#define NS_SLOW_SI_CLOCK	271750
+#define NS_FAST_MEM_CLOCK	271750
+#define NS_MEM_CLOCK		271750
+#define NS_SLOW_MEM_CLOCK	271750
+#else
+#define NS_ALP_CLOCK		125000000
+#define NS_SLOW_ALP_CLOCK	100000000
+#define NS_CPU_CLOCK		1000000000
+#define NS_SLOW_CPU_CLOCK	800000000
+#define NS_SI_CLOCK		250000000
+#define NS_SLOW_SI_CLOCK	200000000
+#define NS_FAST_MEM_CLOCK	800000000
+#define NS_MEM_CLOCK		533000000
+#define NS_SLOW_MEM_CLOCK	400000000
+#endif /* CFG_SIM */
+
+/* HT clock */
+#define	HT_CLOCK		80000000
+
+/* corecontrol */
+#define CC_UARTCLKO		0x00000001	/* Drive UART with internal clock */
+#define	CC_SE			0x00000002	/* sync clk out enable (corerev >= 3) */
+#define CC_ASYNCGPIO	0x00000004	/* 1=generate GPIO interrupt without backplane clock */
+#define CC_UARTCLKEN		0x00000008	/* enable UART Clock (corerev >= 21) */
+
+/* 4321 chipcontrol */
+#define CHIPCTRL_4321A0_DEFAULT	0x3a4
+#define CHIPCTRL_4321A1_DEFAULT	0x0a4
+#define CHIPCTRL_4321_PLL_DOWN	0x800000	/* serdes PLL down override */
+
+/* Fields in the otpstatus register in rev >= 21 */
+#define OTPS_OL_MASK		0x000000ff
+#define OTPS_OL_MFG		0x00000001	/* manuf row is locked */
+#define OTPS_OL_OR1		0x00000002	/* otp redundancy row 1 is locked */
+#define OTPS_OL_OR2		0x00000004	/* otp redundancy row 2 is locked */
+#define OTPS_OL_GU		0x00000008	/* general use region is locked */
+#define OTPS_GUP_MASK		0x00000f00
+#define OTPS_GUP_SHIFT		8
+#define OTPS_GUP_HW		0x00000100	/* h/w subregion is programmed */
+#define OTPS_GUP_SW		0x00000200	/* s/w subregion is programmed */
+#define OTPS_GUP_CI		0x00000400	/* chipid/pkgopt subregion is programmed */
+#define OTPS_GUP_FUSE		0x00000800	/* fuse subregion is programmed */
+#define OTPS_READY		0x00001000
+#define OTPS_RV(x)		(1 << (16 + (x)))	/* redundancy entry valid */
+#define OTPS_RV_MASK		0x0fff0000
+#define OTPS_PROGOK     0x40000000
+
+/* Fields in the otpcontrol register in rev >= 21 */
+#define OTPC_PROGSEL		0x00000001
+#define OTPC_PCOUNT_MASK	0x0000000e
+#define OTPC_PCOUNT_SHIFT	1
+#define OTPC_VSEL_MASK		0x000000f0
+#define OTPC_VSEL_SHIFT		4
+#define OTPC_TMM_MASK		0x00000700
+#define OTPC_TMM_SHIFT		8
+#define OTPC_ODM		0x00000800
+#define OTPC_PROGEN		0x80000000
+
+/* Fields in the 40nm otpcontrol register in rev >= 40 */
+#define OTPC_40NM_PROGSEL_SHIFT	0
+#define OTPC_40NM_PCOUNT_SHIFT	1
+#define OTPC_40NM_PCOUNT_WR	0xA
+#define OTPC_40NM_PCOUNT_V1X	0xB
+#define OTPC_40NM_REGCSEL_SHIFT	5
+#define OTPC_40NM_REGCSEL_DEF	0x4
+#define OTPC_40NM_PROGIN_SHIFT	8
+#define OTPC_40NM_R2X_SHIFT	10
+#define OTPC_40NM_ODM_SHIFT	11
+#define OTPC_40NM_DF_SHIFT	15
+#define OTPC_40NM_VSEL_SHIFT	16
+#define OTPC_40NM_VSEL_WR	0xA
+#define OTPC_40NM_VSEL_V1X	0xA
+#define OTPC_40NM_VSEL_R1X	0x5
+#define OTPC_40NM_COFAIL_SHIFT	30
+
+#define OTPC1_CPCSEL_SHIFT	0
+#define OTPC1_CPCSEL_DEF	6
+#define OTPC1_TM_SHIFT		8
+#define OTPC1_TM_WR		0x84
+#define OTPC1_TM_V1X		0x84
+#define OTPC1_TM_R1X		0x4
+#define OTPC1_CLK_EN_MASK	0x00020000
+#define OTPC1_CLK_DIV_MASK	0x00FC0000
+
+/* Fields in otpprog in rev >= 21 and HND OTP */
+#define OTPP_COL_MASK		0x000000ff
+#define OTPP_COL_SHIFT		0
+#define OTPP_ROW_MASK		0x0000ff00
+#define OTPP_ROW_MASK9		0x0001ff00		/* for ccrev >= 49 */
+#define OTPP_ROW_SHIFT		8
+#define OTPP_OC_MASK		0x0f000000
+#define OTPP_OC_SHIFT		24
+#define OTPP_READERR		0x10000000
+#define OTPP_VALUE_MASK		0x20000000
+#define OTPP_VALUE_SHIFT	29
+#define OTPP_START_BUSY		0x80000000
+#define	OTPP_READ		0x40000000	/* HND OTP */
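+
+/* Usage sketch (illustrative only, compiled out): an otpprog access is built
+ * from a row/column address and one of the OTPPOC_* opcodes below, then
+ * kicked off by setting OTPP_START_BUSY and polled until it clears.  The
+ * helper name is hypothetical; real code lives in the OTP driver.
+ */
+#if 0
+static uint32
+otpprog_cmd(uint32 opcode, uint32 row, uint32 col)
+{
+	uint32 cmd = 0;
+
+	cmd |= (col << OTPP_COL_SHIFT) & OTPP_COL_MASK;
+	cmd |= (row << OTPP_ROW_SHIFT) & OTPP_ROW_MASK;		/* OTPP_ROW_MASK9 on ccrev >= 49 */
+	cmd |= (opcode << OTPP_OC_SHIFT) & OTPP_OC_MASK;
+	return cmd | OTPP_START_BUSY;
+}
+#endif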
+
+/* Fields in otplayout register */
+#define OTPL_HWRGN_OFF_MASK	0x00000FFF
+#define OTPL_HWRGN_OFF_SHIFT	0
+#define OTPL_WRAP_REVID_MASK	0x00F80000
+#define OTPL_WRAP_REVID_SHIFT	19
+#define OTPL_WRAP_TYPE_MASK	0x00070000
+#define OTPL_WRAP_TYPE_SHIFT	16
+#define OTPL_WRAP_TYPE_65NM	0
+#define OTPL_WRAP_TYPE_40NM	1
+#define OTPL_ROW_SIZE_MASK	0x0000F000
+#define OTPL_ROW_SIZE_SHIFT	12
+
+/* otplayout reg corerev >= 36 */
+#define OTP_CISFORMAT_NEW	0x80000000
+
+/* Opcodes for OTPP_OC field */
+#define OTPPOC_READ		0
+#define OTPPOC_BIT_PROG		1
+#define OTPPOC_VERIFY		3
+#define OTPPOC_INIT		4
+#define OTPPOC_SET		5
+#define OTPPOC_RESET		6
+#define OTPPOC_OCST		7
+#define OTPPOC_ROW_LOCK		8
+#define OTPPOC_PRESCN_TEST	9
+
+/* Opcodes for OTPP_OC field (40NM) */
+#define OTPPOC_READ_40NM	0
+#define OTPPOC_PROG_ENABLE_40NM 1
+#define OTPPOC_PROG_DISABLE_40NM	2
+#define OTPPOC_VERIFY_40NM	3
+#define OTPPOC_WORD_VERIFY_1_40NM	4
+#define OTPPOC_ROW_LOCK_40NM	5
+#define OTPPOC_STBY_40NM	6
+#define OTPPOC_WAKEUP_40NM	7
+#define OTPPOC_WORD_VERIFY_0_40NM	8
+#define OTPPOC_PRESCN_TEST_40NM 9
+#define OTPPOC_BIT_PROG_40NM	10
+#define OTPPOC_WORDPROG_40NM	11
+#define OTPPOC_BURNIN_40NM	12
+#define OTPPOC_AUTORELOAD_40NM	13
+#define OTPPOC_OVST_READ_40NM	14
+#define OTPPOC_OVST_PROG_40NM	15
+
+/* Fields in otplayoutextension */
+#define OTPLAYOUTEXT_FUSE_MASK	0x3FF
+
+
+/* Jtagm characteristics that appeared at a given corerev */
+#define	JTAGM_CREV_OLD		10	/* Old command set, 16bit max IR */
+#define	JTAGM_CREV_IRP		22	/* Able to do pause-ir */
+#define	JTAGM_CREV_RTI		28	/* Able to do return-to-idle */
+
+/* jtagcmd */
+#define JCMD_START		0x80000000
+#define JCMD_BUSY		0x80000000
+#define JCMD_STATE_MASK		0x60000000
+#define JCMD_STATE_TLR		0x00000000	/* Test-logic-reset */
+#define JCMD_STATE_PIR		0x20000000	/* Pause IR */
+#define JCMD_STATE_PDR		0x40000000	/* Pause DR */
+#define JCMD_STATE_RTI		0x60000000	/* Run-test-idle */
+#define JCMD0_ACC_MASK		0x0000f000
+#define JCMD0_ACC_IRDR		0x00000000
+#define JCMD0_ACC_DR		0x00001000
+#define JCMD0_ACC_IR		0x00002000
+#define JCMD0_ACC_RESET		0x00003000
+#define JCMD0_ACC_IRPDR		0x00004000
+#define JCMD0_ACC_PDR		0x00005000
+#define JCMD0_IRW_MASK		0x00000f00
+#define JCMD_ACC_MASK		0x000f0000	/* Changes for corerev 11 */
+#define JCMD_ACC_IRDR		0x00000000
+#define JCMD_ACC_DR		0x00010000
+#define JCMD_ACC_IR		0x00020000
+#define JCMD_ACC_RESET		0x00030000
+#define JCMD_ACC_IRPDR		0x00040000
+#define JCMD_ACC_PDR		0x00050000
+#define JCMD_ACC_PIR		0x00060000
+#define JCMD_ACC_IRDR_I		0x00070000	/* rev 28: return to run-test-idle */
+#define JCMD_ACC_DR_I		0x00080000	/* rev 28: return to run-test-idle */
+#define JCMD_IRW_MASK		0x00001f00
+#define JCMD_IRW_SHIFT		8
+#define JCMD_DRW_MASK		0x0000003f
+
+/* jtagctrl */
+#define JCTRL_FORCE_CLK		4		/* Force clock */
+#define JCTRL_EXT_EN		2		/* Enable external targets */
+#define JCTRL_EN		1		/* Enable Jtag master */
+
+#define JCTRL_TAPSEL_BIT	0x00000008	/* JtagMasterCtrl tap_sel bit */
+
+/* Fields in clkdiv */
+#define	CLKD_SFLASH		0x0f000000
+#define	CLKD_SFLASH_SHIFT	24
+#define	CLKD_OTP		0x000f0000
+#define	CLKD_OTP_SHIFT		16
+#define	CLKD_JTAG		0x00000f00
+#define	CLKD_JTAG_SHIFT		8
+#define	CLKD_UART		0x000000ff
+
+#define	CLKD2_SROM		0x00000003
+
+/* intstatus/intmask */
+#define	CI_GPIO			0x00000001	/* gpio intr */
+#define	CI_EI			0x00000002	/* extif intr (corerev >= 3) */
+#define	CI_TEMP			0x00000004	/* temp. ctrl intr (corerev >= 15) */
+#define	CI_SIRQ			0x00000008	/* serial IRQ intr (corerev >= 15) */
+#define	CI_ECI			0x00000010	/* eci intr (corerev >= 21) */
+#define	CI_PMU			0x00000020	/* pmu intr (corerev >= 21) */
+#define	CI_UART			0x00000040	/* uart intr (corerev >= 21) */
+#define	CI_WDRESET		0x80000000	/* watchdog reset occurred */
+
+/* slow_clk_ctl */
+#define SCC_SS_MASK		0x00000007	/* slow clock source mask */
+#define	SCC_SS_LPO		0x00000000	/* source of slow clock is LPO */
+#define	SCC_SS_XTAL		0x00000001	/* source of slow clock is crystal */
+#define	SCC_SS_PCI		0x00000002	/* source of slow clock is PCI */
+#define SCC_LF			0x00000200	/* LPOFreqSel, 1: 160kHz, 0: 32kHz */

+#define SCC_LP			0x00000400	/* LPOPowerDown, 1: LPO is disabled,
+						 * 0: LPO is enabled
+						 */
+#define SCC_FS			0x00000800	/* ForceSlowClk, 1: sb/cores running on slow clock,
+						 * 0: power logic control
+						 */
+#define SCC_IP			0x00001000	/* IgnorePllOffReq, 1/0: power logic ignores/honors
+						 * PLL clock disable requests from core
+						 */
+#define SCC_XC			0x00002000	/* XtalControlEn, 1/0: power logic does/doesn't
+						 * disable crystal when appropriate
+						 */
+#define SCC_XP			0x00004000	/* XtalPU (RO), 1/0: crystal running/disabled */
+#define SCC_CD_MASK		0xffff0000	/* ClockDivider (SlowClk = 1/(4+divisor)) */
+#define SCC_CD_SHIFT		16
+
+/* system_clk_ctl */
+#define	SYCC_IE			0x00000001	/* ILPen: Enable Idle Low Power */
+#define	SYCC_AE			0x00000002	/* ALPen: Enable Active Low Power */
+#define	SYCC_FP			0x00000004	/* ForcePLLOn */
+#define	SYCC_AR			0x00000008	/* Force ALP (or HT if ALPen is not set) */
+#define	SYCC_HR			0x00000010	/* Force HT */
+#define SYCC_CD_MASK		0xffff0000	/* ClkDiv (ILP = 1/(4 * (divisor + 1))) */
+#define SYCC_CD_SHIFT		16
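A hedged sketch of the two divider formulas documented in the comments above (SlowClk = src / (4 + divisor), ILP = src / (4 * (divisor + 1))); the source frequencies are caller-supplied and not defined in this header:

static inline unsigned int scc_slow_clk_hz(unsigned int slow_clk_ctl,
					   unsigned int slow_src_hz)
{
	unsigned int div = (slow_clk_ctl & SCC_CD_MASK) >> SCC_CD_SHIFT;

	return slow_src_hz / (4 + div);		/* SlowClk = src / (4 + divisor) */
}

static inline unsigned int sycc_ilp_clk_hz(unsigned int system_clk_ctl,
					   unsigned int alp_hz)
{
	unsigned int div = (system_clk_ctl & SYCC_CD_MASK) >> SYCC_CD_SHIFT;

	return alp_hz / (4 * (div + 1));	/* ILP = src / (4 * (divisor + 1)) */
}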
+
+/* Indirect backplane access */
+#define	BPIA_BYTEEN		0x0000000f
+#define	BPIA_SZ1		0x00000001
+#define	BPIA_SZ2		0x00000003
+#define	BPIA_SZ4		0x00000007
+#define	BPIA_SZ8		0x0000000f
+#define	BPIA_WRITE		0x00000100
+#define	BPIA_START		0x00000200
+#define	BPIA_BUSY		0x00000200
+#define	BPIA_ERROR		0x00000400
+
+/* pcmcia/prog/flash_config */
+#define	CF_EN			0x00000001	/* enable */
+#define	CF_EM_MASK		0x0000000e	/* mode */
+#define	CF_EM_SHIFT		1
+#define	CF_EM_FLASH		0		/* flash/asynchronous mode */
+#define	CF_EM_SYNC		2		/* synchronous mode */
+#define	CF_EM_PCMCIA		4		/* pcmcia mode */
+#define	CF_DS			0x00000010	/* destsize:  0=8bit, 1=16bit */
+#define	CF_BS			0x00000020	/* byteswap */
+#define	CF_CD_MASK		0x000000c0	/* clock divider */
+#define	CF_CD_SHIFT		6
+#define	CF_CD_DIV2		0x00000000	/* backplane/2 */
+#define	CF_CD_DIV3		0x00000040	/* backplane/3 */
+#define	CF_CD_DIV4		0x00000080	/* backplane/4 */
+#define	CF_CE			0x00000100	/* clock enable */
+#define	CF_SB			0x00000200	/* size/bytestrobe (synch only) */
+
+/* pcmcia_memwait */
+#define	PM_W0_MASK		0x0000003f	/* waitcount0 */
+#define	PM_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PM_W1_SHIFT		8
+#define	PM_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PM_W2_SHIFT		16
+#define	PM_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PM_W3_SHIFT		24
+
+/* pcmcia_attrwait */
+#define	PA_W0_MASK		0x0000003f	/* waitcount0 */
+#define	PA_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PA_W1_SHIFT		8
+#define	PA_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PA_W2_SHIFT		16
+#define	PA_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PA_W3_SHIFT		24
+
+/* pcmcia_iowait */
+#define	PI_W0_MASK		0x0000003f	/* waitcount0 */
+#define	PI_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PI_W1_SHIFT		8
+#define	PI_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PI_W2_SHIFT		16
+#define	PI_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PI_W3_SHIFT		24
+
+/* prog_waitcount */
+#define	PW_W0_MASK		0x0000001f	/* waitcount0 */
+#define	PW_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PW_W1_SHIFT		8
+#define	PW_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PW_W2_SHIFT		16
+#define	PW_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PW_W3_SHIFT		24
+
+#define PW_W0       		0x0000000c
+#define PW_W1       		0x00000a00
+#define PW_W2       		0x00020000
+#define PW_W3       		0x01000000
+
+/* flash_waitcount */
+#define	FW_W0_MASK		0x0000003f	/* waitcount0 */
+#define	FW_W1_MASK		0x00001f00	/* waitcount1 */
+#define	FW_W1_SHIFT		8
+#define	FW_W2_MASK		0x001f0000	/* waitcount2 */
+#define	FW_W2_SHIFT		16
+#define	FW_W3_MASK		0x1f000000	/* waitcount3 */
+#define	FW_W3_SHIFT		24
+
+/* When Srom support present, fields in sromcontrol */
+#define	SRC_START		0x80000000
+#define	SRC_BUSY		0x80000000
+#define	SRC_OPCODE		0x60000000
+#define	SRC_OP_READ		0x00000000
+#define	SRC_OP_WRITE		0x20000000
+#define	SRC_OP_WRDIS		0x40000000
+#define	SRC_OP_WREN		0x60000000
+#define	SRC_OTPSEL		0x00000010
+#define SRC_OTPPRESENT		0x00000020
+#define	SRC_LOCK		0x00000008
+#define	SRC_SIZE_MASK		0x00000006
+#define	SRC_SIZE_1K		0x00000000
+#define	SRC_SIZE_4K		0x00000002
+#define	SRC_SIZE_16K		0x00000004
+#define	SRC_SIZE_SHIFT		1
+#define	SRC_PRESENT		0x00000001
+
+/* Fields in pmucontrol */
+#define	PCTL_ILP_DIV_MASK	0xffff0000
+#define	PCTL_ILP_DIV_SHIFT	16
+#define PCTL_LQ_REQ_EN		0x00008000
+#define PCTL_PLL_PLLCTL_UPD	0x00000400	/* rev 2 */
+#define PCTL_NOILP_ON_WAIT	0x00000200	/* rev 1 */
+#define	PCTL_HT_REQ_EN		0x00000100
+#define	PCTL_ALP_REQ_EN		0x00000080
+#define	PCTL_XTALFREQ_MASK	0x0000007c
+#define	PCTL_XTALFREQ_SHIFT	2
+#define	PCTL_ILP_DIV_EN		0x00000002
+#define	PCTL_LPO_SEL		0x00000001
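For illustration, a minimal sketch of pulling the ILP divider and crystal-frequency code out of a pmucontrol value; mapping the 5-bit xtalfreq code to an actual frequency is chip-specific and not attempted here:

static inline unsigned int pmuctl_ilp_div_field(unsigned int pmucontrol)
{
	/* Raw divider field; how the PMU applies it depends on PCTL_ILP_DIV_EN. */
	return (pmucontrol & PCTL_ILP_DIV_MASK) >> PCTL_ILP_DIV_SHIFT;
}

static inline unsigned int pmuctl_xtalfreq_code(unsigned int pmucontrol)
{
	return (pmucontrol & PCTL_XTALFREQ_MASK) >> PCTL_XTALFREQ_SHIFT;
}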
+
+/*  Retention Control */
+#define PMU_RCTL_CLK_DIV_SHIFT		0
+#define PMU_RCTL_CHAIN_LEN_SHIFT	12
+#define PMU_RCTL_MACPHY_DISABLE_SHIFT	26
+#define PMU_RCTL_MACPHY_DISABLE_MASK	(1 << 26)
+#define PMU_RCTL_LOGIC_DISABLE_SHIFT	27
+#define PMU_RCTL_LOGIC_DISABLE_MASK	(1 << 27)
+#define PMU_RCTL_MEMSLP_LOG_SHIFT	28
+#define PMU_RCTL_MEMSLP_LOG_MASK	(1 << 28)
+#define PMU_RCTL_MEMRETSLP_LOG_SHIFT	29
+#define PMU_RCTL_MEMRETSLP_LOG_MASK	(1 << 29)
+
+/*  Retention Group Control */
+#define PMU_RCTLGRP_CHAIN_LEN_SHIFT	0
+#define PMU_RCTLGRP_RMODE_ENABLE_SHIFT	14
+#define PMU_RCTLGRP_RMODE_ENABLE_MASK	(1 << 14)
+#define PMU_RCTLGRP_DFT_ENABLE_SHIFT	15
+#define PMU_RCTLGRP_DFT_ENABLE_MASK	(1 << 15)
+#define PMU_RCTLGRP_NSRST_DISABLE_SHIFT	16
+#define PMU_RCTLGRP_NSRST_DISABLE_MASK	(1 << 16)
+/*  Retention Group Control special for 4334 */
+#define PMU4334_RCTLGRP_CHAIN_LEN_GRP0	338
+#define PMU4334_RCTLGRP_CHAIN_LEN_GRP1	315
+/*  Retention Group Control special for 43341 */
+#define PMU43341_RCTLGRP_CHAIN_LEN_GRP0	366
+#define PMU43341_RCTLGRP_CHAIN_LEN_GRP1	330
+
+/* Fields in clkstretch */
+#define CSTRETCH_HT		0xffff0000
+#define CSTRETCH_ALP		0x0000ffff
+
+/* gpiotimerval */
+#define GPIO_ONTIME_SHIFT	16
+
+/* clockcontrol_n */
+#define	CN_N1_MASK		0x3f		/* n1 control */
+#define	CN_N2_MASK		0x3f00		/* n2 control */
+#define	CN_N2_SHIFT		8
+#define	CN_PLLC_MASK		0xf0000		/* pll control */
+#define	CN_PLLC_SHIFT		16
+
+/* clockcontrol_sb/pci/uart */
+#define	CC_M1_MASK		0x3f		/* m1 control */
+#define	CC_M2_MASK		0x3f00		/* m2 control */
+#define	CC_M2_SHIFT		8
+#define	CC_M3_MASK		0x3f0000	/* m3 control */
+#define	CC_M3_SHIFT		16
+#define	CC_MC_MASK		0x1f000000	/* mux control */
+#define	CC_MC_SHIFT		24
+
+/* N3M Clock control magic field values */
+#define	CC_F6_2			0x02		/* A factor of 2 in */
+#define	CC_F6_3			0x03		/* 6-bit fields like */
+#define	CC_F6_4			0x05		/* N1, M1 or M3 */
+#define	CC_F6_5			0x09
+#define	CC_F6_6			0x11
+#define	CC_F6_7			0x21
+
+#define	CC_F5_BIAS		5		/* 5-bit fields get this added */
+
+#define	CC_MC_BYPASS		0x08
+#define	CC_MC_M1		0x04
+#define	CC_MC_M1M2		0x02
+#define	CC_MC_M1M2M3		0x01
+#define	CC_MC_M1M3		0x11
+
+/* Type 2 Clock control magic field values */
+#define	CC_T2_BIAS		2		/* n1, n2, m1 & m3 bias */
+#define	CC_T2M2_BIAS		3		/* m2 bias */
+
+#define	CC_T2MC_M1BYP		1
+#define	CC_T2MC_M2BYP		2
+#define	CC_T2MC_M3BYP		4
+
+/* Type 6 Clock control magic field values */
+#define	CC_T6_MMASK		1		/* bits of interest in m */
+#define	CC_T6_M0		120000000	/* sb clock for m = 0 */
+#define	CC_T6_M1		100000000	/* sb clock for m = 1 */
+#define	SB2MIPS_T6(sb)		(2 * (sb))
+
+/* Common clock base */
+#define	CC_CLOCK_BASE1		24000000	/* Half the clock freq */
+#define CC_CLOCK_BASE2		12500000	/* Alternate crystal on some PLLs */
+
+/* Clock control values for 200MHz in 5350 */
+#define	CLKC_5350_N		0x0311
+#define	CLKC_5350_M		0x04020009
+
+/* Flash types in the chipcommon capabilities register */
+#define FLASH_NONE		0x000		/* No flash */
+#define SFLASH_ST		0x100		/* ST serial flash */
+#define SFLASH_AT		0x200		/* Atmel serial flash */
+#define NFLASH			0x300
+#define	PFLASH			0x700		/* Parallel flash */
+#define QSPIFLASH_ST		0x800
+#define QSPIFLASH_AT		0x900
+
+/* Bits in the ExtBus config registers */
+#define	CC_CFG_EN		0x0001		/* Enable */
+#define	CC_CFG_EM_MASK		0x000e		/* Extif Mode */
+#define	CC_CFG_EM_ASYNC		0x0000		/*   Async/Parallel flash */
+#define	CC_CFG_EM_SYNC		0x0002		/*   Synchronous */
+#define	CC_CFG_EM_PCMCIA	0x0004		/*   PCMCIA */
+#define	CC_CFG_EM_IDE		0x0006		/*   IDE */
+#define	CC_CFG_DS		0x0010		/* Data size, 0=8bit, 1=16bit */
+#define	CC_CFG_CD_MASK		0x00e0		/* Sync: Clock divisor, rev >= 20 */
+#define	CC_CFG_CE		0x0100		/* Sync: Clock enable, rev >= 20 */
+#define	CC_CFG_SB		0x0200		/* Sync: Size/Bytestrobe, rev >= 20 */
+#define	CC_CFG_IS		0x0400		/* Extif Sync Clk Select, rev >= 20 */
+
+/* ExtBus address space */
+#define	CC_EB_BASE		0x1a000000	/* Chipc ExtBus base address */
+#define	CC_EB_PCMCIA_MEM	0x1a000000	/* PCMCIA 0 memory base address */
+#define	CC_EB_PCMCIA_IO		0x1a200000	/* PCMCIA 0 I/O base address */
+#define	CC_EB_PCMCIA_CFG	0x1a400000	/* PCMCIA 0 config base address */
+#define	CC_EB_IDE		0x1a800000	/* IDE memory base */
+#define	CC_EB_PCMCIA1_MEM	0x1a800000	/* PCMCIA 1 memory base address */
+#define	CC_EB_PCMCIA1_IO	0x1aa00000	/* PCMCIA 1 I/O base address */
+#define	CC_EB_PCMCIA1_CFG	0x1ac00000	/* PCMCIA 1 config base address */
+#define	CC_EB_PROGIF		0x1b000000	/* ProgIF Async/Sync base address */
+
+
+/* Start/busy bit in flashcontrol */
+#define SFLASH_OPCODE		0x000000ff
+#define SFLASH_ACTION		0x00000700
+#define	SFLASH_CS_ACTIVE	0x00001000	/* Chip Select Active, rev >= 20 */
+#define SFLASH_START		0x80000000
+#define SFLASH_BUSY		SFLASH_START
+
+/* flashcontrol action codes */
+#define	SFLASH_ACT_OPONLY	0x0000		/* Issue opcode only */
+#define	SFLASH_ACT_OP1D		0x0100		/* opcode + 1 data byte */
+#define	SFLASH_ACT_OP3A		0x0200		/* opcode + 3 addr bytes */
+#define	SFLASH_ACT_OP3A1D	0x0300		/* opcode + 3 addr & 1 data bytes */
+#define	SFLASH_ACT_OP3A4D	0x0400		/* opcode + 3 addr & 4 data bytes */
+#define	SFLASH_ACT_OP3A4X4D	0x0500		/* opcode + 3 addr, 4 don't care & 4 data bytes */
+#define	SFLASH_ACT_OP3A1X4D	0x0700		/* opcode + 3 addr, 1 don't care & 4 data bytes */
+
+/* flashcontrol action+opcodes for ST flashes */
+#define SFLASH_ST_WREN		0x0006		/* Write Enable */
+#define SFLASH_ST_WRDIS		0x0004		/* Write Disable */
+#define SFLASH_ST_RDSR		0x0105		/* Read Status Register */
+#define SFLASH_ST_WRSR		0x0101		/* Write Status Register */
+#define SFLASH_ST_READ		0x0303		/* Read Data Bytes */
+#define SFLASH_ST_PP		0x0302		/* Page Program */
+#define SFLASH_ST_SE		0x02d8		/* Sector Erase */
+#define SFLASH_ST_BE		0x00c7		/* Bulk Erase */
+#define SFLASH_ST_DP		0x00b9		/* Deep Power-down */
+#define SFLASH_ST_RES		0x03ab		/* Read Electronic Signature */
+#define SFLASH_ST_CSA		0x1000		/* Keep chip select asserted */
+#define SFLASH_ST_SSE		0x0220		/* Sub-sector Erase */
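Each SFLASH_ST_* value packs a flashcontrol action code (SFLASH_ACTION field) together with the raw SPI opcode (SFLASH_OPCODE field); for example SFLASH_ST_PP (0x0302) is SFLASH_ACT_OP3A1D | 0x02. A small sketch of splitting a packed value back apart:

static inline unsigned int sflash_action(unsigned int actioncode)
{
	return actioncode & SFLASH_ACTION;	/* e.g. SFLASH_ACT_OP3A1D */
}

static inline unsigned int sflash_opcode(unsigned int actioncode)
{
	return actioncode & SFLASH_OPCODE;	/* raw SPI opcode byte */
}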
+
+#define SFLASH_MXIC_RDID	0x0390		/* Read Manufacturer ID */
+#define SFLASH_MXIC_MFID	0xc2		/* MXIC Manufacturer ID */
+
+/* Status register bits for ST flashes */
+#define SFLASH_ST_WIP		0x01		/* Write In Progress */
+#define SFLASH_ST_WEL		0x02		/* Write Enable Latch */
+#define SFLASH_ST_BP_MASK	0x1c		/* Block Protect */
+#define SFLASH_ST_BP_SHIFT	2
+#define SFLASH_ST_SRWD		0x80		/* Status Register Write Disable */
+
+/* flashcontrol action+opcodes for Atmel flashes */
+#define SFLASH_AT_READ				0x07e8
+#define SFLASH_AT_PAGE_READ			0x07d2
+#define SFLASH_AT_BUF1_READ
+#define SFLASH_AT_BUF2_READ
+#define SFLASH_AT_STATUS			0x01d7
+#define SFLASH_AT_BUF1_WRITE			0x0384
+#define SFLASH_AT_BUF2_WRITE			0x0387
+#define SFLASH_AT_BUF1_ERASE_PROGRAM		0x0283
+#define SFLASH_AT_BUF2_ERASE_PROGRAM		0x0286
+#define SFLASH_AT_BUF1_PROGRAM			0x0288
+#define SFLASH_AT_BUF2_PROGRAM			0x0289
+#define SFLASH_AT_PAGE_ERASE			0x0281
+#define SFLASH_AT_BLOCK_ERASE			0x0250
+#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM	0x0382
+#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM	0x0385
+#define SFLASH_AT_BUF1_LOAD			0x0253
+#define SFLASH_AT_BUF2_LOAD			0x0255
+#define SFLASH_AT_BUF1_COMPARE			0x0260
+#define SFLASH_AT_BUF2_COMPARE			0x0261
+#define SFLASH_AT_BUF1_REPROGRAM		0x0258
+#define SFLASH_AT_BUF2_REPROGRAM		0x0259
+
+/* Status register bits for Atmel flashes */
+#define SFLASH_AT_READY				0x80
+#define SFLASH_AT_MISMATCH			0x40
+#define SFLASH_AT_ID_MASK			0x38
+#define SFLASH_AT_ID_SHIFT			3
+
+/* SPI register bits, corerev >= 37 */
+#define GSIO_START			0x80000000
+#define GSIO_BUSY			GSIO_START
+
+/*
+ * These are the UART port assignments, expressed as offsets from the base
+ * register.  These assignments should hold for any serial port based on
+ * a 8250, 16450, or 16550(A).
+ */
+
+#define UART_RX		0	/* In:  Receive buffer (DLAB=0) */
+#define UART_TX		0	/* Out: Transmit buffer (DLAB=0) */
+#define UART_DLL	0	/* Out: Divisor Latch Low (DLAB=1) */
+#define UART_IER	1	/* In/Out: Interrupt Enable Register (DLAB=0) */
+#define UART_DLM	1	/* Out: Divisor Latch High (DLAB=1) */
+#define UART_IIR	2	/* In: Interrupt Identity Register  */
+#define UART_FCR	2	/* Out: FIFO Control Register */
+#define UART_LCR	3	/* Out: Line Control Register */
+#define UART_MCR	4	/* Out: Modem Control Register */
+#define UART_LSR	5	/* In:  Line Status Register */
+#define UART_MSR	6	/* In:  Modem Status Register */
+#define UART_SCR	7	/* I/O: Scratch Register */
+#define UART_LCR_DLAB	0x80	/* Divisor latch access bit */
+#define UART_LCR_WLEN8	0x03	/* Word length: 8 bits */
+#define UART_MCR_OUT2	0x08	/* MCR GPIO out 2 */
+#define UART_MCR_LOOP	0x10	/* Enable loopback test mode */
+#define UART_LSR_RX_FIFO 	0x80	/* Receive FIFO error */
+#define UART_LSR_TDHR		0x40	/* Data-hold-register empty */
+#define UART_LSR_THRE		0x20	/* Transmit-hold-register empty */
+#define UART_LSR_BREAK		0x10	/* Break interrupt */
+#define UART_LSR_FRAMING	0x08	/* Framing error */
+#define UART_LSR_PARITY		0x04	/* Parity error */
+#define UART_LSR_OVERRUN	0x02	/* Overrun error */
+#define UART_LSR_RXRDY		0x01	/* Receiver ready */
+#define UART_FCR_FIFO_ENABLE 1	/* FIFO control register bit controlling FIFO enable/disable */
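A hedged sketch of a polled transmit using the offsets and status bits above, assuming a byte-wide, memory-mapped 8250-style UART at some base address (the mapping itself is outside this header):

static inline void uart_poll_putc(volatile unsigned char *base, char c)
{
	/* Wait until the transmit holding register is empty... */
	while (!(base[UART_LSR] & UART_LSR_THRE))
		;
	/* ...then write the character to the TX register (DLAB must be 0). */
	base[UART_TX] = (unsigned char)c;
}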
+
+/* Interrupt Identity Register (IIR) bits */
+#define UART_IIR_FIFO_MASK	0xc0	/* IIR FIFO disable/enabled mask */
+#define UART_IIR_INT_MASK	0xf	/* IIR interrupt ID source */
+#define UART_IIR_MDM_CHG	0x0	/* Modem status changed */
+#define UART_IIR_NOINT		0x1	/* No interrupt pending */
+#define UART_IIR_THRE		0x2	/* THR empty */
+#define UART_IIR_RCVD_DATA	0x4	/* Received data available */
+#define UART_IIR_RCVR_STATUS 	0x6	/* Receiver status */
+#define UART_IIR_CHAR_TIME 	0xc	/* Character time */
+
+/* Interrupt Enable Register (IER) bits */
+#define UART_IER_PTIME	128	/* Programmable THRE Interrupt Mode Enable */
+#define UART_IER_EDSSI	8	/* enable modem status interrupt */
+#define UART_IER_ELSI	4	/* enable receiver line status interrupt */
+#define UART_IER_ETBEI  2	/* enable transmitter holding register empty interrupt */
+#define UART_IER_ERBFI	1	/* enable data available interrupt */
+
+/* pmustatus */
+#define PST_SLOW_WR_PENDING 0x0400
+#define PST_EXTLPOAVAIL	0x0100
+#define PST_WDRESET	0x0080
+#define	PST_INTPEND	0x0040
+#define	PST_SBCLKST	0x0030
+#define	PST_SBCLKST_ILP	0x0010
+#define	PST_SBCLKST_ALP	0x0020
+#define	PST_SBCLKST_HT	0x0030
+#define	PST_ALPAVAIL	0x0008
+#define	PST_HTAVAIL	0x0004
+#define	PST_RESINIT	0x0003
+
+/* pmucapabilities */
+#define PCAP_REV_MASK	0x000000ff
+#define PCAP_RC_MASK	0x00001f00
+#define PCAP_RC_SHIFT	8
+#define PCAP_TC_MASK	0x0001e000
+#define PCAP_TC_SHIFT	13
+#define PCAP_PC_MASK	0x001e0000
+#define PCAP_PC_SHIFT	17
+#define PCAP_VC_MASK	0x01e00000
+#define PCAP_VC_SHIFT	21
+#define PCAP_CC_MASK	0x1e000000
+#define PCAP_CC_SHIFT	25
+#define PCAP5_PC_MASK	0x003e0000	/* PMU corerev >= 5 */
+#define PCAP5_PC_SHIFT	17
+#define PCAP5_VC_MASK	0x07c00000
+#define PCAP5_VC_SHIFT	22
+#define PCAP5_CC_MASK	0xf8000000
+#define PCAP5_CC_SHIFT	27
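An illustrative sketch of decoding pmucapabilities; the PCAP5_* variants widen some fields for PMU corerev >= 5, so a real driver would select the mask set based on the PMU revision read here:

static inline unsigned int pmucap_rev(unsigned int pmucaps)
{
	return pmucaps & PCAP_REV_MASK;
}

static inline unsigned int pmucap_nresources(unsigned int pmucaps)
{
	/* Resource-count field (pre-rev-5 layout). */
	return (pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
}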
+
+/* PMU Resource Request Timer registers */
+/* This is based on PmuRev0 */
+#define	PRRT_TIME_MASK	0x03ff
+#define	PRRT_INTEN	0x0400
+#define	PRRT_REQ_ACTIVE	0x0800
+#define	PRRT_ALP_REQ	0x1000
+#define	PRRT_HT_REQ	0x2000
+#define PRRT_HQ_REQ 0x4000
+
+/* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */
+#define RSRC_INTR_MASK_TIMER_INT_0 1
+
+/* PMU resource bit position */
+#define PMURES_BIT(bit)	(1 << (bit))
+
+/* PMU resource number limit */
+#define PMURES_MAX_RESNUM	30
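PMURES_BIT() turns a resource number (such as the RES43236_* values defined further down) into a bit for the PMU min/max resource masks; a small sketch, with the range check as an illustrative guard only:

static inline unsigned int pmu_res_mask(unsigned int resnum)
{
	/* e.g. pmu_res_mask(RES43236_XTAL_PU) | pmu_res_mask(RES43236_ALP_AVAIL) */
	return (resnum <= PMURES_MAX_RESNUM) ? PMURES_BIT(resnum) : 0;
}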
+
+/* PMU chip control0 register */
+#define	PMU_CHIPCTL0		0
+#define PMU43143_CC0_SDIO_DRSTR_OVR	(1 << 31) /* sdio drive strength override enable */
+
+/* clock req types */
+#define PMU_CC1_CLKREQ_TYPE_SHIFT	19
+#define PMU_CC1_CLKREQ_TYPE_MASK	(1 << PMU_CC1_CLKREQ_TYPE_SHIFT)
+
+#define CLKREQ_TYPE_CONFIG_OPENDRAIN		0
+#define CLKREQ_TYPE_CONFIG_PUSHPULL		1
+
+/* PMU chip control1 register */
+#define	PMU_CHIPCTL1			1
+#define	PMU_CC1_RXC_DLL_BYPASS		0x00010000
+#define PMU_CC1_ENABLE_BBPLL_PWR_DOWN	0x00000010
+
+#define PMU_CC1_IF_TYPE_MASK   		0x00000030
+#define PMU_CC1_IF_TYPE_RMII    	0x00000000
+#define PMU_CC1_IF_TYPE_MII     	0x00000010
+#define PMU_CC1_IF_TYPE_RGMII   	0x00000020
+
+#define PMU_CC1_SW_TYPE_MASK    	0x000000c0
+#define PMU_CC1_SW_TYPE_EPHY    	0x00000000
+#define PMU_CC1_SW_TYPE_EPHYMII 	0x00000040
+#define PMU_CC1_SW_TYPE_EPHYRMII	0x00000080
+#define PMU_CC1_SW_TYPE_RGMII   	0x000000c0
+
+/* PMU chip control2 register */
+#define	PMU_CHIPCTL2		2
+#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON   	(1 << 18)
+#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON   	(1 << 19)
+#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON   	(1 << 20)
+#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON   	(1 << 21)
+
+/* PMU chip control3 register */
+#define	PMU_CHIPCTL3		3
+#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT  19
+#define PMU_CC3_ENABLE_RF_SHIFT           22
+#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT   23
+
+/* PMU chip control5 register */
+#define PMU_CHIPCTL5                    5
+
+/* PMU chip control6 register */
+#define PMU_CHIPCTL6                    6
+#define PMU_CC6_ENABLE_CLKREQ_WAKEUP    (1 << 4)
+#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP   (1 << 6)
+
+/* PMU chip control7 register */
+#define PMU_CHIPCTL7				7
+#define PMU_CC7_ENABLE_L2REFCLKPAD_PWRDWN	(1 << 25)
+#define PMU_CC7_ENABLE_MDIO_RESET_WAR		(1 << 27)
+
+
+/* PMU corerev and chip specific PLL controls.
+ * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number
+ * to differentiate different PLLs controlled by the same PMU rev.
+ */
+/* pllcontrol registers */
+/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */
+#define	PMU0_PLL0_PLLCTL0		0
+#define	PMU0_PLL0_PC0_PDIV_MASK		1
+#define	PMU0_PLL0_PC0_PDIV_FREQ		25000
+#define PMU0_PLL0_PC0_DIV_ARM_MASK	0x00000038
+#define PMU0_PLL0_PC0_DIV_ARM_SHIFT	3
+#define PMU0_PLL0_PC0_DIV_ARM_BASE	8
+
+/* PC0_DIV_ARM for PLLOUT_ARM */
+#define PMU0_PLL0_PC0_DIV_ARM_110MHZ	0
+#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ	1
+#define PMU0_PLL0_PC0_DIV_ARM_88MHZ	2
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ	3 /* Default */
+#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ	4
+#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ	5
+#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ	6
+#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ	7
+
+/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */
+#define	PMU0_PLL0_PLLCTL1		1
+#define	PMU0_PLL0_PC1_WILD_INT_MASK	0xf0000000
+#define	PMU0_PLL0_PC1_WILD_INT_SHIFT	28
+#define	PMU0_PLL0_PC1_WILD_FRAC_MASK	0x0fffff00
+#define	PMU0_PLL0_PC1_WILD_FRAC_SHIFT	8
+#define	PMU0_PLL0_PC1_STOP_MOD		0x00000040
+
+/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */
+#define	PMU0_PLL0_PLLCTL2		2
+#define	PMU0_PLL0_PC2_WILD_INT_MASK	0xf
+#define	PMU0_PLL0_PC2_WILD_INT_SHIFT	4
+
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
+#define PMU1_PLL0_PLLCTL0		0
+#define PMU1_PLL0_PC0_P1DIV_MASK	0x00f00000
+#define PMU1_PLL0_PC0_P1DIV_SHIFT	20
+#define PMU1_PLL0_PC0_P2DIV_MASK	0x0f000000
+#define PMU1_PLL0_PC0_P2DIV_SHIFT	24
+
+/* m<x>div */
+#define PMU1_PLL0_PLLCTL1		1
+#define PMU1_PLL0_PC1_M1DIV_MASK	0x000000ff
+#define PMU1_PLL0_PC1_M1DIV_SHIFT	0
+#define PMU1_PLL0_PC1_M2DIV_MASK	0x0000ff00
+#define PMU1_PLL0_PC1_M2DIV_SHIFT	8
+#define PMU1_PLL0_PC1_M3DIV_MASK	0x00ff0000
+#define PMU1_PLL0_PC1_M3DIV_SHIFT	16
+#define PMU1_PLL0_PC1_M4DIV_MASK	0xff000000
+#define PMU1_PLL0_PC1_M4DIV_SHIFT	24
+#define PMU1_PLL0_PC1_M4DIV_BY_9	9
+#define PMU1_PLL0_PC1_M4DIV_BY_18	0x12
+#define PMU1_PLL0_PC1_M4DIV_BY_36	0x24
+#define PMU1_PLL0_PC1_M4DIV_BY_60	0x3C
+
+#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
+#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL  (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
+#define PMU1_PLL0_PLLCTL2		2
+#define PMU1_PLL0_PC2_M5DIV_MASK	0x000000ff
+#define PMU1_PLL0_PC2_M5DIV_SHIFT	0
+#define PMU1_PLL0_PC2_M5DIV_BY_12	0xc
+#define PMU1_PLL0_PC2_M5DIV_BY_18	0x12
+#define PMU1_PLL0_PC2_M5DIV_BY_36	0x24
+#define PMU1_PLL0_PC2_M6DIV_MASK	0x0000ff00
+#define PMU1_PLL0_PC2_M6DIV_SHIFT	8
+#define PMU1_PLL0_PC2_M6DIV_BY_18	0x12
+#define PMU1_PLL0_PC2_M6DIV_BY_36	0x24
+#define PMU1_PLL0_PC2_NDIV_MODE_MASK	0x000e0000
+#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT	17
+#define PMU1_PLL0_PC2_NDIV_MODE_MASH	1
+#define PMU1_PLL0_PC2_NDIV_MODE_MFB	2	/* recommended for 4319 */
+#define PMU1_PLL0_PC2_NDIV_INT_MASK	0x1ff00000
+#define PMU1_PLL0_PC2_NDIV_INT_SHIFT	20
+
+/* ndiv_frac */
+#define PMU1_PLL0_PLLCTL3		3
+#define PMU1_PLL0_PC3_NDIV_FRAC_MASK	0x00ffffff
+#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT	0
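As a hedged illustration of how the PLLCTL0/2/3 fields fit together: PMU1-style PLLs are commonly programmed so that fvco = (xtal / p1div) * (ndiv_int + ndiv_frac / 2^24). The sketch below assumes that relationship and a caller-supplied crystal frequency; it is not a definitive frequency calculation for any particular chip:

static inline unsigned int pmu1_pll0_fvco_hz(unsigned int pllctl0,
					     unsigned int pllctl2,
					     unsigned int pllctl3,
					     unsigned int xtal_hz)
{
	unsigned int p1div = (pllctl0 & PMU1_PLL0_PC0_P1DIV_MASK) >>
			     PMU1_PLL0_PC0_P1DIV_SHIFT;
	unsigned int ndiv_int = (pllctl2 & PMU1_PLL0_PC2_NDIV_INT_MASK) >>
				PMU1_PLL0_PC2_NDIV_INT_SHIFT;
	unsigned long long ndiv_frac = (pllctl3 & PMU1_PLL0_PC3_NDIV_FRAC_MASK) >>
				       PMU1_PLL0_PC3_NDIV_FRAC_SHIFT;
	unsigned long long ref = p1div ? (xtal_hz / p1div) : 0;

	/* Integer part plus the 24-bit fractional contribution. */
	return (unsigned int)(ref * ndiv_int + ((ref * ndiv_frac) >> 24));
}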
+
+/* pll_ctrl */
+#define PMU1_PLL0_PLLCTL4		4
+
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
+#define PMU1_PLL0_PLLCTL5		5
+#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00
+#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8
+
+#define PMU1_PLL0_PLLCTL6		6
+#define PMU1_PLL0_PLLCTL7		7
+
+#define PMU1_PLL0_PLLCTL8		8
+#define PMU1_PLLCTL8_OPENLOOP_MASK	0x2
+
+/* PMU rev 2 control words */
+#define PMU2_PHY_PLL_PLLCTL		4
+#define PMU2_SI_PLL_PLLCTL		10
+
+/* PMU rev 2 */
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
+#define PMU2_PLL_PLLCTL0		0
+#define PMU2_PLL_PC0_P1DIV_MASK 	0x00f00000
+#define PMU2_PLL_PC0_P1DIV_SHIFT	20
+#define PMU2_PLL_PC0_P2DIV_MASK 	0x0f000000
+#define PMU2_PLL_PC0_P2DIV_SHIFT	24
+
+/* m<x>div */
+#define PMU2_PLL_PLLCTL1		1
+#define PMU2_PLL_PC1_M1DIV_MASK 	0x000000ff
+#define PMU2_PLL_PC1_M1DIV_SHIFT	0
+#define PMU2_PLL_PC1_M2DIV_MASK 	0x0000ff00
+#define PMU2_PLL_PC1_M2DIV_SHIFT	8
+#define PMU2_PLL_PC1_M3DIV_MASK 	0x00ff0000
+#define PMU2_PLL_PC1_M3DIV_SHIFT	16
+#define PMU2_PLL_PC1_M4DIV_MASK 	0xff000000
+#define PMU2_PLL_PC1_M4DIV_SHIFT	24
+
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
+#define PMU2_PLL_PLLCTL2		2
+#define PMU2_PLL_PC2_M5DIV_MASK 	0x000000ff
+#define PMU2_PLL_PC2_M5DIV_SHIFT	0
+#define PMU2_PLL_PC2_M6DIV_MASK 	0x0000ff00
+#define PMU2_PLL_PC2_M6DIV_SHIFT	8
+#define PMU2_PLL_PC2_NDIV_MODE_MASK	0x000e0000
+#define PMU2_PLL_PC2_NDIV_MODE_SHIFT	17
+#define PMU2_PLL_PC2_NDIV_INT_MASK	0x1ff00000
+#define PMU2_PLL_PC2_NDIV_INT_SHIFT	20
+
+/* ndiv_frac */
+#define PMU2_PLL_PLLCTL3		3
+#define PMU2_PLL_PC3_NDIV_FRAC_MASK	0x00ffffff
+#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT	0
+
+/* pll_ctrl */
+#define PMU2_PLL_PLLCTL4		4
+
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
+#define PMU2_PLL_PLLCTL5		5
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK	0x00000f00
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT	8
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK	0x0000f000
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT	12
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK	0x000f0000
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT	16
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK	0x00f00000
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT	20
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK	0x0f000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT	24
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK	0xf0000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT	28
+
+/* PMU rev 5 (& 6) */
+#define	PMU5_PLL_P1P2_OFF		0
+#define	PMU5_PLL_P1_MASK		0x0f000000
+#define	PMU5_PLL_P1_SHIFT		24
+#define	PMU5_PLL_P2_MASK		0x00f00000
+#define	PMU5_PLL_P2_SHIFT		20
+#define	PMU5_PLL_M14_OFF		1
+#define	PMU5_PLL_MDIV_MASK		0x000000ff
+#define	PMU5_PLL_MDIV_WIDTH		8
+#define	PMU5_PLL_NM5_OFF		2
+#define	PMU5_PLL_NDIV_MASK		0xfff00000
+#define	PMU5_PLL_NDIV_SHIFT		20
+#define	PMU5_PLL_NDIV_MODE_MASK		0x000e0000
+#define	PMU5_PLL_NDIV_MODE_SHIFT	17
+#define	PMU5_PLL_FMAB_OFF		3
+#define	PMU5_PLL_MRAT_MASK		0xf0000000
+#define	PMU5_PLL_MRAT_SHIFT		28
+#define	PMU5_PLL_ABRAT_MASK		0x08000000
+#define	PMU5_PLL_ABRAT_SHIFT		27
+#define	PMU5_PLL_FDIV_MASK		0x07ffffff
+#define	PMU5_PLL_PLLCTL_OFF		4
+#define	PMU5_PLL_PCHI_OFF		5
+#define	PMU5_PLL_PCHI_MASK		0x0000003f
+
+/* pmu XtalFreqRatio */
+#define	PMU_XTALFREQ_REG_ILPCTR_MASK	0x00001FFF
+#define	PMU_XTALFREQ_REG_MEASURE_MASK	0x80000000
+#define	PMU_XTALFREQ_REG_MEASURE_SHIFT	31
+
+/* Divider allocation in 4716/47162/5356/5357 */
+#define	PMU5_MAINPLL_CPU		1
+#define	PMU5_MAINPLL_MEM		2
+#define	PMU5_MAINPLL_SI			3
+
+/* 4706 PMU */
+#define PMU4706_MAINPLL_PLL0	0
+#define PMU6_4706_PROCPLL_OFF	4	/* The CPU PLL */
+#define PMU6_4706_PROC_P2DIV_MASK		0x000f0000
+#define PMU6_4706_PROC_P2DIV_SHIFT	16
+#define PMU6_4706_PROC_P1DIV_MASK		0x0000f000
+#define PMU6_4706_PROC_P1DIV_SHIFT	12
+#define PMU6_4706_PROC_NDIV_INT_MASK	0x00000ff8
+#define PMU6_4706_PROC_NDIV_INT_SHIFT	3
+#define PMU6_4706_PROC_NDIV_MODE_MASK		0x00000007
+#define PMU6_4706_PROC_NDIV_MODE_SHIFT	0
+
+#define PMU7_PLL_PLLCTL7                7
+#define PMU7_PLL_CTL7_M4DIV_MASK	0xff000000
+#define PMU7_PLL_CTL7_M4DIV_SHIFT 	24
+#define PMU7_PLL_CTL7_M4DIV_BY_6	6
+#define PMU7_PLL_CTL7_M4DIV_BY_12	0xc
+#define PMU7_PLL_CTL7_M4DIV_BY_24	0x18
+#define PMU7_PLL_PLLCTL8                8
+#define PMU7_PLL_CTL8_M5DIV_MASK	0x000000ff
+#define PMU7_PLL_CTL8_M5DIV_SHIFT	0
+#define PMU7_PLL_CTL8_M5DIV_BY_8	8
+#define PMU7_PLL_CTL8_M5DIV_BY_12	0xc
+#define PMU7_PLL_CTL8_M5DIV_BY_24	0x18
+#define PMU7_PLL_CTL8_M6DIV_MASK	0x0000ff00
+#define PMU7_PLL_CTL8_M6DIV_SHIFT	8
+#define PMU7_PLL_CTL8_M6DIV_BY_12	0xc
+#define PMU7_PLL_CTL8_M6DIV_BY_24	0x18
+#define PMU7_PLL_PLLCTL11		11
+#define PMU7_PLL_PLLCTL11_MASK		0xffffff00
+#define PMU7_PLL_PLLCTL11_VAL		0x22222200
+
+/* PMU rev 15 */
+#define PMU15_PLL_PLLCTL0		0
+#define PMU15_PLL_PC0_CLKSEL_MASK	0x00000003
+#define PMU15_PLL_PC0_CLKSEL_SHIFT	0
+#define PMU15_PLL_PC0_FREQTGT_MASK	0x003FFFFC
+#define PMU15_PLL_PC0_FREQTGT_SHIFT	2
+#define PMU15_PLL_PC0_PRESCALE_MASK	0x00C00000
+#define PMU15_PLL_PC0_PRESCALE_SHIFT	22
+#define PMU15_PLL_PC0_KPCTRL_MASK	0x07000000
+#define PMU15_PLL_PC0_KPCTRL_SHIFT	24
+#define PMU15_PLL_PC0_FCNTCTRL_MASK	0x38000000
+#define PMU15_PLL_PC0_FCNTCTRL_SHIFT	27
+#define PMU15_PLL_PC0_FDCMODE_MASK	0x40000000
+#define PMU15_PLL_PC0_FDCMODE_SHIFT	30
+#define PMU15_PLL_PC0_CTRLBIAS_MASK	0x80000000
+#define PMU15_PLL_PC0_CTRLBIAS_SHIFT	31
+
+#define PMU15_PLL_PLLCTL1			1
+#define PMU15_PLL_PC1_BIAS_CTLM_MASK		0x00000060
+#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT		5
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK	0x00000040
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT	6
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK		0x0001FF80
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT	7
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK	0x03FE0000
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT	17
+#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK		0x0C000000
+#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT	26
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK	0x10000000
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT	28
+#define PMU15_PLL_PC1_OPENLP_EN_MASK		0x40000000
+#define PMU15_PLL_PC1_OPENLP_EN_SHIFT		30
+
+#define PMU15_PLL_PLLCTL2			2
+#define PMU15_PLL_PC2_CTEN_MASK			0x00000001
+#define PMU15_PLL_PC2_CTEN_SHIFT		0
+
+#define PMU15_PLL_PLLCTL3			3
+#define PMU15_PLL_PC3_DITHER_EN_MASK		0x00000001
+#define PMU15_PLL_PC3_DITHER_EN_SHIFT		0
+#define PMU15_PLL_PC3_DCOCTLSP_MASK		0xFE000000
+#define PMU15_PLL_PC3_DCOCTLSP_SHIFT		25
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK	0x01
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT	0
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK	0x02
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT	1
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK	0x04
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT	2
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK	0x18
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT	3
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK	0x60
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT	5
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1	0
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2	1
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3	2
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5	3
+
+#define PMU15_PLL_PLLCTL4			4
+#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK		0x00000007
+#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT		0
+#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK		0x00000038
+#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT		3
+#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK		0x000001C0
+#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT		6
+#define PMU15_PLL_PC4_DBGMODE_MASK		0x00000E00
+#define PMU15_PLL_PC4_DBGMODE_SHIFT		9
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK	0x00001000
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT	12
+#define PMU15_PLL_PC4_FLL480_CTLSP_MASK		0x000FE000
+#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT	13
+#define PMU15_PLL_PC4_DINPOL_MASK		0x00100000
+#define PMU15_PLL_PC4_DINPOL_SHIFT		20
+#define PMU15_PLL_PC4_CLKOUT_PD_MASK		0x00200000
+#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT		21
+#define PMU15_PLL_PC4_CLKDIV2_PD_MASK		0x00400000
+#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT		22
+#define PMU15_PLL_PC4_CLKDIV4_PD_MASK		0x00800000
+#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT		23
+#define PMU15_PLL_PC4_CLKDIV8_PD_MASK		0x01000000
+#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT		24
+#define PMU15_PLL_PC4_CLKDIV16_PD_MASK		0x02000000
+#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT		25
+#define PMU15_PLL_PC4_TEST_EN_MASK		0x04000000
+#define PMU15_PLL_PC4_TEST_EN_SHIFT		26
+
+#define PMU15_PLL_PLLCTL5			5
+#define PMU15_PLL_PC5_FREQTGT_MASK		0x000FFFFF
+#define PMU15_PLL_PC5_FREQTGT_SHIFT		0
+#define PMU15_PLL_PC5_DCOCTLSP_MASK		0x07F00000
+#define PMU15_PLL_PC5_DCOCTLSP_SHIFT		20
+#define PMU15_PLL_PC5_PRESCALE_MASK		0x18000000
+#define PMU15_PLL_PC5_PRESCALE_SHIFT		27
+
+#define PMU15_PLL_PLLCTL6		6
+#define PMU15_PLL_PC6_FREQTGT_MASK	0x000FFFFF
+#define PMU15_PLL_PC6_FREQTGT_SHIFT	0
+#define PMU15_PLL_PC6_DCOCTLSP_MASK	0x07F00000
+#define PMU15_PLL_PC6_DCOCTLSP_SHIFT	20
+#define PMU15_PLL_PC6_PRESCALE_MASK	0x18000000
+#define PMU15_PLL_PC6_PRESCALE_SHIFT	27
+
+#define PMU15_FREQTGT_480_DEFAULT	0x19AB1
+#define PMU15_FREQTGT_492_DEFAULT	0x1A4F5
+#define PMU15_ARM_96MHZ			96000000	/* 96 MHz */
+#define PMU15_ARM_98MHZ			98400000	/* 98.4 MHz */
+#define PMU15_ARM_97MHZ			97000000	/* 97 MHz */
+
+
+#define PMU17_PLLCTL2_NDIVTYPE_MASK		0x00000070
+#define PMU17_PLLCTL2_NDIVTYPE_SHIFT		4
+
+#define PMU17_PLLCTL2_NDIV_MODE_INT		0
+#define PMU17_PLLCTL2_NDIV_MODE_INT1B8		1
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111		2
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8	3
+
+#define PMU17_PLLCTL0_BBPLL_PWRDWN		0
+#define PMU17_PLLCTL0_BBPLL_DRST		3
+#define PMU17_PLLCTL0_BBPLL_DISBL_CLK		8
+
+/* PLL usage in 4716/47162 */
+#define	PMU4716_MAINPLL_PLL0		12
+
+/* PLL usage in 4335 */
+#define PMU4335_PLL0_PC2_P1DIV_MASK			0x000f0000
+#define PMU4335_PLL0_PC2_P1DIV_SHIFT		16
+#define PMU4335_PLL0_PC2_NDIV_INT_MASK		0xff800000
+#define PMU4335_PLL0_PC2_NDIV_INT_SHIFT		23
+#define PMU4335_PLL0_PC1_MDIV2_MASK			0x0000ff00
+#define PMU4335_PLL0_PC1_MDIV2_SHIFT		8
+
+
+/* PLL usage in 5356/5357 */
+#define	PMU5356_MAINPLL_PLL0		0
+#define	PMU5357_MAINPLL_PLL0		0
+
+/* 4716/47162 resources */
+#define RES4716_PROC_PLL_ON		0x00000040
+#define RES4716_PROC_HT_AVAIL		0x00000080
+
+/* 4716/4717/4718 Chip specific ChipControl register bits */
+#define CCTRL_471X_I2S_PINS_ENABLE	0x0080 /* I2S pins off by default, shared w/ pflash */
+
+/* 5357 Chip specific ChipControl register bits */
+/* 2nd - 32-bit reg */
+#define CCTRL_5357_I2S_PINS_ENABLE	0x00040000 /* I2S pins enable */
+#define CCTRL_5357_I2CSPI_PINS_ENABLE	0x00080000 /* I2C/SPI pins enable */
+
+/* 5354 resources */
+#define RES5354_EXT_SWITCHER_PWM	0	/* 0x00001 */
+#define RES5354_BB_SWITCHER_PWM		1	/* 0x00002 */
+#define RES5354_BB_SWITCHER_BURST	2	/* 0x00004 */
+#define RES5354_BB_EXT_SWITCHER_BURST	3	/* 0x00008 */
+#define RES5354_ILP_REQUEST		4	/* 0x00010 */
+#define RES5354_RADIO_SWITCHER_PWM	5	/* 0x00020 */
+#define RES5354_RADIO_SWITCHER_BURST	6	/* 0x00040 */
+#define RES5354_ROM_SWITCH		7	/* 0x00080 */
+#define RES5354_PA_REF_LDO		8	/* 0x00100 */
+#define RES5354_RADIO_LDO		9	/* 0x00200 */
+#define RES5354_AFE_LDO			10	/* 0x00400 */
+#define RES5354_PLL_LDO			11	/* 0x00800 */
+#define RES5354_BG_FILTBYP		12	/* 0x01000 */
+#define RES5354_TX_FILTBYP		13	/* 0x02000 */
+#define RES5354_RX_FILTBYP		14	/* 0x04000 */
+#define RES5354_XTAL_PU			15	/* 0x08000 */
+#define RES5354_XTAL_EN			16	/* 0x10000 */
+#define RES5354_BB_PLL_FILTBYP		17	/* 0x20000 */
+#define RES5354_RF_PLL_FILTBYP		18	/* 0x40000 */
+#define RES5354_BB_PLL_PU		19	/* 0x80000 */
+
+/* 5357 Chip specific ChipControl register bits */
+#define CCTRL5357_EXTPA                 (1<<14) /* extPA in ChipControl 1, bit 14 */
+#define CCTRL5357_ANT_MUX_2o3		(1<<15) /* 2o3 in ChipControl 1, bit 15 */
+#define CCTRL5357_NFLASH		(1<<16) /* Nandflash in ChipControl 1, bit 16 */
+
+/* 43217 Chip specific ChipControl register bits */
+#define CCTRL43217_EXTPA_C0             (1<<13) /* core0 extPA in ChipControl 1, bit 13 */
+#define CCTRL43217_EXTPA_C1             (1<<8)  /* core1 extPA in ChipControl 1, bit 8 */
+
+/* 43228 Chip specific ChipControl register bits */
+#define CCTRL43228_EXTPA_C0             (1<<14) /* core1 extPA in ChipControl 1, bit 14 */
+#define CCTRL43228_EXTPA_C1             (1<<9)  /* core0 extPA in ChipControl 1, bit 9 */
+
+/* 4328 resources */
+#define RES4328_EXT_SWITCHER_PWM	0	/* 0x00001 */
+#define RES4328_BB_SWITCHER_PWM		1	/* 0x00002 */
+#define RES4328_BB_SWITCHER_BURST	2	/* 0x00004 */
+#define RES4328_BB_EXT_SWITCHER_BURST	3	/* 0x00008 */
+#define RES4328_ILP_REQUEST		4	/* 0x00010 */
+#define RES4328_RADIO_SWITCHER_PWM	5	/* 0x00020 */
+#define RES4328_RADIO_SWITCHER_BURST	6	/* 0x00040 */
+#define RES4328_ROM_SWITCH		7	/* 0x00080 */
+#define RES4328_PA_REF_LDO		8	/* 0x00100 */
+#define RES4328_RADIO_LDO		9	/* 0x00200 */
+#define RES4328_AFE_LDO			10	/* 0x00400 */
+#define RES4328_PLL_LDO			11	/* 0x00800 */
+#define RES4328_BG_FILTBYP		12	/* 0x01000 */
+#define RES4328_TX_FILTBYP		13	/* 0x02000 */
+#define RES4328_RX_FILTBYP		14	/* 0x04000 */
+#define RES4328_XTAL_PU			15	/* 0x08000 */
+#define RES4328_XTAL_EN			16	/* 0x10000 */
+#define RES4328_BB_PLL_FILTBYP		17	/* 0x20000 */
+#define RES4328_RF_PLL_FILTBYP		18	/* 0x40000 */
+#define RES4328_BB_PLL_PU		19	/* 0x80000 */
+
+/* 4325 A0/A1 resources */
+#define RES4325_BUCK_BOOST_BURST	0	/* 0x00000001 */
+#define RES4325_CBUCK_BURST		1	/* 0x00000002 */
+#define RES4325_CBUCK_PWM		2	/* 0x00000004 */
+#define RES4325_CLDO_CBUCK_BURST	3	/* 0x00000008 */
+#define RES4325_CLDO_CBUCK_PWM		4	/* 0x00000010 */
+#define RES4325_BUCK_BOOST_PWM		5	/* 0x00000020 */
+#define RES4325_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4325_ABUCK_BURST		7	/* 0x00000080 */
+#define RES4325_ABUCK_PWM		8	/* 0x00000100 */
+#define RES4325_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4325_OTP_PU			10	/* 0x00000400 */
+#define RES4325_LNLDO3_PU		11	/* 0x00000800 */
+#define RES4325_LNLDO4_PU		12	/* 0x00001000 */
+#define RES4325_XTAL_PU			13	/* 0x00002000 */
+#define RES4325_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4325_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4325_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4325_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4325_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4325_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4325_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4325_HT_AVAIL		21	/* 0x00200000 */
+
+/* 4325 B0/C0 resources */
+#define RES4325B0_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4325B0_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4325B0_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4325B0_CLDO_PU		4	/* 0x00000010 */
+
+/* 4325 C1 resources */
+#define RES4325C1_LNLDO2_PU		12	/* 0x00001000 */
+
+/* 4325 chip-specific ChipStatus register bits */
+#define CST4325_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4325_DEFCIS_SEL		0	/* OTP is powered up, use def. CIS, no SPROM */
+#define CST4325_SPROM_SEL		1	/* OTP is powered up, SPROM is present */
+#define CST4325_OTP_SEL			2	/* OTP is powered up, no SPROM */
+#define CST4325_OTP_PWRDN		3	/* OTP is powered down, SPROM is present */
+#define CST4325_SDIO_USB_MODE_MASK	0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT	2
+#define CST4325_RCAL_VALID_MASK		0x00000008
+#define CST4325_RCAL_VALID_SHIFT	3
+#define CST4325_RCAL_VALUE_MASK		0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT	4
+#define CST4325_PMUTOP_2B_MASK 		0x00000200	/* 1 for 2b, 0 for 2a */
+#define CST4325_PMUTOP_2B_SHIFT   	9
+
+#define RES4329_RESERVED0		0	/* 0x00000001 */
+#define RES4329_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4329_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4329_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4329_CLDO_PU			4	/* 0x00000010 */
+#define RES4329_PALDO_PU		5	/* 0x00000020 */
+#define RES4329_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4329_RESERVED7		7	/* 0x00000080 */
+#define RES4329_RESERVED8		8	/* 0x00000100 */
+#define RES4329_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4329_OTP_PU			10	/* 0x00000400 */
+#define RES4329_RESERVED11		11	/* 0x00000800 */
+#define RES4329_LNLDO2_PU		12	/* 0x00001000 */
+#define RES4329_XTAL_PU			13	/* 0x00002000 */
+#define RES4329_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4329_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4329_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4329_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4329_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4329_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4329_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4329_HT_AVAIL		21	/* 0x00200000 */
+
+#define CST4329_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4329_DEFCIS_SEL		0	/* OTP is powered up, use def. CIS, no SPROM */
+#define CST4329_SPROM_SEL		1	/* OTP is powered up, SPROM is present */
+#define CST4329_OTP_SEL			2	/* OTP is powered up, no SPROM */
+#define CST4329_OTP_PWRDN		3	/* OTP is powered down, SPROM is present */
+#define CST4329_SPI_SDIO_MODE_MASK	0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT	2
+
+/* 4312 chip-specific ChipStatus register bits */
+#define CST4312_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4312_DEFCIS_SEL		0	/* OTP is powered up, use def. CIS, no SPROM */
+#define CST4312_SPROM_SEL		1	/* OTP is powered up, SPROM is present */
+#define CST4312_OTP_SEL			2	/* OTP is powered up, no SPROM */
+#define CST4312_OTP_BAD			3	/* OTP is broken, SPROM is present */
+
+/* 4312 resources (all PMU chips with little memory constraint) */
+#define RES4312_SWITCHER_BURST		0	/* 0x00000001 */
+#define RES4312_SWITCHER_PWM    	1	/* 0x00000002 */
+#define RES4312_PA_REF_LDO		2	/* 0x00000004 */
+#define RES4312_CORE_LDO_BURST		3	/* 0x00000008 */
+#define RES4312_CORE_LDO_PWM		4	/* 0x00000010 */
+#define RES4312_RADIO_LDO		5	/* 0x00000020 */
+#define RES4312_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4312_BG_FILTBYP		7	/* 0x00000080 */
+#define RES4312_TX_FILTBYP		8	/* 0x00000100 */
+#define RES4312_RX_FILTBYP		9	/* 0x00000200 */
+#define RES4312_XTAL_PU			10	/* 0x00000400 */
+#define RES4312_ALP_AVAIL		11	/* 0x00000800 */
+#define RES4312_BB_PLL_FILTBYP		12	/* 0x00001000 */
+#define RES4312_RF_PLL_FILTBYP		13	/* 0x00002000 */
+#define RES4312_HT_AVAIL		14	/* 0x00004000 */
+
+/* 4322 resources */
+#define RES4322_RF_LDO			0
+#define RES4322_ILP_REQUEST		1
+#define RES4322_XTAL_PU			2
+#define RES4322_ALP_AVAIL		3
+#define RES4322_SI_PLL_ON		4
+#define RES4322_HT_SI_AVAIL		5
+#define RES4322_PHY_PLL_ON		6
+#define RES4322_HT_PHY_AVAIL		7
+#define RES4322_OTP_PU			8
+
+/* 4322 chip-specific ChipStatus register bits */
+#define CST4322_XTAL_FREQ_20_40MHZ	0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK	0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT	6
+#define CST4322_NO_SPROM_OTP		0	/* no OTP, no SPROM */
+#define CST4322_SPROM_PRESENT		1	/* SPROM is present */
+#define CST4322_OTP_PRESENT		2	/* OTP is present */
+#define CST4322_PCI_OR_USB		0x00000100
+#define CST4322_BOOT_MASK		0x00000600
+#define CST4322_BOOT_SHIFT		9
+#define CST4322_BOOT_FROM_SRAM		0	/* boot from SRAM, ARM in reset */
+#define CST4322_BOOT_FROM_ROM		1	/* boot from ROM */
+#define CST4322_BOOT_FROM_FLASH		2	/* boot from FLASH */
+#define CST4322_BOOT_FROM_INVALID	3
+#define CST4322_ILP_DIV_EN		0x00000800
+#define CST4322_FLASH_TYPE_MASK		0x00001000
+#define CST4322_FLASH_TYPE_SHIFT	12
+#define CST4322_FLASH_TYPE_SHIFT_ST	0	/* ST serial FLASH */
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL	1	/* ATMEL flash */
+#define CST4322_ARM_TAP_SEL		0x00002000
+#define CST4322_RES_INIT_MODE_MASK	0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT	14
+#define CST4322_RES_INIT_MODE_ILPAVAIL	0	/* resinitmode: ILP available */
+#define CST4322_RES_INIT_MODE_ILPREQ	1	/* resinitmode: ILP request */
+#define CST4322_RES_INIT_MODE_ALPAVAIL	2	/* resinitmode: ALP available */
+#define CST4322_RES_INIT_MODE_HTAVAIL	3	/* resinitmode: HT available */
+#define CST4322_PCIPLLCLK_GATING	0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP	0x00020000
+#define CST4322_PCI_CARDBUS_MODE	0x00040000
+
+/* 43224 chip-specific ChipControl register bits */
+#define CCTRL43224_GPIO_TOGGLE          0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */
+#define CCTRL_43224A0_12MA_LED_DRIVE    0x00F000F0 /* 12 mA drive strength */
+#define CCTRL_43224B0_12MA_LED_DRIVE    0xF0    /* 12 mA drive strength for later 43224s */
+
+/* 43236 resources */
+#define RES43236_REGULATOR		0
+#define RES43236_ILP_REQUEST		1
+#define RES43236_XTAL_PU		2
+#define RES43236_ALP_AVAIL		3
+#define RES43236_SI_PLL_ON		4
+#define RES43236_HT_SI_AVAIL		5
+
+/* 43236 chip-specific ChipControl register bits */
+#define CCTRL43236_BT_COEXIST		(1<<0)	/* 0 disable */
+#define CCTRL43236_SECI			(1<<1)	/* 0 SECI is disabled (JTAG functional) */
+#define CCTRL43236_EXT_LNA		(1<<2)	/* 0 disable */
+#define CCTRL43236_ANT_MUX_2o3          (1<<3)	/* 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43236_GSIO			(1<<4)	/* 0 disable */
+
+/* 43236 Chip specific ChipStatus register bits */
+#define CST43236_SFLASH_MASK		0x00000040
+#define CST43236_OTP_SEL_MASK		0x00000080
+#define CST43236_OTP_SEL_SHIFT		7
+#define CST43236_HSIC_MASK		0x00000100	/* USB/HSIC */
+#define CST43236_BP_CLK			0x00000200	/* 120/96MHz backplane clock */
+#define CST43236_BOOT_MASK		0x00001800
+#define CST43236_BOOT_SHIFT		11
+#define CST43236_BOOT_FROM_SRAM		0	/* boot from SRAM, ARM in reset */
+#define CST43236_BOOT_FROM_ROM		1	/* boot from ROM */
+#define CST43236_BOOT_FROM_FLASH	2	/* boot from FLASH */
+#define CST43236_BOOT_FROM_INVALID	3
+
+/* 43237 resources */
+#define RES43237_REGULATOR		0
+#define RES43237_ILP_REQUEST		1
+#define RES43237_XTAL_PU		2
+#define RES43237_ALP_AVAIL		3
+#define RES43237_SI_PLL_ON		4
+#define RES43237_HT_SI_AVAIL		5
+
+/* 43237 chip-specific ChipControl register bits */
+#define CCTRL43237_BT_COEXIST		(1<<0)	/* 0 disable */
+#define CCTRL43237_SECI			(1<<1)	/* 0 SECI is disabled (JTAG functional) */
+#define CCTRL43237_EXT_LNA		(1<<2)	/* 0 disable */
+#define CCTRL43237_ANT_MUX_2o3          (1<<3)	/* 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43237_GSIO			(1<<4)	/* 0 disable */
+
+/* 43237 Chip specific ChipStatus register bits */
+#define CST43237_SFLASH_MASK		0x00000040
+#define CST43237_OTP_SEL_MASK		0x00000080
+#define CST43237_OTP_SEL_SHIFT		7
+#define CST43237_HSIC_MASK		0x00000100	/* USB/HSIC */
+#define CST43237_BP_CLK			0x00000200	/* 120/96MHz backplane clock */
+#define CST43237_BOOT_MASK		0x00001800
+#define CST43237_BOOT_SHIFT		11
+#define CST43237_BOOT_FROM_SRAM		0	/* boot from SRAM, ARM in reset */
+#define CST43237_BOOT_FROM_ROM		1	/* boot from ROM */
+#define CST43237_BOOT_FROM_FLASH	2	/* boot from FLASH */
+#define CST43237_BOOT_FROM_INVALID	3
+
+/* 43239 resources */
+#define RES43239_OTP_PU			9
+#define RES43239_MACPHY_CLKAVAIL	23
+#define RES43239_HT_AVAIL		24
+
+/* 43239 Chip specific ChipStatus register bits */
+#define CST43239_SPROM_MASK			0x00000002
+#define CST43239_SFLASH_MASK		0x00000004
+#define	CST43239_RES_INIT_MODE_SHIFT	7
+#define	CST43239_RES_INIT_MODE_MASK		0x000001f0
+#define CST43239_CHIPMODE_SDIOD(cs)	((cs) & (1 << 15))	/* SDIO || gSPI */
+#define CST43239_CHIPMODE_USB20D(cs)	(~(cs) & (1 << 15))	/* USB || USBDA */
+#define CST43239_CHIPMODE_SDIO(cs)	(((cs) & (1 << 0)) == 0)	/* SDIO */
+#define CST43239_CHIPMODE_GSPI(cs)	(((cs) & (1 << 0)) == (1 << 0))	/* gSPI */
+
+/* 4324 resources */
+/* 43242 use same PMU as 4324 */
+#define RES4324_LPLDO_PU			0
+#define RES4324_RESET_PULLDN_DIS		1
+#define RES4324_PMU_BG_PU			2
+#define RES4324_HSIC_LDO_PU			3
+#define RES4324_CBUCK_LPOM_PU			4
+#define RES4324_CBUCK_PFM_PU			5
+#define RES4324_CLDO_PU				6
+#define RES4324_LPLDO2_LVM			7
+#define RES4324_LNLDO1_PU			8
+#define RES4324_LNLDO2_PU			9
+#define RES4324_LDO3P3_PU			10
+#define RES4324_OTP_PU				11
+#define RES4324_XTAL_PU				12
+#define RES4324_BBPLL_PU			13
+#define RES4324_LQ_AVAIL			14
+#define RES4324_WL_CORE_READY			17
+#define RES4324_ILP_REQ				18
+#define RES4324_ALP_AVAIL			19
+#define RES4324_PALDO_PU			20
+#define RES4324_RADIO_PU			21
+#define RES4324_SR_CLK_STABLE			22
+#define RES4324_SR_SAVE_RESTORE			23
+#define RES4324_SR_PHY_PWRSW			24
+#define RES4324_SR_PHY_PIC			25
+#define RES4324_SR_SUBCORE_PWRSW		26
+#define RES4324_SR_SUBCORE_PIC			27
+#define RES4324_SR_MEM_PM0			28
+#define RES4324_HT_AVAIL			29
+#define RES4324_MACPHY_CLKAVAIL			30
+
+/* 4324 Chip specific ChipStatus register bits */
+#define CST4324_SPROM_MASK			0x00000080
+#define CST4324_SFLASH_MASK			0x00400000
+#define	CST4324_RES_INIT_MODE_SHIFT	10
+#define	CST4324_RES_INIT_MODE_MASK	0x00000c00
+#define CST4324_CHIPMODE_MASK		0x7
+#define CST4324_CHIPMODE_SDIOD(cs)	((~(cs)) & (1 << 2))	/* SDIO || gSPI */
+#define CST4324_CHIPMODE_USB20D(cs)	(((cs) & CST4324_CHIPMODE_MASK) == 0x6)	/* USB || USBDA */
+
+/* 43242 Chip specific ChipStatus register bits */
+#define CST43242_SFLASH_MASK                    0x00000008
+#define CST43242_SR_HALT			(1<<25)
+#define CST43242_SR_CHIP_STATUS_2		27 /* bit 27 */
+
+/* 4331 resources */
+#define RES4331_REGULATOR		0
+#define RES4331_ILP_REQUEST		1
+#define RES4331_XTAL_PU			2
+#define RES4331_ALP_AVAIL		3
+#define RES4331_SI_PLL_ON		4
+#define RES4331_HT_SI_AVAIL		5
+
+/* 4331 chip-specific ChipControl register bits */
+#define CCTRL4331_BT_COEXIST		(1<<0)	/* 0 disable */
+#define CCTRL4331_SECI			(1<<1)	/* 0 SECI is disabled (JTAG functional) */
+#define CCTRL4331_EXT_LNA_G		(1<<2)	/* 0 disable */
+#define CCTRL4331_SPROM_GPIO13_15       (1<<3)  /* sprom/gpio13-15 mux */
+#define CCTRL4331_EXTPA_EN		(1<<4)	/* 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_GPIOCLK_ON_SPROMCS	(1<<5)	/* set drive out GPIO_CLK on sprom_cs pin */
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS	(1<<6)	/* use sprom_cs pin as PCIE mdio interface */
+#define CCTRL4331_EXTPA_ON_GPIO2_5	(1<<7)	/* aband extpa will be at gpio2/5 and sprom_dout */
+#define CCTRL4331_OVR_PIPEAUXCLKEN	(1<<8)	/* override core control on pipe_AuxClkEnable */
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN	(1<<9)	/* override core control on pipe_AuxPowerDown */
+#define CCTRL4331_PCIE_AUXCLKEN		(1<<10)	/* pcie_auxclkenable */
+#define CCTRL4331_PCIE_PIPE_PLLDOWN	(1<<11)	/* pcie_pipe_pllpowerdown */
+#define CCTRL4331_EXTPA_EN2		(1<<12)	/* 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_EXT_LNA_A		(1<<13)	/* 0 disable */
+#define CCTRL4331_BT_SHD0_ON_GPIO4	(1<<16)	/* enable bt_shd0 at gpio4 */
+#define CCTRL4331_BT_SHD1_ON_GPIO5	(1<<17)	/* enable bt_shd1 at gpio5 */
+#define CCTRL4331_EXTPA_ANA_EN		(1<<24)	/* 0 ext pa disable, 1 ext pa enabled */
+
+/* 4331 Chip specific ChipStatus register bits */
+#define	CST4331_XTAL_FREQ		0x00000001	/* crystal frequency 20/40MHz */
+#define	CST4331_SPROM_OTP_SEL_MASK	0x00000006
+#define	CST4331_SPROM_OTP_SEL_SHIFT	1
+#define	CST4331_SPROM_PRESENT		0x00000002
+#define	CST4331_OTP_PRESENT		0x00000004
+#define	CST4331_LDO_RF			0x00000008
+#define	CST4331_LDO_PAR			0x00000010
+
+/* 4315 resource */
+#define RES4315_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4315_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4315_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4315_CLDO_PU			4	/* 0x00000010 */
+#define RES4315_PALDO_PU		5	/* 0x00000020 */
+#define RES4315_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4315_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4315_OTP_PU			10	/* 0x00000400 */
+#define RES4315_LNLDO2_PU		12	/* 0x00001000 */
+#define RES4315_XTAL_PU			13	/* 0x00002000 */
+#define RES4315_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4315_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4315_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4315_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4315_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4315_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4315_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4315_HT_AVAIL		21	/* 0x00200000 */
+
+/* 4315 chip-specific ChipStatus register bits */
+#define CST4315_SPROM_OTP_SEL_MASK	0x00000003	/* gpio [7:6], SDIO CIS selection */
+#define CST4315_DEFCIS_SEL		0x00000000	/* use default CIS, OTP is powered up */
+#define CST4315_SPROM_SEL		0x00000001	/* use SPROM, OTP is powered up */
+#define CST4315_OTP_SEL			0x00000002	/* use OTP, OTP is powered up */
+#define CST4315_OTP_PWRDN		0x00000003	/* use SPROM, OTP is powered down */
+#define CST4315_SDIO_MODE		0x00000004	/* gpio [8], sdio/usb mode */
+#define CST4315_RCAL_VALID		0x00000008
+#define CST4315_RCAL_VALUE_MASK		0x000001f0
+#define CST4315_RCAL_VALUE_SHIFT	4
+#define CST4315_PALDO_EXTPNP		0x00000200	/* PALDO is configured with external PNP */
+#define CST4315_CBUCK_MODE_MASK		0x00000c00
+#define CST4315_CBUCK_MODE_BURST	0x00000400
+#define CST4315_CBUCK_MODE_LPBURST	0x00000c00
+
+/* 4319 resources */
+#define RES4319_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4319_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4319_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4319_CLDO_PU			4	/* 0x00000010 */
+#define RES4319_PALDO_PU		5	/* 0x00000020 */
+#define RES4319_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4319_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4319_OTP_PU			10	/* 0x00000400 */
+#define RES4319_LNLDO2_PU		12	/* 0x00001000 */
+#define RES4319_XTAL_PU			13	/* 0x00002000 */
+#define RES4319_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4319_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4319_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4319_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4319_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4319_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4319_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4319_HT_AVAIL		21	/* 0x00200000 */
+
+/* 4319 chip-specific ChipStatus register bits */
+#define	CST4319_SPI_CPULESSUSB		0x00000001
+#define	CST4319_SPI_CLK_POL		0x00000002
+#define	CST4319_SPI_CLK_PH		0x00000008
+#define	CST4319_SPROM_OTP_SEL_MASK	0x000000c0	/* gpio [7:6], SDIO CIS selection */
+#define	CST4319_SPROM_OTP_SEL_SHIFT	6
+#define	CST4319_DEFCIS_SEL		0x00000000	/* use default CIS, OTP is powered up */
+#define	CST4319_SPROM_SEL		0x00000040	/* use SPROM, OTP is powered up */
+#define	CST4319_OTP_SEL			0x00000080      /* use OTP, OTP is powered up */
+#define	CST4319_OTP_PWRDN		0x000000c0      /* use SPROM, OTP is powered down */
+#define	CST4319_SDIO_USB_MODE		0x00000100	/* gpio [8], sdio/usb mode */
+#define	CST4319_REMAP_SEL_MASK		0x00000600
+#define	CST4319_ILPDIV_EN		0x00000800
+#define	CST4319_XTAL_PD_POL		0x00001000
+#define	CST4319_LPO_SEL			0x00002000
+#define	CST4319_RES_INIT_MODE		0x0000c000
+#define	CST4319_PALDO_EXTPNP		0x00010000	/* PALDO is configured with external PNP */
+#define	CST4319_CBUCK_MODE_MASK		0x00060000
+#define CST4319_CBUCK_MODE_BURST	0x00020000
+#define CST4319_CBUCK_MODE_LPBURST	0x00060000
+#define	CST4319_RCAL_VALID		0x01000000
+#define	CST4319_RCAL_VALUE_MASK		0x3e000000
+#define	CST4319_RCAL_VALUE_SHIFT	25
+
+#define PMU1_PLL0_CHIPCTL0		0
+#define PMU1_PLL0_CHIPCTL1		1
+#define PMU1_PLL0_CHIPCTL2		2
+#define CCTL_4319USB_XTAL_SEL_MASK	0x00180000
+#define CCTL_4319USB_XTAL_SEL_SHIFT	19
+#define CCTL_4319USB_48MHZ_PLL_SEL	1
+#define CCTL_4319USB_24MHZ_PLL_SEL	2
+
+/* PMU resources for 4336 */
+#define	RES4336_CBUCK_LPOM		0
+#define	RES4336_CBUCK_BURST		1
+#define	RES4336_CBUCK_LP_PWM		2
+#define	RES4336_CBUCK_PWM		3
+#define	RES4336_CLDO_PU			4
+#define	RES4336_DIS_INT_RESET_PD	5
+#define	RES4336_ILP_REQUEST		6
+#define	RES4336_LNLDO_PU		7
+#define	RES4336_LDO3P3_PU		8
+#define	RES4336_OTP_PU			9
+#define	RES4336_XTAL_PU			10
+#define	RES4336_ALP_AVAIL		11
+#define	RES4336_RADIO_PU		12
+#define	RES4336_BG_PU			13
+#define	RES4336_VREG1p4_PU_PU		14
+#define	RES4336_AFE_PWRSW_PU		15
+#define	RES4336_RX_PWRSW_PU		16
+#define	RES4336_TX_PWRSW_PU		17
+#define	RES4336_BB_PWRSW_PU		18
+#define	RES4336_SYNTH_PWRSW_PU		19
+#define	RES4336_MISC_PWRSW_PU		20
+#define	RES4336_LOGEN_PWRSW_PU		21
+#define	RES4336_BBPLL_PWRSW_PU		22
+#define	RES4336_MACPHY_CLKAVAIL		23
+#define	RES4336_HT_AVAIL		24
+#define	RES4336_RSVD			25
+
+/* 4336 chip-specific ChipStatus register bits */
+#define	CST4336_SPI_MODE_MASK		0x00000001
+#define	CST4336_SPROM_PRESENT		0x00000002
+#define	CST4336_OTP_PRESENT		0x00000004
+#define	CST4336_ARMREMAP_0		0x00000008
+#define	CST4336_ILPDIV_EN_MASK		0x00000010
+#define	CST4336_ILPDIV_EN_SHIFT		4
+#define	CST4336_XTAL_PD_POL_MASK	0x00000020
+#define	CST4336_XTAL_PD_POL_SHIFT	5
+#define	CST4336_LPO_SEL_MASK		0x00000040
+#define	CST4336_LPO_SEL_SHIFT		6
+#define	CST4336_RES_INIT_MODE_MASK	0x00000180
+#define	CST4336_RES_INIT_MODE_SHIFT	7
+#define	CST4336_CBUCK_MODE_MASK		0x00000600
+#define	CST4336_CBUCK_MODE_SHIFT	9
+
+/* 4336 Chip specific PMU ChipControl register bits */
+#define PCTL_4336_SERIAL_ENAB	(1  << 24)
+
+/* 4330 resources */
+#define	RES4330_CBUCK_LPOM		0
+#define	RES4330_CBUCK_BURST		1
+#define	RES4330_CBUCK_LP_PWM		2
+#define	RES4330_CBUCK_PWM		3
+#define	RES4330_CLDO_PU			4
+#define	RES4330_DIS_INT_RESET_PD	5
+#define	RES4330_ILP_REQUEST		6
+#define	RES4330_LNLDO_PU		7
+#define	RES4330_LDO3P3_PU		8
+#define	RES4330_OTP_PU			9
+#define	RES4330_XTAL_PU			10
+#define	RES4330_ALP_AVAIL		11
+#define	RES4330_RADIO_PU		12
+#define	RES4330_BG_PU			13
+#define	RES4330_VREG1p4_PU_PU		14
+#define	RES4330_AFE_PWRSW_PU		15
+#define	RES4330_RX_PWRSW_PU		16
+#define	RES4330_TX_PWRSW_PU		17
+#define	RES4330_BB_PWRSW_PU		18
+#define	RES4330_SYNTH_PWRSW_PU		19
+#define	RES4330_MISC_PWRSW_PU		20
+#define	RES4330_LOGEN_PWRSW_PU		21
+#define	RES4330_BBPLL_PWRSW_PU		22
+#define	RES4330_MACPHY_CLKAVAIL		23
+#define	RES4330_HT_AVAIL		24
+#define	RES4330_5gRX_PWRSW_PU		25
+#define	RES4330_5gTX_PWRSW_PU		26
+#define	RES4330_5g_LOGEN_PWRSW_PU	27
+
+/* 4330 chip-specific ChipStatus register bits */
+#define CST4330_CHIPMODE_SDIOD(cs)	(((cs) & 0x7) < 6)	/* SDIO || gSPI */
+#define CST4330_CHIPMODE_USB20D(cs)	(((cs) & 0x7) >= 6)	/* USB || USBDA */
+#define CST4330_CHIPMODE_SDIO(cs)	(((cs) & 0x4) == 0)	/* SDIO */
+#define CST4330_CHIPMODE_GSPI(cs)	(((cs) & 0x6) == 4)	/* gSPI */
+#define CST4330_CHIPMODE_USB(cs)	(((cs) & 0x7) == 6)	/* USB packet-oriented */
+#define CST4330_CHIPMODE_USBDA(cs)	(((cs) & 0x7) == 7)	/* USB Direct Access */
+#define	CST4330_OTP_PRESENT		0x00000010
+#define	CST4330_LPO_AUTODET_EN		0x00000020
+#define	CST4330_ARMREMAP_0		0x00000040
+#define	CST4330_SPROM_PRESENT		0x00000080	/* takes priority over OTP if both set */
+#define	CST4330_ILPDIV_EN		0x00000100
+#define	CST4330_LPO_SEL			0x00000200
+#define	CST4330_RES_INIT_MODE_SHIFT	10
+#define	CST4330_RES_INIT_MODE_MASK	0x00000c00
+#define CST4330_CBUCK_MODE_SHIFT	12
+#define CST4330_CBUCK_MODE_MASK		0x00003000
+#define	CST4330_CBUCK_POWER_OK		0x00004000
+#define	CST4330_BB_PLL_LOCKED		0x00008000
+#define SOCDEVRAM_BP_ADDR		0x1E000000
+#define SOCDEVRAM_ARM_ADDR		0x00800000
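For illustration, a sketch of classifying the 4330 host interface from a chipstatus value read elsewhere from chipcommon; the enum is hypothetical and only for demonstration:

enum cc4330_iface { CC4330_IF_SDIO, CC4330_IF_GSPI, CC4330_IF_USB,
		    CC4330_IF_USBDA, CC4330_IF_UNKNOWN };

static inline enum cc4330_iface cc4330_iface_from_chipstatus(unsigned int cs)
{
	if (CST4330_CHIPMODE_SDIO(cs))
		return CC4330_IF_SDIO;
	if (CST4330_CHIPMODE_GSPI(cs))
		return CC4330_IF_GSPI;
	if (CST4330_CHIPMODE_USB(cs))
		return CC4330_IF_USB;
	if (CST4330_CHIPMODE_USBDA(cs))
		return CC4330_IF_USBDA;
	return CC4330_IF_UNKNOWN;
}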
+
+/* 4330 Chip specific PMU ChipControl register bits */
+#define PCTL_4330_SERIAL_ENAB	(1  << 24)
+
+/* 4330 Chip specific ChipControl register bits */
+#define CCTRL_4330_GPIO_SEL		0x00000001    /* 1=select GPIOs to be muxed out */
+#define CCTRL_4330_ERCX_SEL		0x00000002    /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL_4330_SDIO_HOST_WAKE	0x00000004    /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL_4330_JTAG_DISABLE	0x00000008    /* 1=disable JTAG interface on mux'd pins */
+
+#define PMU_VREG0_ADDR				0
+#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT	2
+#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT	3
+
+#define PMU_VREG4_ADDR			4
+
+#define PMU_VREG4_CLDO_PWM_SHIFT	4
+#define PMU_VREG4_CLDO_PWM_MASK		0x7
+
+#define PMU_VREG4_LPLDO1_SHIFT		15
+#define PMU_VREG4_LPLDO1_MASK		0x7
+#define PMU_VREG4_LPLDO1_1p20V		0
+#define PMU_VREG4_LPLDO1_1p15V		1
+#define PMU_VREG4_LPLDO1_1p10V		2
+#define PMU_VREG4_LPLDO1_1p25V		3
+#define PMU_VREG4_LPLDO1_1p05V		4
+#define PMU_VREG4_LPLDO1_1p00V		5
+#define PMU_VREG4_LPLDO1_0p95V		6
+#define PMU_VREG4_LPLDO1_0p90V		7
+
+/* 4350/4345 VREG4 settings */
+#define PMU4350_VREG4_LPLDO1_1p10V	0
+#define PMU4350_VREG4_LPLDO1_1p15V	1
+#define PMU4350_VREG4_LPLDO1_1p21V	2
+#define PMU4350_VREG4_LPLDO1_1p24V	3
+#define PMU4350_VREG4_LPLDO1_0p90V	4
+#define PMU4350_VREG4_LPLDO1_0p96V	5
+#define PMU4350_VREG4_LPLDO1_1p01V	6
+#define PMU4350_VREG4_LPLDO1_1p04V	7
+
+#define PMU_VREG4_LPLDO2_LVM_SHIFT	18
+#define PMU_VREG4_LPLDO2_LVM_MASK	0x7
+#define PMU_VREG4_LPLDO2_HVM_SHIFT	21
+#define PMU_VREG4_LPLDO2_HVM_MASK	0x7
+#define PMU_VREG4_LPLDO2_LVM_HVM_MASK	0x3f
+#define PMU_VREG4_LPLDO2_1p00V		0
+#define PMU_VREG4_LPLDO2_1p15V		1
+#define PMU_VREG4_LPLDO2_1p20V		2
+#define PMU_VREG4_LPLDO2_1p10V		3
+#define PMU_VREG4_LPLDO2_0p90V		4	/* 4 - 7 is 0.90V */
+
+#define PMU_VREG4_HSICLDO_BYPASS_SHIFT	27
+#define PMU_VREG4_HSICLDO_BYPASS_MASK	0x1
+
+#define PMU_VREG5_ADDR			5
+#define PMU_VREG5_HSICAVDD_PD_SHIFT	6
+#define PMU_VREG5_HSICAVDD_PD_MASK	0x1
+#define PMU_VREG5_HSICDVDD_PD_SHIFT	11
+#define PMU_VREG5_HSICDVDD_PD_MASK	0x1
+
+/* 4334 resources */
+#define RES4334_LPLDO_PU		0
+#define RES4334_RESET_PULLDN_DIS	1
+#define RES4334_PMU_BG_PU		2
+#define RES4334_HSIC_LDO_PU		3
+#define RES4334_CBUCK_LPOM_PU		4
+#define RES4334_CBUCK_PFM_PU		5
+#define RES4334_CLDO_PU			6
+#define RES4334_LPLDO2_LVM		7
+#define RES4334_LNLDO_PU		8
+#define RES4334_LDO3P3_PU		9
+#define RES4334_OTP_PU			10
+#define RES4334_XTAL_PU			11
+#define RES4334_WL_PWRSW_PU		12
+#define RES4334_LQ_AVAIL		13
+#define RES4334_LOGIC_RET		14
+#define RES4334_MEM_SLEEP		15
+#define RES4334_MACPHY_RET		16
+#define RES4334_WL_CORE_READY		17
+#define RES4334_ILP_REQ			18
+#define RES4334_ALP_AVAIL		19
+#define RES4334_MISC_PWRSW_PU		20
+#define RES4334_SYNTH_PWRSW_PU		21
+#define RES4334_RX_PWRSW_PU		22
+#define RES4334_RADIO_PU		23
+#define RES4334_WL_PMU_PU		24
+#define RES4334_VCO_LDO_PU		25
+#define RES4334_AFE_LDO_PU		26
+#define RES4334_RX_LDO_PU		27
+#define RES4334_TX_LDO_PU		28
+#define RES4334_HT_AVAIL		29
+#define RES4334_MACPHY_CLK_AVAIL	30
+
+/* 4334 chip-specific ChipStatus register bits */
+#define CST4334_CHIPMODE_MASK		7
+#define CST4334_SDIO_MODE		0x00000000
+#define CST4334_SPI_MODE		0x00000004
+#define CST4334_HSIC_MODE		0x00000006
+#define CST4334_BLUSB_MODE		0x00000007
+#define CST4334_CHIPMODE_HSIC(cs)	(((cs) & CST4334_CHIPMODE_MASK) == CST4334_HSIC_MODE)
+#define CST4334_OTP_PRESENT		0x00000010
+#define CST4334_LPO_AUTODET_EN		0x00000020
+#define CST4334_ARMREMAP_0		0x00000040
+#define CST4334_SPROM_PRESENT		0x00000080
+#define CST4334_ILPDIV_EN_MASK		0x00000100
+#define CST4334_ILPDIV_EN_SHIFT		8
+#define CST4334_LPO_SEL_MASK		0x00000200
+#define CST4334_LPO_SEL_SHIFT		9
+#define CST4334_RES_INIT_MODE_MASK	0x00000C00
+#define CST4334_RES_INIT_MODE_SHIFT	10
+
+/* 4334 Chip specific PMU ChipControl register bits */
+#define PCTL_4334_GPIO3_ENAB    (1  << 3)
+
+/* 4334 Chip control */
+#define CCTRL4334_PMU_WAKEUP_GPIO1	(1  << 0)
+#define CCTRL4334_PMU_WAKEUP_HSIC	(1  << 1)
+#define CCTRL4334_PMU_WAKEUP_AOS	(1  << 2)
+#define CCTRL4334_HSIC_WAKE_MODE	(1  << 3)
+#define CCTRL4334_HSIC_INBAND_GPIO1	(1  << 4)
+#define CCTRL4334_HSIC_LDO_PU		(1  << 23)
+
+/* 4334 Chip control 3 */
+#define CCTRL4334_BLOCK_EXTRNL_WAKE		(1  << 4)
+#define CCTRL4334_SAVERESTORE_FIX		(1  << 5)
+
+/* 43341 Chip control 3 */
+#define CCTRL43341_BLOCK_EXTRNL_WAKE		(1  << 13)
+#define CCTRL43341_SAVERESTORE_FIX		(1  << 14)
+#define CCTRL43341_BT_ISO_SEL			(1  << 16)
+
+/* 4334 Chip specific ChipControl1 register bits */
+#define CCTRL1_4334_GPIO_SEL		(1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4334_ERCX_SEL		(1 << 1)    /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL1_4334_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL1_4334_JTAG_DISABLE	(1 << 3)    /* 1=disable JTAG interface on mux'd pins */
+#define CCTRL1_4334_UART_ON_4_5	(1 << 28)  	/* 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */
+
+/* 4324 Chip specific ChipControl1 register bits */
+#define CCTRL1_4324_GPIO_SEL            (1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+
+/* 43143 chip-specific ChipStatus register bits based on Confluence documentation */
+/* register contains strap values sampled during POR */
+#define CST43143_REMAP_TO_ROM	 (3 << 0)    /* 00=Boot SRAM, 01=Boot ROM, 10=Boot SFLASH */
+#define CST43143_SDIO_EN	 (1 << 2)    /* 0 = USB Enab, SDIO pins are GPIO or I2S */
+#define CST43143_SDIO_ISO	 (1 << 3)    /* 1 = SDIO isolated */
+#define CST43143_USB_CPU_LESS	 (1 << 4)   /* 1 = CPULess mode Enabled */
+#define CST43143_CBUCK_MODE	 (3 << 6)   /* Indicates what controller mode CBUCK is in */
+#define CST43143_POK_CBUCK	 (1 << 8)   /* 1 = 1.2V CBUCK voltage ready */
+#define CST43143_PMU_OVRSPIKE	 (1 << 9)
+#define CST43143_PMU_OVRTEMP	 (0xF << 10)
+#define CST43143_SR_FLL_CAL_DONE (1 << 14)
+#define CST43143_USB_PLL_LOCKDET (1 << 15)
+#define CST43143_PMU_PLL_LOCKDET (1 << 16)
+#define CST43143_CHIPMODE_SDIOD(cs)	(((cs) & CST43143_SDIO_EN) != 0) /* SDIO */
+
+/* 43143 Chip specific ChipControl register bits */
+/* 00: SECI is disabled (JTAG functional), 01: 2 wire, 10: 4 wire */
+#define CCTRL_43143_SECI		(1<<0)
+#define CCTRL_43143_BT_LEGACY		(1<<1)
+#define CCTRL_43143_I2S_MODE		(1<<2)	/* 0: SDIO enabled */
+#define CCTRL_43143_I2S_MASTER		(1<<3)	/* 0: I2S MCLK input disabled */
+#define CCTRL_43143_I2S_FULL		(1<<4)	/* 0: I2S SDIN and SPDIF_TX inputs disabled */
+#define CCTRL_43143_GSIO		(1<<5)	/* 0: sFlash enabled */
+#define CCTRL_43143_RF_SWCTRL_MASK	(7<<6)	/* 0: disabled */
+#define CCTRL_43143_RF_SWCTRL_0		(1<<6)
+#define CCTRL_43143_RF_SWCTRL_1		(2<<6)
+#define CCTRL_43143_RF_SWCTRL_2		(4<<6)
+#define CCTRL_43143_RF_XSWCTRL		(1<<9)	/* 0: UART enabled */
+#define CCTRL_43143_HOST_WAKE0		(1<<11)	/* 1: SDIO separate interrupt output from GPIO4 */
+#define CCTRL_43143_HOST_WAKE1		(1<<12)	/* 1: SDIO separate interrupt output from GPIO16 */
+
+/* 43143 resources, based on pmu_params.xls V1.19 */
+#define RES43143_EXT_SWITCHER_PWM	0	/* 0x00001 */
+#define RES43143_XTAL_PU		1	/* 0x00002 */
+#define RES43143_ILP_REQUEST		2	/* 0x00004 */
+#define RES43143_ALP_AVAIL		3	/* 0x00008 */
+#define RES43143_WL_CORE_READY		4	/* 0x00010 */
+#define RES43143_BBPLL_PWRSW_PU		5	/* 0x00020 */
+#define RES43143_HT_AVAIL		6	/* 0x00040 */
+#define RES43143_RADIO_PU		7	/* 0x00080 */
+#define RES43143_MACPHY_CLK_AVAIL	8	/* 0x00100 */
+#define RES43143_OTP_PU			9	/* 0x00200 */
+#define RES43143_LQ_AVAIL		10	/* 0x00400 */
+
+#define PMU43143_XTAL_CORE_SIZE_MASK	0x3F
+
+/* 4313 resources */
+#define	RES4313_BB_PU_RSRC		0
+#define	RES4313_ILP_REQ_RSRC		1
+#define	RES4313_XTAL_PU_RSRC		2
+#define	RES4313_ALP_AVAIL_RSRC		3
+#define	RES4313_RADIO_PU_RSRC		4
+#define	RES4313_BG_PU_RSRC		5
+#define	RES4313_VREG1P4_PU_RSRC		6
+#define	RES4313_AFE_PWRSW_RSRC		7
+#define	RES4313_RX_PWRSW_RSRC		8
+#define	RES4313_TX_PWRSW_RSRC		9
+#define	RES4313_BB_PWRSW_RSRC		10
+#define	RES4313_SYNTH_PWRSW_RSRC	11
+#define	RES4313_MISC_PWRSW_RSRC		12
+#define	RES4313_BB_PLL_PWRSW_RSRC	13
+#define	RES4313_HT_AVAIL_RSRC		14
+#define	RES4313_MACPHY_CLK_AVAIL_RSRC	15
+
+/* 4313 chip-specific ChipStatus register bits */
+#define	CST4313_SPROM_PRESENT			1
+#define	CST4313_OTP_PRESENT			2
+#define	CST4313_SPROM_OTP_SEL_MASK		0x00000002
+#define	CST4313_SPROM_OTP_SEL_SHIFT		0
+
+/* 4313 Chip specific ChipControl register bits */
+#define CCTRL_4313_12MA_LED_DRIVE    0x00000007    /* 12 mA drive strength for later 4313 */
+
+/* PMU resources for 4314 */
+#define RES4314_LPLDO_PU		0
+#define RES4314_PMU_SLEEP_DIS		1
+#define RES4314_PMU_BG_PU		2
+#define RES4314_CBUCK_LPOM_PU		3
+#define RES4314_CBUCK_PFM_PU		4
+#define RES4314_CLDO_PU			5
+#define RES4314_LPLDO2_LVM		6
+#define RES4314_WL_PMU_PU		7
+#define RES4314_LNLDO_PU		8
+#define RES4314_LDO3P3_PU		9
+#define RES4314_OTP_PU			10
+#define RES4314_XTAL_PU			11
+#define RES4314_WL_PWRSW_PU		12
+#define RES4314_LQ_AVAIL		13
+#define RES4314_LOGIC_RET		14
+#define RES4314_MEM_SLEEP		15
+#define RES4314_MACPHY_RET		16
+#define RES4314_WL_CORE_READY		17
+#define RES4314_ILP_REQ			18
+#define RES4314_ALP_AVAIL		19
+#define RES4314_MISC_PWRSW_PU		20
+#define RES4314_SYNTH_PWRSW_PU		21
+#define RES4314_RX_PWRSW_PU		22
+#define RES4314_RADIO_PU		23
+#define RES4314_VCO_LDO_PU		24
+#define RES4314_AFE_LDO_PU		25
+#define RES4314_RX_LDO_PU		26
+#define RES4314_TX_LDO_PU		27
+#define RES4314_HT_AVAIL		28
+#define RES4314_MACPHY_CLK_AVAIL	29
+
+/* 4314 chip-specific ChipStatus register bits */
+#define CST4314_OTP_ENABLED		0x00200000
+
+/* 43228 resources */
+#define RES43228_NOT_USED		0
+#define RES43228_ILP_REQUEST		1
+#define RES43228_XTAL_PU		2
+#define RES43228_ALP_AVAIL		3
+#define RES43228_PLL_EN			4
+#define RES43228_HT_PHY_AVAIL		5
+
+/* 43228 chipstatus  reg bits */
+#define CST43228_ILP_DIV_EN		0x1
+#define	CST43228_OTP_PRESENT		0x2
+#define	CST43228_SERDES_REFCLK_PADSEL	0x4
+#define	CST43228_SDIO_MODE		0x8
+#define	CST43228_SDIO_OTP_PRESENT	0x10
+#define	CST43228_SDIO_RESET		0x20
+
+/* 4706 chipstatus reg bits */
+#define	CST4706_PKG_OPTION		(1<<0) /* 0: full-featured package 1: low-cost package */
+#define	CST4706_SFLASH_PRESENT	(1<<1) /* 0: parallel, 1: serial flash is present */
+#define	CST4706_SFLASH_TYPE		(1<<2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmel-s flash */
+#define	CST4706_MIPS_BENDIAN	(1<<3) /* 0: little,  1: big endian */
+#define	CST4706_PCIE1_DISABLE	(1<<5) /* PCIE1 enable strap pin */
+
+/* 4706 flashstrconfig reg bits */
+#define FLSTRCF4706_MASK		0x000000ff
+#define FLSTRCF4706_SF1			0x00000001	/* 2nd serial flash present */
+#define FLSTRCF4706_PF1			0x00000002	/* 2nd parallel flash present */
+#define FLSTRCF4706_SF1_TYPE	0x00000004	/* 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define FLSTRCF4706_NF1			0x00000008	/* 2nd NAND flash present */
+#define FLSTRCF4706_1ST_MADDR_SEG_MASK		0x000000f0	/* Valid value mask */
+#define FLSTRCF4706_1ST_MADDR_SEG_4MB		0x00000010	/* 4MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_8MB		0x00000020	/* 8MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_16MB		0x00000030	/* 16MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_32MB		0x00000040	/* 32MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_64MB		0x00000050	/* 64MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_128MB		0x00000060	/* 128MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_256MB		0x00000070	/* 256MB */
+
+/* 4360 Chip specific ChipControl register bits */
+#define CCTRL4360_I2C_MODE			(1 << 0)
+#define CCTRL4360_UART_MODE			(1 << 1)
+#define CCTRL4360_SECI_MODE			(1 << 2)
+#define CCTRL4360_BTSWCTRL_MODE			(1 << 3)
+#define CCTRL4360_DISCRETE_FEMCTRL_MODE		(1 << 4)
+#define CCTRL4360_DIGITAL_PACTRL_MODE		(1 << 5)
+#define CCTRL4360_BTSWCTRL_AND_DIGPA_PRESENT	(1 << 6)
+#define CCTRL4360_EXTRA_GPIO_MODE		(1 << 7)
+#define CCTRL4360_EXTRA_FEMCTRL_MODE		(1 << 8)
+#define CCTRL4360_BT_LGCY_MODE			(1 << 9)
+#define CCTRL4360_CORE2FEMCTRL4_ON		(1 << 21)
+#define CCTRL4360_SECI_ON_GPIO01		(1 << 24)
+
+/* 4360 Chip specific Regulator Control register bits */
+#define RCTRL4360_RFLDO_PWR_DOWN		(1 << 1)
+
+/* 4360 PMU resources and chip status bits */
+#define RES4360_REGULATOR          0
+#define RES4360_ILP_AVAIL          1
+#define RES4360_ILP_REQ            2
+#define RES4360_XTAL_LDO_PU        3
+#define RES4360_XTAL_PU            4
+#define RES4360_ALP_AVAIL          5
+#define RES4360_BBPLLPWRSW_PU      6
+#define RES4360_HT_AVAIL           7
+#define RES4360_OTP_PU             8
+
+#define CST4360_XTAL_40MZ                  0x00000001
+#define CST4360_SFLASH                     0x00000002
+#define CST4360_SPROM_PRESENT              0x00000004
+#define CST4360_SFLASH_TYPE                0x00000004
+#define CST4360_OTP_ENABLED                0x00000008
+#define CST4360_REMAP_ROM                  0x00000010
+#define CST4360_RSRC_INIT_MODE_MASK        0x00000060
+#define CST4360_RSRC_INIT_MODE_SHIFT       5
+#define CST4360_ILP_DIVEN                  0x00000080
+#define CST4360_MODE_USB                   0x00000100
+#define CST4360_SPROM_SIZE_MASK            0x00000600
+#define CST4360_SPROM_SIZE_SHIFT           9
+#define CST4360_BBPLL_LOCK                 0x00000800
+#define CST4360_AVBBPLL_LOCK               0x00001000
+#define CST4360_USBBBPLL_LOCK              0x00002000
+#define CST4360_RSRC_INIT_MODE(cs)	((cs & CST4360_RSRC_INIT_MODE_MASK) >> \
+					CST4360_RSRC_INIT_MODE_SHIFT)
+
+#define CCTRL_4360_UART_SEL	0x2
+
+
+/* 43602 PMU resources based on pmu_params.xls version v0.95 */
+#define RES43602_LPLDO_PU		0
+#define RES43602_REGULATOR		1
+#define RES43602_PMU_SLEEP		2
+#define RES43602_RSVD_3			3
+#define RES43602_XTALLDO_PU		4
+#define RES43602_SERDES_PU		5
+#define RES43602_BBPLL_PWRSW_PU		6
+#define RES43602_SR_CLK_START		7
+#define RES43602_SR_PHY_PWRSW		8
+#define RES43602_SR_SUBCORE_PWRSW	9
+#define RES43602_XTAL_PU		10
+#define	RES43602_PERST_OVR		11
+#define RES43602_SR_CLK_STABLE		12
+#define RES43602_SR_SAVE_RESTORE	13
+#define RES43602_SR_SLEEP		14
+#define RES43602_LQ_START		15
+#define RES43602_LQ_AVAIL		16
+#define RES43602_WL_CORE_RDY		17
+#define RES43602_ILP_REQ		18
+#define RES43602_ALP_AVAIL		19
+#define RES43602_RADIO_PU		20
+#define RES43602_RFLDO_PU		21
+#define RES43602_HT_START		22
+#define RES43602_HT_AVAIL		23
+#define RES43602_MACPHY_CLKAVAIL	24
+#define RES43602_PARLDO_PU		25
+#define RES43602_RSVD_26		26
+
+/* 43602 chip status bits */
+#define CST43602_SPROM_PRESENT             (1<<1)
+#define CST43602_SPROM_SIZE                (1<<10) /* 0 = 16K, 1 = 4K */
+#define CST43602_BBPLL_LOCK                (1<<11)
+#define CST43602_RF_LDO_OUT_OK             (1<<15) /* RF LDO output OK */
+
+#define PMU43602_CC1_GPIO12_OVRD           (1<<28) /* GPIO12 override */
+
+#define PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN (1<<1)  /* creates gated_pcie_wake, pmu_wakeup logic */
+#define PMU43602_CC2_PCIE_PERST_L_WAKE_EN  (1<<2)  /* creates gated_pcie_wake, pmu_wakeup logic */
+#define PMU43602_CC2_ENABLE_L2REFCLKPAD_PWRDWN (1<<3)
+#define PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN (1<<5)  /* enable pmu_wakeup to request for ALP_AVAIL */
+#define PMU43602_CC2_PERST_L_EXTEND_EN     (1<<9)  /* extend perst_l until rsc PERST_OVR comes up */
+#define PMU43602_CC2_FORCE_EXT_LPO         (1<<19) /* 1=ext LPO clock is the final LPO clock */
+#define PMU43602_CC2_XTAL32_SEL            (1<<30) /* 0=ext_clock, 1=xtal */
+
+#define CC_SR1_43602_SR_ASM_ADDR	(0x0)
+
+/* PLL CTL register values for open loop, used during S/R operation */
+#define PMU43602_PLL_CTL6_VAL		0x68000528
+#define PMU43602_PLL_CTL7_VAL		0x6
+
+#define PMU43602_CC3_ARMCR4_DBG_CLK	(1 << 29)
+
+/* 4349 related */
+#define RES4349_LPLDO_PU			0
+#define RES4349_BG_PU				1
+#define RES4349_PMU_SLEEP			2
+#define RES4349_PALDO3P3_PU			3
+#define RES4349_CBUCK_LPOM_PU		4
+#define RES4349_CBUCK_PFM_PU		5
+#define RES4349_COLD_START_WAIT		6
+#define RES4349_RSVD_7				7
+#define RES4349_LNLDO_PU			8
+#define RES4349_XTALLDO_PU			9
+#define RES4349_LDO3P3_PU			10
+#define RES4349_OTP_PU				11
+#define RES4349_XTAL_PU				12
+#define RES4349_SR_CLK_START		13
+#define RES4349_LQ_AVAIL			14
+#define RES4349_LQ_START			15
+#define RES4349_PERST_OVR			16
+#define RES4349_WL_CORE_RDY			17
+#define RES4349_ILP_REQ				18
+#define RES4349_ALP_AVAIL			19
+#define RES4349_MINI_PMU			20
+#define RES4349_RADIO_PU			21
+#define RES4349_SR_CLK_STABLE		22
+#define RES4349_SR_SAVE_RESTORE		23
+#define RES4349_SR_PHY_PWRSW		24
+#define RES4349_SR_VDDM_PWRSW		25
+#define RES4349_SR_SUBCORE_PWRSW	26
+#define RES4349_SR_SLEEP			27
+#define RES4349_HT_START			28
+#define RES4349_HT_AVAIL			29
+#define RES4349_MACPHY_CLKAVAIL		30
+
+#define CR4_4349_RAM_BASE			(0x180000)
+#define CC4_4349_SR_ASM_ADDR		(0x48)
+
+#define CST4349_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 6)) != 0)	/* SDIO */
+#define CST4349_CHIPMODE_PCIE(cs)	(((cs) & (1 << 7)) != 0)	/* PCIE */
+
+#define CST4349_SPROM_PRESENT		0x00000010
+
+
+/* 43430 PMU resources based on pmu_params.xls */
+#define RES43430_LPLDO_PU				0
+#define RES43430_BG_PU					1
+#define RES43430_PMU_SLEEP				2
+#define RES43430_RSVD_3					3
+#define RES43430_CBUCK_LPOM_PU			4
+#define RES43430_CBUCK_PFM_PU			5
+#define RES43430_COLD_START_WAIT		6
+#define RES43430_RSVD_7					7
+#define RES43430_LNLDO_PU				8
+#define RES43430_RSVD_9					9
+#define RES43430_LDO3P3_PU				10
+#define RES43430_OTP_PU					11
+#define RES43430_XTAL_PU				12
+#define RES43430_SR_CLK_START			13
+#define RES43430_LQ_AVAIL				14
+#define RES43430_LQ_START				15
+#define RES43430_RSVD_16				16
+#define RES43430_WL_CORE_RDY			17
+#define RES43430_ILP_REQ				18
+#define RES43430_ALP_AVAIL				19
+#define RES43430_MINI_PMU				20
+#define RES43430_RADIO_PU				21
+#define RES43430_SR_CLK_STABLE			22
+#define RES43430_SR_SAVE_RESTORE		23
+#define RES43430_SR_PHY_PWRSW			24
+#define RES43430_SR_VDDM_PWRSW			25
+#define RES43430_SR_SUBCORE_PWRSW		26
+#define RES43430_SR_SLEEP				27
+#define RES43430_HT_START				28
+#define RES43430_HT_AVAIL				29
+#define RES43430_MACPHY_CLK_AVAIL		30
+
+/* 43430 chip status bits */
+#define CST43430_SDIO_MODE				0x00000001
+#define CST43430_GSPI_MODE				0x00000002
+#define CST43430_RSRC_INIT_MODE_0		0x00000080
+#define CST43430_RSRC_INIT_MODE_1		0x00000100
+#define CST43430_SEL0_SDIO				0x00000200
+#define CST43430_SEL1_SDIO				0x00000400
+#define CST43430_SEL2_SDIO				0x00000800
+#define CST43430_BBPLL_LOCKED			0x00001000
+#define CST43430_DBG_INST_DETECT		0x00004000
+#define CST43430_CLB2WL_BT_READY		0x00020000
+#define CST43430_JTAG_MODE				0x00100000
+#define CST43430_HOST_IFACE				0x00400000
+#define CST43430_TRIM_EN				0x00800000
+#define CST43430_DIN_PACKAGE_OPTION		0x10000000
+
+/* defines to detect active host interface in use */
+#define CHIP_HOSTIF_PCIEMODE	0x1
+#define CHIP_HOSTIF_USBMODE	0x2
+#define CHIP_HOSTIF_SDIOMODE	0x4
+#define CHIP_HOSTIF_PCIE(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_PCIEMODE)
+#define CHIP_HOSTIF_USB(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_USBMODE)
+#define CHIP_HOSTIF_SDIO(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_SDIOMODE)
+
+/* 4335 resources */
+#define RES4335_LPLDO_PO           0
+#define RES4335_PMU_BG_PU          1
+#define RES4335_PMU_SLEEP          2
+#define RES4335_RSVD_3             3
+#define RES4335_CBUCK_LPOM_PU		4
+#define RES4335_CBUCK_PFM_PU		5
+#define RES4335_RSVD_6             6
+#define RES4335_RSVD_7             7
+#define RES4335_LNLDO_PU           8
+#define RES4335_XTALLDO_PU         9
+#define RES4335_LDO3P3_PU			10
+#define RES4335_OTP_PU				11
+#define RES4335_XTAL_PU				12
+#define RES4335_SR_CLK_START       13
+#define RES4335_LQ_AVAIL			14
+#define RES4335_LQ_START           15
+#define RES4335_RSVD_16            16
+#define RES4335_WL_CORE_RDY        17
+#define RES4335_ILP_REQ				18
+#define RES4335_ALP_AVAIL			19
+#define RES4335_MINI_PMU           20
+#define RES4335_RADIO_PU			21
+#define RES4335_SR_CLK_STABLE		22
+#define RES4335_SR_SAVE_RESTORE		23
+#define RES4335_SR_PHY_PWRSW		24
+#define RES4335_SR_VDDM_PWRSW      25
+#define RES4335_SR_SUBCORE_PWRSW	26
+#define RES4335_SR_SLEEP           27
+#define RES4335_HT_START           28
+#define RES4335_HT_AVAIL			29
+#define RES4335_MACPHY_CLKAVAIL		30
+
+/* 4335 Chip specific ChipStatus register bits */
+#define CST4335_SPROM_MASK			0x00000020
+#define CST4335_SFLASH_MASK			0x00000040
+#define	CST4335_RES_INIT_MODE_SHIFT	7
+#define	CST4335_RES_INIT_MODE_MASK	0x00000180
+#define CST4335_CHIPMODE_MASK		0xF
+#define CST4335_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 0)) != 0)	/* SDIO */
+#define CST4335_CHIPMODE_GSPI(cs)	(((cs) & (1 << 1)) != 0)	/* gSPI */
+#define CST4335_CHIPMODE_USB20D(cs)	(((cs) & (1 << 2)) != 0)	/* HSIC || USBDA */
+#define CST4335_CHIPMODE_PCIE(cs)	(((cs) & (1 << 3)) != 0)	/* PCIE */
+
+/* 4335 Chip specific ChipControl1 register bits */
+#define CCTRL1_4335_GPIO_SEL		(1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4335_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+
+/* 4335 Chip specific ChipControl2 register bits */
+#define CCTRL2_4335_AOSBLOCK		(1 << 30)
+#define CCTRL2_4335_PMUWAKE		(1 << 31)
+#define PATCHTBL_SIZE			(0x800)
+#define CR4_4335_RAM_BASE                    (0x180000)
+#define CR4_4345_RAM_BASE                    (0x1b0000)
+#define CR4_4349_RAM_BASE                    (0x180000)
+#define CR4_4350_RAM_BASE                    (0x180000)
+#define CR4_4360_RAM_BASE                    (0x0)
+#define CR4_43602_RAM_BASE                   (0x180000)
+
+/* 4335 chip OTP present & OTP select bits. */
+#define SPROM4335_OTP_SELECT	0x00000010
+#define SPROM4335_OTP_PRESENT	0x00000020
+
+/* 4335 GCI specific bits. */
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_PRESENT	(1 << 24)
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_TYPE	25
+#define CC4335_GCI_FUNC_SEL_PAD_SDIO	0x00707770
+
+/* SFLASH clkdev specific bits. */
+#define CC4335_SFLASH_CLKDIV_MASK	0x1F000000
+#define CC4335_SFLASH_CLKDIV_SHIFT	25
+
+/* 4335 OTP bits for SFLASH. */
+#define CC4335_SROM_OTP_SFLASH	40
+#define CC4335_SROM_OTP_SFLASH_PRESENT	0x1
+#define CC4335_SROM_OTP_SFLASH_TYPE	0x2
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_MASK	0x003C
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_SHIFT	2
+
+
+/* 4335 resources--END */
+
+/* 4345 Chip specific ChipStatus register bits */
+#define CST4345_SPROM_MASK		0x00000020
+#define CST4345_SFLASH_MASK		0x00000040
+#define CST4345_RES_INIT_MODE_SHIFT	7
+#define CST4345_RES_INIT_MODE_MASK	0x00000180
+#define CST4345_CHIPMODE_MASK		0x4000F
+#define CST4345_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 0)) != 0)	/* SDIO */
+#define CST4345_CHIPMODE_GSPI(cs)	(((cs) & (1 << 1)) != 0)	/* gSPI */
+#define CST4345_CHIPMODE_HSIC(cs)	(((cs) & (1 << 2)) != 0)	/* HSIC */
+#define CST4345_CHIPMODE_PCIE(cs)	(((cs) & (1 << 3)) != 0)	/* PCIE */
+#define CST4345_CHIPMODE_USB20D(cs)	(((cs) & (1 << 18)) != 0)	/* USBDA */
+
+/* 4350 Chipcommon ChipStatus bits */
+#define CST4350_SDIO_MODE		0x00000001
+#define CST4350_HSIC20D_MODE		0x00000002
+#define CST4350_BP_ON_HSIC_CLK		0x00000004
+#define CST4350_PCIE_MODE		0x00000008
+#define CST4350_USB20D_MODE		0x00000010
+#define CST4350_USB30D_MODE		0x00000020
+#define CST4350_SPROM_PRESENT		0x00000040
+#define CST4350_RSRC_INIT_MODE_0	0x00000080
+#define CST4350_RSRC_INIT_MODE_1	0x00000100
+#define CST4350_SEL0_SDIO		0x00000200
+#define CST4350_SEL1_SDIO		0x00000400
+#define CST4350_SDIO_PAD_MODE		0x00000800
+#define CST4350_BBPLL_LOCKED		0x00001000
+#define CST4350_USBPLL_LOCKED		0x00002000
+#define CST4350_LINE_STATE		0x0000C000
+#define CST4350_SERDES_PIPE_PLLLOCK	0x00010000
+#define CST4350_BT_READY		0x00020000
+#define CST4350_SFLASH_PRESENT		0x00040000
+#define CST4350_CPULESS_ENABLE		0x00080000
+#define CST4350_STRAP_HOST_IFC_1	0x00100000
+#define CST4350_STRAP_HOST_IFC_2	0x00200000
+#define CST4350_STRAP_HOST_IFC_3	0x00400000
+#define CST4350_RAW_SPROM_PRESENT	0x00800000
+#define CST4350_APP_CLK_SWITCH_SEL_RDBACK	0x01000000
+#define CST4350_RAW_RSRC_INIT_MODE_0	0x02000000
+#define CST4350_SDIO_PAD_VDDIO		0x04000000
+#define CST4350_GSPI_MODE		0x08000000
+#define CST4350_PACKAGE_OPTION		0xF0000000
+#define CST4350_PACKAGE_SHIFT		28
+
+/* package option for 4350 */
+#define CST4350_PACKAGE_WLCSP		0x0
+#define CST4350_PACKAGE_PCIE		0x1
+#define CST4350_PACKAGE_WLBGA		0x2
+#define CST4350_PACKAGE_DBG		0x3
+#define CST4350_PACKAGE_USB		0x4
+#define CST4350_PACKAGE_USB_HSIC	0x4
+
+#define CST4350_PKG_MODE(cs)	((cs & CST4350_PACKAGE_OPTION) >> CST4350_PACKAGE_SHIFT)
+
+#define CST4350_PKG_WLCSP(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLCSP))
+#define CST4350_PKG_PCIE(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_PCIE))
+#define CST4350_PKG_WLBGA(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLBGA))
+#define CST4350_PKG_USB(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB))
+#define CST4350_PKG_USB_HSIC(cs)	(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB_HSIC))
+
+/* 4350C0 USB PACKAGE using raw_sprom_present to indicate 40 MHz xtal */
+#define CST4350_PKG_USB_40M(cs)		(cs & CST4350_RAW_SPROM_PRESENT)
+
+#define CST4350_CHIPMODE_SDIOD(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_SDIOD))
+#define CST4350_CHIPMODE_USB20D(cs)	((CST4350_IFC_MODE(cs)) == (CST4350_IFC_MODE_USB20D))
+#define CST4350_CHIPMODE_HSIC20D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC20D))
+#define CST4350_CHIPMODE_HSIC30D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC30D))
+#define CST4350_CHIPMODE_USB30D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D))
+#define CST4350_CHIPMODE_USB30D_WL(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D_WL))
+#define CST4350_CHIPMODE_PCIE(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_PCIE))
+
+/* strap_host_ifc strap value */
+#define CST4350_HOST_IFC_MASK		0x00700000
+#define CST4350_HOST_IFC_SHIFT		20
+
+/* host_ifc raw mode */
+#define CST4350_IFC_MODE_SDIOD			0x0
+#define CST4350_IFC_MODE_HSIC20D		0x1
+#define CST4350_IFC_MODE_HSIC30D		0x2
+#define CST4350_IFC_MODE_PCIE			0x3
+#define CST4350_IFC_MODE_USB20D			0x4
+#define CST4350_IFC_MODE_USB30D			0x5
+#define CST4350_IFC_MODE_USB30D_WL		0x6
+#define CST4350_IFC_MODE_USB30D_BT		0x7
+
+#define CST4350_IFC_MODE(cs)	((cs & CST4350_HOST_IFC_MASK) >> CST4350_HOST_IFC_SHIFT)
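+/*
+ * Illustrative sketch only (the variable name "cs" is hypothetical): decoding
+ * the strapped host interface from a ChipStatus value with the macros above.
+ *
+ *	uint32 cs = <value read from the ChipStatus register>;
+ *	if (CST4350_CHIPMODE_SDIOD(cs)) {
+ *		<chip is strapped for SDIO>
+ *	} else if (CST4350_CHIPMODE_PCIE(cs)) {
+ *		<chip is strapped for PCIe>
+ *	}
+ */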
+
+/* 4350 PMU resources */
+#define RES4350_LPLDO_PU	0
+#define RES4350_PMU_BG_PU	1
+#define RES4350_PMU_SLEEP	2
+#define RES4350_RSVD_3		3
+#define RES4350_CBUCK_LPOM_PU	4
+#define RES4350_CBUCK_PFM_PU	5
+#define RES4350_COLD_START_WAIT	6
+#define RES4350_RSVD_7		7
+#define RES4350_LNLDO_PU	8
+#define RES4350_XTALLDO_PU	9
+#define RES4350_LDO3P3_PU	10
+#define RES4350_OTP_PU		11
+#define RES4350_XTAL_PU		12
+#define RES4350_SR_CLK_START	13
+#define RES4350_LQ_AVAIL	14
+#define RES4350_LQ_START	15
+#define RES4350_PERST_OVR	16
+#define RES4350_WL_CORE_RDY	17
+#define RES4350_ILP_REQ		18
+#define RES4350_ALP_AVAIL	19
+#define RES4350_MINI_PMU	20
+#define RES4350_RADIO_PU	21
+#define RES4350_SR_CLK_STABLE	22
+#define RES4350_SR_SAVE_RESTORE	23
+#define RES4350_SR_PHY_PWRSW	24
+#define RES4350_SR_VDDM_PWRSW	25
+#define RES4350_SR_SUBCORE_PWRSW	26
+#define RES4350_SR_SLEEP	27
+#define RES4350_HT_START	28
+#define RES4350_HT_AVAIL	29
+#define RES4350_MACPHY_CLKAVAIL	30
+
+#define MUXENAB4350_UART_MASK		(0x0000000f)
+#define MUXENAB4350_UART_SHIFT		0
+#define MUXENAB4350_HOSTWAKE_MASK	(0x000000f0)	/* configure GPIO for SDIO host_wake */
+#define MUXENAB4350_HOSTWAKE_SHIFT	4
+
+
+/* 4350 GCI function sel values */
+#define CC4350_FNSEL_HWDEF		(0)
+#define CC4350_FNSEL_SAMEASPIN		(1)
+#define CC4350_FNSEL_UART		(2)
+#define CC4350_FNSEL_SFLASH		(3)
+#define CC4350_FNSEL_SPROM		(4)
+#define CC4350_FNSEL_I2C		(5)
+#define CC4350_FNSEL_MISC0		(6)
+#define CC4350_FNSEL_GCI		(7)
+#define CC4350_FNSEL_MISC1		(8)
+#define CC4350_FNSEL_MISC2		(9)
+#define CC4350_FNSEL_PWDOG 		(10)
+#define CC4350_FNSEL_IND		(12)
+#define CC4350_FNSEL_PDN		(13)
+#define CC4350_FNSEL_PUP		(14)
+#define CC4350_FNSEL_TRISTATE		(15)
+#define CC4350C_FNSEL_UART		(3)
+
+
+/* 4350 GPIO */
+#define CC4350_PIN_GPIO_00		(0)
+#define CC4350_PIN_GPIO_01		(1)
+#define CC4350_PIN_GPIO_02		(2)
+#define CC4350_PIN_GPIO_03		(3)
+#define CC4350_PIN_GPIO_04		(4)
+#define CC4350_PIN_GPIO_05		(5)
+#define CC4350_PIN_GPIO_06		(6)
+#define CC4350_PIN_GPIO_07		(7)
+#define CC4350_PIN_GPIO_08		(8)
+#define CC4350_PIN_GPIO_09		(9)
+#define CC4350_PIN_GPIO_10		(10)
+#define CC4350_PIN_GPIO_11		(11)
+#define CC4350_PIN_GPIO_12		(12)
+#define CC4350_PIN_GPIO_13		(13)
+#define CC4350_PIN_GPIO_14		(14)
+#define CC4350_PIN_GPIO_15		(15)
+
+#define CC4350_RSVD_16_SHIFT		16
+
+#define CC2_4350_PHY_PWRSW_UPTIME_MASK		(0xf << 0)
+#define CC2_4350_PHY_PWRSW_UPTIME_SHIFT		(0)
+#define CC2_4350_VDDM_PWRSW_UPDELAY_MASK	(0xf << 4)
+#define CC2_4350_VDDM_PWRSW_UPDELAY_SHIFT	(4)
+#define CC2_4350_VDDM_PWRSW_UPTIME_MASK		(0xf << 8)
+#define CC2_4350_VDDM_PWRSW_UPTIME_SHIFT	(8)
+#define CC2_4350_SBC_PWRSW_DNDELAY_MASK		(0x3 << 12)
+#define CC2_4350_SBC_PWRSW_DNDELAY_SHIFT	(12)
+#define CC2_4350_PHY_PWRSW_DNDELAY_MASK		(0x3 << 14)
+#define CC2_4350_PHY_PWRSW_DNDELAY_SHIFT	(14)
+#define CC2_4350_VDDM_PWRSW_DNDELAY_MASK	(0x3 << 16)
+#define CC2_4350_VDDM_PWRSW_DNDELAY_SHIFT	(16)
+#define CC2_4350_VDDM_PWRSW_EN_MASK		(1 << 20)
+#define CC2_4350_VDDM_PWRSW_EN_SHIFT		(20)
+#define CC2_4350_MEMLPLDO_PWRSW_EN_MASK		(1 << 21)
+#define CC2_4350_MEMLPLDO_PWRSW_EN_SHIFT	(21)
+#define CC2_4350_SDIO_AOS_WAKEUP_MASK		(1 << 24)
+#define CC2_4350_SDIO_AOS_WAKEUP_SHIFT		(24)
+
+/* Applies to 4335/4350/4345 */
+#define CC3_SR_CLK_SR_MEM_MASK			(1 << 0)
+#define CC3_SR_CLK_SR_MEM_SHIFT			(0)
+#define CC3_SR_BIT1_TBD_MASK			(1 << 1)
+#define CC3_SR_BIT1_TBD_SHIFT			(1)
+#define CC3_SR_ENGINE_ENABLE_MASK		(1 << 2)
+#define CC3_SR_ENGINE_ENABLE_SHIFT		(2)
+#define CC3_SR_BIT3_TBD_MASK			(1 << 3)
+#define CC3_SR_BIT3_TBD_SHIFT			(3)
+#define CC3_SR_MINDIV_FAST_CLK_MASK		(0xF << 4)
+#define CC3_SR_MINDIV_FAST_CLK_SHIFT		(4)
+#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_MASK	(1 << 8)
+#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_SHIFT	(8)
+#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_MASK	(1 << 9)
+#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_SHIFT	(9)
+#define CC3_SR_R23_SR_RISE_EDGE_TRIG_MASK	(1 << 10)
+#define CC3_SR_R23_SR_RISE_EDGE_TRIG_SHIFT	(10)
+#define CC3_SR_R23_SR_FALL_EDGE_TRIG_MASK	(1 << 11)
+#define CC3_SR_R23_SR_FALL_EDGE_TRIG_SHIFT	(11)
+#define CC3_SR_NUM_CLK_HIGH_MASK		(0x7 << 12)
+#define CC3_SR_NUM_CLK_HIGH_SHIFT		(12)
+#define CC3_SR_BIT15_TBD_MASK			(1 << 15)
+#define CC3_SR_BIT15_TBD_SHIFT			(15)
+#define CC3_SR_PHY_FUNC_PIC_MASK		(1 << 16)
+#define CC3_SR_PHY_FUNC_PIC_SHIFT		(16)
+#define CC3_SR_BIT17_19_TBD_MASK		(0x7 << 17)
+#define CC3_SR_BIT17_19_TBD_SHIFT		(17)
+#define CC3_SR_CHIP_TRIGGER_1_MASK		(1 << 20)
+#define CC3_SR_CHIP_TRIGGER_1_SHIFT		(20)
+#define CC3_SR_CHIP_TRIGGER_2_MASK		(1 << 21)
+#define CC3_SR_CHIP_TRIGGER_2_SHIFT		(21)
+#define CC3_SR_CHIP_TRIGGER_3_MASK		(1 << 22)
+#define CC3_SR_CHIP_TRIGGER_3_SHIFT		(22)
+#define CC3_SR_CHIP_TRIGGER_4_MASK		(1 << 23)
+#define CC3_SR_CHIP_TRIGGER_4_SHIFT		(23)
+#define CC3_SR_ALLOW_SBC_FUNC_PIC_MASK		(1 << 24)
+#define CC3_SR_ALLOW_SBC_FUNC_PIC_SHIFT		(24)
+#define CC3_SR_BIT25_26_TBD_MASK		(0x3 << 25)
+#define CC3_SR_BIT25_26_TBD_SHIFT		(25)
+#define CC3_SR_ALLOW_SBC_STBY_MASK		(1 << 27)
+#define CC3_SR_ALLOW_SBC_STBY_SHIFT		(27)
+#define CC3_SR_GPIO_MUX_MASK			(0xF << 28)
+#define CC3_SR_GPIO_MUX_SHIFT			(28)
+
+/* Applies to 4335/4350/4345 */
+#define CC4_SR_INIT_ADDR_MASK		(0x3FF0000)
+#define CC4_4350_SR_ASM_ADDR		(0x30)
+#define CC4_4350_C0_SR_ASM_ADDR		(0x0)
+#define CC4_4335_SR_ASM_ADDR		(0x48)
+#define CC4_4345_SR_ASM_ADDR		(0x48)
+#define CC4_SR_INIT_ADDR_SHIFT		(16)
+
+#define CC4_4350_EN_SR_CLK_ALP_MASK	(1 << 30)
+#define CC4_4350_EN_SR_CLK_ALP_SHIFT	(30)
+#define CC4_4350_EN_SR_CLK_HT_MASK	(1 << 31)
+#define CC4_4350_EN_SR_CLK_HT_SHIFT	(31)
+
+#define VREG4_4350_MEMLPDO_PU_MASK	(1 << 31)
+#define VREG4_4350_MEMLPDO_PU_SHIFT	31
+
+#define VREG6_4350_SR_EXT_CLKDIR_MASK	(1 << 20)
+#define VREG6_4350_SR_EXT_CLKDIR_SHIFT	20
+#define VREG6_4350_SR_EXT_CLKDIV_MASK	(0x3 << 21)
+#define VREG6_4350_SR_EXT_CLKDIV_SHIFT	21
+#define VREG6_4350_SR_EXT_CLKEN_MASK	(1 << 23)
+#define VREG6_4350_SR_EXT_CLKEN_SHIFT	23
+
+#define CC5_4350_PMU_EN_ASSERT_MASK	(1 << 13)
+#define CC5_4350_PMU_EN_ASSERT_SHIFT	(13)
+
+#define CC6_4350_PCIE_CLKREQ_WAKEUP_MASK	(1 << 4)
+#define CC6_4350_PCIE_CLKREQ_WAKEUP_SHIFT	(4)
+#define CC6_4350_PMU_WAKEUP_ALPAVAIL_MASK	(1 << 6)
+#define CC6_4350_PMU_WAKEUP_ALPAVAIL_SHIFT	(6)
+#define CC6_4350_PMU_EN_EXT_PERST_MASK		(1 << 17)
+#define CC6_4350_PMU_EN_EXT_PERST_SHIFT		(17)
+#define CC6_4350_PMU_EN_WAKEUP_MASK		(1 << 18)
+#define CC6_4350_PMU_EN_WAKEUP_SHIFT		(18)
+
+#define CC7_4350_PMU_EN_ASSERT_L2_MASK	(1 << 26)
+#define CC7_4350_PMU_EN_ASSERT_L2_SHIFT	(26)
+#define CC7_4350_PMU_EN_MDIO_MASK	(1 << 27)
+#define CC7_4350_PMU_EN_MDIO_SHIFT	(27)
+
+#define CC6_4345_PMU_EN_PERST_DEASSERT_MASK		(1 << 13)
+#define CC6_4345_PMU_EN_PERST_DEASSERT_SHIF		(13)
+#define CC6_4345_PMU_EN_L2_DEASSERT_MASK		(1 << 14)
+#define CC6_4345_PMU_EN_L2_DEASSERT_SHIF		(14)
+#define CC6_4345_PMU_EN_ASSERT_L2_MASK		(1 << 15)
+#define CC6_4345_PMU_EN_ASSERT_L2_SHIFT		(15)
+#define CC6_4345_PMU_EN_MDIO_MASK		(1 << 24)
+#define CC6_4345_PMU_EN_MDIO_SHIFT		(24)
+
+/* GCI chipcontrol register indices */
+#define CC_GCI_CHIPCTRL_00	(0)
+#define CC_GCI_CHIPCTRL_01	(1)
+#define CC_GCI_CHIPCTRL_02	(2)
+#define CC_GCI_CHIPCTRL_03	(3)
+#define CC_GCI_CHIPCTRL_04	(4)
+#define CC_GCI_CHIPCTRL_05	(5)
+#define CC_GCI_CHIPCTRL_06	(6)
+#define CC_GCI_CHIPCTRL_07	(7)
+#define CC_GCI_CHIPCTRL_08	(8)
+#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12)
+
+#define CC_GCI_06_JTAG_SEL_SHIFT	4
+#define CC_GCI_06_JTAG_SEL_MASK		(1 << 4)
+
+#define CC_GCI_NUMCHIPCTRLREGS(cap1)	((cap1 & 0xF00) >> 8)
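+/*
+ * Illustrative sketch only: "cap1" is assumed to hold the GCI capabilities
+ * word; CC_GCI_NUMCHIPCTRLREGS(cap1) then extracts bits [11:8] as the number
+ * of implemented GCI chipcontrol registers, e.g. cap1 = 0x900 yields 9.
+ */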
+
+/* 4345 PMU resources */
+#define RES4345_LPLDO_PU		0
+#define RES4345_PMU_BG_PU		1
+#define RES4345_PMU_SLEEP 		2
+#define RES4345_HSICLDO_PU		3
+#define RES4345_CBUCK_LPOM_PU		4
+#define RES4345_CBUCK_PFM_PU		5
+#define RES4345_COLD_START_WAIT		6
+#define RES4345_RSVD_7			7
+#define RES4345_LNLDO_PU		8
+#define RES4345_XTALLDO_PU		9
+#define RES4345_LDO3P3_PU		10
+#define RES4345_OTP_PU			11
+#define RES4345_XTAL_PU			12
+#define RES4345_SR_CLK_START		13
+#define RES4345_LQ_AVAIL		14
+#define RES4345_LQ_START		15
+#define RES4345_PERST_OVR		16
+#define RES4345_WL_CORE_RDY		17
+#define RES4345_ILP_REQ			18
+#define RES4345_ALP_AVAIL		19
+#define RES4345_MINI_PMU		20
+#define RES4345_RADIO_PU		21
+#define RES4345_SR_CLK_STABLE		22
+#define RES4345_SR_SAVE_RESTORE		23
+#define RES4345_SR_PHY_PWRSW		24
+#define RES4345_SR_VDDM_PWRSW		25
+#define RES4345_SR_SUBCORE_PWRSW	26
+#define RES4345_SR_SLEEP		27
+#define RES4345_HT_START		28
+#define RES4345_HT_AVAIL		29
+#define RES4345_MACPHY_CLK_AVAIL	30
+
+/* 4335 pins
+* note: only the values set as default/used are added here.
+*/
+#define CC4335_PIN_GPIO_00		(0)
+#define CC4335_PIN_GPIO_01		(1)
+#define CC4335_PIN_GPIO_02		(2)
+#define CC4335_PIN_GPIO_03		(3)
+#define CC4335_PIN_GPIO_04		(4)
+#define CC4335_PIN_GPIO_05		(5)
+#define CC4335_PIN_GPIO_06		(6)
+#define CC4335_PIN_GPIO_07		(7)
+#define CC4335_PIN_GPIO_08		(8)
+#define CC4335_PIN_GPIO_09		(9)
+#define CC4335_PIN_GPIO_10		(10)
+#define CC4335_PIN_GPIO_11		(11)
+#define CC4335_PIN_GPIO_12		(12)
+#define CC4335_PIN_GPIO_13		(13)
+#define CC4335_PIN_GPIO_14		(14)
+#define CC4335_PIN_GPIO_15		(15)
+#define CC4335_PIN_SDIO_CLK		(16)
+#define CC4335_PIN_SDIO_CMD		(17)
+#define CC4335_PIN_SDIO_DATA0	(18)
+#define CC4335_PIN_SDIO_DATA1	(19)
+#define CC4335_PIN_SDIO_DATA2	(20)
+#define CC4335_PIN_SDIO_DATA3	(21)
+#define CC4335_PIN_RF_SW_CTRL_6	(22)
+#define CC4335_PIN_RF_SW_CTRL_7	(23)
+#define CC4335_PIN_RF_SW_CTRL_8	(24)
+#define CC4335_PIN_RF_SW_CTRL_9	(25)
+/* Last GPIO Pad */
+#define CC4335_PIN_GPIO_LAST	(31)
+
+/* 4335 GCI function sel values
+*/
+#define CC4335_FNSEL_HWDEF		(0)
+#define CC4335_FNSEL_SAMEASPIN	(1)
+#define CC4335_FNSEL_GPIO0		(2)
+#define CC4335_FNSEL_GPIO1		(3)
+#define CC4335_FNSEL_GCI0		(4)
+#define CC4335_FNSEL_GCI1		(5)
+#define CC4335_FNSEL_UART		(6)
+#define CC4335_FNSEL_SFLASH		(7)
+#define CC4335_FNSEL_SPROM		(8)
+#define CC4335_FNSEL_MISC0		(9)
+#define CC4335_FNSEL_MISC1		(10)
+#define CC4335_FNSEL_MISC2		(11)
+#define CC4335_FNSEL_IND		(12)
+#define CC4335_FNSEL_PDN		(13)
+#define CC4335_FNSEL_PUP		(14)
+#define CC4335_FNSEL_TRI		(15)
+
+/* GCI Core Control Reg */
+#define	GCI_CORECTRL_SR_MASK	(1 << 0)	/* SECI block Reset */
+#define	GCI_CORECTRL_RSL_MASK	(1 << 1)	/* ResetSECILogic */
+#define	GCI_CORECTRL_ES_MASK	(1 << 2)	/* EnableSECI */
+#define	GCI_CORECTRL_FSL_MASK	(1 << 3)	/* Force SECI Out Low */
+#define	GCI_CORECTRL_SOM_MASK	(7 << 4)	/* SECI Op Mode */
+#define	GCI_CORECTRL_US_MASK	(1 << 7)	/* Update SECI */
+#define	GCI_CORECTRL_BOS_MASK	(1 << 8)	/* Break On Sleep */
+
+/* 4345 pins
+* note: only the values set as default/used are added here.
+*/
+#define CC4345_PIN_GPIO_00		(0)
+#define CC4345_PIN_GPIO_01		(1)
+#define CC4345_PIN_GPIO_02		(2)
+#define CC4345_PIN_GPIO_03		(3)
+#define CC4345_PIN_GPIO_04		(4)
+#define CC4345_PIN_GPIO_05		(5)
+#define CC4345_PIN_GPIO_06		(6)
+#define CC4345_PIN_GPIO_07		(7)
+#define CC4345_PIN_GPIO_08		(8)
+#define CC4345_PIN_GPIO_09		(9)
+#define CC4345_PIN_GPIO_10		(10)
+#define CC4345_PIN_GPIO_11		(11)
+#define CC4345_PIN_GPIO_12		(12)
+#define CC4345_PIN_GPIO_13		(13)
+#define CC4345_PIN_GPIO_14		(14)
+#define CC4345_PIN_GPIO_15		(15)
+#define CC4345_PIN_GPIO_16		(16)
+#define CC4345_PIN_SDIO_CLK		(17)
+#define CC4345_PIN_SDIO_CMD		(18)
+#define CC4345_PIN_SDIO_DATA0	(19)
+#define CC4345_PIN_SDIO_DATA1	(20)
+#define CC4345_PIN_SDIO_DATA2	(21)
+#define CC4345_PIN_SDIO_DATA3	(22)
+#define CC4345_PIN_RF_SW_CTRL_0	(23)
+#define CC4345_PIN_RF_SW_CTRL_1	(24)
+#define CC4345_PIN_RF_SW_CTRL_2	(25)
+#define CC4345_PIN_RF_SW_CTRL_3	(26)
+#define CC4345_PIN_RF_SW_CTRL_4	(27)
+#define CC4345_PIN_RF_SW_CTRL_5	(28)
+#define CC4345_PIN_RF_SW_CTRL_6	(29)
+#define CC4345_PIN_RF_SW_CTRL_7	(30)
+#define CC4345_PIN_RF_SW_CTRL_8	(31)
+#define CC4345_PIN_RF_SW_CTRL_9	(32)
+
+/* 4345 GCI function sel values
+*/
+#define CC4345_FNSEL_HWDEF		(0)
+#define CC4345_FNSEL_SAMEASPIN		(1)
+#define CC4345_FNSEL_GPIO0		(2)
+#define CC4345_FNSEL_GPIO1		(3)
+#define CC4345_FNSEL_GCI0		(4)
+#define CC4345_FNSEL_GCI1		(5)
+#define CC4345_FNSEL_UART		(6)
+#define CC4345_FNSEL_SFLASH		(7)
+#define CC4345_FNSEL_SPROM		(8)
+#define CC4345_FNSEL_MISC0		(9)
+#define CC4345_FNSEL_MISC1		(10)
+#define CC4345_FNSEL_MISC2		(11)
+#define CC4345_FNSEL_IND		(12)
+#define CC4345_FNSEL_PDN		(13)
+#define CC4345_FNSEL_PUP		(14)
+#define CC4345_FNSEL_TRI		(15)
+
+#define MUXENAB4345_UART_MASK		(0x0000000f)
+#define MUXENAB4345_UART_SHIFT		0
+#define MUXENAB4345_HOSTWAKE_MASK	(0x000000f0)
+#define MUXENAB4345_HOSTWAKE_SHIFT	4
+
+/* 4349 Group (4349, 4355, 4359) GCI AVS function sel values */
+#define CC4349_GRP_GCI_AVS_CTRL_MASK   (0xffe00000)
+#define CC4349_GRP_GCI_AVS_CTRL_SHIFT  (21)
+#define CC4349_GRP_GCI_AVS_CTRL_ENAB   (1 << 5)
+
+/* 4345 GCI AVS function sel values */
+#define CC4345_GCI_AVS_CTRL_MASK   (0xfc)
+#define CC4345_GCI_AVS_CTRL_SHIFT  (2)
+#define CC4345_GCI_AVS_CTRL_ENAB   (1 << 5)
+
+/* GCI GPIO for function sel GCI-0/GCI-1 */
+#define CC_GCI_GPIO_0			(0)
+#define CC_GCI_GPIO_1			(1)
+#define CC_GCI_GPIO_2			(2)
+#define CC_GCI_GPIO_3			(3)
+#define CC_GCI_GPIO_4			(4)
+#define CC_GCI_GPIO_5			(5)
+#define CC_GCI_GPIO_6			(6)
+#define CC_GCI_GPIO_7			(7)
+#define CC_GCI_GPIO_8			(8)
+#define CC_GCI_GPIO_9			(9)
+#define CC_GCI_GPIO_10			(10)
+#define CC_GCI_GPIO_11			(11)
+#define CC_GCI_GPIO_12			(12)
+#define CC_GCI_GPIO_13			(13)
+#define CC_GCI_GPIO_14			(14)
+#define CC_GCI_GPIO_15			(15)
+
+
+/* indicates Invalid GPIO, e.g. when PAD GPIO doesn't map to GCI GPIO */
+#define CC_GCI_GPIO_INVALID		0xFF
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK(pos)  (((uint32)0xF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL(val, pos)  ((((uint32)val) << pos) & GCIMASK(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL(val, pos)	((val >> pos) & 0xF)
+
+
+/* find the 8 bit mask given the bit position */
+#define GCIMASK_8B(pos)  (((uint32)0xFF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_8B(val, pos)  ((((uint32)val) << pos) & GCIMASK_8B(pos))
+/* Extract byte from a given position */
+#define GCIGETNBL_8B(val, pos)	((val >> pos) & 0xFF)
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK_4B(pos)  (((uint32)0xF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_4B(val, pos)  ((((uint32)val) << pos) & GCIMASK_4B(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL_4B(val, pos)	((val >> pos) & 0xF)
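+/*
+ * Illustrative sketch only: packing a function-select nibble into a GCI
+ * chipcontrol word and reading it back (the bit position 8 is arbitrary).
+ *
+ *	uint32 ccval = GCIPOSVAL(CC4335_FNSEL_UART, 8);	 nibble placed at bits [11:8]
+ *	uint32 fnsel = GCIGETNBL(ccval, 8);		 yields CC4335_FNSEL_UART again
+ */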
+
+
+/* 4335 GCI Intstatus(Mask)/WakeMask Register bits. */
+#define GCI_INTSTATUS_RBI	(1 << 0)	/* Rx Break Interrupt */
+#define GCI_INTSTATUS_UB	(1 << 1)	/* UART Break Interrupt */
+#define GCI_INTSTATUS_SPE	(1 << 2)	/* SECI Parity Error Interrupt */
+#define GCI_INTSTATUS_SFE	(1 << 3)	/* SECI Framing Error Interrupt */
+#define GCI_INTSTATUS_SRITI	(1 << 9)	/* SECI Rx Idle Timer Interrupt */
+#define GCI_INTSTATUS_STFF	(1 << 10)	/* SECI Tx FIFO Full Interrupt */
+#define GCI_INTSTATUS_STFAE	(1 << 11)	/* SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTSTATUS_SRFAF	(1 << 12)	/* SECI Rx FIFO Almost Full */
+#define GCI_INTSTATUS_SRFNE	(1 << 14)	/* SECI Rx FIFO Not Empty */
+#define GCI_INTSTATUS_SRFOF	(1 << 15)	/* SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTSTATUS_GPIOINT	(1 << 25)	/* GCIGpioInt */
+#define GCI_INTSTATUS_GPIOWAKE	(1 << 26)	/* GCIGpioWake */
+
+/* 4335 GCI IntMask Register bits. */
+#define GCI_INTMASK_RBI		(1 << 0)	/* Rx Break Interrupt */
+#define GCI_INTMASK_UB		(1 << 1)	/* UART Break Interrupt */
+#define GCI_INTMASK_SPE		(1 << 2)	/* SECI Parity Error Interrupt */
+#define GCI_INTMASK_SFE		(1 << 3)	/* SECI Framing Error Interrupt */
+#define GCI_INTMASK_SRITI	(1 << 9)	/* SECI Rx Idle Timer Interrupt */
+#define GCI_INTMASK_STFF	(1 << 10)	/* SECI Tx FIFO Full Interrupt */
+#define GCI_INTMASK_STFAE	(1 << 11)	/* SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTMASK_SRFAF	(1 << 12)	/* SECI Rx FIFO Almost Full */
+#define GCI_INTMASK_SRFNE	(1 << 14)	/* SECI Rx FIFO Not Empty */
+#define GCI_INTMASK_SRFOF	(1 << 15)	/* SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTMASK_GPIOINT	(1 << 25)	/* GCIGpioInt */
+#define GCI_INTMASK_GPIOWAKE	(1 << 26)	/* GCIGpioWake */
+
+/* 4335 GCI WakeMask Register bits. */
+#define GCI_WAKEMASK_RBI	(1 << 0)	/* Rx Break Interrupt */
+#define GCI_WAKEMASK_UB		(1 << 1)	/* UART Break Interrupt */
+#define GCI_WAKEMASK_SPE	(1 << 2)	/* SECI Parity Error Interrupt */
+#define GCI_WAKEMASK_SFE	(1 << 3)	/* SECI Framing Error Interrupt */
+#define GCI_WAKE_SRITI		(1 << 9)	/* SECI Rx Idle Timer Interrupt */
+#define GCI_WAKEMASK_STFF	(1 << 10)	/* SECI Tx FIFO Full Interrupt */
+#define GCI_WAKEMASK_STFAE	(1 << 11)	/* SECI Tx FIFO Almost Empty Intr */
+#define GCI_WAKEMASK_SRFAF	(1 << 12)	/* SECI Rx FIFO Almost Full */
+#define GCI_WAKEMASK_SRFNE	(1 << 14)	/* SECI Rx FIFO Not Empty */
+#define GCI_WAKEMASK_SRFOF	(1 << 15)	/* SECI Rx FIFO Not Empty Timeout */
+#define GCI_WAKEMASK_GPIOINT	(1 << 25)	/* GCIGpioInt */
+#define GCI_WAKEMASK_GPIOWAKE	(1 << 26)	/* GCIGpioWake */
+
+#define	GCI_WAKE_ON_GCI_GPIO1	1
+#define	GCI_WAKE_ON_GCI_GPIO2	2
+#define	GCI_WAKE_ON_GCI_GPIO3	3
+#define	GCI_WAKE_ON_GCI_GPIO4	4
+#define	GCI_WAKE_ON_GCI_GPIO5	5
+#define	GCI_WAKE_ON_GCI_GPIO6	6
+#define	GCI_WAKE_ON_GCI_GPIO7	7
+#define	GCI_WAKE_ON_GCI_GPIO8	8
+#define	GCI_WAKE_ON_GCI_SECI_IN	9
+
+/* 4335 MUX options. Each nibble belongs to a setting; a non-zero value selects a
+* logic function. For now only UART is used (for the bootloader).
+*/
+#define MUXENAB4335_UART_MASK		(0x0000000f)
+
+#define MUXENAB4335_UART_SHIFT		0
+#define MUXENAB4335_HOSTWAKE_MASK	(0x000000f0)	/* configure GPIO for SDIO host_wake */
+#define MUXENAB4335_HOSTWAKE_SHIFT	4
+#define MUXENAB4335_GETIX(val, name) \
+	((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1)
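+/*
+ * Illustrative sketch only (the muxenab value 0x21 is hypothetical): with the
+ * UART nibble set to 1 and the HOSTWAKE nibble set to 2,
+ * MUXENAB4335_GETIX(0x21, UART) evaluates to 0 and
+ * MUXENAB4335_GETIX(0x21, HOSTWAKE) evaluates to 1.
+ */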
+
+/*
+* Maximum delay for the PMU state transition in us.
+* This is an upper bound intended for spinwaits etc.
+*/
+#define PMU_MAX_TRANSITION_DLY	15000
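+/*
+ * Illustrative sketch only, assuming a SPINWAIT-style polling helper and a
+ * hypothetical ht_clock_available() predicate are available to the caller:
+ *
+ *	SPINWAIT(!ht_clock_available(), PMU_MAX_TRANSITION_DLY);
+ */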
+
+/* PMU resource up transition time in ILP cycles */
+#define PMURES_UP_TRANSITION	2
+
+
+/* SECI configuration */
+#define SECI_MODE_UART			0x0
+#define SECI_MODE_SECI			0x1
+#define SECI_MODE_LEGACY_3WIRE_BT	0x2
+#define SECI_MODE_LEGACY_3WIRE_WLAN	0x3
+#define SECI_MODE_HALF_SECI		0x4
+
+#define SECI_RESET		(1 << 0)
+#define SECI_RESET_BAR_UART	(1 << 1)
+#define SECI_ENAB_SECI_ECI	(1 << 2)
+#define SECI_ENAB_SECIOUT_DIS	(1 << 3)
+#define SECI_MODE_MASK		0x7
+#define SECI_MODE_SHIFT		4 /* (bits 5, 6, 7) */
+#define SECI_UPD_SECI		(1 << 7)
+
+#define SECI_SLIP_ESC_CHAR	0xDB
+#define SECI_SIGNOFF_0		SECI_SLIP_ESC_CHAR
+#define SECI_SIGNOFF_1     0
+#define SECI_REFRESH_REQ	0xDA
+
+/* seci clk_ctl_st bits */
+#define CLKCTL_STS_SECI_CLK_REQ		(1 << 8)
+#define CLKCTL_STS_SECI_CLK_AVAIL	(1 << 24)
+
+#define SECI_UART_MSR_CTS_STATE		(1 << 0)
+#define SECI_UART_MSR_RTS_STATE		(1 << 1)
+#define SECI_UART_SECI_IN_STATE		(1 << 2)
+#define SECI_UART_SECI_IN2_STATE	(1 << 3)
+
+/* GCI RX FIFO Control Register */
+#define	GCI_RXF_LVL_MASK	(0xFF << 0)
+#define	GCI_RXF_TIMEOUT_MASK	(0xFF << 8)
+
+/* GCI UART Registers' Bit definitions */
+/* Seci Fifo Level Register */
+#define	SECI_TXF_LVL_MASK	(0x3F << 8)
+#define	TXF_AE_LVL_DEFAULT	0x4
+#define	SECI_RXF_LVL_FC_MASK	(0x3F << 16)
+
+/* SeciUARTFCR Bit definitions */
+#define	SECI_UART_FCR_RFR		(1 << 0)
+#define	SECI_UART_FCR_TFR		(1 << 1)
+#define	SECI_UART_FCR_SR		(1 << 2)
+#define	SECI_UART_FCR_THP		(1 << 3)
+#define	SECI_UART_FCR_AB		(1 << 4)
+#define	SECI_UART_FCR_ATOE		(1 << 5)
+#define	SECI_UART_FCR_ARTSOE		(1 << 6)
+#define	SECI_UART_FCR_ABV		(1 << 7)
+#define	SECI_UART_FCR_ALM		(1 << 8)
+
+/* SECI UART LCR register bits */
+#define SECI_UART_LCR_STOP_BITS		(1 << 0) /* 0 - 1bit, 1 - 2bits */
+#define SECI_UART_LCR_PARITY_EN		(1 << 1)
+#define SECI_UART_LCR_PARITY		(1 << 2) /* 0 - odd, 1 - even */
+#define SECI_UART_LCR_RX_EN		(1 << 3)
+#define SECI_UART_LCR_LBRK_CTRL		(1 << 4) /* 1 => SECI_OUT held low */
+#define SECI_UART_LCR_TXO_EN		(1 << 5)
+#define SECI_UART_LCR_RTSO_EN		(1 << 6)
+#define SECI_UART_LCR_SLIPMODE_EN	(1 << 7)
+#define SECI_UART_LCR_RXCRC_CHK		(1 << 8)
+#define SECI_UART_LCR_TXCRC_INV		(1 << 9)
+#define SECI_UART_LCR_TXCRC_LSBF	(1 << 10)
+#define SECI_UART_LCR_TXCRC_EN		(1 << 11)
+#define	SECI_UART_LCR_RXSYNC_EN		(1 << 12)
+
+#define SECI_UART_MCR_TX_EN		(1 << 0)
+#define SECI_UART_MCR_PRTS		(1 << 1)
+#define SECI_UART_MCR_SWFLCTRL_EN	(1 << 2)
+#define SECI_UART_MCR_HIGHRATE_EN	(1 << 3)
+#define SECI_UART_MCR_LOOPBK_EN		(1 << 4)
+#define SECI_UART_MCR_AUTO_RTS		(1 << 5)
+#define SECI_UART_MCR_AUTO_TX_DIS	(1 << 6)
+#define SECI_UART_MCR_BAUD_ADJ_EN	(1 << 7)
+#define SECI_UART_MCR_XONOFF_RPT	(1 << 9)
+
+/* SeciUARTLSR Bit Mask */
+#define	SECI_UART_LSR_RXOVR_MASK	(1 << 0)
+#define	SECI_UART_LSR_RFF_MASK		(1 << 1)
+#define	SECI_UART_LSR_TFNE_MASK		(1 << 2)
+#define	SECI_UART_LSR_TI_MASK		(1 << 3)
+#define	SECI_UART_LSR_TPR_MASK		(1 << 4)
+#define	SECI_UART_LSR_TXHALT_MASK	(1 << 5)
+
+/* SeciUARTMSR Bit Mask */
+#define	SECI_UART_MSR_CTSS_MASK		(1 << 0)
+#define	SECI_UART_MSR_RTSS_MASK		(1 << 1)
+#define	SECI_UART_MSR_SIS_MASK		(1 << 2)
+#define	SECI_UART_MSR_SIS2_MASK		(1 << 3)
+
+/* SeciUARTData Bits */
+#define SECI_UART_DATA_RF_NOT_EMPTY_BIT	(1 << 12)
+#define SECI_UART_DATA_RF_FULL_BIT	(1 << 13)
+#define SECI_UART_DATA_RF_OVRFLOW_BIT	(1 << 14)
+#define	SECI_UART_DATA_FIFO_PTR_MASK	0xFF
+#define	SECI_UART_DATA_RF_RD_PTR_SHIFT	16
+#define	SECI_UART_DATA_RF_WR_PTR_SHIFT	24
+
+/* LTECX: ltecxmux */
+#define LTECX_EXTRACT_MUX(val, idx)	(getbit4(&(val), (idx)))
+
+/* LTECX: ltecxmux MODE */
+#define LTECX_MUX_MODE_IDX		0
+#define LTECX_MUX_MODE_WCI2		0x0
+#define LTECX_MUX_MODE_GPIO		0x1
+
+
+/* LTECX GPIO Information Index */
+#define LTECX_NVRAM_FSYNC_IDX	0
+#define LTECX_NVRAM_LTERX_IDX	1
+#define LTECX_NVRAM_LTETX_IDX	2
+#define LTECX_NVRAM_WLPRIO_IDX	3
+
+/* LTECX WCI2 Information Index */
+#define LTECX_NVRAM_WCI2IN_IDX	0
+#define LTECX_NVRAM_WCI2OUT_IDX	1
+
+/* LTECX: Macros to get GPIO/FNSEL/GCIGPIO */
+#define LTECX_EXTRACT_PADNUM(val, idx)	(getbit8(&(val), (idx)))
+#define LTECX_EXTRACT_FNSEL(val, idx)	(getbit4(&(val), (idx)))
+#define LTECX_EXTRACT_GCIGPIO(val, idx)	(getbit4(&(val), (idx)))
+
+/* WLAN channel numbers - used from wifi.h */
+
+/* WLAN BW */
+#define ECI_BW_20   0x0
+#define ECI_BW_25   0x1
+#define ECI_BW_30   0x2
+#define ECI_BW_35   0x3
+#define ECI_BW_40   0x4
+#define ECI_BW_45   0x5
+#define ECI_BW_50   0x6
+#define ECI_BW_ALL  0x7
+
+/* WLAN - number of antenna */
+#define WLAN_NUM_ANT1 TXANT_0
+#define WLAN_NUM_ANT2 TXANT_1
+
+/* otpctrl1 0xF4 */
+#define OTPC_FORCE_PWR_OFF	0x02000000
+/* chipcommon s/r registers introduced with cc rev >= 48 */
+#define CC_SR_CTL0_ENABLE_MASK             0x1
+#define CC_SR_CTL0_ENABLE_SHIFT              0
+#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT       1 /* sr_clk to sr_memory enable */
+#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT        2 /* Rising edge resource trigger 0 to sr_engine  */
+#define CC_SR_CTL0_MIN_DIV_SHIFT             6 /* Min division value for fast clk in sr_engine */
+#define CC_SR_CTL0_EN_SBC_STBY_SHIFT        16 /* Allow Subcore mem StandBy? */
+#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18
+#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT       19
+#define CC_SR_CTL0_ALLOW_PIC_SHIFT          20 /* Allow pic to separate power domains */
+#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT  25
+#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30
+
+#define CC_SR_CTL1_SR_INIT_MASK             0x3FF
+#define CC_SR_CTL1_SR_INIT_SHIFT            0
+
+#define	ECI_INLO_PKTDUR_MASK	0x000000f0 /* [7:4] - 4 bits */
+#define ECI_INLO_PKTDUR_SHIFT	4
+
+/* gci chip control bits */
+#define GCI_GPIO_CHIPCTRL_ENAB_IN_BIT		0
+#define GCI_GPIO_CHIPCTRL_ENAB_OP_BIT		1
+#define GCI_GPIO_CHIPCTRL_INVERT_BIT		2
+#define GCI_GPIO_CHIPCTRL_PULLUP_BIT		3
+#define GCI_GPIO_CHIPCTRL_PULLDN_BIT		4
+#define GCI_GPIO_CHIPCTRL_ENAB_BTSIG_BIT	5
+#define GCI_GPIO_CHIPCTRL_ENAB_OD_OP_BIT	6
+#define GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT	7
+
+/* gci GPIO input status bits */
+#define GCI_GPIO_STS_VALUE_BIT			0
+#define GCI_GPIO_STS_POS_EDGE_BIT		1
+#define GCI_GPIO_STS_NEG_EDGE_BIT		2
+#define GCI_GPIO_STS_FAST_EDGE_BIT		3
+#define GCI_GPIO_STS_CLEAR			0xF
+
+#define GCI_GPIO_STS_VALUE	(1 << GCI_GPIO_STS_VALUE_BIT)
+
+#endif	/* _SBCHIPC_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbconfig.h b/drivers/net/wireless/bcmdhd/include/sbconfig.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc9223a1634b5b7101c0cc633e169fab00bf34b0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbconfig.h
@@ -0,0 +1,264 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: sbconfig.h 456346 2014-02-18 16:48:52Z $
+ */
+
+#ifndef	_SBCONFIG_H
+#define	_SBCONFIG_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif
+
+/* enumeration in SB is based on the premise that cores are contiguous in the
+ * enumeration space.
+ */
+#define SB_BUS_SIZE		0x10000		/* Each bus gets 64Kbytes for cores */
+#define SB_BUS_BASE(b)		(SI_ENUM_BASE + (b) * SB_BUS_SIZE)
+#define	SB_BUS_MAXCORES		(SB_BUS_SIZE / SI_CORE_SIZE)	/* Max cores per bus */
+
+/*
+ * Sonics Configuration Space Registers.
+ */
+#define	SBCONFIGOFF		0xf00		/* core sbconfig regs are top 256 bytes of regs */
+#define	SBCONFIGSIZE		256		/* sizeof (sbconfig_t) */
+
+#define SBIPSFLAG		0x08
+#define SBTPSFLAG		0x18
+#define	SBTMERRLOGA		0x48		/* sonics >= 2.3 */
+#define	SBTMERRLOG		0x50		/* sonics >= 2.3 */
+#define SBADMATCH3		0x60
+#define SBADMATCH2		0x68
+#define SBADMATCH1		0x70
+#define SBIMSTATE		0x90
+#define SBINTVEC		0x94
+#define SBTMSTATELOW		0x98
+#define SBTMSTATEHIGH		0x9c
+#define SBBWA0			0xa0
+#define SBIMCONFIGLOW		0xa8
+#define SBIMCONFIGHIGH		0xac
+#define SBADMATCH0		0xb0
+#define SBTMCONFIGLOW		0xb8
+#define SBTMCONFIGHIGH		0xbc
+#define SBBCONFIG		0xc0
+#define SBBSTATE		0xc8
+#define SBACTCNFG		0xd8
+#define	SBFLAGST		0xe8
+#define SBIDLOW			0xf8
+#define SBIDHIGH		0xfc
+
+/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have
+ * a few registers *below* that line. I think it would be very confusing to try
+ * and change the value of SBCONFIGOFF, so I'm defining them as absolute offsets here.
+ */
+
+#define SBIMERRLOGA		0xea8
+#define SBIMERRLOG		0xeb0
+#define SBTMPORTCONNID0		0xed8
+#define SBTMPORTLOCK0		0xef8
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+typedef volatile struct _sbconfig {
+	uint32	PAD[2];
+	uint32	sbipsflag;		/* initiator port ocp slave flag */
+	uint32	PAD[3];
+	uint32	sbtpsflag;		/* target port ocp slave flag */
+	uint32	PAD[11];
+	uint32	sbtmerrloga;		/* (sonics >= 2.3) */
+	uint32	PAD;
+	uint32	sbtmerrlog;		/* (sonics >= 2.3) */
+	uint32	PAD[3];
+	uint32	sbadmatch3;		/* address match3 */
+	uint32	PAD;
+	uint32	sbadmatch2;		/* address match2 */
+	uint32	PAD;
+	uint32	sbadmatch1;		/* address match1 */
+	uint32	PAD[7];
+	uint32	sbimstate;		/* initiator agent state */
+	uint32	sbintvec;		/* interrupt mask */
+	uint32	sbtmstatelow;		/* target state */
+	uint32	sbtmstatehigh;		/* target state */
+	uint32	sbbwa0;			/* bandwidth allocation table0 */
+	uint32	PAD;
+	uint32	sbimconfiglow;		/* initiator configuration */
+	uint32	sbimconfighigh;		/* initiator configuration */
+	uint32	sbadmatch0;		/* address match0 */
+	uint32	PAD;
+	uint32	sbtmconfiglow;		/* target configuration */
+	uint32	sbtmconfighigh;		/* target configuration */
+	uint32	sbbconfig;		/* broadcast configuration */
+	uint32	PAD;
+	uint32	sbbstate;		/* broadcast state */
+	uint32	PAD[3];
+	uint32	sbactcnfg;		/* activate configuration */
+	uint32	PAD[3];
+	uint32	sbflagst;		/* current sbflags */
+	uint32	PAD[3];
+	uint32	sbidlow;		/* identification */
+	uint32	sbidhigh;		/* identification */
+} sbconfig_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+/* sbipsflag */
+#define	SBIPS_INT1_MASK		0x3f		/* which sbflags get routed to mips interrupt 1 */
+#define	SBIPS_INT1_SHIFT	0
+#define	SBIPS_INT2_MASK		0x3f00		/* which sbflags get routed to mips interrupt 2 */
+#define	SBIPS_INT2_SHIFT	8
+#define	SBIPS_INT3_MASK		0x3f0000	/* which sbflags get routed to mips interrupt 3 */
+#define	SBIPS_INT3_SHIFT	16
+#define	SBIPS_INT4_MASK		0x3f000000	/* which sbflags get routed to mips interrupt 4 */
+#define	SBIPS_INT4_SHIFT	24
+
+/* sbtpsflag */
+#define	SBTPS_NUM0_MASK		0x3f		/* interrupt sbFlag # generated by this core */
+#define	SBTPS_F0EN0		0x40		/* interrupt is always sent on the backplane */
+
+/* sbtmerrlog */
+#define	SBTMEL_CM		0x00000007	/* command */
+#define	SBTMEL_CI		0x0000ff00	/* connection id */
+#define	SBTMEL_EC		0x0f000000	/* error code */
+#define	SBTMEL_ME		0x80000000	/* multiple error */
+
+/* sbimstate */
+#define	SBIM_PC			0xf		/* pipecount */
+#define	SBIM_AP_MASK		0x30		/* arbitration policy */
+#define	SBIM_AP_BOTH		0x00		/* use both timeslices and token */
+#define	SBIM_AP_TS		0x10		/* use timeslices only */
+#define	SBIM_AP_TK		0x20		/* use token only */
+#define	SBIM_AP_RSV		0x30		/* reserved */
+#define	SBIM_IBE		0x20000		/* inbanderror */
+#define	SBIM_TO			0x40000		/* timeout */
+#define	SBIM_BY			0x01800000	/* busy (sonics >= 2.3) */
+#define	SBIM_RJ			0x02000000	/* reject (sonics >= 2.3) */
+
+/* sbtmstatelow */
+#define	SBTML_RESET		0x0001		/* reset */
+#define	SBTML_REJ_MASK		0x0006		/* reject field */
+#define	SBTML_REJ		0x0002		/* reject */
+#define	SBTML_TMPREJ		0x0004		/* temporary reject, for error recovery */
+
+#define	SBTML_SICF_SHIFT	16		/* Shift to locate the SI control flags in sbtml */
+
+/* sbtmstatehigh */
+#define	SBTMH_SERR		0x0001		/* serror */
+#define	SBTMH_INT		0x0002		/* interrupt */
+#define	SBTMH_BUSY		0x0004		/* busy */
+#define	SBTMH_TO		0x0020		/* timeout (sonics >= 2.3) */
+
+#define	SBTMH_SISF_SHIFT	16		/* Shift to locate the SI status flags in sbtmh */
+
+/* sbbwa0 */
+#define	SBBWA_TAB0_MASK		0xffff		/* lookup table 0 */
+#define	SBBWA_TAB1_MASK		0xffff		/* lookup table 1 */
+#define	SBBWA_TAB1_SHIFT	16
+
+/* sbimconfiglow */
+#define	SBIMCL_STO_MASK		0x7		/* service timeout */
+#define	SBIMCL_RTO_MASK		0x70		/* request timeout */
+#define	SBIMCL_RTO_SHIFT	4
+#define	SBIMCL_CID_MASK		0xff0000	/* connection id */
+#define	SBIMCL_CID_SHIFT	16
+
+/* sbimconfighigh */
+#define	SBIMCH_IEM_MASK		0xc		/* inband error mode */
+#define	SBIMCH_TEM_MASK		0x30		/* timeout error mode */
+#define	SBIMCH_TEM_SHIFT	4
+#define	SBIMCH_BEM_MASK		0xc0		/* bus error mode */
+#define	SBIMCH_BEM_SHIFT	6
+
+/* sbadmatch0 */
+#define	SBAM_TYPE_MASK		0x3		/* address type */
+#define	SBAM_AD64		0x4		/* reserved */
+#define	SBAM_ADINT0_MASK	0xf8		/* type0 size */
+#define	SBAM_ADINT0_SHIFT	3
+#define	SBAM_ADINT1_MASK	0x1f8		/* type1 size */
+#define	SBAM_ADINT1_SHIFT	3
+#define	SBAM_ADINT2_MASK	0x1f8		/* type2 size */
+#define	SBAM_ADINT2_SHIFT	3
+#define	SBAM_ADEN		0x400		/* enable */
+#define	SBAM_ADNEG		0x800		/* negative decode */
+#define	SBAM_BASE0_MASK		0xffffff00	/* type0 base address */
+#define	SBAM_BASE0_SHIFT	8
+#define	SBAM_BASE1_MASK		0xfffff000	/* type1 base address for the core */
+#define	SBAM_BASE1_SHIFT	12
+#define	SBAM_BASE2_MASK		0xffff0000	/* type2 base address for the core */
+#define	SBAM_BASE2_SHIFT	16
+
+/* sbtmconfiglow */
+#define	SBTMCL_CD_MASK		0xff		/* clock divide */
+#define	SBTMCL_CO_MASK		0xf800		/* clock offset */
+#define	SBTMCL_CO_SHIFT		11
+#define	SBTMCL_IF_MASK		0xfc0000	/* interrupt flags */
+#define	SBTMCL_IF_SHIFT		18
+#define	SBTMCL_IM_MASK		0x3000000	/* interrupt mode */
+#define	SBTMCL_IM_SHIFT		24
+
+/* sbtmconfighigh */
+#define	SBTMCH_BM_MASK		0x3		/* busy mode */
+#define	SBTMCH_RM_MASK		0x3		/* retry mode */
+#define	SBTMCH_RM_SHIFT		2
+#define	SBTMCH_SM_MASK		0x30		/* stop mode */
+#define	SBTMCH_SM_SHIFT		4
+#define	SBTMCH_EM_MASK		0x300		/* sb error mode */
+#define	SBTMCH_EM_SHIFT		8
+#define	SBTMCH_IM_MASK		0xc00		/* int mode */
+#define	SBTMCH_IM_SHIFT		10
+
+/* sbbconfig */
+#define	SBBC_LAT_MASK		0x3		/* sb latency */
+#define	SBBC_MAX0_MASK		0xf0000		/* maxccntr0 */
+#define	SBBC_MAX0_SHIFT		16
+#define	SBBC_MAX1_MASK		0xf00000	/* maxccntr1 */
+#define	SBBC_MAX1_SHIFT		20
+
+/* sbbstate */
+#define	SBBS_SRD		0x1		/* st reg disable */
+#define	SBBS_HRD		0x2		/* hold reg disable */
+
+/* sbidlow */
+#define	SBIDL_CS_MASK		0x3		/* config space */
+#define	SBIDL_AR_MASK		0x38		/* # address ranges supported */
+#define	SBIDL_AR_SHIFT		3
+#define	SBIDL_SYNCH		0x40		/* sync */
+#define	SBIDL_INIT		0x80		/* initiator */
+#define	SBIDL_MINLAT_MASK	0xf00		/* minimum backplane latency */
+#define	SBIDL_MINLAT_SHIFT	8
+#define	SBIDL_MAXLAT		0xf000		/* maximum backplane latency */
+#define	SBIDL_MAXLAT_SHIFT	12
+#define	SBIDL_FIRST		0x10000		/* this initiator is first */
+#define	SBIDL_CW_MASK		0xc0000		/* cycle counter width */
+#define	SBIDL_CW_SHIFT		18
+#define	SBIDL_TP_MASK		0xf00000	/* target ports */
+#define	SBIDL_TP_SHIFT		20
+#define	SBIDL_IP_MASK		0xf000000	/* initiator ports */
+#define	SBIDL_IP_SHIFT		24
+#define	SBIDL_RV_MASK		0xf0000000	/* sonics backplane revision code */
+#define	SBIDL_RV_SHIFT		28
+#define	SBIDL_RV_2_2		0x00000000	/* version 2.2 or earlier */
+#define	SBIDL_RV_2_3		0x10000000	/* version 2.3 */
+
+/* sbidhigh */
+#define	SBIDH_RC_MASK		0x000f		/* revision code */
+#define	SBIDH_RCE_MASK		0x7000		/* revision code extension field */
+#define	SBIDH_RCE_SHIFT		8
+#define	SBCOREREV(sbidh) \
+	((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
+#define	SBIDH_CC_MASK		0x8ff0		/* core code */
+#define	SBIDH_CC_SHIFT		4
+#define	SBIDH_VC_MASK		0xffff0000	/* vendor code */
+#define	SBIDH_VC_SHIFT		16
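+/*
+ * Editor illustration (not part of the original header): with the RCE field
+ * (bits 14:12 of sbidhigh) equal to 1 and the RC field (bits 3:0) equal to 5,
+ * SBCOREREV() above yields (0x1000 >> 8) | 0x5 = 0x15, i.e. core revision 21.
+ */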
+
+#define	SB_COMMIT		0xfd8		/* update buffered registers value */
+
+/* vendor codes */
+#define	SB_VEND_BCM		0x4243		/* Broadcom's SB vendor code */
+
+#endif	/* _SBCONFIG_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
new file mode 100644
index 0000000000000000000000000000000000000000..3427a75d966542c489c67ebf173a3dd2edb2bad7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
@@ -0,0 +1,399 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: sbhnddma.h 452424 2014-01-30 09:43:39Z $
+ */
+
+#ifndef	_sbhnddma_h_
+#define	_sbhnddma_h_
+
+/* DMA structure:
+ *  supports two DMA engines: 32-bit or 64-bit addressing
+ *  basic DMA register set is per channel (transmit or receive)
+ *  a pair of channels is defined for convenience
+ */
+
+
+/* 32 bits addressing */
+
+/* dma registers per channel(xmt or rcv) */
+typedef volatile struct {
+	uint32	control;		/* enable, et al */
+	uint32	addr;			/* descriptor ring base address (4K aligned) */
+	uint32	ptr;			/* last descriptor posted to chip */
+	uint32	status;			/* current active descriptor, et al */
+} dma32regs_t;
+
+typedef volatile struct {
+	dma32regs_t	xmt;		/* dma tx channel */
+	dma32regs_t	rcv;		/* dma rx channel */
+} dma32regp_t;
+
+typedef volatile struct {	/* diag access */
+	uint32	fifoaddr;		/* diag address */
+	uint32	fifodatalow;		/* low 32bits of data */
+	uint32	fifodatahigh;		/* high 32bits of data */
+	uint32	pad;			/* reserved */
+} dma32diag_t;
+
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+	uint32	ctrl;		/* misc control bits & bufcount */
+	uint32	addr;		/* data buffer address */
+} dma32dd_t;
+
+/*
+ * Each descriptor ring must be 4096-byte aligned and fit within a single 4096-byte page.
+ */
+#define	D32RINGALIGN_BITS	12
+#define	D32MAXRINGSZ		(1 << D32RINGALIGN_BITS)
+#define	D32RINGALIGN		(1 << D32RINGALIGN_BITS)
+
+#define	D32MAXDD	(D32MAXRINGSZ / sizeof (dma32dd_t))
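+/*
+ * Editor note: with 8-byte dma32dd_t entries this works out to
+ * 4096 / 8 = 512 descriptors per 32-bit ring.
+ */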
+
+/* transmit channel control */
+#define	XC_XE		((uint32)1 << 0)	/* transmit enable */
+#define	XC_SE		((uint32)1 << 1)	/* transmit suspend request */
+#define	XC_LE		((uint32)1 << 2)	/* loopback enable */
+#define	XC_FL		((uint32)1 << 4)	/* flush request */
+#define XC_MR_MASK	0x000001C0		/* Multiple outstanding reads */
+#define XC_MR_SHIFT	6
+#define	XC_PD		((uint32)1 << 11)	/* parity check disable */
+#define	XC_AE		((uint32)3 << 16)	/* address extension bits */
+#define	XC_AE_SHIFT	16
+#define XC_BL_MASK	0x001C0000		/* BurstLen bits */
+#define XC_BL_SHIFT	18
+#define XC_PC_MASK	0x00E00000		/* Prefetch control */
+#define XC_PC_SHIFT	21
+#define XC_PT_MASK	0x03000000		/* Prefetch threshold */
+#define XC_PT_SHIFT	24
+
+/* Multiple outstanding reads */
+#define DMA_MR_1	0
+#define DMA_MR_2	1
+#define DMA_MR_4	2
+#define DMA_MR_8	3
+#define DMA_MR_12	4
+#define DMA_MR_16	5
+#define DMA_MR_20	6
+#define DMA_MR_32	7
+
+/* DMA Burst Length in bytes */
+#define DMA_BL_16	0
+#define DMA_BL_32	1
+#define DMA_BL_64	2
+#define DMA_BL_128	3
+#define DMA_BL_256	4
+#define DMA_BL_512	5
+#define DMA_BL_1024	6
+
+/* Prefetch control */
+#define DMA_PC_0	0
+#define DMA_PC_4	1
+#define DMA_PC_8	2
+#define DMA_PC_16	3
+/* others: reserved */
+
+/* Prefetch threshold */
+#define DMA_PT_1	0
+#define DMA_PT_2	1
+#define DMA_PT_4	2
+#define DMA_PT_8	3
+
+/* transmit descriptor table pointer */
+#define	XP_LD_MASK	0xfff			/* last valid descriptor */
+
+/* transmit channel status */
+#define	XS_CD_MASK	0x0fff			/* current descriptor pointer */
+#define	XS_XS_MASK	0xf000			/* transmit state */
+#define	XS_XS_SHIFT	12
+#define	XS_XS_DISABLED	0x0000			/* disabled */
+#define	XS_XS_ACTIVE	0x1000			/* active */
+#define	XS_XS_IDLE	0x2000			/* idle wait */
+#define	XS_XS_STOPPED	0x3000			/* stopped */
+#define	XS_XS_SUSP	0x4000			/* suspend pending */
+#define	XS_XE_MASK	0xf0000			/* transmit errors */
+#define	XS_XE_SHIFT	16
+#define	XS_XE_NOERR	0x00000			/* no error */
+#define	XS_XE_DPE	0x10000			/* descriptor protocol error */
+#define	XS_XE_DFU	0x20000			/* data fifo underrun */
+#define	XS_XE_BEBR	0x30000			/* bus error on buffer read */
+#define	XS_XE_BEDA	0x40000			/* bus error on descriptor access */
+#define	XS_AD_MASK	0xfff00000		/* active descriptor */
+#define	XS_AD_SHIFT	20
+
+/* receive channel control */
+#define	RC_RE		((uint32)1 << 0)	/* receive enable */
+#define	RC_RO_MASK	0xfe			/* receive frame offset */
+#define	RC_RO_SHIFT	1
+#define	RC_FM		((uint32)1 << 8)	/* direct fifo receive (pio) mode */
+#define	RC_SH		((uint32)1 << 9)	/* separate rx header descriptor enable */
+#define	RC_OC		((uint32)1 << 10)	/* overflow continue */
+#define	RC_PD		((uint32)1 << 11)	/* parity check disable */
+#define	RC_AE		((uint32)3 << 16)	/* address extension bits */
+#define	RC_AE_SHIFT	16
+#define RC_BL_MASK	0x001C0000		/* BurstLen bits */
+#define RC_BL_SHIFT	18
+#define RC_PC_MASK	0x00E00000		/* Prefetch control */
+#define RC_PC_SHIFT	21
+#define RC_PT_MASK	0x03000000		/* Prefetch threshold */
+#define RC_PT_SHIFT	24
+
+/* receive descriptor table pointer */
+#define	RP_LD_MASK	0xfff			/* last valid descriptor */
+
+/* receive channel status */
+#define	RS_CD_MASK	0x0fff			/* current descriptor pointer */
+#define	RS_RS_MASK	0xf000			/* receive state */
+#define	RS_RS_SHIFT	12
+#define	RS_RS_DISABLED	0x0000			/* disabled */
+#define	RS_RS_ACTIVE	0x1000			/* active */
+#define	RS_RS_IDLE	0x2000			/* idle wait */
+#define	RS_RS_STOPPED	0x3000			/* reserved */
+#define	RS_RE_MASK	0xf0000			/* receive errors */
+#define	RS_RE_SHIFT	16
+#define	RS_RE_NOERR	0x00000			/* no error */
+#define	RS_RE_DPE	0x10000			/* descriptor protocol error */
+#define	RS_RE_DFO	0x20000			/* data fifo overflow */
+#define	RS_RE_BEBW	0x30000			/* bus error on buffer write */
+#define	RS_RE_BEDA	0x40000			/* bus error on descriptor access */
+#define	RS_AD_MASK	0xfff00000		/* active descriptor */
+#define	RS_AD_SHIFT	20
+
+/* fifoaddr */
+#define	FA_OFF_MASK	0xffff			/* offset */
+#define	FA_SEL_MASK	0xf0000			/* select */
+#define	FA_SEL_SHIFT	16
+#define	FA_SEL_XDD	0x00000			/* transmit dma data */
+#define	FA_SEL_XDP	0x10000			/* transmit dma pointers */
+#define	FA_SEL_RDD	0x40000			/* receive dma data */
+#define	FA_SEL_RDP	0x50000			/* receive dma pointers */
+#define	FA_SEL_XFD	0x80000			/* transmit fifo data */
+#define	FA_SEL_XFP	0x90000			/* transmit fifo pointers */
+#define	FA_SEL_RFD	0xc0000			/* receive fifo data */
+#define	FA_SEL_RFP	0xd0000			/* receive fifo pointers */
+#define	FA_SEL_RSD	0xe0000			/* receive frame status data */
+#define	FA_SEL_RSP	0xf0000			/* receive frame status pointers */
+
+/* descriptor control flags */
+#define	CTRL_BC_MASK	0x00001fff		/* buffer byte count; real data len must be <= 4KB */
+#define	CTRL_AE		((uint32)3 << 16)	/* address extension bits */
+#define	CTRL_AE_SHIFT	16
+#define	CTRL_PARITY	((uint32)3 << 18)	/* parity bit */
+#define	CTRL_EOT	((uint32)1 << 28)	/* end of descriptor table */
+#define	CTRL_IOC	((uint32)1 << 29)	/* interrupt on completion */
+#define	CTRL_EOF	((uint32)1 << 30)	/* end of frame */
+#define	CTRL_SOF	((uint32)1 << 31)	/* start of frame */
+
+/* control flags in the range [27:20] are core-specific and not defined here */
+#define	CTRL_CORE_MASK	0x0ff00000
+
+/* 64 bits addressing */
+
+/* dma registers per channel(xmt or rcv) */
+typedef volatile struct {
+	uint32	control;		/* enable, et al */
+	uint32	ptr;			/* last descriptor posted to chip */
+	uint32	addrlow;		/* descriptor ring base address low 32-bits (8K aligned) */
+	uint32	addrhigh;		/* descriptor ring base address bits 63:32 (8K aligned) */
+	uint32	status0;		/* current descriptor, xmt state */
+	uint32	status1;		/* active descriptor, xmt error */
+} dma64regs_t;
+
+typedef volatile struct {
+	dma64regs_t	tx;		/* dma64 tx channel */
+	dma64regs_t	rx;		/* dma64 rx channel */
+} dma64regp_t;
+
+typedef volatile struct {		/* diag access */
+	uint32	fifoaddr;		/* diag address */
+	uint32	fifodatalow;		/* low 32bits of data */
+	uint32	fifodatahigh;		/* high 32bits of data */
+	uint32	pad;			/* reserved */
+} dma64diag_t;
+
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+	uint32	ctrl1;		/* misc control bits */
+	uint32	ctrl2;		/* buffer count and address extension */
+	uint32	addrlow;	/* memory address of the data buffer, bits 31:0 */
+	uint32	addrhigh;	/* memory address of the data buffer, bits 63:32 */
+} dma64dd_t;
+
+/*
+ * Each descriptor ring must be 8KB aligned and fit within a contiguous 8KB physical address range.
+ */
+#define D64RINGALIGN_BITS	13
+#define	D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
+#define	D64RINGBOUNDARY		(1 << D64RINGALIGN_BITS)
+
+#define	D64MAXDD	(D64MAXRINGSZ / sizeof (dma64dd_t))
+
+/* for cores with large descriptor ring support, descriptor ring size can be up to 4096 */
+#define	D64MAXDD_LARGE		((1 << 16) / sizeof (dma64dd_t))
+
+/* for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross
+ * 64K boundary
+ */
+#define	D64RINGBOUNDARY_LARGE	(1 << 16)
+
+/*
+ * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11.
+ * When this field contains the value N, the burst length is 2**(N + 4) bytes.
+ */
+#define D64_DEF_USBBURSTLEN     2
+#define D64_DEF_SDIOBURSTLEN    1
+
+
+#ifndef D64_USBBURSTLEN
+#define D64_USBBURSTLEN	DMA_BL_64
+#endif
+#ifndef D64_SDIOBURSTLEN
+#define D64_SDIOBURSTLEN	DMA_BL_32
+#endif
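+
+/*
+ * Editor sketch (not part of the original header): convert a BurstLen field
+ * value N to its byte count, 2^(N + 4).  DMA_BL_BYTES() is an editor-invented
+ * name; e.g. DMA_BL_BYTES(DMA_BL_64) == 64 and
+ * DMA_BL_BYTES(D64_DEF_SDIOBURSTLEN) == 32.
+ */
+#define DMA_BL_BYTES(n)		(1u << ((n) + 4))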
+
+/* transmit channel control */
+#define	D64_XC_XE		0x00000001	/* transmit enable */
+#define	D64_XC_SE		0x00000002	/* transmit suspend request */
+#define	D64_XC_LE		0x00000004	/* loopback enable */
+#define	D64_XC_FL		0x00000010	/* flush request */
+#define D64_XC_MR_MASK		0x000001C0	/* Multiple outstanding reads */
+#define D64_XC_MR_SHIFT		6
+#define	D64_XC_PD		0x00000800	/* parity check disable */
+#define	D64_XC_AE		0x00030000	/* address extension bits */
+#define	D64_XC_AE_SHIFT		16
+#define D64_XC_BL_MASK		0x001C0000	/* BurstLen bits */
+#define D64_XC_BL_SHIFT		18
+#define D64_XC_PC_MASK		0x00E00000		/* Prefetch control */
+#define D64_XC_PC_SHIFT		21
+#define D64_XC_PT_MASK		0x03000000		/* Prefetch threshold */
+#define D64_XC_PT_SHIFT		24
+
+/* transmit descriptor table pointer */
+#define	D64_XP_LD_MASK		0x00001fff	/* last valid descriptor */
+
+/* transmit channel status */
+#define	D64_XS0_CD_MASK		(di->d64_xs0_cd_mask)	/* current descriptor pointer */
+#define	D64_XS0_XS_MASK		0xf0000000     	/* transmit state */
+#define	D64_XS0_XS_SHIFT		28
+#define	D64_XS0_XS_DISABLED	0x00000000	/* disabled */
+#define	D64_XS0_XS_ACTIVE	0x10000000	/* active */
+#define	D64_XS0_XS_IDLE		0x20000000	/* idle wait */
+#define	D64_XS0_XS_STOPPED	0x30000000	/* stopped */
+#define	D64_XS0_XS_SUSP		0x40000000	/* suspend pending */
+
+#define	D64_XS1_AD_MASK		(di->d64_xs1_ad_mask)	/* active descriptor */
+#define	D64_XS1_XE_MASK		0xf0000000     	/* transmit errors */
+#define	D64_XS1_XE_SHIFT		28
+#define	D64_XS1_XE_NOERR	0x00000000	/* no error */
+#define	D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
+#define	D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
+#define	D64_XS1_XE_DTE		0x30000000	/* data transfer error */
+#define	D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
+#define	D64_XS1_XE_COREE	0x50000000	/* core error */
+
+/* receive channel control */
+#define	D64_RC_RE		0x00000001	/* receive enable */
+#define	D64_RC_RO_MASK		0x000000fe	/* receive frame offset */
+#define	D64_RC_RO_SHIFT		1
+#define	D64_RC_FM		0x00000100	/* direct fifo receive (pio) mode */
+#define	D64_RC_SH		0x00000200	/* separate rx header descriptor enable */
+#define	D64_RC_SHIFT		9	/* separate rx header descriptor enable */
+#define	D64_RC_OC		0x00000400	/* overflow continue */
+#define	D64_RC_PD		0x00000800	/* parity check disable */
+#define D64_RC_GE		0x00004000	/* Glom enable */
+#define	D64_RC_AE		0x00030000	/* address extension bits */
+#define	D64_RC_AE_SHIFT		16
+#define D64_RC_BL_MASK		0x001C0000	/* BurstLen bits */
+#define D64_RC_BL_SHIFT		18
+#define D64_RC_PC_MASK		0x00E00000	/* Prefetch control */
+#define D64_RC_PC_SHIFT		21
+#define D64_RC_PT_MASK		0x03000000	/* Prefetch threshold */
+#define D64_RC_PT_SHIFT		24
+
+/* flags for dma controller */
+#define DMA_CTRL_PEN		(1 << 0)	/* parity enable */
+#define DMA_CTRL_ROC		(1 << 1)	/* rx overflow continue */
+#define DMA_CTRL_RXMULTI	(1 << 2)	/* allow rx scatter to multiple descriptors */
+#define DMA_CTRL_UNFRAMED	(1 << 3)	/* Unframed Rx/Tx data */
+#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)
+#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5)	/* DMA avoidance WAR for 4331 */
+#define DMA_CTRL_RXSINGLE	(1 << 6)	/* always single buffer */
+#define DMA_CTRL_SDIO_RXGLOM	(1 << 7)	/* DMA Rx glom is enabled */
+
+/* receive descriptor table pointer */
+#define	D64_RP_LD_MASK		0x00001fff	/* last valid descriptor */
+
+/* receive channel status */
+#define	D64_RS0_CD_MASK		(di->d64_rs0_cd_mask)	/* current descriptor pointer */
+#define	D64_RS0_RS_MASK		0xf0000000     	/* receive state */
+#define	D64_RS0_RS_SHIFT		28
+#define	D64_RS0_RS_DISABLED	0x00000000	/* disabled */
+#define	D64_RS0_RS_ACTIVE	0x10000000	/* active */
+#define	D64_RS0_RS_IDLE		0x20000000	/* idle wait */
+#define	D64_RS0_RS_STOPPED	0x30000000	/* stopped */
+#define	D64_RS0_RS_SUSP		0x40000000	/* suspend pending */
+
+#define	D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
+#define	D64_RS1_RE_MASK		0xf0000000     	/* receive errors */
+#define	D64_RS1_RE_SHIFT		28
+#define	D64_RS1_RE_NOERR	0x00000000	/* no error */
+#define	D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
+#define	D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
+#define	D64_RS1_RE_DTE		0x30000000	/* data transfer error */
+#define	D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
+#define	D64_RS1_RE_COREE	0x50000000	/* core error */
+
+/* fifoaddr */
+#define	D64_FA_OFF_MASK		0xffff		/* offset */
+#define	D64_FA_SEL_MASK		0xf0000		/* select */
+#define	D64_FA_SEL_SHIFT	16
+#define	D64_FA_SEL_XDD		0x00000		/* transmit dma data */
+#define	D64_FA_SEL_XDP		0x10000		/* transmit dma pointers */
+#define	D64_FA_SEL_RDD		0x40000		/* receive dma data */
+#define	D64_FA_SEL_RDP		0x50000		/* receive dma pointers */
+#define	D64_FA_SEL_XFD		0x80000		/* transmit fifo data */
+#define	D64_FA_SEL_XFP		0x90000		/* transmit fifo pointers */
+#define	D64_FA_SEL_RFD		0xc0000		/* receive fifo data */
+#define	D64_FA_SEL_RFP		0xd0000		/* receive fifo pointers */
+#define	D64_FA_SEL_RSD		0xe0000		/* receive frame status data */
+#define	D64_FA_SEL_RSP		0xf0000		/* receive frame status pointers */
+
+/* descriptor control flags 1 */
+#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
+#define	D64_CTRL1_NOTPCIE	((uint32)1 << 18)	/* burst size control */
+#define	D64_CTRL1_EOT		((uint32)1 << 28)	/* end of descriptor table */
+#define	D64_CTRL1_IOC		((uint32)1 << 29)	/* interrupt on completion */
+#define	D64_CTRL1_EOF		((uint32)1 << 30)	/* end of frame */
+#define	D64_CTRL1_SOF		((uint32)1 << 31)	/* start of frame */
+
+/* descriptor control flags 2 */
+#define	D64_CTRL2_BC_MASK	0x00007fff	/* buffer byte count; real data len must be <= 16KB */
+#define	D64_CTRL2_AE		0x00030000	/* address extension bits */
+#define	D64_CTRL2_AE_SHIFT	16
+#define D64_CTRL2_PARITY	0x00040000      /* parity bit */
+
+/* control flags in the range [27:20] are core-specific and not defined here */
+#define	D64_CTRL_CORE_MASK	0x0ff00000
+
+#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
+#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
+#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1, d11corerev >= 22 */
+#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */
+
+/* receive frame status */
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} dma_rxh_t;
+
+#endif	/* _sbhnddma_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
new file mode 100644
index 0000000000000000000000000000000000000000..91aa2fb1590a21d7f85aab569e5037da7dc65c29
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
@@ -0,0 +1,352 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: sbpcmcia.h 446298 2014-01-03 11:30:17Z $
+ */
+
+#ifndef	_SBPCMCIA_H
+#define	_SBPCMCIA_H
+
+/* All the addresses that are offsets in attribute space are divided
+ * by two to account for the fact that odd bytes are invalid in
+ * attribute space and our read/write routines make the space appear
+ * as if those bytes did not exist. We still show the original numbers
+ * as documented in the hnd_pcmcia core manual.
+ */
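+
+/*
+ * Editor example: the core manual documents the FCR base at attribute offset
+ * 0x700; in this header's halved addressing that becomes 0x700 / 2 = 0x380,
+ * which is the value of PCMCIA_FCR below.
+ */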
+
+/* PCMCIA Function Configuration Registers */
+#define	PCMCIA_FCR		(0x700 / 2)
+
+#define	FCR0_OFF		0
+#define	FCR1_OFF		(0x40 / 2)
+#define	FCR2_OFF		(0x80 / 2)
+#define	FCR3_OFF		(0xc0 / 2)
+
+#define	PCMCIA_FCR0		(0x700 / 2)
+#define	PCMCIA_FCR1		(0x740 / 2)
+#define	PCMCIA_FCR2		(0x780 / 2)
+#define	PCMCIA_FCR3		(0x7c0 / 2)
+
+/* Standard PCMCIA FCR registers */
+
+#define	PCMCIA_COR		0
+
+#define	COR_RST			0x80
+#define	COR_LEV			0x40
+#define	COR_IRQEN		0x04
+#define	COR_BLREN		0x01
+#define	COR_FUNEN		0x01
+
+
+#define	PCICIA_FCSR		(2 / 2)
+#define	PCICIA_PRR		(4 / 2)
+#define	PCICIA_SCR		(6 / 2)
+#define	PCICIA_ESR		(8 / 2)
+
+
+#define PCM_MEMOFF		0x0000
+#define F0_MEMOFF		0x1000
+#define F1_MEMOFF		0x2000
+#define F2_MEMOFF		0x3000
+#define F3_MEMOFF		0x4000
+
+/* Memory base in the function fcr's */
+#define MEM_ADDR0		(0x728 / 2)
+#define MEM_ADDR1		(0x72a / 2)
+#define MEM_ADDR2		(0x72c / 2)
+
+/* PCMCIA base plus Srom access in fcr0: */
+#define PCMCIA_ADDR0		(0x072e / 2)
+#define PCMCIA_ADDR1		(0x0730 / 2)
+#define PCMCIA_ADDR2		(0x0732 / 2)
+
+#define MEM_SEG			(0x0734 / 2)
+#define SROM_CS			(0x0736 / 2)
+#define SROM_DATAL		(0x0738 / 2)
+#define SROM_DATAH		(0x073a / 2)
+#define SROM_ADDRL		(0x073c / 2)
+#define SROM_ADDRH		(0x073e / 2)
+#define	SROM_INFO2		(0x0772 / 2)	/* Corerev >= 2 && <= 5 */
+#define	SROM_INFO		(0x07be / 2)	/* Corerev >= 6 */
+
+/*  Values for srom_cs: */
+#define SROM_IDLE		0
+#define SROM_WRITE		1
+#define SROM_READ		2
+#define SROM_WEN		4
+#define SROM_WDS		7
+#define SROM_DONE		8
+
+/* Fields in srom_info: */
+#define	SRI_SZ_MASK		0x03
+#define	SRI_BLANK		0x04
+#define	SRI_OTP			0x80
+
+#if !defined(LINUX_POSTMOGRIFY_REMOVAL)
+/* CIS stuff */
+
+/* The CIS stops where the FCRs start */
+#define	CIS_SIZE		PCMCIA_FCR
+#define CIS_SIZE_12K    1154    /* Maximum h/w + s/w sub region size for 12k OTP */
+
+/* CIS tuple length field max */
+#define CIS_TUPLE_LEN_MAX	0xff
+
+/* Standard tuples we know about */
+
+#define CISTPL_NULL			0x00
+#define	CISTPL_VERS_1		0x15		/* CIS ver, manf, dev & ver strings */
+#define	CISTPL_MANFID		0x20		/* Manufacturer and device id */
+#define CISTPL_FUNCID		0x21		/* Function identification */
+#define	CISTPL_FUNCE		0x22		/* Function extensions */
+#define	CISTPL_CFTABLE		0x1b		/* Config table entry */
+#define	CISTPL_END		0xff		/* End of the CIS tuple chain */
+
+/* Function identifier provides context for the function extensions tuple */
+#define CISTPL_FID_SDIO		0x0c		/* Extensions defined by SDIO spec */
+
+/* Function extensions for LANs (assumed for extensions other than SDIO) */
+#define	LAN_TECH		1		/* Technology type */
+#define	LAN_SPEED		2		/* Raw bit rate */
+#define	LAN_MEDIA		3		/* Transmission media */
+#define	LAN_NID			4		/* Node identification (aka MAC addr) */
+#define	LAN_CONN		5		/* Connector standard */
+
+
+/* CFTable */
+#define CFTABLE_REGWIN_2K	0x08		/* 2k reg windows size */
+#define CFTABLE_REGWIN_4K	0x10		/* 4k reg windows size */
+#define CFTABLE_REGWIN_8K	0x20		/* 8k reg windows size */
+
+/* Vendor unique tuples are 0x80-0x8f. Within Broadcom we'll
+ * take one for HNBU, and use "extensions" (a la FUNCE) within it.
+ */
+
+#define	CISTPL_BRCM_HNBU	0x80
+
+/* Subtypes of BRCM_HNBU: */
+
+#define HNBU_SROMREV		0x00	/* A byte with sromrev, 1 if not present */
+#define HNBU_CHIPID		0x01	/* Two 16bit values: PCI vendor & device id */
+#define HNBU_BOARDREV		0x02	/* One byte board revision */
+#define HNBU_PAPARMS		0x03	/* PA parameters: 8 (sromrev == 1)
+					 * or 9 (sromrev > 1) bytes
+					 */
+#define HNBU_OEM		0x04	/* Eight bytes OEM data (sromrev == 1) */
+#define HNBU_CC			0x05	/* Default country code (sromrev == 1) */
+#define	HNBU_AA			0x06	/* Antennas available */
+#define	HNBU_AG			0x07	/* Antenna gain */
+#define HNBU_BOARDFLAGS		0x08	/* board flags (2 or 4 bytes) */
+#define HNBU_LEDS		0x09	/* LED set */
+#define HNBU_CCODE		0x0a	/* Country code (2 bytes ascii + 1 byte cctl)
+					 * in rev 2
+					 */
+#define HNBU_CCKPO		0x0b	/* 2 byte cck power offsets in rev 3 */
+#define HNBU_OFDMPO		0x0c	/* 4 byte 11g ofdm power offsets in rev 3 */
+#define HNBU_GPIOTIMER		0x0d	/* 2 bytes with on/off values in rev 3 */
+#define HNBU_PAPARMS5G		0x0e	/* 5G PA params */
+#define HNBU_ANT5G		0x0f	/* 4328 5G antennas available/gain */
+#define HNBU_RDLID		0x10	/* 2 byte USB remote downloader (RDL) product Id */
+#define HNBU_RSSISMBXA2G	0x11	/* 4328 2G RSSI mid pt sel & board switch arch,
+					 * 2 bytes, rev 3.
+					 */
+#define HNBU_RSSISMBXA5G	0x12	/* 4328 5G RSSI mid pt sel & board switch arch,
+					 * 2 bytes, rev 3.
+					 */
+#define HNBU_XTALFREQ		0x13	/* 4 byte Crystal frequency in kilohertz */
+#define HNBU_TRI2G		0x14	/* 4328 2G TR isolation, 1 byte */
+#define HNBU_TRI5G		0x15	/* 4328 5G TR isolation, 3 bytes */
+#define HNBU_RXPO2G		0x16	/* 4328 2G RX power offset, 1 byte */
+#define HNBU_RXPO5G		0x17	/* 4328 5G RX power offset, 1 byte */
+#define HNBU_BOARDNUM		0x18	/* board serial number, independent of mac addr */
+#define HNBU_MACADDR		0x19	/* mac addr override for the standard CIS LAN_NID */
+#define HNBU_RDLSN		0x1a	/* 2 bytes; serial # advertised in USB descriptor */
+#define HNBU_BOARDTYPE		0x1b	/* 2 bytes; boardtype */
+#define HNBU_LEDDC		0x1c	/* 2 bytes; LED duty cycle */
+#define HNBU_HNBUCIS		0x1d	/* what follows is proprietary HNBU CIS format */
+#define HNBU_PAPARMS_SSLPNPHY	0x1e	/* SSLPNPHY PA params */
+#define HNBU_RSSISMBXA2G_SSLPNPHY 0x1f /* SSLPNPHY RSSI mid pt sel & board switch arch */
+#define HNBU_RDLRNDIS		0x20	/* 1 byte; 1 = RDL advertises RNDIS config */
+#define HNBU_CHAINSWITCH	0x21	/* 2 byte; txchain, rxchain */
+#define HNBU_REGREV		0x22	/* 1 byte; */
+#define HNBU_FEM		0x23	/* 2 or 4 byte: 11n frontend specification */
+#define HNBU_PAPARMS_C0		0x24	/* 8 or 30 bytes: 11n pa parameter for chain 0 */
+#define HNBU_PAPARMS_C1		0x25	/* 8 or 30 bytes: 11n pa parameter for chain 1 */
+#define HNBU_PAPARMS_C2		0x26	/* 8 or 30 bytes: 11n pa parameter for chain 2 */
+#define HNBU_PAPARMS_C3		0x27	/* 8 or 30 bytes: 11n pa parameter for chain 3 */
+#define HNBU_PO_CCKOFDM		0x28	/* 6 or 18 bytes: cck2g/ofdm2g/ofdm5g power offset */
+#define HNBU_PO_MCS2G		0x29	/* 8 bytes: mcs2g power offset */
+#define HNBU_PO_MCS5GM		0x2a	/* 8 bytes: mcs5g mid band power offset */
+#define HNBU_PO_MCS5GLH		0x2b	/* 16 bytes: mcs5g low-high band power offset */
+#define HNBU_PO_CDD		0x2c	/* 2 bytes: cdd2g/5g power offset */
+#define HNBU_PO_STBC		0x2d	/* 2 bytes: stbc2g/5g power offset */
+#define HNBU_PO_40M		0x2e	/* 2 bytes: 40Mhz channel 2g/5g power offset */
+#define HNBU_PO_40MDUP		0x2f	/* 2 bytes: 40Mhz channel dup 2g/5g power offset */
+
+#define HNBU_RDLRWU		0x30	/* 1 byte; 1 = RDL advertises Remote Wake-up */
+#define HNBU_WPS		0x31	/* 1 byte; GPIO pin for WPS button */
+#define HNBU_USBFS		0x32	/* 1 byte; 1 = USB advertises FS mode only */
+#define HNBU_BRMIN		0x33	/* 4 byte bootloader min resource mask */
+#define HNBU_BRMAX		0x34	/* 4 byte bootloader max resource mask */
+#define HNBU_PATCH		0x35	/* bootloader patch addr(2b) & data(4b) pair */
+#define HNBU_CCKFILTTYPE	0x36	/* CCK digital filter selection options */
+#define HNBU_OFDMPO5G		0x37	/* 4 * 3 = 12 byte 11a ofdm power offsets in rev 3 */
+#define HNBU_ELNA2G             0x38
+#define HNBU_ELNA5G             0x39
+#define HNBU_TEMPTHRESH 0x3A /* 2 bytes
+					 * byte1 tempthresh
+					 * byte2 period(msb 4 bits) | hysteresis(lsb 4 bits)
+					 */
+#define HNBU_UUID 0x3B /* 16 Bytes Hex */
+
+#define HNBU_USBEPNUM		0x40	/* USB endpoint numbers */
+
+/* POWER PER RATE for SROM V9 */
+#define HNBU_CCKBW202GPO       0x41    /* 2 bytes each
+					 * CCK Power offsets for 20 MHz rates (11, 5.5, 2, 1Mbps)
+					 * cckbw202gpo cckbw20ul2gpo
+					 */
+
+#define HNBU_LEGOFDMBW202GPO    0x42    /* 4 bytes each
+					 * OFDM power offsets for 20 MHz Legacy rates
+					 * (54, 48, 36, 24, 18, 12, 9, 6 Mbps)
+					 * legofdmbw202gpo  legofdmbw20ul2gpo
+					 */
+
+#define HNBU_LEGOFDMBW205GPO   0x43    /* 4 bytes each
+					* 5G band: OFDM power offsets for 20 MHz Legacy rates
+					* (54, 48, 36, 24, 18, 12, 9, 6 Mbps)
+					* low subband : legofdmbw205glpo  legofdmbw20ul2glpo
+					* mid subband :legofdmbw205gmpo  legofdmbw20ul2gmpo
+					* high subband :legofdmbw205ghpo  legofdmbw20ul2ghpo
+					*/
+
+#define HNBU_MCS2GPO    0x44    /* 4 bytes each
+				     * mcs 0-7  power-offset. LSB nibble: m0, MSB nibble: m7
+				     * mcsbw202gpo  mcsbw20ul2gpo mcsbw402gpo
+				     */
+#define HNBU_MCS5GLPO    0x45    /* 4 bytes each
+				     * 5G low subband mcs 0-7 power-offset.
+				     * LSB nibble: m0, MSB nibble: m7
+				     * mcsbw205glpo  mcsbw20ul5glpo mcsbw405glpo
+				     */
+#define HNBU_MCS5GMPO    0x46    /* 4 bytes each
+				     * 5G mid subband mcs 0-7 power-offset.
+				     * LSB nibble: m0, MSB nibble: m7
+				     * mcsbw205gmpo  mcsbw20ul5gmpo mcsbw405gmpo
+				     */
+#define HNBU_MCS5GHPO    0x47    /* 4 bytes each
+				     * 5G high subband mcs 0-7 power-offset.
+				     * LSB nibble: m0, MSB nibble: m7
+				     * mcsbw205ghpo  mcsbw20ul5ghpo mcsbw405ghpo
+				     */
+#define HNBU_MCS32PO	0x48	/*  2 bytes total
+				 * mcs-32 power offset for each band/subband.
+				 * LSB nibble: 2G band, MSB nibble:
+				 * mcs322ghpo, mcs325gmpo, mcs325glpo, mcs322gpo
+				 */
+#define HNBU_LEG40DUPPO	0x49 /*  2 bytes total
+				* Additional power offset for Legacy Dup40 transmissions.
+				 * Applied in addition to legofdmbw20ulXpo, X=2g, 5gl, 5gm, or 5gh.
+				 * LSB nibble: 2G band, MSB nibble: 5G band high subband.
+				 * leg40dup5ghpo, leg40dup5gmpo, leg40dup5glpo, leg40dup2gpo
+				 */
+
+#define HNBU_PMUREGS	0x4a /* Variable length (5 bytes for each register)
+				* The setting of the ChipCtrl, PLL, RegulatorCtrl, Up/Down Timer and
+				* ResourceDependency Table registers.
+				*/
+
+#define HNBU_PATCH2		0x4b	/* bootloader TCAM patch addr(4b) & data(4b) pair.
+				* This is required for socram rev 15 onwards.
+				*/
+
+#define HNBU_USBRDY		0x4c	/* Variable length (up to 5 bytes).
+				* Indicates to the USB/HSIC host controller
+				* that the device is ready for enumeration.
+				*/
+
+#define HNBU_USBREGS	0x4d	/* Variable length
+				* The setting of the devcontrol, HSICPhyCtrl1 and HSICPhyCtrl2
+				* registers during the USB initialization.
+				*/
+
+#define HNBU_BLDR_TIMEOUT	0x4e	/* 2 bytes used by the HSIC bootloader to reset the chip
+				* on connect timeout.
+				* The delay after USBConnect before timing out while waiting
+				* for the dongle to receive the get_descriptor request.
+				*/
+#define HNBU_USBFLAGS		0x4f
+#define HNBU_PATCH_AUTOINC	0x50
+#define HNBU_MDIO_REGLIST	0x51
+#define HNBU_MDIOEX_REGLIST	0x52
+/* Unified OTP: tuple to embed USB manfid inside SDIO CIS */
+#define HNBU_UMANFID		0x53
+#define HNBU_PUBKEY		0x54	/* 128 byte; public key to validate downloaded FW */
+#define HNBU_WOWLGPIO       0x55   /* 1 byte: bit 7 initial polarity, bits 6..0 gpio pin */
+#define HNBU_MUXENAB		0x56	/* 1 byte to enable mux options */
+#define HNBU_GCI_CCR		0x57	/* GCI Chip control register */
+
+#define HNBU_FEM_CFG		0x58	/* FEM config */
+#define HNBU_ACPA_C0		0x59	/* ACPHY PA parameters: chain 0 */
+#define HNBU_ACPA_C1		0x5a	/* ACPHY PA parameters: chain 1 */
+#define HNBU_ACPA_C2		0x5b	/* ACPHY PA parameters: chain 2 */
+#define HNBU_MEAS_PWR		0x5c
+#define HNBU_PDOFF		0x5d
+#define HNBU_ACPPR_2GPO		0x5e	/* ACPHY Power-per-rate 2gpo */
+#define HNBU_ACPPR_5GPO		0x5f	/* ACPHY Power-per-rate 5gpo */
+#define HNBU_ACPPR_SBPO		0x60	/* ACPHY Power-per-rate sbpo */
+#define HNBU_NOISELVL		0x61
+#define HNBU_RXGAIN_ERR		0x62
+#define HNBU_AGBGA		0x63
+#define HNBU_USBDESC_COMPOSITE	0x64    /* USB WLAN/BT composite descriptor */
+#define HNBU_PATCH_AUTOINC8	0x65	/* Auto increment patch entry for 8 byte patching */
+#define HNBU_PATCH8		0x66	/* Patch entry for 8 byte patching */
+#define HNBU_ACRXGAINS_C0	0x67	/* ACPHY rxgains: chain 0 */
+#define HNBU_ACRXGAINS_C1	0x68	/* ACPHY rxgains: chain 1 */
+#define HNBU_ACRXGAINS_C2	0x69	/* ACPHY rxgains: chain 2 */
+#define HNBU_TXDUTY		0x6a	/* Tx duty cycle for ACPHY 5g 40/80 Mhz */
+#define HNBU_USBUTMI_CTL        0x6b    /* 2 byte USB UTMI/LDO Control */
+#define HNBU_PDOFF_2G		0x6c
+#define HNBU_USBSSPHY_UTMI_CTL0 0x6d    /* 4 byte USB SSPHY UTMI Control */
+#define HNBU_USBSSPHY_UTMI_CTL1 0x6e    /* 4 byte USB SSPHY UTMI Control */
+#define HNBU_USBSSPHY_UTMI_CTL2 0x6f    /* 4 byte USB SSPHY UTMI Control */
+#define HNBU_USBSSPHY_SLEEP0    0x70    /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_SLEEP1    0x71    /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_SLEEP2    0x72    /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_SLEEP3    0x73    /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_MDIO		0x74	/* USB SSPHY INIT regs setting */
+#define HNBU_USB30PHY_NOSS		0x75	/* USB30 NO Super Speed */
+#define HNBU_USB30PHY_U1U2		0x76	/* USB30 PHY U1U2 Enable */
+#define HNBU_USB30PHY_REGS		0x77	/* USB30 PHY REGs update */
+
+#define HNBU_SROM3SWRGN		0x80	/* 78 bytes; srom rev 3 s/w region without crc8
+					 * plus extra info appended.
+					 */
+#define HNBU_RESERVED		0x81	/* Reserved for non-BRCM post-mfg additions */
+#define HNBU_CUSTOM1		0x82	/* 4 byte; For non-BRCM post-mfg additions */
+#define HNBU_CUSTOM2		0x83	/* Reserved; For non-BRCM post-mfg additions */
+#define HNBU_ACPAPARAM		0x84	/* ACPHY PAPARAM */
+#define HNBU_ACPA_CCK		0x86	/* ACPHY PA trimming parameters: CCK */
+#define HNBU_ACPA_40		0x87	/* ACPHY PA trimming parameters: 40 */
+#define HNBU_ACPA_80		0x88	/* ACPHY PA trimming parameters: 80 */
+#define HNBU_ACPA_4080		0x89	/* ACPHY PA trimming parameters: 40/80 */
+#define HNBU_SUBBAND5GVER	0x8a	/* subband5gver */
+#define HNBU_PAPARAMBWVER	0x8b	/* paparambwver */
+
+#define HNBU_MCS5Gx1PO		0x8c
+#define HNBU_ACPPR_SB8080_PO		0x8d
+
+
+#endif /* !defined(LINUX_POSTMOGRIFY_REMOVAL) */
+
+/* sbtmstatelow */
+#define SBTML_INT_ACK		0x40000		/* ack the sb interrupt */
+#define SBTML_INT_EN		0x20000		/* enable sb interrupt */
+
+/* sbtmstatehigh */
+#define SBTMH_INT_STATUS	0x40000		/* sb interrupt status */
+
+#endif	/* _SBPCMCIA_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdio.h b/drivers/net/wireless/bcmdhd/include/sbsdio.h
new file mode 100644
index 0000000000000000000000000000000000000000..1395c32ae28e00e1314de80c27105465578f9b45
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdio.h
@@ -0,0 +1,168 @@
+/*
+ * SDIO device core hardware definitions.
+ * SDIO is a portion of the pcmcia core in core rev 3 - rev 8
+ *
+ * The SDIO core supports 1-bit and 4-bit SDIO modes as well as SPI mode.
+ *
+ * $Copyright Open 2003 Broadcom Corporation$
+ *
+ * $Id: sbsdio.h 383835 2013-02-07 23:32:39Z $
+ */
+
+#ifndef	_SBSDIO_H
+#define	_SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION		3	/* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS			0x10000		/* sprom command and status */
+#define SBSDIO_SPROM_INFO		0x10001		/* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW		0x10002		/* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH		0x10003 	/* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW		0x10004		/* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH		0x10005		/* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA		0x10006		/* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN		0x10007		/* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK		0x10008		/* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL		0x10009		/* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW		0x1000A		/* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID		0x1000B		/* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH		0x1000C		/* SB Address Window High (b31:b24)    */
+#define SBSDIO_FUNC1_FRAMECTRL		0x1000D		/* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR		0x1000E		/* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 	0x1000F		/* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO		0x10019		/* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI		0x1001A		/* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO		0x1001B		/* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI		0x1001C		/* Read Frame Byte Count High */
+#define SBSDIO_FUNC1_MESBUSYCTRL	0x1001D		/* MesBusyCtl at 0x1001D (rev 11) */
+
+#define SBSDIO_FUNC1_MISC_REG_START	0x10000 	/* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT	0x1001C 	/* f1 misc register end */
+
+/* Sdio Core Rev 12 */
+#define SBSDIO_FUNC1_WAKEUPCTRL			0x1001E
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK		0x1
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT	0
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK		0x2
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT		1
+#define SBSDIO_FUNC1_SLEEPCSR			0x1001F
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK		0x1
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT		0
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN		1
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK	0x2
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT	1
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE		0
+#define SBSDIO_SPROM_WRITE		1
+#define SBSDIO_SPROM_READ		2
+#define SBSDIO_SPROM_WEN		4
+#define SBSDIO_SPROM_WDS		7
+#define SBSDIO_SPROM_DONE		8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK			0x03		/* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK			0x04		/* deprecated in corerev 6 */
+#define	SROM_OTP			0x80		/* OTP present */
+
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL		0x01		/* or'd with onchip xtal_pu,
+							 * 1: power on oscillator
+							 * (for 4318 only)
+							 */
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK		0x7f		/* number of words - 1 for sd device
+							 * to wait before sending data to host
+							 */
+
+/* SBSDIO_MESBUSYCTRL */
+/* When the RX FIFO has fewer entries than this & MBE is set,
+ * the busy signal is asserted between data blocks.
+ */
+#define SBSDIO_MESBUSYCTRL_MASK		0x7f
+#define SBSDIO_MESBUSYCTRL_ENAB		0x80		/* Enable busy capability for MES access */
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY		0x01		/* 1: device will assert busy signal when
+							 * receiving CMD53
+							 */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02		/* 1: assertion of sdio interrupt is
+							 * synchronous to the sdio clock
+							 */
+#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04		/* 1: mask all interrupts to host
+							 * except the chipActive (rev 8)
+							 */
+#define SBSDIO_DEVCTL_PADS_ISO		0x08		/* 1: isolate internal sdio signals, put
+							 * external pads in tri-state; requires
+							 * sdio bus power cycle to clear (rev 9)
+							 */
+#define SBSDIO_DEVCTL_EN_F2_BLK_WATERMARK 0x10  /* Enable function 2 tx for each block */
+#define SBSDIO_DEVCTL_F2WM_ENAB		0x10		/* Enable F2 Watermark */
+#define SBSDIO_DEVCTL_NONDAT_PADS_ISO 	0x20		/* Isolate sdio clk and cmd (non-data) */
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP		0x01		/* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT			0x02		/* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP		0x04		/* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ		0x08		/* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ		0x10		/* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20		/* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL		0x40		/* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL			0x80		/* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL		0x40
+#define SBSDIO_Rev8_ALP_AVAIL		0x80
+#define SBSDIO_CSR_MASK			0x1F
+
+#define SBSDIO_AVBITS			(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval)		((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval)		(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval)		(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly)	(SBSDIO_ALPAV(regval) && \
+					(alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0		0x01		/* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1		0x02		/* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2		0x04		/* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD		0x08		/* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL		0x0f		/* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK		0x07FFF		/* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT	0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG	0x08000		/* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK		0x80		/* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK		0xff		/* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK		0xffU		/* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK		0xffff8000	/* Address bits from SBADDR regs */
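+
+/*
+ * Editor sketch (not part of the original header): a 32-bit backplane address
+ * is reached through F1 by programming its upper bits into the SBADDRLOW/MID/
+ * HIGH window registers and using the low 15 bits as the in-window offset;
+ * for 32-bit accesses bit 15 (SBSDIO_SB_ACCESS_2_4B_FLAG) is also set in the
+ * offset.  The two helper names below are editor-invented.
+ */
+#define SBSDIO_EX_WINDOW(addr)	((addr) & SBSDIO_SBWINDOW_MASK)
+#define SBSDIO_EX_OFFSET(addr)	((addr) & SBSDIO_SB_OFT_ADDR_MASK)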
+
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON		0x1000		/* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT		0x200		/* maximum bytes in one CIS */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT       0x078           /* maximum bytes OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF		/* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN	6		/* manfid tuple length, including tuple and
+							 * link bytes
+							 */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET		0x8		/* 8 control bytes first, CIS starts from
+							 * 8th byte
+							 */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX	64		/* sdio byte mode: maximum length of one
+							 * data command
+							 */
+
+#define SBSDIO_CORE_ADDR_MASK		0x1FFFF		/* sdio core function one address mask */
+
+#endif	/* _SBSDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
new file mode 100644
index 0000000000000000000000000000000000000000..dbbd2f6b784cc31ab05c6b46add3cf243f74c5b0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
@@ -0,0 +1,281 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
+ * device core support
+ *
+ * $Copyright Open 2005 Broadcom Corporation$
+ *
+ * $Id: sbsdpcmdev.h 416730 2013-08-06 09:33:19Z $
+ */
+
+#ifndef	_sbsdpcmdev_h_
+#define	_sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
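+
+/*
+ * Editor note: each use of PAD expands to a field named after the source line
+ * it appears on (e.g. "uint32 PAD[2];" on line 57 becomes "uint32 pad57[2];"),
+ * so a struct may contain many padding members without name collisions.
+ */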
+
+
+typedef volatile struct {
+	dma64regs_t	xmt;		/* dma tx */
+	uint32 PAD[2];
+	dma64regs_t	rcv;		/* dma rx */
+	uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+	dma64p_t dma64regs[2];
+	dma64diag_t dmafifo;		/* DMA Diagnostic Regs, 0x280-0x28c */
+	uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+	dma32regp_t dma32regs[2];	/* dma tx & rx, 0x200-0x23c */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x240-0x24c */
+	uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+	dma32regp_t dmaregs;		/* DMA Regs, 0x200-0x21c, rev8 */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x220-0x22c */
+	uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+	uint32 corecontrol;		/* CoreControl, 0x000, rev8 */
+	uint32 corestatus;		/* CoreStatus, 0x004, rev8  */
+	uint32 PAD[1];
+	uint32 biststatus;		/* BistStatus, 0x00c, rev8  */
+
+	/* PCMCIA access */
+	uint16 pcmciamesportaladdr;	/* PcmciaMesPortalAddr, 0x010, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciamesportalmask;	/* PcmciaMesPortalMask, 0x014, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciawrframebc;		/* PcmciaWrFrameBC, 0x018, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciaunderflowtimer;	/* PcmciaUnderflowTimer, 0x01c, rev8   */
+	uint16 PAD[1];
+
+	/* interrupt */
+	uint32 intstatus;		/* IntStatus, 0x020, rev8   */
+	uint32 hostintmask;		/* IntHostMask, 0x024, rev8   */
+	uint32 intmask;			/* IntSbMask, 0x028, rev8   */
+	uint32 sbintstatus;		/* SBIntStatus, 0x02c, rev8   */
+	uint32 sbintmask;		/* SBIntMask, 0x030, rev8   */
+	uint32 funcintmask;		/* SDIO Function Interrupt Mask, SDIO rev4 */
+	uint32 PAD[2];
+	uint32 tosbmailbox;		/* ToSBMailbox, 0x040, rev8   */
+	uint32 tohostmailbox;		/* ToHostMailbox, 0x044, rev8   */
+	uint32 tosbmailboxdata;		/* ToSbMailboxData, 0x048, rev8   */
+	uint32 tohostmailboxdata;	/* ToHostMailboxData, 0x04c, rev8   */
+
+	/* synchronized access to registers in SDIO clock domain */
+	uint32 sdioaccess;		/* SdioAccess, 0x050, rev8   */
+	uint32 PAD[3];
+
+	/* PCMCIA frame control */
+	uint8 pcmciaframectrl;		/* pcmciaFrameCtrl, 0x060, rev8   */
+	uint8 PAD[3];
+	uint8 pcmciawatermark;		/* pcmciaWaterMark, 0x064, rev8   */
+	uint8 PAD[155];
+
+	/* interrupt batching control */
+	uint32 intrcvlazy;		/* IntRcvLazy, 0x100, rev8 */
+	uint32 PAD[3];
+
+	/* counters */
+	uint32 cmd52rd;			/* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+	uint32 cmd52wr;			/* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+	uint32 cmd53rd;			/* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+	uint32 cmd53wr;			/* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+	uint32 abort;			/* AbortCount, 0x120, rev8, SDIO: aborts */
+	uint32 datacrcerror;		/* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+	uint32 rdoutofsync;		/* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+	uint32 wroutofsync;		/* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+	uint32 writebusy;		/* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+	uint32 readwait;		/* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+	uint32 readterm;		/* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+	uint32 writeterm;		/* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+	uint32 PAD[40];
+	uint32 clockctlstatus;		/* ClockCtlStatus, 0x1e0, rev8 */
+	uint32 PAD[7];
+
+	/* DMA engines */
+	volatile union {
+		pcmdma32_t pcm32;
+		sdiodma32_t sdiod32;
+		sdiodma64_t sdiod64;
+	} dma;
+
+	/* SDIO/PCMCIA CIS region */
+	char cis[512];			/* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+	/* PCMCIA function control registers */
+	char pcmciafcr[256];		/* PCMCIA FCR, 0x600-6ff, rev6 */
+	uint16 PAD[55];
+
+	/* PCMCIA backplane access */
+	uint16 backplanecsr;		/* BackplaneCSR, 0x76E, rev6 */
+	uint16 backplaneaddr0;		/* BackplaneAddr0, 0x770, rev6 */
+	uint16 backplaneaddr1;		/* BackplaneAddr1, 0x772, rev6 */
+	uint16 backplaneaddr2;		/* BackplaneAddr2, 0x774, rev6 */
+	uint16 backplaneaddr3;		/* BackplaneAddr3, 0x776, rev6 */
+	uint16 backplanedata0;		/* BackplaneData0, 0x778, rev6 */
+	uint16 backplanedata1;		/* BackplaneData1, 0x77a, rev6 */
+	uint16 backplanedata2;		/* BackplaneData2, 0x77c, rev6 */
+	uint16 backplanedata3;		/* BackplaneData3, 0x77e, rev6 */
+	uint16 PAD[31];
+
+	/* sprom "size" & "blank" info */
+	uint16 spromstatus;		/* SPROMStatus, 0x7BE, rev2 */
+	uint32 PAD[464];
+
+	/* Sonics SiliconBackplane registers */
+	sbconfig_t sbconfig;		/* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY		(1 << 0)	/* CIS Ready */
+#define CC_BPRESEN		(1 << 1)	/* CCCR RES signal causes backplane reset */
+#define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
+#define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation bit (rev 11) */
+#define CC_XMTDATAAVAIL_MODE	(1 << 4)	/* data avail generates an interrupt */
+#define CC_XMTDATAAVAIL_CTRL	(1 << 5)	/* data avail interrupt ctrl */
+
+/* corestatus */
+#define CS_PCMCIAMODE	(1 << 0)	/* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV	(1 << 1)	/* 1=smartDev enabled */
+#define CS_F2ENABLED	(1 << 2)	/* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK	0x7fff	/* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK	0x7fff	/* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK	0xffff	/* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK		0x07ff	/* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT	0		/* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT	4		/* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
+#define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
+#define	I_PC		(1 << 10)	/* descriptor error */
+#define	I_PD		(1 << 11)	/* data error */
+#define	I_DE		(1 << 12)	/* Descriptor protocol Error */
+#define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
+#define	I_RO		(1 << 14)	/* Receive fifo Overflow */
+#define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
+#define	I_RI		(1 << 16)	/* Receive Interrupt */
+#define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23)	/* bits in fifo */
+#define	I_XI		(1 << 24)	/* Transmit Interrupt */
+#define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
+#define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
+#define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
+#define I_CHIPACTIVE	(1 << 29)	/* chip transitioned from doze to active state */
+#define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
+#define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
+#define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)	/* DMA Errors */
+#define I_DMA		(I_RI | I_XI | I_ERRORS)
+
+/* sbintstatus */
+#define I_SB_SERR	(1 << 8)	/* Backplane SError (write) */
+#define I_SB_RESPERR	(1 << 9)	/* Backplane Response Error (read) */
+#define I_SB_SPROMERR	(1 << 10)	/* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK	0x000000ff	/* Read/Write Data Mask */
+#define SDA_ADDR_MASK	0x000fff00	/* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT	8		/* Read/Write Address Shift */
+#define SDA_WRITE	0x01000000	/* Write bit  */
+#define SDA_READ	0x00000000	/* Write bit cleared for Read */
+#define SDA_BUSY	0x80000000	/* Busy bit */
+
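+/*
+ * Editor sketch (not part of the original header): compose a sdioaccess write
+ * command word from a register address (an offset in one of the SDA_*_SPACE
+ * ranges below) and a data byte; software would then poll SDA_BUSY until the
+ * access completes.  SDA_EX_WRITE_CMD is an editor-invented name.
+ */
+#define SDA_EX_WRITE_CMD(addr, data) \
+	(SDA_WRITE | (((addr) << SDA_ADDR_SHIFT) & SDA_ADDR_MASK) | \
+	 ((data) & SDA_DATA_MASK))
+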
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE		0x000	/* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE	0x100	/* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE	0x200	/* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE	0x300	/* sdioAccess F1 core-specific register space */
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA	0x006	/* ChipControlData */
+#define SDA_CHIPCONTROLENAB	0x007	/* ChipControlEnable */
+#define SDA_F2WATERMARK		0x008	/* Function 2 Watermark */
+#define SDA_DEVICECONTROL	0x009	/* DeviceControl */
+#define SDA_SBADDRLOW		0x00a	/* SbAddrLow */
+#define SDA_SBADDRMID		0x00b	/* SbAddrMid */
+#define SDA_SBADDRHIGH		0x00c	/* SbAddrHigh */
+#define SDA_FRAMECTRL		0x00d	/* FrameCtrl */
+#define SDA_CHIPCLOCKCSR	0x00e	/* ChipClockCSR */
+#define SDA_SDIOPULLUP		0x00f	/* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW	0x019	/* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH	0x01a	/* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW	0x01b	/* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH	0x01c	/* SdioRdFrameBCHigh */
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK	0x7f	/* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK	0x80	/* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK	0xff	/* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK	0xff	/* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+#define SFC_CRC4WOOS	(1 << 2)	/* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL	(1 << 3)	/* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define PFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+
+/* intrcvlazy */
+#define	IRL_TO_MASK	0x00ffffff	/* timeout */
+#define	IRL_FC_MASK	0xff000000	/* frame count */
+#define	IRL_FC_SHIFT	24		/* frame count */
+
+/* rx header */
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC		0x0001		/* CRC error detected */
+#define RXF_WOOS	0x0002		/* write frame out of sync */
+#define RXF_WF_TERM	0x0004		/* write frame terminated */
+#define RXF_ABORT	0x0008		/* write frame aborted */
+#define RXF_DISCARD	(RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT)	/* bad frame */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN	4	/* HW frametag: 2 bytes len, 2 bytes check val */
+
+#if !defined(NDISVER) || (NDISVER < 0x0630)
+#define SDPCM_HWEXT_LEN	8
+#else
+#define SDPCM_HWEXT_LEN	0
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
+
+#endif	/* _sbsdpcmdev_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsocram.h b/drivers/net/wireless/bcmdhd/include/sbsocram.h
new file mode 100644
index 0000000000000000000000000000000000000000..97e1a32ed76dd5243acd2fa415cc0708f6fc771b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsocram.h
@@ -0,0 +1,182 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: sbsocram.h 481602 2014-05-29 22:43:34Z $
+ */
+
+#ifndef	_SBSOCRAM_H
+#define	_SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+/* Memcsocram core registers */
+typedef volatile struct sbsocramregs {
+	uint32	coreinfo;
+	uint32	bwalloc;
+	uint32	extracoreinfo;
+	uint32	biststat;
+	uint32	bankidx;
+	uint32	standbyctrl;
+
+	uint32	errlogstatus;	/* rev 6 */
+	uint32	errlogaddr;	/* rev 6 */
+	/* used for patching rev 3 & 5 */
+	uint32	cambankidx;
+	uint32	cambankstandbyctrl;
+	uint32	cambankpatchctrl;
+	uint32	cambankpatchtblbaseaddr;
+	uint32	cambankcmdreg;
+	uint32	cambankdatareg;
+	uint32	cambankmaskreg;
+	uint32	PAD[1];
+	uint32	bankinfo;	/* corev 8 */
+	uint32	bankpda;
+	uint32	PAD[14];
+	uint32	extmemconfig;
+	uint32	extmemparitycsr;
+	uint32	extmemparityerrdata;
+	uint32	extmemparityerrcnt;
+	uint32	extmemwrctrlandsize;
+	uint32	PAD[84];
+	uint32	workaround;
+	uint32	pwrctl;		/* corerev >= 2 */
+	uint32	PAD[133];
+	uint32  sr_control;     /* corerev >= 15 */
+	uint32  sr_status;      /* corerev >= 15 */
+	uint32  sr_address;     /* corerev >= 15 */
+	uint32  sr_data;        /* corerev >= 15 */
+} sbsocramregs_t;
+
+#endif	/* _LANGUAGE_ASSEMBLY */
+
+/* Register offsets */
+#define	SR_COREINFO		0x00
+#define	SR_BWALLOC		0x04
+#define	SR_BISTSTAT		0x0c
+#define	SR_BANKINDEX		0x10
+#define	SR_BANKSTBYCTL		0x14
+#define SR_PWRCTL		0x1e8
+
+/* Coreinfo register */
+#define	SRCI_PT_MASK		0x00070000	/* corerev >= 6; port type[18:16] */
+#define	SRCI_PT_SHIFT		16
+/* port types : SRCI_PT_<processorPT>_<backplanePT> */
+#define SRCI_PT_OCP_OCP		0
+#define SRCI_PT_AXI_OCP		1
+#define SRCI_PT_ARM7AHB_OCP	2
+#define SRCI_PT_CM3AHB_OCP	3
+#define SRCI_PT_AXI_AXI		4
+#define SRCI_PT_AHB_AXI		5
+/* corerev >= 3 */
+#define SRCI_LSS_MASK		0x00f00000
+#define SRCI_LSS_SHIFT		20
+#define SRCI_LRS_MASK		0x0f000000
+#define SRCI_LRS_SHIFT		24
+
+/* In corerev 0, the memory size is 2 to the power of the
+ * base plus 16 plus the contents of the memsize field plus 1.
+ */
+#define	SRCI_MS0_MASK		0xf
+#define SR_MS0_BASE		16
+
+/*
+ * In corerev 1 the bank size is 2 to the power of (the bank size field plus 14),
+ * and the memory size is the number of banks times the bank size.
+ * The same applies to the ROM size.
+ */
+#define	SRCI_ROMNB_MASK		0xf000
+#define	SRCI_ROMNB_SHIFT	12
+#define	SRCI_ROMBSZ_MASK	0xf00
+#define	SRCI_ROMBSZ_SHIFT	8
+#define	SRCI_SRNB_MASK		0xf0
+#define	SRCI_SRNB_SHIFT		4
+#define	SRCI_SRBSZ_MASK		0xf
+#define	SRCI_SRBSZ_SHIFT	0
+
+#define SR_BSZ_BASE		14
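+/*
+ * Illustrative sketch of the corerev 1 calculation described above (example
+ * only, not part of the original header):
+ *
+ *      uint nb  = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ *      uint bsz = (coreinfo & SRCI_SRBSZ_MASK) >> SRCI_SRBSZ_SHIFT;
+ *      uint32 memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+ */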
+
+/* Standby control register */
+#define	SRSC_SBYOVR_MASK	0x80000000
+#define	SRSC_SBYOVR_SHIFT	31
+#define	SRSC_SBYOVRVAL_MASK	0x60000000
+#define	SRSC_SBYOVRVAL_SHIFT	29
+#define	SRSC_SBYEN_MASK		0x01000000	/* rev >= 3 */
+#define	SRSC_SBYEN_SHIFT	24
+
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK	0x00000010	/* rev >= 3 */
+#define SRPC_PMU_STBYDIS_SHIFT	4
+#define SRPC_STBYOVRVAL_MASK	0x00000008
+#define SRPC_STBYOVRVAL_SHIFT	3
+#define SRPC_STBYOVR_MASK	0x00000007
+#define SRPC_STBYOVR_SHIFT	0
+
+/* Extra core capability register */
+#define SRECC_NUM_BANKS_MASK   0x000000F0
+#define SRECC_NUM_BANKS_SHIFT  4
+#define SRECC_BANKSIZE_MASK    0x0000000F
+#define SRECC_BANKSIZE_SHIFT   0
+
+#define SRECC_BANKSIZE(value)	 (1 << (value))
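+/*
+ * Illustrative example (not part of the original header): the bank count and
+ * bank-size fields of extracoreinfo are extracted as
+ *
+ *      nbanks = (extracoreinfo & SRECC_NUM_BANKS_MASK) >> SRECC_NUM_BANKS_SHIFT;
+ *      bsz    = (extracoreinfo & SRECC_BANKSIZE_MASK) >> SRECC_BANKSIZE_SHIFT;
+ *
+ * with SRECC_BANKSIZE(bsz) giving the bank size as a power of two.
+ */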
+
+/* CAM bank patch control */
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS   0x0001FFFC
+#define SRP_VALID     0x8000
+
+/* CAM bank command reg */
+#define SRCMD_WRITE  0x00020000
+#define SRCMD_READ   0x00010000
+#define SRCMD_DONE   0x80000000
+
+#define SRCMD_DONE_DLY	1000
+
+/* bankidx and bankinfo reg defines corerev >= 8 */
+#define SOCRAM_BANKINFO_SZMASK		0x7f
+#define SOCRAM_BANKIDX_ROM_MASK		0x100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT	8
+/* socram bankinfo memtype */
+#define SOCRAM_MEMTYPE_RAM		0
+#define SOCRAM_MEMTYPE_R0M		1
+#define SOCRAM_MEMTYPE_DEVRAM		2
+
+#define	SOCRAM_BANKINFO_REG		0x40
+#define	SOCRAM_BANKIDX_REG		0x10
+#define	SOCRAM_BANKINFO_STDBY_MASK	0x400
+#define	SOCRAM_BANKINFO_STDBY_TIMER	0x800
+
+/* bankinfo rev >= 10 */
+#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT		13
+#define SOCRAM_BANKINFO_DEVRAMSEL_MASK		0x2000
+#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT		14
+#define SOCRAM_BANKINFO_DEVRAMPRO_MASK		0x4000
+#define SOCRAM_BANKINFO_SLPSUPP_SHIFT		15
+#define SOCRAM_BANKINFO_SLPSUPP_MASK		0x8000
+#define SOCRAM_BANKINFO_RETNTRAM_SHIFT		16
+#define SOCRAM_BANKINFO_RETNTRAM_MASK		0x00010000
+#define SOCRAM_BANKINFO_PDASZ_SHIFT		17
+#define SOCRAM_BANKINFO_PDASZ_MASK		0x003E0000
+#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT	24
+#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK	0x01000000
+
+/* extracoreinfo register */
+#define SOCRAM_DEVRAMBANK_MASK		0xF000
+#define SOCRAM_DEVRAMBANK_SHIFT		12
+
+/* bank info to calculate bank size */
+#define   SOCRAM_BANKINFO_SZBASE          8192
+#define SOCRAM_BANKSIZE_SHIFT         13      /* SOCRAM_BANKINFO_SZBASE */
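+/*
+ * Illustrative sketch (example only): for corerev >= 8 parts the per-bank
+ * size is derived from the bankinfo register as
+ *
+ *      banksize = ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1) << SOCRAM_BANKSIZE_SHIFT;
+ *
+ * i.e. (size field + 1) * SOCRAM_BANKINFO_SZBASE bytes, which is summed over
+ * all banks to obtain the total SOCRAM size.
+ */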
+
+
+#endif	/* _SBSOCRAM_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdio.h b/drivers/net/wireless/bcmdhd/include/sdio.h
new file mode 100644
index 0000000000000000000000000000000000000000..b8586b99cd88b20ea620e838ebb1d728375566fb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdio.h
@@ -0,0 +1,604 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: sdio.h 416730 2013-08-06 09:33:19Z $
+ */
+
+#ifndef	_SDIO_H
+#define	_SDIO_H
+
+#ifdef BCMSDIO
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+	uint8	cccr_sdio_rev;		/* RO, cccr and sdio revision */
+	uint8	sd_rev;			/* RO, sd spec revision */
+	uint8	io_en;			/* I/O enable */
+	uint8	io_rdy;			/* I/O ready reg */
+	uint8	intr_ctl;		/* Master and per function interrupt enable control */
+	uint8	intr_status;		/* RO, interrupt pending status */
+	uint8	io_abort;		/* read/write abort or reset all functions */
+	uint8	bus_inter;		/* bus interface control */
+	uint8	capability;		/* RO, card capability */
+
+	uint8	cis_base_low;		/* 0x9 RO, common CIS base address, LSB */
+	uint8	cis_base_mid;
+	uint8	cis_base_high;		/* 0xB RO, common CIS base address, MSB */
+
+	/* suspend/resume registers */
+	uint8	bus_suspend;		/* 0xC */
+	uint8	func_select;		/* 0xD */
+	uint8	exec_flag;		/* 0xE */
+	uint8	ready_flag;		/* 0xF */
+
+	uint8	fn0_blk_size[2];	/* 0x10(LSB), 0x11(MSB) */
+
+	uint8	power_control;		/* 0x12 (SDIO version 1.10) */
+
+	uint8	speed_control;		/* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV			0x00
+#define SDIOD_CCCR_SDREV		0x01
+#define SDIOD_CCCR_IOEN			0x02
+#define SDIOD_CCCR_IORDY		0x03
+#define SDIOD_CCCR_INTEN		0x04
+#define SDIOD_CCCR_INTPEND		0x05
+#define SDIOD_CCCR_IOABORT		0x06
+#define SDIOD_CCCR_BICTRL		0x07
+#define SDIOD_CCCR_CAPABLITIES		0x08
+#define SDIOD_CCCR_CISPTR_0		0x09
+#define SDIOD_CCCR_CISPTR_1		0x0A
+#define SDIOD_CCCR_CISPTR_2		0x0B
+#define SDIOD_CCCR_BUSSUSP		0x0C
+#define SDIOD_CCCR_FUNCSEL		0x0D
+#define SDIOD_CCCR_EXECFLAGS		0x0E
+#define SDIOD_CCCR_RDYFLAGS		0x0F
+#define SDIOD_CCCR_BLKSIZE_0		0x10
+#define SDIOD_CCCR_BLKSIZE_1		0x11
+#define SDIOD_CCCR_POWER_CONTROL	0x12
+#define SDIOD_CCCR_SPEED_CONTROL	0x13
+#define SDIOD_CCCR_UHSI_SUPPORT		0x14
+#define SDIOD_CCCR_DRIVER_STRENGTH	0x15
+#define SDIOD_CCCR_INTR_EXTN		0x16
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_CARDCAP		0xf0
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT	0x02
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT	0x04
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC	0x08
+#define SDIOD_CCCR_BRCM_CARDCTL			0xf1
+#define SDIOD_CCCR_BRCM_SEPINT			0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK	0xf0	/* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK	0x0f	/* CCCR format version number */
+#define SDIO_SPEC_VERSION_3_0	0x40	/* SDIO spec version 3.0 */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK		0x0f	/* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1	0x02	/* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2	0x04	/* function 2 I/O enable */
+
+/* io_rdy */
+#define SDIO_FUNC_READY_1	0x02	/* function 1 I/O ready */
+#define SDIO_FUNC_READY_2	0x04	/* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN	0x1	/* interrupt enable master */
+#define INTR_CTL_FUNC1_EN	0x2	/* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN	0x4	/* interrupt enable for function 2 */
+
+/* intr_status */
+#define INTR_STATUS_FUNC1	0x2	/* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2	0x4	/* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL	0x08	/* I/O card reset */
+#define IO_ABORT_FUNC_MASK	0x07	/* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS	0x80	/* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP	0x40	/* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN	0x20	/* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK	0x03	/* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT	0x02	/* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT	0x00	/* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS		0x80	/* 4-bit support for low speed card */
+#define SDIO_CAP_LSC		0x40	/* low speed card */
+#define SDIO_CAP_E4MI		0x20	/* enable interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_S4MI		0x10	/* support interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_SBS		0x08	/* support suspend/resume */
+#define SDIO_CAP_SRW		0x04	/* support read wait */
+#define SDIO_CAP_SMB		0x02	/* support multi-block transfer */
+#define SDIO_CAP_SDC		0x01	/* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC		0x01	/* supports master power control (RO) */
+#define SDIO_POWER_EMPC		0x02	/* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS		0x01	/* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS		0x02	/* enable high-speed [clocking] mode (RW) */
+#define SDIO_SPEED_UHSI_DDR50	   0x08
+
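+/*
+ * Note on the _M/_S pairs used below (explanatory comment, not part of the
+ * original register definitions): BITFIELD_MASK(w), defined in bcmutils.h,
+ * expands to a w-bit mask of ones. Each field is described by an unshifted
+ * mask (_M) and a bit position (_S), and is typically accessed as
+ *
+ *      get: (reg >> FIELD_S) & FIELD_M
+ *      set: (reg & ~(FIELD_M << FIELD_S)) | (((val) & FIELD_M) << FIELD_S)
+ *
+ * (helper macros such as GFIELD()/SFIELD() in the host-controller code
+ * follow this pattern).
+ */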
+/* for setting bus speed in card: 0x13h */
+#define SDIO_BUS_SPEED_UHSISEL_M	BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSISEL_S	1
+
+/* for getting bus speed cap in card: 0x14h */
+#define SDIO_BUS_SPEED_UHSICAP_M	BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSICAP_S	0
+
+/* for getting driver type CAP in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_CAP_M	BITFIELD_MASK(3)
+#define SDIO_BUS_DRVR_TYPE_CAP_S	0
+
+/* for setting driver type selection in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_SEL_M	BITFIELD_MASK(2)
+#define SDIO_BUS_DRVR_TYPE_SEL_S	4
+
+/* for getting async int support in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_CAP_M	BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_CAP_S	0
+
+/* for setting async int selection in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_SEL_M	BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_SEL_S	1
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK	0x01	/* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE		0x02	/* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI	0x04	/* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+	uint8	devctr;			/* device interface, CSA control */
+	uint8	ext_dev;		/* extended standard I/O device type code */
+	uint8	pwr_sel;		/* power selection support */
+	uint8	PAD[6];			/* reserved */
+
+	uint8	cis_low;		/* CIS LSB */
+	uint8	cis_mid;
+	uint8	cis_high;		/* CIS MSB */
+	uint8	csa_low;		/* code storage area, LSB */
+	uint8	csa_mid;
+	uint8	csa_high;		/* code storage area, MSB */
+	uint8	csa_dat_win;		/* data access window to function */
+
+	uint8	fnx_blk_size[2];	/* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_FUNCS			8
+#define SDIOD_MAX_IOFUNCS		7
+
+/* SDIO Device FBR Start Address  */
+#define SDIOD_FBR_STARTADDR		0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE			0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n)		((n) * 0x100)
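+/*
+ * Example (illustrative only): the block-size LSB register of function 2
+ * lives at SDIOD_FBR_BASE(2) + SDIOD_FBR_BLKSIZE_0 = 0x200 + 0x10 = 0x210
+ * within function 0 register space.
+ */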
+
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR		0x00	/* basic info for function */
+#define SDIOD_FBR_EXT_DEV		0x01	/* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL		0x02	/* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0		0x09
+#define SDIOD_FBR_CISPTR_1		0x0A
+#define SDIOD_FBR_CISPTR_2		0x0B
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0		0x0C
+#define SDIOD_FBR_CSA_ADDR_1		0x0D
+#define SDIOD_FBR_CSA_ADDR_2		0x0E
+#define SDIOD_FBR_CSA_DATA		0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0		0x10
+#define SDIOD_FBR_BLKSIZE_1		0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC	0x0f	/* device interface code */
+#define SDIOD_FBR_DECVTR_CSA	0x40	/* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN	0x80	/* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE		0	/* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART		1
+#define SDIOD_DIC_BLUETOOTH_A	2
+#define SDIOD_DIC_BLUETOOTH_B	3
+#define SDIOD_DIC_GPS		4
+#define SDIOD_DIC_CAMERA	5
+#define SDIOD_DIC_PHS		6
+#define SDIOD_DIC_WLAN		7
+#define SDIOD_DIC_EXT		0xf	/* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS	0x01	/* supports power selection */
+#define SDIOD_PWR_SEL_EPS	0x02	/* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0		0
+#define SDIO_FUNC_1		1
+#define SDIO_FUNC_2		2
+#define SDIO_FUNC_3		3
+#define SDIO_FUNC_4		4
+#define SDIO_FUNC_5		5
+#define SDIO_FUNC_6		6
+#define SDIO_FUNC_7		7
+
+#define SD_CARD_TYPE_UNKNOWN	0	/* bad type or unrecognized */
+#define SD_CARD_TYPE_IO		1	/* IO only card */
+#define SD_CARD_TYPE_MEMORY	2	/* memory only card */
+#define SD_CARD_TYPE_COMBO	3	/* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE	2048	/* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE	1	/* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE		31
+#define CARDREG_STATUS_BIT_COMCRCERROR		23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND	22
+#define CARDREG_STATUS_BIT_ERROR		19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3	12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2	11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1	10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0	9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR	4
+
+
+
+#define SD_CMD_GO_IDLE_STATE		0	/* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND		1
+#define SD_CMD_MMC_SET_RCA		3
+#define SD_CMD_IO_SEND_OP_COND		5	/* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD	7
+#define SD_CMD_SEND_CSD			9
+#define SD_CMD_SEND_CID			10
+#define SD_CMD_STOP_TRANSMISSION	12
+#define SD_CMD_SEND_STATUS		13
+#define SD_CMD_GO_INACTIVE_STATE	15
+#define SD_CMD_SET_BLOCKLEN		16
+#define SD_CMD_READ_SINGLE_BLOCK	17
+#define SD_CMD_READ_MULTIPLE_BLOCK	18
+#define SD_CMD_WRITE_BLOCK		24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK	25
+#define SD_CMD_PROGRAM_CSD		27
+#define SD_CMD_SET_WRITE_PROT		28
+#define SD_CMD_CLR_WRITE_PROT		29
+#define SD_CMD_SEND_WRITE_PROT		30
+#define SD_CMD_ERASE_WR_BLK_START	32
+#define SD_CMD_ERASE_WR_BLK_END		33
+#define SD_CMD_ERASE			38
+#define SD_CMD_LOCK_UNLOCK		42
+#define SD_CMD_IO_RW_DIRECT		52	/* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED		53	/* mandatory for SDIO */
+#define SD_CMD_APP_CMD			55
+#define SD_CMD_GEN_CMD			56
+#define SD_CMD_READ_OCR			58
+#define SD_CMD_CRC_ON_OFF		59	/* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS		13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS	22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT	23
+#define SD_ACMD_SD_SEND_OP_COND		41
+#define SD_ACMD_SET_CLR_CARD_DETECT	42
+#define SD_ACMD_SEND_SCR		51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ		0   /* Read_Write: Read */
+#define SD_IO_OP_WRITE		1   /* Read_Write: Write */
+#define SD_IO_RW_NORMAL		0   /* no read-after-write */
+#define SD_IO_RW_RAW		1   /* read-after-write (RAW) */
+#define SD_IO_BYTE_MODE		0   /* byte mode */
+#define SD_IO_BLOCK_MODE	1   /* block mode */
+#define SD_IO_FIXED_ADDRESS	0   /* fixed address */
+#define SD_IO_INCREMENT_ADDRESS	1   /* incrementing address */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+	 (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+	 (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
+
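+/*
+ * Illustrative usage (example only, not part of the original header): a
+ * single-byte CMD52 write of 'val' to function-1 register 'addr', and a
+ * 64-byte CMD53 incrementing-address byte-mode read from the same function,
+ * would build their command arguments as
+ *
+ *      uint32 arg52 = SDIO_IO_RW_DIRECT_ARG(SD_IO_OP_WRITE, SD_IO_RW_NORMAL,
+ *                                           SDIO_FUNC_1, addr, val);
+ *      uint32 arg53 = SDIO_IO_RW_EXTENDED_ARG(SD_IO_OP_READ, SD_IO_BYTE_MODE,
+ *                                             SDIO_FUNC_1, addr,
+ *                                             SD_IO_INCREMENT_ADDRESS, 64);
+ */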
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE			0
+#define SD_RSP_NO_1			1
+#define SD_RSP_NO_2			2
+#define SD_RSP_NO_3			3
+#define SD_RSP_NO_4			4
+#define SD_RSP_NO_5			5
+#define SD_RSP_NO_6			6
+
+	/* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR	0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND	0x4000
+#define SD_RSP_MR6_ERROR		0x2000
+
+	/* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT			0x80
+#define SD_RSP_MR1_PARAMETER_ERROR	0x40
+#define SD_RSP_MR1_RFU5			0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR	0x10
+#define SD_RSP_MR1_COM_CRC_ERROR	0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND	0x04
+#define SD_RSP_MR1_RFU1			0x02
+#define SD_RSP_MR1_IDLE_STATE		0x01
+
+	/* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR		0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND	0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1	0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0	0x10
+#define SD_RSP_R5_ERROR			0x08
+#define SD_RSP_R5_RFU			0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR	0x02
+#define SD_RSP_R5_OUT_OF_RANGE		0x01
+
+#define SD_RSP_R5_ERRBITS		0xCB
+
+
+/* ------------------------------------------------
+ *  SDIO Commands and responses
+ *
+ *  I/O only commands are:
+ *      CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0		0
+#define SDIOH_CMD_3		3
+#define SDIOH_CMD_5		5
+#define SDIOH_CMD_7		7
+#define SDIOH_CMD_11		11
+#define SDIOH_CMD_14		14
+#define SDIOH_CMD_15		15
+#define SDIOH_CMD_19		19
+#define SDIOH_CMD_52		52
+#define SDIOH_CMD_53		53
+#define SDIOH_CMD_59		59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE		0
+#define SDIOH_RSP_R1		1
+#define SDIOH_RSP_R2		2
+#define SDIOH_RSP_R3		3
+#define SDIOH_RSP_R4		4
+#define SDIOH_RSP_R5		5
+#define SDIOH_RSP_R6		6
+
+/*
+ *  SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS	0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * 	CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M		BITFIELD_MASK(24)
+#define CMD5_OCR_S		0
+
+#define CMD5_S18R_M		BITFIELD_MASK(1)
+#define CMD5_S18R_S		24
+
+#define CMD7_RCA_M		BITFIELD_MASK(16)
+#define CMD7_RCA_S		16
+
+#define CMD14_RCA_M		BITFIELD_MASK(16)
+#define CMD14_RCA_S		16
+#define CMD14_SLEEP_M		BITFIELD_MASK(1)
+#define CMD14_SLEEP_S		15
+
+#define CMD_15_RCA_M		BITFIELD_MASK(16)
+#define CMD_15_RCA_S		16
+
+#define CMD52_DATA_M		BITFIELD_MASK(8)  /* Bits [7:0]    - Write Data/Stuff bits of CMD52
+						   */
+#define CMD52_DATA_S		0
+#define CMD52_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD52_REG_ADDR_S	9
+#define CMD52_RAW_M		BITFIELD_MASK(1)  /* Bit  27       - Read after Write flag */
+#define CMD52_RAW_S		27
+#define CMD52_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD52_FUNCTION_S	28
+#define CMD52_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD52_RW_FLAG_S		31
+
+
+#define CMD53_BYTE_BLK_CNT_M	BITFIELD_MASK(9) /* Bits [8:0]     - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S	0
+#define CMD53_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD53_REG_ADDR_S	9
+#define CMD53_OP_CODE_M		BITFIELD_MASK(1)  /* Bit  26       - R/W Operation Code */
+#define CMD53_OP_CODE_S		26
+#define CMD53_BLK_MODE_M	BITFIELD_MASK(1)  /* Bit  27       - Block Mode */
+#define CMD53_BLK_MODE_S	27
+#define CMD53_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD53_FUNCTION_S	28
+#define CMD53_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD53_RW_FLAG_S		31
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ *  -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M		BITFIELD_MASK(24) /* Bits [23:0]  - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S		0
+
+#define RSP4_S18A_M			BITFIELD_MASK(1) /* Bits [23:0]  - Card's OCR Bits [23:0] */
+#define RSP4_S18A_S			24
+
+#define RSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S		24
+#define RSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  27      - Memory present */
+#define RSP4_MEM_PRESENT_S	27
+#define RSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S	28
+#define RSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  31      - SDIO card ready */
+#define RSP4_CARD_READY_S	31
+
+#define RSP6_STATUS_M		BITFIELD_MASK(16) /* Bits [15:0]  - Card status bits [19,22,23,12:0]
+						   */
+#define RSP6_STATUS_S		0
+#define RSP6_IO_RCA_M		BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S		16
+
+#define RSP1_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3       - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S	3
+#define RSP1_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5       - Card expects ACMD */
+#define RSP1_APP_CMD_S		5
+#define RSP1_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8       - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S	8
+#define RSP1_CURR_STATE_M	BITFIELD_MASK(4)  /* Bits [12:9] - State of card
+						   * when Cmd was received
+						   */
+#define RSP1_CURR_STATE_S	9
+#define RSP1_EARSE_RESET_M	BITFIELD_MASK(1)  /* Bit 13   - Erase seq cleared */
+#define RSP1_EARSE_RESET_S	13
+#define RSP1_CARD_ECC_DISABLE_M	BITFIELD_MASK(1)  /* Bit 14   - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S	14
+#define RSP1_WP_ERASE_SKIP_M	BITFIELD_MASK(1)  /* Bit 15   - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S	15
+#define RSP1_CID_CSD_OVERW_M	BITFIELD_MASK(1)  /* Bit 16   - Illegal write to CID or R/O bits
+						   * of CSD
+						   */
+#define RSP1_CID_CSD_OVERW_S	16
+#define RSP1_ERROR_M		BITFIELD_MASK(1)  /* Bit 19   - General/Unknown error */
+#define RSP1_ERROR_S		19
+#define RSP1_CC_ERROR_M		BITFIELD_MASK(1)  /* Bit 20   - Internal Card Control error */
+#define RSP1_CC_ERROR_S		20
+#define RSP1_CARD_ECC_FAILED_M	BITFIELD_MASK(1)  /* Bit 21   - Card internal ECC failed
+						   * to correct data
+						   */
+#define RSP1_CARD_ECC_FAILED_S	21
+#define RSP1_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit 22   - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S	22
+#define RSP1_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 23   - CRC check of previous command failed
+						   */
+#define RSP1_COM_CRC_ERROR_S	23
+#define RSP1_LOCK_UNLOCK_FAIL_M	BITFIELD_MASK(1)  /* Bit 24   - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S	24
+#define RSP1_CARD_LOCKED_M	BITFIELD_MASK(1)  /* Bit 25   - Card locked by the host */
+#define RSP1_CARD_LOCKED_S	25
+#define RSP1_WP_VIOLATION_M	BITFIELD_MASK(1)  /* Bit 26   - Attempt to program
+						   * write-protected blocks
+						   */
+#define RSP1_WP_VIOLATION_S	26
+#define RSP1_ERASE_PARAM_M	BITFIELD_MASK(1)  /* Bit 27   - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S	27
+#define RSP1_ERASE_SEQ_ERR_M	BITFIELD_MASK(1)  /* Bit 28   - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S	28
+#define RSP1_BLK_LEN_ERR_M	BITFIELD_MASK(1)  /* Bit 29   - Block length error */
+#define RSP1_BLK_LEN_ERR_S	29
+#define RSP1_ADDR_ERR_M		BITFIELD_MASK(1)  /* Bit 30   - Misaligned address */
+#define RSP1_ADDR_ERR_S		30
+#define RSP1_OUT_OF_RANGE_M	BITFIELD_MASK(1)  /* Bit 31   - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S	31
+
+
+#define RSP5_DATA_M		BITFIELD_MASK(8)  /* Bits [0:7]   - data */
+#define RSP5_DATA_S		0
+#define RSP5_FLAGS_M		BITFIELD_MASK(8)  /* Bit  [15:8]  - Rsp flags */
+#define RSP5_FLAGS_S		8
+#define RSP5_STUFF_M		BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S		16
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M	BITFIELD_MASK(16) /* Bits [15:0]    - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S	0
+#define SPIRSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [18:16]   - Stuff bits */
+#define SPIRSP4_STUFF_S		16
+#define SPIRSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  19        - Memory present */
+#define SPIRSP4_MEM_PRESENT_S	19
+#define SPIRSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [22:20]   - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S	20
+#define SPIRSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  23        - SDIO card ready */
+#define SPIRSP4_CARD_READY_S	23
+#define SPIRSP4_IDLE_STATE_M	BITFIELD_MASK(1)  /* Bit  24        - idle state */
+#define SPIRSP4_IDLE_STATE_S	24
+#define SPIRSP4_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S	26
+#define SPIRSP4_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S	27
+#define SPIRSP4_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP4_FUNC_NUM_ERROR_S	28
+#define SPIRSP4_PARAM_ERROR_M	BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S	30
+#define SPIRSP4_START_BIT_M	BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP4_START_BIT_S	31
+
+#define SPIRSP5_DATA_M			BITFIELD_MASK(8)  /* Bits [23:16]   - R/W Data */
+#define SPIRSP5_DATA_S			16
+#define SPIRSP5_IDLE_STATE_M		BITFIELD_MASK(1)  /* Bit  24        - Idle state */
+#define SPIRSP5_IDLE_STATE_S		24
+#define SPIRSP5_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP5_ILLEGAL_CMD_S		26
+#define SPIRSP5_COM_CRC_ERROR_M		BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP5_COM_CRC_ERROR_S		27
+#define SPIRSP5_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP5_FUNC_NUM_ERROR_S	28
+#define SPIRSP5_PARAM_ERROR_M		BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP5_PARAM_ERROR_S		30
+#define SPIRSP5_START_BIT_M		BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP5_START_BIT_S		31
+
+/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
+#define RSP6STAT_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3	- Authentication seq error
+							   */
+#define RSP6STAT_AKE_SEQ_ERROR_S	3
+#define RSP6STAT_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5	- Card expects ACMD */
+#define RSP6STAT_APP_CMD_S		5
+#define RSP6STAT_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8	- Ready for data
+							   * (buff empty)
+							   */
+#define RSP6STAT_READY_FOR_DATA_S	8
+#define RSP6STAT_CURR_STATE_M		BITFIELD_MASK(4)  /* Bits [12:9] - Card state at
+							   * Cmd reception
+							   */
+#define RSP6STAT_CURR_STATE_S		9
+#define RSP6STAT_ERROR_M		BITFIELD_MASK(1)  /* Bit 13  - General/Unknown error Bit 19
+							   */
+#define RSP6STAT_ERROR_S		13
+#define RSP6STAT_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit 14  - Illegal cmd for
+							   * card state Bit 22
+							   */
+#define RSP6STAT_ILLEGAL_CMD_S		14
+#define RSP6STAT_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 15  - CRC previous command
+							   * failed Bit 23
+							   */
+#define RSP6STAT_COM_CRC_ERROR_S	15
+
+#define SDIOH_XFER_TYPE_READ    SD_IO_OP_READ
+#define SDIOH_XFER_TYPE_WRITE   SD_IO_OP_WRITE
+
+/* command issue options */
+#define CMD_OPTION_DEFAULT	0
+#define CMD_OPTION_TUNING	1
+
+#endif /* def BCMSDIO */
+#endif /* _SDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/net/wireless/bcmdhd/include/sdioh.h
new file mode 100644
index 0000000000000000000000000000000000000000..fdb43f24d7ca6db323087ee16b2f99dbe42821cd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdioh.h
@@ -0,0 +1,427 @@
+/*
+ * SDIO Host Controller Spec header file
+ * Register map and definitions for the Standard Host Controller
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: sdioh.h 345499 2012-07-18 06:59:05Z $
+ */
+
+#ifndef	_SDIOH_H
+#define	_SDIOH_H
+
+#define SD_SysAddr			0x000
+#define SD_BlockSize			0x004
+#define SD_BlockCount 			0x006
+#define SD_Arg0				0x008
+#define SD_Arg1 			0x00A
+#define SD_TransferMode			0x00C
+#define SD_Command 			0x00E
+#define SD_Response0			0x010
+#define SD_Response1 			0x012
+#define SD_Response2			0x014
+#define SD_Response3 			0x016
+#define SD_Response4			0x018
+#define SD_Response5 			0x01A
+#define SD_Response6			0x01C
+#define SD_Response7 			0x01E
+#define SD_BufferDataPort0		0x020
+#define SD_BufferDataPort1 		0x022
+#define SD_PresentState			0x024
+#define SD_HostCntrl			0x028
+#define SD_PwrCntrl			0x029
+#define SD_BlockGapCntrl 		0x02A
+#define SD_WakeupCntrl 			0x02B
+#define SD_ClockCntrl			0x02C
+#define SD_TimeoutCntrl 		0x02E
+#define SD_SoftwareReset		0x02F
+#define SD_IntrStatus			0x030
+#define SD_ErrorIntrStatus 		0x032
+#define SD_IntrStatusEnable		0x034
+#define SD_ErrorIntrStatusEnable 	0x036
+#define SD_IntrSignalEnable		0x038
+#define SD_ErrorIntrSignalEnable 	0x03A
+#define SD_CMD12ErrorStatus		0x03C
+#define SD_Capabilities			0x040
+#define SD_Capabilities3		0x044
+#define SD_MaxCurCap			0x048
+#define SD_MaxCurCap_Reserved		0x04C
+#define SD_ADMA_ErrStatus		0x054
+#define SD_ADMA_SysAddr			0x58
+#define SD_SlotInterruptStatus		0x0FC
+#define SD_HostControllerVersion 	0x0FE
+#define	SD_GPIO_Reg			0x100
+#define	SD_GPIO_OE			0x104
+#define	SD_GPIO_Enable			0x108
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo	0x40
+
+/* HC 3.0 specific registers and offsets */
+#define SD3_HostCntrl2			0x03E
+/* preset regs start and count */
+#define SD3_PresetValStart		0x060
+#define SD3_PresetValCount		8
+/* preset-indiv regs */
+#define SD3_PresetVal_init		0x060
+#define SD3_PresetVal_default	0x062
+#define SD3_PresetVal_HS		0x064
+#define SD3_PresetVal_SDR12		0x066
+#define SD3_PresetVal_SDR25		0x068
+#define SD3_PresetVal_SDR50		0x06a
+#define SD3_PresetVal_SDR104	0x06c
+#define SD3_PresetVal_DDR50		0x06e
+/* SDIO3.0 Revx specific Registers */
+#define SD3_Tuning_Info_Register 0x0EC
+#define SD3_WL_BT_reset_register 0x0F0
+
+
+/* preset value indices */
+#define SD3_PRESETVAL_INITIAL_IX	0
+#define SD3_PRESETVAL_DESPEED_IX	1
+#define SD3_PRESETVAL_HISPEED_IX	2
+#define SD3_PRESETVAL_SDR12_IX		3
+#define SD3_PRESETVAL_SDR25_IX		4
+#define SD3_PRESETVAL_SDR50_IX		5
+#define SD3_PRESETVAL_SDR104_IX		6
+#define SD3_PRESETVAL_DDR50_IX		7
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M 	BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 	0
+#define CAP_TO_CLKUNIT_M  	BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 	7
+/* Note: for the SDIO 2.0 case this mask should be 6 bits (the 2 MSBs are
+ * reserved), but 8 bits are used here since that is what SDIO 3.0 requires.
+ */
+#define CAP_BASECLK_M 		BITFIELD_MASK(8)
+#define CAP_BASECLK_S 		8
+#define CAP_MAXBLOCK_M 		BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S		16
+#define CAP_ADMA2_M		BITFIELD_MASK(1)
+#define CAP_ADMA2_S		19
+#define CAP_ADMA1_M		BITFIELD_MASK(1)
+#define CAP_ADMA1_S		20
+#define CAP_HIGHSPEED_M		BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S		21
+#define CAP_DMA_M		BITFIELD_MASK(1)
+#define CAP_DMA_S		22
+#define CAP_SUSPEND_M		BITFIELD_MASK(1)
+#define CAP_SUSPEND_S		23
+#define CAP_VOLT_3_3_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S		24
+#define CAP_VOLT_3_0_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S		25
+#define CAP_VOLT_1_8_M		BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S		26
+#define CAP_64BIT_HOST_M	BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S	28
+
+#define SDIO_OCR_READ_FAIL	(2)
+
+
+#define CAP_ASYNCINT_SUP_M	BITFIELD_MASK(1)
+#define CAP_ASYNCINT_SUP_S	29
+
+#define CAP_SLOTTYPE_M		BITFIELD_MASK(2)
+#define CAP_SLOTTYPE_S		30
+
+#define CAP3_MSBits_OFFSET	(32)
+/* Note: the following fields live in the upper 32 bits of the capabilities
+ * register and are read as a separate 32-bit word, so their bit positions
+ * start at 0 rather than 32; that is why CAP3_MSBits_OFFSET is subtracted.
+ */
+#define CAP3_SDR50_SUP_M		BITFIELD_MASK(1)
+#define CAP3_SDR50_SUP_S		(32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_SDR104_SUP_M	BITFIELD_MASK(1)
+#define CAP3_SDR104_SUP_S	(33 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DDR50_SUP_M	BITFIELD_MASK(1)
+#define CAP3_DDR50_SUP_S	(34 - CAP3_MSBits_OFFSET)
+
+/* for knowing the clk caps in a single read */
+#define CAP3_30CLKCAP_M		BITFIELD_MASK(3)
+#define CAP3_30CLKCAP_S		(32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_A_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_A_S	(36 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_C_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_C_S	(37 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_D_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_D_S	(38 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_M	BITFIELD_MASK(4)
+#define CAP3_RETUNING_TC_S	(40 - CAP3_MSBits_OFFSET)
+
+#define CAP3_TUNING_SDR50_M	BITFIELD_MASK(1)
+#define CAP3_TUNING_SDR50_S	(45 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_MODES_M	BITFIELD_MASK(2)
+#define CAP3_RETUNING_MODES_S	(46 - CAP3_MSBits_OFFSET)
+
+#define CAP3_CLK_MULT_M		BITFIELD_MASK(8)
+#define CAP3_CLK_MULT_S		(48 - CAP3_MSBits_OFFSET)
+
+#define PRESET_DRIVR_SELECT_M	BITFIELD_MASK(2)
+#define PRESET_DRIVR_SELECT_S	14
+
+#define PRESET_CLK_DIV_M	BITFIELD_MASK(10)
+#define PRESET_CLK_DIV_S	0
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S		0
+#define CAP_CURR_3_0_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S		8
+#define CAP_CURR_1_8_M		BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S		16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M		BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S		0
+#define BLKSZ_BNDRY_M		BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S		12
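+/*
+ * Illustrative example: programming a 512-byte block size with the boundary
+ * field left at 0 (a 4 KB SDMA buffer boundary in the standard host
+ * controller spec) writes
+ *
+ *      (0 << BLKSZ_BNDRY_S) | (512 & BLKSZ_BLKSZ_M)
+ *
+ * to SD_BlockSize.
+ */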
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: Offset 0x008, size = 4 bytes  */
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */
+#define XFER_DMA_ENABLE_M   	BITFIELD_MASK(1)
+#define XFER_DMA_ENABLE_S	0
+#define XFER_BLK_COUNT_EN_M 	BITFIELD_MASK(1)
+#define XFER_BLK_COUNT_EN_S	1
+#define XFER_CMD_12_EN_M    	BITFIELD_MASK(1)
+#define XFER_CMD_12_EN_S 	2
+#define XFER_DATA_DIRECTION_M	BITFIELD_MASK(1)
+#define XFER_DATA_DIRECTION_S	4
+#define XFER_MULTI_BLOCK_M	BITFIELD_MASK(1)
+#define XFER_MULTI_BLOCK_S	5
+
+/* SD_Command: Offset 0x00E, size = 2 bytes */
+/* resp_type field */
+#define RESP_TYPE_NONE 		0
+#define RESP_TYPE_136  		1
+#define RESP_TYPE_48   		2
+#define RESP_TYPE_48_BUSY	3
+/* type field */
+#define CMD_TYPE_NORMAL		0
+#define CMD_TYPE_SUSPEND	1
+#define CMD_TYPE_RESUME		2
+#define CMD_TYPE_ABORT		3
+
+#define CMD_RESP_TYPE_M		BITFIELD_MASK(2)	/* Bits [0-1] 	- Response type */
+#define CMD_RESP_TYPE_S		0
+#define CMD_CRC_EN_M		BITFIELD_MASK(1)	/* Bit 3 	- CRC enable */
+#define CMD_CRC_EN_S		3
+#define CMD_INDEX_EN_M		BITFIELD_MASK(1)	/* Bit 4 	- Enable index checking */
+#define CMD_INDEX_EN_S		4
+#define CMD_DATA_EN_M		BITFIELD_MASK(1)	/* Bit 5 	- Using DAT line */
+#define CMD_DATA_EN_S		5
+#define CMD_TYPE_M		BITFIELD_MASK(2)	/* Bit [6-7] 	- Normal, abort, resume, etc
+							 */
+#define CMD_TYPE_S		6
+#define CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [8-13] 	- Command number */
+#define CMD_INDEX_S		8
+
+/* SD_BufferDataPort0	: Offset 0x020, size = 2 or 4 bytes */
+/* SD_BufferDataPort1 	: Offset 0x022, size = 2 bytes */
+/* SD_PresentState	: Offset 0x024, size = 4 bytes */
+#define PRES_CMD_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 0	May use CMD */
+#define PRES_CMD_INHIBIT_S	0
+#define PRES_DAT_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 1	May use DAT */
+#define PRES_DAT_INHIBIT_S	1
+#define PRES_DAT_BUSY_M		BITFIELD_MASK(1)	/* Bit 2	DAT is busy */
+#define PRES_DAT_BUSY_S		2
+#define PRES_PRESENT_RSVD_M	BITFIELD_MASK(5)	/* Bit [3-7]	rsvd */
+#define PRES_PRESENT_RSVD_S	3
+#define PRES_WRITE_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 8	Write is active */
+#define PRES_WRITE_ACTIVE_S	8
+#define PRES_READ_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 9	Read is active */
+#define PRES_READ_ACTIVE_S	9
+#define PRES_WRITE_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 10	Write buf is avail */
+#define PRES_WRITE_DATA_RDY_S	10
+#define PRES_READ_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 11	Read buf data avail */
+#define PRES_READ_DATA_RDY_S	11
+#define PRES_CARD_PRESENT_M	BITFIELD_MASK(1)	/* Bit 16	Card present - debounced */
+#define PRES_CARD_PRESENT_S	16
+#define PRES_CARD_STABLE_M	BITFIELD_MASK(1)	/* Bit 17	Debugging */
+#define PRES_CARD_STABLE_S	17
+#define PRES_CARD_PRESENT_RAW_M	BITFIELD_MASK(1)	/* Bit 18	Not debounced */
+#define PRES_CARD_PRESENT_RAW_S	18
+#define PRES_WRITE_ENABLED_M	BITFIELD_MASK(1)	/* Bit 19	Write protected? */
+#define PRES_WRITE_ENABLED_S	19
+#define PRES_DAT_SIGNAL_M	BITFIELD_MASK(4)	/* Bit [20-23]	Debugging */
+#define PRES_DAT_SIGNAL_S	20
+#define PRES_CMD_SIGNAL_M	BITFIELD_MASK(1)	/* Bit 24	Debugging */
+#define PRES_CMD_SIGNAL_S	24
+
+/* SD_HostCntrl: Offset 0x028, size = 1 bytes */
+#define HOST_LED_M		BITFIELD_MASK(1)	/* Bit 0	LED On/Off */
+#define HOST_LED_S		0
+#define HOST_DATA_WIDTH_M	BITFIELD_MASK(1)	/* Bit 1	4 bit enable */
+#define HOST_DATA_WIDTH_S	1
+#define HOST_HI_SPEED_EN_M	BITFIELD_MASK(1)	/* Bit 2	High speed vs low speed */
+#define HOST_HI_SPEED_EN_S	2
+#define HOST_DMA_SEL_M		BITFIELD_MASK(2)	/* Bits [4:3]	DMA Select */
+#define HOST_DMA_SEL_S		3
+
+/* Host Control2: */
+#define HOSTCtrl2_PRESVAL_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_PRESVAL_EN_S	15					/* bit# */
+
+#define HOSTCtrl2_ASYINT_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_ASYINT_EN_S	14					/* bit# */
+
+#define HOSTCtrl2_SAMPCLK_SEL_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_SAMPCLK_SEL_S	7					/* bit# */
+
+#define HOSTCtrl2_EXEC_TUNING_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_EXEC_TUNING_S	6					/* bit# */
+
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_M	BITFIELD_MASK(2)	/* 2 bit */
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_S	4					/* bit# */
+
+#define HOSTCtrl2_1_8SIG_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_1_8SIG_EN_S	3					/* bit# */
+
+#define HOSTCtrl2_UHSMODE_SEL_M	BITFIELD_MASK(3)	/* 3 bit */
+#define HOSTCtrl2_UHSMODE_SEL_S	0					/* bit# */
+
+#define HOST_CONTR_VER_2		(1)
+#define HOST_CONTR_VER_3		(2)
+
+/* misc defines */
+#define SD1_MODE 		0x1	/* SD Host Cntrlr Spec */
+#define SD4_MODE 		0x2	/* SD Host Cntrlr Spec */
+
+/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */
+#define PWR_BUS_EN_M		BITFIELD_MASK(1)	/* Bit 0	Power the bus */
+#define PWR_BUS_EN_S		0
+#define PWR_VOLTS_M		BITFIELD_MASK(3)	/* Bit [1-3]	Voltage Select */
+#define PWR_VOLTS_S		1
+
+/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */
+#define SW_RESET_ALL_M		BITFIELD_MASK(1)	/* Bit 0	Reset All */
+#define SW_RESET_ALL_S		0
+#define SW_RESET_CMD_M		BITFIELD_MASK(1)	/* Bit 1	CMD Line Reset */
+#define SW_RESET_CMD_S		1
+#define SW_RESET_DAT_M		BITFIELD_MASK(1)	/* Bit 2	DAT Line Reset */
+#define SW_RESET_DAT_S		2
+
+/* SD_IntrStatus: Offset 0x030, size = 2 bytes */
+/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */
+#define INTSTAT_CMD_COMPLETE_M		BITFIELD_MASK(1)	/* Bit 0 */
+#define INTSTAT_CMD_COMPLETE_S		0
+#define INTSTAT_XFER_COMPLETE_M		BITFIELD_MASK(1)
+#define INTSTAT_XFER_COMPLETE_S		1
+#define INTSTAT_BLOCK_GAP_EVENT_M	BITFIELD_MASK(1)
+#define INTSTAT_BLOCK_GAP_EVENT_S	2
+#define INTSTAT_DMA_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_DMA_INT_S		3
+#define INTSTAT_BUF_WRITE_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_WRITE_READY_S	4
+#define INTSTAT_BUF_READ_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_READ_READY_S	5
+#define INTSTAT_CARD_INSERTION_M	BITFIELD_MASK(1)
+#define INTSTAT_CARD_INSERTION_S	6
+#define INTSTAT_CARD_REMOVAL_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_REMOVAL_S		7
+#define INTSTAT_CARD_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_INT_S		8
+#define INTSTAT_RETUNING_INT_M		BITFIELD_MASK(1)	/* Bit 12 */
+#define INTSTAT_RETUNING_INT_S		12
+#define INTSTAT_ERROR_INT_M		BITFIELD_MASK(1)	/* Bit 15 */
+#define INTSTAT_ERROR_INT_S		15
+
+/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */
+/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */
+#define ERRINT_CMD_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_TIMEOUT_S		0
+#define ERRINT_CMD_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_CRC_S		1
+#define ERRINT_CMD_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_ENDBIT_S		2
+#define ERRINT_CMD_INDEX_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_INDEX_S		3
+#define ERRINT_DATA_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_TIMEOUT_S		4
+#define ERRINT_DATA_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_CRC_S		5
+#define ERRINT_DATA_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_ENDBIT_S		6
+#define ERRINT_CURRENT_LIMIT_M		BITFIELD_MASK(1)
+#define ERRINT_CURRENT_LIMIT_S		7
+#define ERRINT_AUTO_CMD12_M		BITFIELD_MASK(1)
+#define ERRINT_AUTO_CMD12_S		8
+#define ERRINT_VENDOR_M			BITFIELD_MASK(4)
+#define ERRINT_VENDOR_S			12
+#define ERRINT_ADMA_M			BITFIELD_MASK(1)
+#define ERRINT_ADMA_S			9
+
+/* Also provide definitions in "normal" form to allow combined masks */
+#define ERRINT_CMD_TIMEOUT_BIT		0x0001
+#define ERRINT_CMD_CRC_BIT		0x0002
+#define ERRINT_CMD_ENDBIT_BIT		0x0004
+#define ERRINT_CMD_INDEX_BIT		0x0008
+#define ERRINT_DATA_TIMEOUT_BIT		0x0010
+#define ERRINT_DATA_CRC_BIT		0x0020
+#define ERRINT_DATA_ENDBIT_BIT		0x0040
+#define ERRINT_CURRENT_LIMIT_BIT	0x0080
+#define ERRINT_AUTO_CMD12_BIT		0x0100
+#define ERRINT_ADMA_BIT		0x0200
+
+/* Masks to select CMD vs. DATA errors */
+#define ERRINT_CMD_ERRS		(ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\
+				 ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT)
+#define ERRINT_DATA_ERRS	(ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\
+				 ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT)
+#define ERRINT_TRANSFER_ERRS	(ERRINT_CMD_ERRS | ERRINT_DATA_ERRS)
+
+/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */
+/* SD_ClockCntrl	: Offset 0x02C , size = bytes */
+/* SD_SoftwareReset_TimeoutCntrl 	: Offset 0x02E , size = bytes */
+/* SD_IntrStatus	: Offset 0x030 , size = bytes */
+/* SD_ErrorIntrStatus 	: Offset 0x032 , size = bytes */
+/* SD_IntrStatusEnable	: Offset 0x034 , size = bytes */
+/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */
+/* SD_IntrSignalEnable	: Offset 0x038 , size = bytes */
+/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */
+/* SD_CMD12ErrorStatus	: Offset 0x03C , size = bytes */
+/* SD_Capabilities	: Offset 0x040 , size = bytes */
+/* SD_MaxCurCap		: Offset 0x048 , size = bytes */
+/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */
+/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */
+/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */
+
+/* SDIO Host Control Register DMA Mode Definitions */
+#define SDIOH_SDMA_MODE			0
+#define SDIOH_ADMA1_MODE		1
+#define SDIOH_ADMA2_MODE		2
+#define SDIOH_ADMA2_64_MODE		3
+
+#define ADMA2_ATTRIBUTE_VALID		(1 << 0)	/* ADMA Descriptor line valid */
+#define ADMA2_ATTRIBUTE_END			(1 << 1)	/* End of Descriptor */
+#define ADMA2_ATTRIBUTE_INT			(1 << 2)	/* Interrupt when line is done */
+#define ADMA2_ATTRIBUTE_ACT_NOP		(0 << 4)	/* Skip current line, go to next. */
+#define ADMA2_ATTRIBUTE_ACT_RSV		(1 << 4)	/* Same as NOP */
+#define ADMA1_ATTRIBUTE_ACT_SET		(1 << 4)	/* ADMA1 Only - set transfer length */
+#define ADMA2_ATTRIBUTE_ACT_TRAN	(2 << 4)	/* Transfer Data of one descriptor line. */
+#define ADMA2_ATTRIBUTE_ACT_LINK	(3 << 4)	/* Link Descriptor */
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+	uint32 len_attr;
+	uint32 phys_addr;
+} adma2_dscr_32b_t;
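+/*
+ * Illustrative sketch (example only): a single-entry ADMA2 table describing
+ * one 'len'-byte transfer that also ends the descriptor chain would be
+ * filled roughly as
+ *
+ *      adma2_dscr_32b_t d;
+ *      d.len_attr  = ((uint32)len << 16) | ADMA2_ATTRIBUTE_VALID |
+ *                    ADMA2_ATTRIBUTE_ACT_TRAN | ADMA2_ATTRIBUTE_END;
+ *      d.phys_addr = (uint32)dma_addr;
+ *
+ * with the 16-bit length in the upper half of len_attr and the attribute
+ * bits in the lower half, per the SD Host Controller ADMA2 descriptor format.
+ */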
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+	uint32 phys_addr_attr;
+} adma1_dscr_t;
+
+#endif /* _SDIOH_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdiovar.h b/drivers/net/wireless/bcmdhd/include/sdiovar.h
new file mode 100644
index 0000000000000000000000000000000000000000..2795647a61666151c18cb2cd314b94fe77813b1e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdiovar.h
@@ -0,0 +1,40 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * $ Copyright Open Broadcom Corporation $
+ *
+ * $Id: sdiovar.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+typedef struct sdreg {
+	int func;
+	int offset;
+	int value;
+} sdreg_t;
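+/*
+ * Illustrative note (not part of the original header): this is the payload
+ * format used by the register-access debug iovars that dhdu/wlu exchange
+ * with the SDIO host driver; 'func' selects the SDIO function, 'offset' the
+ * register within it, and 'value' carries the data for both reads and writes.
+ */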
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL		0x0001	/* Error */
+#define SDH_TRACE_VAL		0x0002	/* Trace */
+#define SDH_INFO_VAL		0x0004	/* Info */
+#define SDH_DEBUG_VAL		0x0008	/* Debug */
+#define SDH_DATA_VAL		0x0010	/* Data */
+#define SDH_CTRL_VAL		0x0020	/* Control Regs */
+#define SDH_LOG_VAL		0x0040	/* Enable bcmlog */
+#define SDH_DMA_VAL		0x0080	/* DMA */
+
+#define NUM_PREV_TRANSACTIONS	16
+
+
+#include <packed_section_end.h>
+
+#endif /* _sdiovar_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/siutils.h b/drivers/net/wireless/bcmdhd/include/siutils.h
new file mode 100644
index 0000000000000000000000000000000000000000..6dbd5b0927cd6d25a06752627eb4530d55f10483
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/siutils.h
@@ -0,0 +1,571 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: siutils.h 481602 2014-05-29 22:43:34Z $
+ */
+
+#ifndef	_siutils_h_
+#define	_siutils_h_
+
+#ifdef SR_DEBUG
+#include "wlioctl.h"
+#endif /* SR_DEBUG */
+
+
+/*
+ * Data structure that exports all chip-specific common variables: the public
+ * (read-only) portion of the siutils handle returned by si_attach()/si_kattach().
+ */
+struct si_pub {
+	uint	socitype;		/* SOCI_SB, SOCI_AI */
+
+	uint	bustype;		/* SI_BUS, PCI_BUS */
+	uint	buscoretype;		/* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
+	uint	buscorerev;		/* buscore rev */
+	uint	buscoreidx;		/* buscore index */
+	int	ccrev;			/* chip common core rev */
+	uint32	cccaps;			/* chip common capabilities */
+	uint32  cccaps_ext;			/* chip common capabilities extension */
+	int	pmurev;			/* pmu core rev */
+	uint32	pmucaps;		/* pmu capabilities */
+	uint	boardtype;		/* board type */
+	uint    boardrev;               /* board rev */
+	uint	boardvendor;		/* board vendor */
+	uint	boardflags;		/* board flags */
+	uint	boardflags2;		/* board flags2 */
+	uint	chip;			/* chip number */
+	uint	chiprev;		/* chip revision */
+	uint	chippkg;		/* chip package option */
+	uint32	chipst;			/* chip status */
+	bool	issim;			/* chip is in simulation or emulation */
+	uint    socirev;		/* SOC interconnect rev */
+	bool	pci_pr32414;
+
+};
+
+/* For the HIGH_ONLY driver the si_t must be writable so state can be synced
+ * from the BMAC to the HIGH driver; for the monolithic driver it is read-only
+ * to prevent accidental changes.
+ */
+typedef const struct si_pub si_t;
+
+/*
+ * Many of the routines below take an 'sih' handle as their first arg.
+ * Allocate this by calling si_attach().  Free it by calling si_detach().
+ * At any one time, the sih is logically focused on one particular si core
+ * (the "current core").
+ * Use si_setcore() or si_setcoreidx() to change the association to another core.
+ */
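+/*
+ * Minimal usage sketch (illustrative only; error handling and the bus-layer
+ * osh/regs setup are omitted, and 'coreid' stands for whichever core is of
+ * interest):
+ *
+ *      si_t *sih = si_attach(devid, osh, regs, bustype, sdh, &vars, &varsz);
+ *      uint origidx = si_coreidx(sih);
+ *      si_setcore(sih, coreid, 0);      // focus the handle on 'coreid'
+ *      ... access that core via si_corereg() / si_coreregs() ...
+ *      si_setcoreidx(sih, origidx);     // restore the previous core
+ *      si_detach(sih);
+ */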
+#define	SI_OSH		NULL	/* Use for si_kattach when no osh is available */
+
+#define	BADIDX		(SI_MAXCORES + 1)
+
+/* clkctl xtal what flags */
+#define	XTAL			0x1	/* primary crystal oscillator (2050) */
+#define	PLL			0x2	/* main chip pll */
+
+/* clkctl clk mode */
+#define	CLK_FAST		0	/* force fast (pll) clock */
+#define	CLK_DYNAMIC		2	/* enable dynamic clock control */
+
+/* GPIO usage priorities */
+#define GPIO_DRV_PRIORITY	0	/* Driver */
+#define GPIO_APP_PRIORITY	1	/* Application */
+#define GPIO_HI_PRIORITY	2	/* Highest priority. Ignore GPIO reservation */
+
+/* GPIO pull up/down */
+#define GPIO_PULLUP		0
+#define GPIO_PULLDN		1
+
+/* GPIO event regtype */
+#define GPIO_REGEVT		0	/* GPIO register event */
+#define GPIO_REGEVT_INTMSK	1	/* GPIO register event int mask */
+#define GPIO_REGEVT_INTPOL	2	/* GPIO register event int polarity */
+
+/* device path */
+#define SI_DEVPATH_BUFSZ	16	/* min buffer size in bytes */
+
+/* SI routine enumeration: to be used by update function with multiple hooks */
+#define	SI_DOATTACH	1
+#define SI_PCIDOWN	2	/* wireless interface is down */
+#define SI_PCIUP	3	/* wireless interface is up */
+
+#ifdef SR_DEBUG
+#define PMU_RES		31
+#endif /* SR_DEBUG */
+
+#define	ISSIM_ENAB(sih)	FALSE
+
+/* PMU clock/power control */
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih)	(BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih)	((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+#define AOB_ENAB(sih)	((sih)->ccrev >= 35 ? \
+			((sih)->cccaps_ext & CC_CAP_EXT_AOB_PRESENT) : 0)
+
+/* chipcommon clock/power control (exclusive with PMU's) */
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih)		(0)
+#define CCPLL_ENAB(sih)		(0)
+#else
+#define CCCTL_ENAB(sih)		((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih)		((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gpio_handler_t)(uint32 stat, void *arg);
+typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg);
+/* External BT Coex enable mask */
+#define CC_BTCOEX_EN_MASK  0x01
+/* External PA enable mask */
+#define GPIO_CTRL_EPA_EN_MASK 0x40
+/* WL/BT control enable mask */
+#define GPIO_CTRL_5_6_EN_MASK 0x60
+#define GPIO_CTRL_7_6_EN_MASK 0xC0
+#define GPIO_OUT_7_EN_MASK 0x80
+
+
+/* CR4 specific defines used by the host driver */
+#define SI_CR4_CAP			(0x04)
+#define SI_CR4_BANKIDX		(0x40)
+#define SI_CR4_BANKINFO		(0x44)
+#define SI_CR4_BANKPDA		(0x4C)
+
+#define	ARMCR4_TCBBNB_MASK	0xf0
+#define	ARMCR4_TCBBNB_SHIFT	4
+#define	ARMCR4_TCBANB_MASK	0xf
+#define	ARMCR4_TCBANB_SHIFT	0
+
+#define	SICF_CPUHALT		(0x0020)
+#define	ARMCR4_BSZ_MASK		0x3f
+#define	ARMCR4_BSZ_MULT		8192
+
+#include <osl_decl.h>
+/* === exported functions === */
+extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern bool si_pci_war16165(si_t *sih);
+extern void *
+si_d11_switch_addrbase(si_t *sih, uint coreunit);
+extern uint si_corelist(si_t *sih, uint coreid[]);
+extern uint si_coreid(si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_flag_alt(si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(si_t *sih);
+extern uint si_coreunit(si_t *sih);
+extern uint si_corevendor(si_t *sih);
+extern uint si_corerev(si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val);
+extern uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern void *si_coreregs(si_t *sih);
+extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val);
+extern void *si_wrapperregs(si_t *sih);
+extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern bool si_iscoreup(si_t *sih);
+extern uint si_numcoreunits(si_t *sih, uint coreid);
+extern uint si_numd11coreunits(si_t *sih);
+extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
+extern void *si_setcoreidx(si_t *sih, uint coreidx);
+extern void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
+extern int si_numaddrspaces(si_t *sih);
+extern uint32 si_addrspace(si_t *sih, uint asidx);
+extern uint32 si_addrspacesize(si_t *sih, uint asidx);
+extern void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern int si_corebist(si_t *sih);
+extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void si_core_disable(si_t *sih, uint32 bits);
+extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
+extern uint si_chip_hostif(si_t *sih);
+extern bool si_read_pmu_autopll(si_t *sih);
+extern uint32 si_clock(si_t *sih);
+extern uint32 si_alp_clock(si_t *sih); /* returns [Hz] units */
+extern uint32 si_ilp_clock(si_t *sih); /* returns [Hz] units */
+extern void si_pci_setup(si_t *sih, uint coremask);
+extern void si_pcmcia_init(si_t *sih);
+extern void si_setint(si_t *sih, int siflag);
+extern bool si_backplane64(si_t *sih);
+extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+	void *intrsenabled_fn, void *intr_arg);
+extern void si_deregister_intr_callback(si_t *sih);
+extern void si_clkctl_init(si_t *sih);
+extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih);
+extern bool si_clkctl_cc(si_t *sih, uint mode);
+extern int si_clkctl_xtal(si_t *sih, uint what, bool on);
+extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val);
+extern void si_btcgpiowar(si_t *sih);
+extern bool si_deviceremoved(si_t *sih);
+extern uint32 si_socram_size(si_t *sih);
+extern uint32 si_socdevram_size(si_t *sih);
+extern uint32 si_socram_srmem_size(si_t *sih);
+extern void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda);
+extern void si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap);
+extern bool si_socdevram_pkg(si_t *sih);
+extern bool si_socdevram_remap_isenb(si_t *sih);
+extern uint32 si_socdevram_remap_size(si_t *sih);
+
+extern void si_watchdog(si_t *sih, uint ticks);
+extern void si_watchdog_ms(si_t *sih, uint32 ms);
+extern uint32 si_watchdog_msticks(void);
+extern void *si_gpiosetcore(si_t *sih);
+extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioin(si_t *sih);
+extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
+extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
+extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+extern void si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode);
+extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value);
+extern uint8 si_gci_host_wake_gpio_init(si_t *sih);
+extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state);
+
+/* GPIO event handlers */
+extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg);
+extern void si_gpio_handler_unregister(si_t *sih, void* gpioh);
+extern void si_gpio_handler_process(si_t *sih);
+
+/* GCI interrupt handlers */
+extern void si_gci_handler_process(si_t *sih);
+
+/* GCI GPIO event handlers */
+extern void *si_gci_gpioint_handler_register(si_t *sih, uint8 gpio, uint8 sts,
+	gci_gpio_handler_t cb, void *arg);
+extern void si_gci_gpioint_handler_unregister(si_t *sih, void* gci_i);
+extern uint8 si_gci_gpio_status(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value);
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool si_pci_pmecap(si_t *sih);
+extern bool si_pci_fastpmecap(struct osl_info *osh);
+extern bool si_pci_pmestat(si_t *sih);
+extern void si_pci_pmeclr(si_t *sih);
+extern void si_pci_pmeen(si_t *sih);
+extern void si_pci_pmestatclr(si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val);
+
+
+#ifdef BCMSDIO
+extern void si_sdio_init(si_t *sih);
+#endif
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+	uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+#define si_eci(sih) 0
+static INLINE void * si_eci_init(si_t *sih) {return NULL;}
+#define si_eci_notify_bt(sih, type, val)  (0)
+#define si_seci(sih) 0
+#define si_seci_upd(sih, a)	do {} while (0)
+static INLINE void * si_seci_init(si_t *sih, uint8 use_seci) {return NULL;}
+static INLINE void * si_gci_init(si_t *sih) {return NULL;}
+#define si_seci_down(sih) do {} while (0)
+#define si_gci(sih) 0
+
+/* OTP status */
+extern bool si_is_otp_disabled(si_t *sih);
+extern bool si_is_otp_powered(si_t *sih);
+extern void si_otp_power(si_t *sih, bool on, uint32* min_res_mask);
+
+/* SPROM availability */
+extern bool si_is_sprom_available(si_t *sih);
+extern bool si_is_sprom_enabled(si_t *sih);
+extern void si_sprom_enable(si_t *sih, bool enable);
+
+/* OTP/SROM CIS stuff */
+extern int si_cis_source(si_t *sih);
+#define CIS_DEFAULT	0
+#define CIS_SROM	1
+#define CIS_OTP		2
+
+/* Fab-id information */
+#define	DEFAULT_FAB	0x0	/* Original/first fab used for this chip */
+#define	CSM_FAB7	0x1	/* CSM Fab7 chip */
+#define	TSMC_FAB12	0x2	/* TSMC Fab12/Fab14 chip */
+#define	SMIC_FAB4	0x3	/* SMIC Fab4 chip */
+
+extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw);
+extern uint16 si_fabid(si_t *sih);
+extern uint16 si_chipid(si_t *sih);
+
+/*
+ * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
+ * The returned path is NUL-terminated and has a trailing '/'.
+ * Returns 0 on success, nonzero otherwise.
+ */
+extern int si_devpath(si_t *sih, char *path, int size);
+extern int si_devpath_pcie(si_t *sih, char *path, int size);
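+/*
+ * Example (illustrative only): callers typically build the path into a local
+ * buffer of the documented minimum size, e.g.
+ *
+ *      char path[SI_DEVPATH_BUFSZ];
+ *      if (si_devpath(sih, path, sizeof(path)) == 0)
+ *              ;  // path now holds something like "sb/1/" or "pci/1/1/"
+ */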
+/* Read variable with prepending the devpath to the name */
+extern char *si_getdevpathvar(si_t *sih, const char *name);
+extern int si_getdevpathintvar(si_t *sih, const char *name);
+extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name);
+
+
+extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val);
+extern uint8 si_pcieltrenable(si_t *sih, uint32 mask, uint32 val);
+extern uint8 si_pcieobffenable(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltr_reg(si_t *sih, uint32 reg, uint32 mask, uint32 val);
+extern uint32 si_pcieltrspacing_reg(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltrhysteresiscnt_reg(si_t *sih, uint32 mask, uint32 val);
+extern void si_pcie_set_error_injection(si_t *sih, uint32 mode);
+extern void si_pcie_set_L1substate(si_t *sih, uint32 substate);
+extern uint32 si_pcie_get_L1substate(si_t *sih);
+extern void si_war42780_clkreq(si_t *sih, bool clkreq);
+extern void si_pci_down(si_t *sih);
+extern void si_pci_up(si_t *sih);
+extern void si_pci_sleep(si_t *sih);
+extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm);
+extern void si_pcie_power_save_enable(si_t *sih, bool enable);
+extern void si_pcie_extendL1timer(si_t *sih, bool extend);
+extern int si_pci_fixcfg(si_t *sih);
+extern void si_chippkg_set(si_t *sih, uint);
+
+extern void si_chipcontrl_btshd0_4331(si_t *sih, bool on);
+extern void si_chipcontrl_restore(si_t *sih, uint32 val);
+extern uint32 si_chipcontrl_read(si_t *sih);
+extern void si_chipcontrl_epa4331(si_t *sih, bool on);
+extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl);
+extern void si_chipcontrl_srom4360(si_t *sih, bool on);
+/* Enable BT-COEX & Ex-PA for 4313 */
+extern void si_epa_4313war(si_t *sih);
+extern void si_btc_enable_chipcontrol(si_t *sih);
+/* BT/WL selection for 4313 bt combo >= P250 boards */
+extern void si_btcombo_p250_4313_war(si_t *sih);
+extern void si_btcombo_43228_war(si_t *sih);
+extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear);
+extern void si_pmu_synth_pwrsw_4313_war(si_t *sih);
+extern uint si_pll_reset(si_t *sih);
+/* === debug routines === */
+
+extern bool si_taclear(si_t *sih, bool details);
+
+
+#if defined(BCMDBG_PHYDUMP)
+extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif /* BCMDBG_PHYDUMP */
+
+extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
+#ifdef SR_DEBUG
+extern void si_dump_pmu(si_t *sih, void *pmu_var);
+extern void si_pmu_keep_on(si_t *sih, int32 int_val);
+extern uint32 si_pmu_keep_on_get(si_t *sih);
+extern uint32 si_power_island_set(si_t *sih, uint32 int_val);
+extern uint32 si_power_island_get(si_t *sih);
+#endif /* SR_DEBUG */
+extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val);
+extern void si_pcie_set_request_size(si_t *sih, uint16 size);
+extern uint16 si_pcie_get_request_size(si_t *sih);
+extern void si_pcie_set_maxpayload_size(si_t *sih, uint16 size);
+extern uint16 si_pcie_get_maxpayload_size(si_t *sih);
+extern uint16 si_pcie_get_ssid(si_t *sih);
+extern uint32 si_pcie_get_bar0(si_t *sih);
+extern int si_pcie_configspace_cache(si_t *sih);
+extern int si_pcie_configspace_restore(si_t *sih);
+extern int si_pcie_configspace_get(si_t *sih, uint8 *buf, uint size);
+
+char *si_getnvramflvar(si_t *sih, const char *name);
+
+
+extern uint32 si_tcm_size(si_t *sih);
+extern bool si_has_flops(si_t *sih);
+
+extern int si_set_sromctl(si_t *sih, uint32 value);
+extern uint32 si_get_sromctl(si_t *sih);
+
+extern uint32 si_gci_direct(si_t *sih, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_input(si_t *sih, uint reg);
+extern uint32 si_gci_int_enable(si_t *sih, bool enable);
+extern void si_gci_reset(si_t *sih);
+#ifdef BCMLTECOEX
+extern void si_gci_seci_init(si_t *sih);
+extern void si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum,
+	uint32 ltecx_fnsel, uint32 ltecx_gcigpio);
+extern void si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum,
+	uint32 ltecx_fnsel, uint32 ltecx_gcigpio);
+#endif /* BCMLTECOEX */
+extern void si_gci_set_functionsel(si_t *sih, uint32 pin, uint8 fnsel);
+extern uint32 si_gci_get_functionsel(si_t *sih, uint32 pin);
+extern void si_gci_clear_functionsel(si_t *sih, uint8 fnsel);
+extern uint8 si_gci_get_chipctrlreg_idx(uint32 pin, uint32 *regidx, uint32 *pos);
+extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_chipstatus(si_t *sih, uint reg);
+extern uint16 si_cc_get_reg16(uint32 reg_offs);
+extern uint32 si_cc_get_reg32(uint32 reg_offs);
+extern uint32 si_cc_set_reg32(uint32 reg_offs, uint32 val);
+extern uint32 si_gci_preinit_upd_indirect(uint32 regidx, uint32 setval, uint32 mask);
+extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status);
+extern void si_swdenable(si_t *sih, uint32 swdflag);
+
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+
+void si_update_masks(si_t *sih);
+void si_force_islanding(si_t *sih, bool enable);
+extern uint32 si_pmu_res_req_timer_clr(si_t *sih);
+extern void si_pmu_rfldo(si_t *sih, bool on);
+extern void si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 spert_val);
+extern uint32 si_pcie_set_ctrlreg(si_t *sih, uint32 sperst_mask, uint32 spert_val);
+extern void si_pcie_ltr_war(si_t *sih);
+extern void si_pcie_hw_LTR_war(si_t *sih);
+extern void si_pcie_hw_L1SS_war(si_t *sih);
+extern void si_pciedev_crwlpciegen2(si_t *sih);
+extern void si_pcie_prep_D3(si_t *sih, bool enter_D3);
+extern void si_pciedev_reg_pm_clk_period(si_t *sih);
+
+#ifdef WLRSDB
+extern void si_d11rsdb_core_disable(si_t *sih, uint32 bits);
+extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+#endif
+
+
+/* Macro to enable clock gating changes in different cores */
+#define MEM_CLK_GATE_BIT 	5
+#define GCI_CLK_GATE_BIT 	18
+
+#define USBAPP_CLK_BIT		0
+#define PCIE_CLK_BIT		3
+#define ARMCR4_DBG_CLK_BIT	4
+#define SAMPLE_SYNC_CLK_BIT 	17
+#define PCIE_TL_CLK_BIT		18
+#define HQ_REQ_BIT		24
+#define PLL_DIV2_BIT_START	9
+#define PLL_DIV2_MASK		(0x37 << PLL_DIV2_BIT_START)
+#define PLL_DIV2_DIS_OP		(0x37 << PLL_DIV2_BIT_START)
+
+#define PMUREG(si, member) \
+	(AOB_ENAB(si) ? \
+		si_corereg_addr(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+			OFFSETOF(pmuregs_t, member)): \
+		si_corereg_addr(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member)))
+
+#define pmu_corereg(si, cc_idx, member, mask, val) \
+	(AOB_ENAB(si) ? \
+		si_pmu_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+			       OFFSETOF(pmuregs_t, member), mask, val): \
+		si_pmu_corereg(si, cc_idx, OFFSETOF(chipcregs_t, member), mask, val))
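+
+/* Illustrative note (not part of the original header): AOB_ENAB() selects between
+ * chips whose PMU registers live in a dedicated always-on PMU core and older chips
+ * where they are part of chipcommon. A caller updating a PMU register in either
+ * layout might write, for example,
+ *	pmu_corereg(sih, SI_CC_IDX, min_res_mask, ~0, new_mask);
+ * (the member name and values here are only a hypothetical example).
+ */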
+
+/* GCI Macros */
+#define ALLONES_32				0xFFFFFFFF
+#define GCI_CCTL_SECIRST_OFFSET			0 /* SeciReset */
+#define GCI_CCTL_RSTSL_OFFSET			1 /* ResetSeciLogic */
+#define GCI_CCTL_SECIEN_OFFSET			2 /* EnableSeci  */
+#define GCI_CCTL_FSL_OFFSET			3 /* ForceSeciOutLow */
+#define GCI_CCTL_SMODE_OFFSET			4 /* SeciOpMode, 6:4 */
+#define GCI_CCTL_US_OFFSET			7 /* UpdateSeci */
+#define GCI_CCTL_BRKONSLP_OFFSET		8 /* BreakOnSleep */
+#define GCI_CCTL_SILOWTOUT_OFFSET		9 /* SeciInLowTimeout, 10:9 */
+#define GCI_CCTL_RSTOCC_OFFSET			11 /* ResetOffChipCoex */
+#define GCI_CCTL_ARESEND_OFFSET			12 /* AutoBTSigResend */
+#define GCI_CCTL_FGCR_OFFSET			16 /* ForceGciClkReq */
+#define GCI_CCTL_FHCRO_OFFSET			17 /* ForceHWClockReqOff */
+#define GCI_CCTL_FREGCLK_OFFSET			18 /* ForceRegClk */
+#define GCI_CCTL_FSECICLK_OFFSET		19 /* ForceSeciClk */
+#define GCI_CCTL_FGCA_OFFSET			20 /* ForceGciClkAvail */
+#define GCI_CCTL_FGCAV_OFFSET			21 /* ForceGciClkAvailValue */
+#define GCI_CCTL_SCS_OFFSET			24 /* SeciClkStretch, 31:24 */
+
+#define GCI_MODE_UART				0x0
+#define GCI_MODE_SECI				0x1
+#define GCI_MODE_BTSIG				0x2
+#define GCI_MODE_GPIO				0x3
+#define GCI_MODE_MASK				0x7
+
+#define GCI_CCTL_LOWTOUT_DIS			0x0
+#define GCI_CCTL_LOWTOUT_10BIT			0x1
+#define GCI_CCTL_LOWTOUT_20BIT			0x2
+#define GCI_CCTL_LOWTOUT_30BIT			0x3
+#define GCI_CCTL_LOWTOUT_MASK			0x3
+
+#define GCI_CCTL_SCS_DEF			0x19
+#define GCI_CCTL_SCS_MASK			0xFF
+
+#define GCI_SECIIN_MODE_OFFSET			0
+#define GCI_SECIIN_GCIGPIO_OFFSET		4
+#define GCI_SECIIN_RXID2IP_OFFSET		8
+
+#define GCI_SECIOUT_MODE_OFFSET			0
+#define GCI_SECIOUT_GCIGPIO_OFFSET		4
+#define GCI_SECIOUT_SECIINRELATED_OFFSET	16
+
+#define GCI_SECIAUX_RXENABLE_OFFSET		0
+#define GCI_SECIFIFO_RXENABLE_OFFSET		16
+
+#define GCI_SECITX_ENABLE_OFFSET		0
+
+#define GCI_GPIOCTL_INEN_OFFSET			0
+#define GCI_GPIOCTL_OUTEN_OFFSET		1
+#define GCI_GPIOCTL_PDN_OFFSET			4
+
+#define GCI_GPIOIDX_OFFSET			16
+
+#define GCI_LTECX_SECI_ID			0 /* SECI port for LTECX */
+
+/* To access per GCI bit registers */
+#define GCI_REG_WIDTH				32
+
+/* GCI bit positions */
+/* GCI [127:000] = WLAN [127:0] */
+#define GCI_WLAN_IP_ID				0
+#define GCI_WLAN_BEGIN				0
+#define GCI_WLAN_PRIO_POS			(GCI_WLAN_BEGIN + 4)
+
+/* GCI [639:512] = LTE [127:0] */
+#define GCI_LTE_IP_ID				4
+#define GCI_LTE_BEGIN				512
+#define GCI_LTE_FRAMESYNC_POS			(GCI_LTE_BEGIN + 0)
+#define GCI_LTE_RX_POS				(GCI_LTE_BEGIN + 1)
+#define GCI_LTE_TX_POS				(GCI_LTE_BEGIN + 2)
+#define GCI_LTE_AUXRXDVALID_POS			(GCI_LTE_BEGIN + 56)
+
+/* Reg Index corresponding to ECI bit no x of ECI space */
+#define GCI_REGIDX(x)				((x)/GCI_REG_WIDTH)
+/* Bit offset of ECI bit no x in 32-bit words */
+#define GCI_BITOFFSET(x)			((x)%GCI_REG_WIDTH)
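+
+/* Worked example (illustrative only): GCI_LTE_FRAMESYNC_POS is ECI bit 512, so it
+ * falls in 32-bit register GCI_REGIDX(512) == 16 at bit GCI_BITOFFSET(512) == 0,
+ * while GCI_WLAN_PRIO_POS (bit 4) maps to register 0, bit 4.
+ */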
+
+/* End - GCI Macros */
+
+#ifdef REROUTE_OOBINT
+#define CC_OOB          0x0
+#define M2MDMA_OOB      0x1
+#define PMU_OOB         0x2
+#define D11_OOB         0x3
+#define SDIOD_OOB       0x4
+#define PMU_OOB_BIT     (0x10 | PMU_OOB)
+#endif /* REROUTE_OOBINT */
+
+
+#endif	/* _siutils_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/trxhdr.h b/drivers/net/wireless/bcmdhd/include/trxhdr.h
new file mode 100644
index 0000000000000000000000000000000000000000..249527cd3420a5e1b8afd543b5caa18eebe0c382
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/trxhdr.h
@@ -0,0 +1,74 @@
+/*
+ * TRX image file header format.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: trxhdr.h 349211 2012-08-07 09:45:24Z $
+ */
+
+#ifndef _TRX_HDR_H
+#define _TRX_HDR_H
+
+#include <typedefs.h>
+
+#define TRX_MAGIC	0x30524448	/* "HDR0" */
+#define TRX_MAX_LEN	0x3B0000	/* Max length */
+#define TRX_NO_HEADER	1		/* Do not write TRX header */
+#define TRX_GZ_FILES	0x2     /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_EMBED_UCODE	0x8	/* Trx contains embedded ucode image */
+#define TRX_ROMSIM_IMAGE	0x10	/* Trx contains ROM simulation image */
+#define TRX_UNCOMP_IMAGE	0x20	/* Trx contains uncompressed rtecdc.bin image */
+#define TRX_BOOTLOADER		0x40	/* the image is a bootloader */
+
+#define TRX_V1		1
+#define TRX_V1_MAX_OFFSETS	3		/* V1: Max number of individual files */
+
+#ifndef BCMTRXV2
+#define TRX_VERSION	TRX_V1		/* Version 1 */
+#define TRX_MAX_OFFSET TRX_V1_MAX_OFFSETS
+#endif
+
+/* BMAC host drivers/applications such as bcmdl need to support both version 1 and
+ * version 2 of the TRX header. To keep this generic, the trx_header structure is
+ * defined below with an "offsets" field whose size varies with the TRX version.
+ * The BMAC host driver and bcmdl have already been updated to support TRXV2.
+ * So that applications such as "dhdl", which have not yet been enhanced to support
+ * TRXV2, are not broken, the new macro and structure definitions take effect only
+ * when BCMTRXV2 is defined.
+ */
+struct trx_header {
+	uint32 magic;		/* "HDR0" */
+	uint32 len;		/* Length of file including header */
+	uint32 crc32;		/* 32-bit CRC from flag_version to end of file */
+	uint32 flag_version;	/* 0:15 flags, 16:31 version */
+#ifndef BCMTRXV2
+	uint32 offsets[TRX_MAX_OFFSET];	/* Offsets of partitions from start of header */
+#else
+	uint32 offsets[1];	/* Offsets of partitions from start of header */
+#endif
+};
+
+#ifdef BCMTRXV2
+#define TRX_VERSION		TRX_V2		/* Version 2 */
+#define TRX_MAX_OFFSET  TRX_V2_MAX_OFFSETS
+
+#define TRX_V2		2
+/* V2: Max number of individual files
+ * To support SDR signature + Config data region
+ */
+#define TRX_V2_MAX_OFFSETS	5
+#define SIZEOF_TRXHDR_V1	(sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32))
+#define SIZEOF_TRXHDR_V2	(sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32))
+#define TRX_VER(trx)		(trx->flag_version>>16)
+#define ISTRX_V1(trx)		(TRX_VER(trx) == TRX_V1)
+#define ISTRX_V2(trx)		(TRX_VER(trx) == TRX_V2)
+/* For V2, return size of V2 size: others, return V1 size */
+#define SIZEOF_TRX(trx)	    (ISTRX_V2(trx) ? SIZEOF_TRXHDR_V2: SIZEOF_TRXHDR_V1)
+#else
+#define SIZEOF_TRX(trx)	    (sizeof(struct trx_header))
+#endif /* BCMTRXV2 */
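+
+/* Sketch (illustrative only, not part of this interface): a host-side loader could
+ * use TRX_MAGIC and SIZEOF_TRX() to sanity-check a candidate image before trusting
+ * the offsets array. "imglen" is a hypothetical caller-supplied image length.
+ *
+ *	static int trx_image_plausible(const struct trx_header *trx, uint32 imglen)
+ *	{
+ *		if (trx->magic != TRX_MAGIC || imglen < SIZEOF_TRX(trx))
+ *			return 0;	(bad magic or truncated header)
+ *		if (trx->len > imglen || trx->len > TRX_MAX_LEN)
+ *			return 0;	(declared length inconsistent with image)
+ *		return 1;		(plausible; CRC over flag_version..end still unchecked)
+ *	}
+ */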
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
+
+#endif /* _TRX_HDR_H */
diff --git a/drivers/net/wireless/bcmdhd/include/typedefs.h b/drivers/net/wireless/bcmdhd/include/typedefs.h
new file mode 100644
index 0000000000000000000000000000000000000000..08daa7e9103af698a2626b70ade0b08d21108214
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/typedefs.h
@@ -0,0 +1,321 @@
+/*
+ * $Copyright Open Broadcom Corporation$
+ * $Id: typedefs.h 484281 2014-06-12 22:42:26Z $
+ */
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#ifdef SITE_TYPEDEFS
+
+/*
+ * Define SITE_TYPEDEFS in the compile to include a site-specific
+ * typedef file "site_typedefs.h".
+ *
+ * If SITE_TYPEDEFS is not defined, then the code section below makes
+ * inferences about the compile environment based on defined symbols and
+ * possibly compiler pragmas.
+ *
+ * Following these two sections is the Default Typedefs section.
+ * This section is only processed if USE_TYPEDEF_DEFAULTS is
+ * defined. This section has a default set of typedefs and a few
+ * preprocessor symbols (TRUE, FALSE, NULL, ...).
+ */
+
+#include "site_typedefs.h"
+
+#else
+
+/*
+ * Infer the compile environment based on preprocessor symbols and pragmas.
+ * Override type definitions as needed, and include configuration-dependent
+ * header files to define types.
+ */
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE	false
+#endif
+#ifndef TRUE
+#define TRUE	true
+#endif
+
+#else	/* ! __cplusplus */
+
+
+#endif	/* ! __cplusplus */
+
+#if defined(__LP64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+
+
+
+
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+
+
+
+
+#if defined(__sparc__)
+#define TYPEDEF_ULONG
+#endif
+
+/*
+ * If this is either a Linux hybrid build or the per-port code of a hybrid build
+ * then use the Linux header files to get some of the typedefs.  Otherwise, define
+ * them entirely in this file.  We can't always define the types because we get
+ * a duplicate typedef error; there is no way to "undefine" a typedef.
+ * We know when it's per-port code because each file defines LINUX_PORT at the top.
+ */
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif /* TARGETENV_android */
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif	/* >= 2.6.19 */
+/* special detection for 2.6.18-128.7.1.0.1.el5 */
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
+#include <linux/compiler.h>
+#ifdef noinline_for_stack
+#define TYPEDEF_BOOL
+#endif
+#endif	/* == 2.6.18 */
+#endif	/* __KERNEL__ */
+#endif  /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */
+
+
+/* Do not support the (u)int64 types with strict ansi for GNU C */
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif /* defined(__GNUC__) && defined(__STRICT_ANSI__) */
+
+/* ICL accepts unsigned 64 bit type only, and complains in ANSI mode
+ * for signed or unsigned
+ */
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif /* __ICL */
+
+#if !defined(__DJGPP__)
+
+/* pick up ushort & uint from standard types.h */
+#if defined(__KERNEL__)
+
+/* See note above */
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#include <linux/types.h>	/* sys/types.h and linux/types.h are oil and water */
+#endif /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */
+
+#else
+
+#include <sys/types.h>
+
+#endif /* linux && __KERNEL__ */
+
+#endif /* !__DJGPP__ */
+
+
+/* use the default typedefs in the next section of this file */
+#define USE_TYPEDEF_DEFAULTS
+
+#endif /* SITE_TYPEDEFS */
+
+
+/*
+ * Default Typedefs
+ */
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef	/* @abstract@ */ unsigned char	bool;
+#endif /* TYPEDEF_BOOL */
+
+/* define uchar, ushort, uint, ulong */
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char	uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short	ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int	uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long	ulong;
+#endif
+
+/* define [u]int8/16/32/64, uintptr */
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char	uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short	uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int	uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int	uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char	int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short	int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int	int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+/* define float32/64, float_t */
+
+#ifndef TYPEDEF_FLOAT32
+typedef float		float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double		float64;
+#endif
+
+/*
+ * abstracted floating point type allows for compile time selection of
+ * single or double precision arithmetic.  Compiling with -DFLOAT32
+ * selects single precision; the default is double precision.
+ */
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else /* default to double precision floating point */
+typedef float64 float_t;
+#endif
+
+#endif /* TYPEDEF_FLOAT_T */
+
+/* define macro values */
+
+#ifndef FALSE
+#define FALSE	0
+#endif
+
+#ifndef TRUE
+#define TRUE	1  /* TRUE */
+#endif
+
+#ifndef NULL
+#define	NULL	0
+#endif
+
+#ifndef OFF
+#define	OFF	0
+#endif
+
+#ifndef ON
+#define	ON	1  /* ON = 1 */
+#endif
+
+#define	AUTO	(-1) /* Auto = -1 */
+
+/* define PTRSZ, INLINE */
+
+#ifndef PTRSZ
+#define	PTRSZ	sizeof(char*)
+#endif
+
+
+/* Detect compiler type. */
+#if defined(__GNUC__) || defined(__lint)
+	#define BWL_COMPILER_GNU
+#elif defined(__CC_ARM) && __CC_ARM
+	#define BWL_COMPILER_ARMCC
+#else
+	#error "Unknown compiler!"
+#endif 
+
+
+#ifndef INLINE
+	#if defined(BWL_COMPILER_MICROSOFT)
+		#define INLINE __inline
+	#elif defined(BWL_COMPILER_GNU)
+		#define INLINE __inline__
+	#elif defined(BWL_COMPILER_ARMCC)
+		#define INLINE	__inline
+	#else
+		#define INLINE
+	#endif 
+#endif /* INLINE */
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif /* USE_TYPEDEF_DEFAULTS */
+
+/* Suppress unused parameter warning */
+#define UNUSED_PARAMETER(x) (void)(x)
+
+/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
+
+/*
+ * Including the bcmdefs.h here, to make sure everyone including typedefs.h
+ * gets this automatically
+*/
+#include <bcmdefs.h>
+#endif /* _TYPEDEFS_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d54d8147846bef3daa4a9b6963b00cc04ce5ac1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
@@ -0,0 +1,289 @@
+/*
+* $Copyright Open 2009 Broadcom Corporation$
+* $Id: wlfc_proto.h 455301 2014-02-13 12:42:13Z $
+*
+*/
+#ifndef __wlfc_proto_definitions_h__
+#define __wlfc_proto_definitions_h__
+
+	/* Use TLV to convey WLFC information.
+	 ---------------------------------------------------------------------------
+	| Type |  Len | value                    | Description
+	 ---------------------------------------------------------------------------
+	|  1   |   1  | (handle)                 | MAC OPEN
+	 ---------------------------------------------------------------------------
+	|  2   |   1  | (handle)                 | MAC CLOSE
+	 ---------------------------------------------------------------------------
+	|  3   |   2  | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
+	 ---------------------------------------------------------------------------
+	|  4   |   4+ | see pkttag comments      | TXSTATUS
+	|      |      | TX status & timestamps   | Present only when pkt timestamp is enabled
+	 ---------------------------------------------------------------------------
+	|  5   |   4  | see pkttag comments      | PKTTAG [host->firmware]
+	 ---------------------------------------------------------------------------
+	|  6   |   8  | (handle, ifid, MAC)      | MAC ADD
+	 ---------------------------------------------------------------------------
+	|  7   |   8  | (handle, ifid, MAC)      | MAC DEL
+	 ---------------------------------------------------------------------------
+	|  8   |   1  | (rssi)                   | RSSI - RSSI value for the packet.
+	 ---------------------------------------------------------------------------
+	|  9   |   1  | (interface ID)           | Interface OPEN
+	 ---------------------------------------------------------------------------
+	|  10  |   1  | (interface ID)           | Interface CLOSE
+	 ---------------------------------------------------------------------------
+	|  11  |   8  | fifo credit returns map  | FIFO credits back to the host
+	|      |      |                          |
+	|      |      |                          | --------------------------------------
+	|      |      |                          | | ac0 | ac1 | ac2 | ac3 | bcmc | atim |
+	|      |      |                          | --------------------------------------
+	|      |      |                          |
+	 ---------------------------------------------------------------------------
+	|  12  |   2  | MAC handle,              | Host provides a bitmap of pending
+	|      |      | AC[0-3] traffic bitmap   | unicast traffic for MAC-handle dstn.
+	|      |      |                          | [host->firmware]
+	 ---------------------------------------------------------------------------
+	|  13  |   3  | (count, handle, prec_bmp)| One time request for packet to a specific
+	|      |      |                          | MAC destination.
+	 ---------------------------------------------------------------------------
+	|  15  |  12  | (pkttag, timestamps)     | Send TX timestamp at reception from host
+	 ---------------------------------------------------------------------------
+	|  16  |  12  | (pkttag, timestamps)     | Send WLAN RX timestamp along with RX frame
+	 ---------------------------------------------------------------------------
+	| 255  |  N/A |  N/A                     | FILLER - This is a special type
+	|      |      |                          | that has no length or value.
+	|      |      |                          | Typically used for padding.
+	 ---------------------------------------------------------------------------
+	*/
+
+#define WLFC_CTL_TYPE_MAC_OPEN			1
+#define WLFC_CTL_TYPE_MAC_CLOSE			2
+#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT	3
+#define WLFC_CTL_TYPE_TXSTATUS			4
+#define WLFC_CTL_TYPE_PKTTAG			5
+
+#define WLFC_CTL_TYPE_MACDESC_ADD		6
+#define WLFC_CTL_TYPE_MACDESC_DEL		7
+#define WLFC_CTL_TYPE_RSSI			8
+
+#define WLFC_CTL_TYPE_INTERFACE_OPEN		9
+#define WLFC_CTL_TYPE_INTERFACE_CLOSE		10
+
+#define WLFC_CTL_TYPE_FIFO_CREDITBACK		11
+
+#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP	12
+#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET	13
+#define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS	14
+
+
+#define WLFC_CTL_TYPE_TX_ENTRY_STAMP		15
+#define WLFC_CTL_TYPE_RX_STAMP			16
+
+#define WLFC_CTL_TYPE_TRANS_ID			18
+#define WLFC_CTL_TYPE_COMP_TXSTATUS		19
+
+#define WLFC_CTL_TYPE_TID_OPEN			20
+#define WLFC_CTL_TYPE_TID_CLOSE			21
+
+
+#define WLFC_CTL_TYPE_FILLER			255
+
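+/* Sketch (illustrative only): signalling data is a stream of (type, len, value)
+ * entries as described in the table above, except WLFC_CTL_TYPE_FILLER, which is a
+ * single padding byte with no length or value. "buf"/"buflen" are hypothetical.
+ *
+ *	uint8 *p = buf;
+ *	while (p < buf + buflen) {
+ *		uint8 type = *p++;
+ *		if (type == WLFC_CTL_TYPE_FILLER)
+ *			continue;		(padding byte, no length/value)
+ *		uint8 len = *p++;
+ *		(dispatch on type; value bytes are p[0] .. p[len - 1])
+ *		p += len;
+ *	}
+ */
+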
+#define WLFC_CTL_VALUE_LEN_MACDESC		8	/* handle, interface, MAC */
+
+#define WLFC_CTL_VALUE_LEN_MAC			1	/* MAC-handle */
+#define WLFC_CTL_VALUE_LEN_RSSI			1
+
+#define WLFC_CTL_VALUE_LEN_INTERFACE		1
+#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP	2
+
+#define WLFC_CTL_VALUE_LEN_TXSTATUS		4
+#define WLFC_CTL_VALUE_LEN_PKTTAG		4
+
+#define WLFC_CTL_VALUE_LEN_SEQ			2
+
+/* enough space to hold credit returns for all 4 ACs, bc/mc and the atim fifo */
+#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK	6
+
+#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT	3	/* credit, MAC-handle, prec_bitmap */
+#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET	3	/* credit, MAC-handle, prec_bitmap */
+
+
+#define WLFC_PKTFLAG_PKTFROMHOST	0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED	0x02
+
+#define WL_TXSTATUS_STATUS_MASK			0xff /* allow 8 bits */
+#define WL_TXSTATUS_STATUS_SHIFT		24
+
+#define WL_TXSTATUS_SET_STATUS(x, status)	((x)  = \
+	((x) & ~(WL_TXSTATUS_STATUS_MASK << WL_TXSTATUS_STATUS_SHIFT)) | \
+	(((status) & WL_TXSTATUS_STATUS_MASK) << WL_TXSTATUS_STATUS_SHIFT))
+#define WL_TXSTATUS_GET_STATUS(x)	(((x) >> WL_TXSTATUS_STATUS_SHIFT) & \
+	WL_TXSTATUS_STATUS_MASK)
+
+#define WL_TXSTATUS_GENERATION_MASK		1 /* allow 1 bit */
+#define WL_TXSTATUS_GENERATION_SHIFT		31
+
+#define WL_TXSTATUS_SET_GENERATION(x, gen)	((x) = \
+	((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \
+	(((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT))
+
+#define WL_TXSTATUS_GET_GENERATION(x)	(((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \
+	WL_TXSTATUS_GENERATION_MASK)
+
+#define WL_TXSTATUS_FLAGS_MASK			0xf /* allow 4 bits only */
+#define WL_TXSTATUS_FLAGS_SHIFT			27
+
+#define WL_TXSTATUS_SET_FLAGS(x, flags)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \
+	(((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT))
+#define WL_TXSTATUS_GET_FLAGS(x)		(((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \
+	WL_TXSTATUS_FLAGS_MASK)
+
+#define WL_TXSTATUS_FIFO_MASK			0x7 /* allow 3 bits for FIFO ID */
+#define WL_TXSTATUS_FIFO_SHIFT			24
+
+#define WL_TXSTATUS_SET_FIFO(x, flags)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \
+	(((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT))
+#define WL_TXSTATUS_GET_FIFO(x)		(((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK)
+
+#define WL_TXSTATUS_PKTID_MASK			0xffffff /* allow 24 bits */
+#define WL_TXSTATUS_SET_PKTID(x, num)	((x) = \
+	((x) & ~WL_TXSTATUS_PKTID_MASK) | (num))
+#define WL_TXSTATUS_GET_PKTID(x)		((x) & WL_TXSTATUS_PKTID_MASK)
+
+#define WL_TXSTATUS_HSLOT_MASK			0xffff /* allow 16 bits */
+#define WL_TXSTATUS_HSLOT_SHIFT			8
+
+#define WL_TXSTATUS_SET_HSLOT(x, hslot)	((x)  = \
+	((x) & ~(WL_TXSTATUS_HSLOT_MASK << WL_TXSTATUS_HSLOT_SHIFT)) | \
+	(((hslot) & WL_TXSTATUS_HSLOT_MASK) << WL_TXSTATUS_HSLOT_SHIFT))
+#define WL_TXSTATUS_GET_HSLOT(x)	(((x) >> WL_TXSTATUS_HSLOT_SHIFT)& \
+	WL_TXSTATUS_HSLOT_MASK)
+
+#define WL_TXSTATUS_FREERUNCTR_MASK		0xff /* allow 8 bits */
+
+#define WL_TXSTATUS_SET_FREERUNCTR(x, ctr)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FREERUNCTR_MASK)) | \
+	((ctr) & WL_TXSTATUS_FREERUNCTR_MASK))
+#define WL_TXSTATUS_GET_FREERUNCTR(x)		((x)& WL_TXSTATUS_FREERUNCTR_MASK)
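+
+/* Usage sketch (illustrative, values hypothetical): the SET/GET pairs above pack
+ * independent fields into one 32-bit pkttag word without disturbing each other:
+ *
+ *	uint32 wlhdr = 0;
+ *	WL_TXSTATUS_SET_FLAGS(wlhdr, WLFC_PKTFLAG_PKTFROMHOST);
+ *	WL_TXSTATUS_SET_HSLOT(wlhdr, 0x123);
+ *	WL_TXSTATUS_SET_GENERATION(wlhdr, 1);
+ *	(then WL_TXSTATUS_GET_HSLOT(wlhdr) == 0x123 and
+ *	 WL_TXSTATUS_GET_GENERATION(wlhdr) == 1)
+ */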
+
+#define WL_SEQ_FROMFW_MASK		0x1 /* allow 1 bit */
+#define WL_SEQ_FROMFW_SHIFT		13
+#define WL_SEQ_SET_FROMFW(x, val)	((x) = \
+	((x) & ~(WL_SEQ_FROMFW_MASK << WL_SEQ_FROMFW_SHIFT)) | \
+	(((val) & WL_SEQ_FROMFW_MASK) << WL_SEQ_FROMFW_SHIFT))
+#define WL_SEQ_GET_FROMFW(x)	(((x) >> WL_SEQ_FROMFW_SHIFT) & \
+	WL_SEQ_FROMFW_MASK)
+
+#define WL_SEQ_FROMDRV_MASK		0x1 /* allow 1 bit */
+#define WL_SEQ_FROMDRV_SHIFT		12
+#define WL_SEQ_SET_FROMDRV(x, val)	((x) = \
+	((x) & ~(WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT)) | \
+	(((val) & WL_SEQ_FROMDRV_MASK) << WL_SEQ_FROMDRV_SHIFT))
+#define WL_SEQ_GET_FROMDRV(x)	(((x) >> WL_SEQ_FROMDRV_SHIFT) & \
+	WL_SEQ_FROMDRV_MASK)
+
+#define WL_SEQ_NUM_MASK			0xfff /* allow 12 bit */
+#define WL_SEQ_NUM_SHIFT		0
+#define WL_SEQ_SET_NUM(x, val)	((x) = \
+	((x) & ~(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) | \
+	(((val) & WL_SEQ_NUM_MASK) << WL_SEQ_NUM_SHIFT))
+#define WL_SEQ_GET_NUM(x)	(((x) >> WL_SEQ_NUM_SHIFT) & \
+	WL_SEQ_NUM_MASK)
+
+/* 32 STAs should be enough (6 bits); must be a power of 2 */
+#define WLFC_MAC_DESC_TABLE_SIZE	32
+#define WLFC_MAX_IFNUM				16
+#define WLFC_MAC_DESC_ID_INVALID	0xff
+
+/* b[7:5] -reuse guard, b[4:0] -value */
+#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f)
+
+#define WLFC_PKTFLAG_SET_PKTREQUESTED(x)	(x) |= \
+	(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x)	(x) &= \
+	~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+
+#define WLFC_MAX_PENDING_DATALEN	120
+
+/* host is free to discard the packet */
+#define WLFC_CTL_PKTFLAG_DISCARD	0
+/* D11 suppressed a packet */
+#define WLFC_CTL_PKTFLAG_D11SUPPRESS	1
+/* WL firmware suppressed a packet because the MAC is
+ * already in PS mode (short time window)
+ */
+#define WLFC_CTL_PKTFLAG_WLSUPPRESS	2
+/* Firmware tossed this packet */
+#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC	3
+/* Firmware tossed after retries */
+#define WLFC_CTL_PKTFLAG_DISCARD_NOACK	4
+
+#define WLFC_D11_STATUS_INTERPRET(txs)	\
+	(((txs)->status.suppr_ind !=  TX_STATUS_SUPR_NONE) ? \
+	WLFC_CTL_PKTFLAG_D11SUPPRESS : \
+	((txs)->status.was_acked ? \
+		WLFC_CTL_PKTFLAG_DISCARD : WLFC_CTL_PKTFLAG_DISCARD_NOACK))
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_DBGMESG(x) printf x
+/* wlfc-breadcrumb */
+#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \
+	{printf("WLFC: %s():%d:caller:%p\n", \
+	__FUNCTION__, __LINE__, __builtin_return_address(0));}} while (0)
+#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \
+	banner, ea[0], 	ea[1], 	ea[2], 	ea[3], 	ea[4], 	ea[5]); } while (0)
+#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s))
+#else
+#define WLFC_DBGMESG(x)
+#define WLFC_BREADCRUMB(x)
+#define WLFC_PRINTMAC(banner, ea)
+#define WLFC_WHEREIS(s)
+#endif
+
+/* AMPDU host reorder packet flags */
+#define WLHOST_REORDERDATA_MAXFLOWS		256
+#define WLHOST_REORDERDATA_LEN		 10
+#define WLHOST_REORDERDATA_TOTLEN	(WLHOST_REORDERDATA_LEN + 1 + 1) /* +tag +len */
+
+#define WLHOST_REORDERDATA_FLOWID_OFFSET		0
+#define WLHOST_REORDERDATA_MAXIDX_OFFSET		2
+#define WLHOST_REORDERDATA_FLAGS_OFFSET			4
+#define WLHOST_REORDERDATA_CURIDX_OFFSET		6
+#define WLHOST_REORDERDATA_EXPIDX_OFFSET		8
+
+#define WLHOST_REORDERDATA_DEL_FLOW		0x01
+#define WLHOST_REORDERDATA_FLUSH_ALL		0x02
+#define WLHOST_REORDERDATA_CURIDX_VALID		0x04
+#define WLHOST_REORDERDATA_EXPIDX_VALID		0x08
+#define WLHOST_REORDERDATA_NEW_HOLE		0x10
+
+/* transaction id data length: byte 0: reserved, byte 1: sequence number, bytes 2-5: timestamp */
+#define WLFC_CTL_TRANS_ID_LEN			6
+#define WLFC_TYPE_TRANS_ID_LEN			6
+
+#define WLFC_MODE_HANGER	1 /* use hanger */
+#define WLFC_MODE_AFQ		2 /* use afq */
+#define WLFC_IS_OLD_DEF(x) ((x & 1) || (x & 2))
+
+#define WLFC_MODE_AFQ_SHIFT		2	/* afq bit */
+#define WLFC_SET_AFQ(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_AFQ_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_AFQ_SHIFT))
+#define WLFC_GET_AFQ(x)	(((x) >> WLFC_MODE_AFQ_SHIFT) & 1)
+
+#define WLFC_MODE_REUSESEQ_SHIFT	3	/* seq reuse bit */
+#define WLFC_SET_REUSESEQ(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT))
+#define WLFC_GET_REUSESEQ(x)	(((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1)
+
+#define WLFC_MODE_REORDERSUPP_SHIFT	4	/* host reorder suppress pkt bit */
+#define WLFC_SET_REORDERSUPP(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_REORDERSUPP_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_REORDERSUPP_SHIFT))
+#define WLFC_GET_REORDERSUPP(x)	(((x) >> WLFC_MODE_REORDERSUPP_SHIFT) & 1)
+
+#endif /* __wlfc_proto_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h
new file mode 100644
index 0000000000000000000000000000000000000000..5835c34358ad2fa4e6f7f954d3b2b673f8257f15
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h
@@ -0,0 +1,5864 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wlioctl.h 490639 2014-07-11 08:31:53Z $
+ */
+
+#ifndef _wlioctl_h_
+#define	_wlioctl_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <proto/bcmip.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmip.h>
+#include <proto/bcmevent.h>
+#include <proto/802.11.h>
+#include <proto/802.1d.h>
+#include <bcmwifi_channels.h>
+#include <bcmwifi_rates.h>
+#include <devctrl_if/wlioctl_defs.h>
+
+#if 0 && (NDISVER >= 0x0600)
+#include <proto/bcmipv6.h>
+#endif
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#include <bcm_mpool_pub.h>
+#include <bcmcdc.h>
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+
+
+
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+#ifndef INTF_NAME_SIZ
+#define INTF_NAME_SIZ	16
+#endif
+
+/* Used to send ioctls over the transport pipe */
+typedef struct remote_ioctl {
+	cdc_ioctl_t	msg;
+	uint32		data_len;
+	char           intf_name[INTF_NAME_SIZ];
+} rem_ioctl_t;
+#define REMOTE_SIZE	sizeof(rem_ioctl_t)
+
+typedef struct {
+	uint32 num;
+	chanspec_t list[1];
+} chanspec_list_t;
+
+/* DFS Forced param */
+typedef struct wl_dfs_forced_params {
+	chanspec_t chspec;
+	uint16 version;
+	chanspec_list_t chspec_list;
+} wl_dfs_forced_t;
+
+#define DFS_PREFCHANLIST_VER 0x01
+#define WL_CHSPEC_LIST_FIXED_SIZE	OFFSETOF(chanspec_list_t, list)
+#define WL_DFS_FORCED_PARAMS_FIXED_SIZE \
+	(WL_CHSPEC_LIST_FIXED_SIZE + OFFSETOF(wl_dfs_forced_t, chspec_list))
+#define WL_DFS_FORCED_PARAMS_MAX_SIZE \
+	WL_DFS_FORCED_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(chanspec_t))
+
+/* association decision information */
+typedef struct {
+	bool		assoc_approved;		/* (re)association approved */
+	uint16		reject_reason;		/* reason code for rejecting association */
+	struct		ether_addr   da;
+#if 0 && (NDISVER >= 0x0620)
+	LARGE_INTEGER	sys_time;		/* current system time */
+#else
+	int64		sys_time;		/* current system time */
+#endif
+} assoc_decision_t;
+
+#define ACTION_FRAME_SIZE 1800
+
+typedef struct wl_action_frame {
+	struct ether_addr 	da;
+	uint16 			len;
+	uint32 			packetId;
+	uint8			data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+typedef struct ssid_info
+{
+	uint8		ssid_len;	/* the length of SSID */
+	uint8		ssid[32];	/* SSID string */
+} ssid_info_t;
+
+typedef struct wl_af_params {
+	uint32 			channel;
+	int32 			dwell_time;
+	struct ether_addr 	BSSID;
+	wl_action_frame_t	action_frame;
+} wl_af_params_t;
+
+#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params)
+
+#define MFP_TEST_FLAG_NORMAL	0
+#define MFP_TEST_FLAG_ANY_KEY	1
+typedef struct wl_sa_query {
+	uint32			flag;
+	uint8 			action;
+	uint16 			id;
+	struct ether_addr 	da;
+} wl_sa_query_t;
+
+#endif /*  LINUX_POSTMOGRIFY_REMOVAL */
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+/* Flags for OBSS IOVAR Parameters */
+#define WL_OBSS_DYN_BWSW_FLAG_ACTIVITY_PERIOD        (0x01)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_PERIOD      (0x02)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_INCR_PERIOD (0x04)
+#define WL_OBSS_DYN_BWSW_FLAG_PSEUDO_SENSE_PERIOD    (0x08)
+#define WL_OBSS_DYN_BWSW_FLAG_RX_CRS_PERIOD          (0x10)
+#define WL_OBSS_DYN_BWSW_FLAG_DUR_THRESHOLD          (0x20)
+#define WL_OBSS_DYN_BWSW_FLAG_TXOP_PERIOD            (0x40)
+
+/* OBSS IOVAR Version information */
+#define WL_PROT_OBSS_CONFIG_PARAMS_VERSION 1
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 obss_bwsw_activity_cfm_count_cfg; /* configurable count in
+		* seconds before we confirm that OBSS is present and
+		* dynamically activate the dynamic bandwidth switch.
+		*/
+	uint8 obss_bwsw_no_activity_cfm_count_cfg; /* configurable count in
+		* seconds before we confirm that OBSS is gone and dynamically
+		* start the pseudo upgrade. If OBSS is seen again during the
+		* pseudo sense time (i.e. the watchdog falsely detected that
+		* OBSS was gone), this count is incremented in steps of
+		* obss_bwsw_no_activity_cfm_count_incr_cfg before OBSS absence
+		* is confirmed again. At present a maximum of 30 seconds is
+		* allowed [OBSS_BWSW_NO_ACTIVITY_MAX_INCR_DEFAULT].
+		*/
+	uint8 obss_bwsw_no_activity_cfm_count_incr_cfg; /* see above
+		*/
+	uint16 obss_bwsw_pseudo_sense_count_cfg; /* number of msecs/cnt to be in
+		* pseudo state. This is used to sense/measure the stats from lq.
+		*/
+	uint8 obss_bwsw_rx_crs_threshold_cfg; /* RX CRS default threshold */
+	uint8 obss_bwsw_dur_thres; /* OBSS dyn bwsw trigger/RX CRS Sec */
+	uint8 obss_bwsw_txop_threshold_cfg; /* TXOP default threshold */
+} BWL_POST_PACKED_STRUCT wlc_prot_dynbwsw_config_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 version;	/* version field */
+	uint32 config_mask;
+	uint32 reset_mask;
+	wlc_prot_dynbwsw_config_t config_params;
+} BWL_POST_PACKED_STRUCT obss_config_params_t;
+
+
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* Legacy structure to help keep backward compatible wl tool and tray app */
+
+#define	LEGACY_WL_BSS_INFO_VERSION	107	/* older version of wl_bss_info struct */
+
+typedef struct wl_bss_info_107 {
+	uint32		version;		/* version field */
+	uint32		length;			/* byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/* units are Kusec */
+	uint16		capability;		/* Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	struct {
+		uint	count;			/* # rates in this set */
+		uint8	rates[16];		/* rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/* supported rates */
+	uint8		channel;		/* Channel no. */
+	uint16		atim_window;		/* units are Kusec */
+	uint8		dtim_period;		/* DTIM period */
+	int16		RSSI;			/* receive signal strength (in dBm) */
+	int8		phy_noise;		/* noise (in dBm) */
+	uint32		ie_length;		/* byte length of Information Elements */
+	/* variable length Information Elements */
+} wl_bss_info_107_t;
+
+/*
+ * Per-BSS information structure.
+ */
+
+#define	LEGACY2_WL_BSS_INFO_VERSION	108		/* old version of wl_bss_info struct */
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info_108 {
+	uint32		version;		/* version field */
+	uint32		length;			/* byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/* units are Kusec */
+	uint16		capability;		/* Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	struct {
+		uint	count;			/* # rates in this set */
+		uint8	rates[16];		/* rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/* supported rates */
+	chanspec_t	chanspec;		/* chanspec for bss */
+	uint16		atim_window;		/* units are Kusec */
+	uint8		dtim_period;		/* DTIM period */
+	int16		RSSI;			/* receive signal strength (in dBm) */
+	int8		phy_noise;		/* noise (in dBm) */
+
+	uint8		n_cap;			/* BSS is 802.11N Capable */
+	uint32		nbss_cap;		/* 802.11N BSS Capabilities (based on HT_CAP_*) */
+	uint8		ctl_ch;			/* 802.11N BSS control channel number */
+	uint32		reserved32[1];		/* Reserved for expansion of BSS properties */
+	uint8		flags;			/* flags */
+	uint8		reserved[3];		/* Reserved for expansion of BSS properties */
+	uint8		basic_mcs[MCSSET_LEN];	/* 802.11N BSS required MCS set */
+
+	uint16		ie_offset;		/* offset at which IEs start, from beginning */
+	uint32		ie_length;		/* byte length of Information Elements */
+	/* Add new fields here */
+	/* variable length Information Elements */
+} wl_bss_info_108_t;
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define	WL_BSS_INFO_VERSION	109		/* current version of wl_bss_info struct */
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info {
+	uint32		version;		/* version field */
+	uint32		length;			/* byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/* units are Kusec */
+	uint16		capability;		/* Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	struct {
+		uint	count;			/* # rates in this set */
+		uint8	rates[16];		/* rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/* supported rates */
+	chanspec_t	chanspec;		/* chanspec for bss */
+	uint16		atim_window;		/* units are Kusec */
+	uint8		dtim_period;		/* DTIM period */
+	int16		RSSI;			/* receive signal strength (in dBm) */
+	int8		phy_noise;		/* noise (in dBm) */
+
+	uint8		n_cap;			/* BSS is 802.11N Capable */
+	uint32		nbss_cap;		/* 802.11N+AC BSS Capabilities */
+	uint8		ctl_ch;			/* 802.11N BSS control channel number */
+	uint8		padding1[3];		/* explicit struct alignment padding */
+	uint16		vht_rxmcsmap;	/* VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+	uint16		vht_txmcsmap;	/* VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+	uint8		flags;			/* flags */
+	uint8		vht_cap;		/* BSS is vht capable */
+	uint8		reserved[2];		/* Reserved for expansion of BSS properties */
+	uint8		basic_mcs[MCSSET_LEN];	/* 802.11N BSS required MCS set */
+
+	uint16		ie_offset;		/* offset at which IEs start, from beginning */
+	uint32		ie_length;		/* byte length of Information Elements */
+	int16		SNR;			/* average SNR during frame reception */
+	/* Add new fields here */
+	/* variable length Information Elements */
+} wl_bss_info_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+typedef struct wl_bsscfg {
+	uint32  bsscfg_idx;
+	uint32  wsec;
+	uint32  WPA_auth;
+	uint32  wsec_index;
+	uint32  associated;
+	uint32  BSS;
+	uint32  phytest_on;
+	struct ether_addr   prev_BSSID;
+	struct ether_addr   BSSID;
+	uint32  targetbss_wpa2_flags;
+	uint32 assoc_type;
+	uint32 assoc_state;
+} wl_bsscfg_t;
+
+typedef struct wl_if_add {
+	uint32  bsscfg_flags;
+	uint32  if_flags;
+	uint32  ap;
+	struct ether_addr   mac_addr;
+} wl_if_add_t;
+
+typedef struct wl_bss_config {
+	uint32	atim_window;
+	uint32	beacon_period;
+	uint32	chanspec;
+} wl_bss_config_t;
+
+#define WL_BSS_USER_RADAR_CHAN_SELECT	0x1	/* User application will randomly select
+						 * radar channel.
+						 */
+
+#define DLOAD_HANDLER_VER			1	/* Downloader version */
+#define DLOAD_FLAG_VER_MASK		0xf000	/* Downloader version mask */
+#define DLOAD_FLAG_VER_SHIFT	12	/* Downloader version shift */
+
+#define DL_CRC_NOT_INUSE 			0x0001
+
+/* generic download types & flags */
+enum {
+	DL_TYPE_UCODE = 1,
+	DL_TYPE_CLM = 2
+};
+
+/* ucode type values */
+enum {
+	UCODE_FW,
+	INIT_VALS,
+	BS_INIT_VALS
+};
+
+struct wl_dload_data {
+	uint16 flag;
+	uint16 dload_type;
+	uint32 len;
+	uint32 crc;
+	uint8  data[1];
+};
+typedef struct wl_dload_data wl_dload_data_t;
+
+struct wl_ucode_info {
+	uint32 ucode_type;
+	uint32 num_chunks;
+	uint32 chunk_len;
+	uint32 chunk_num;
+	uint8  data_chunk[1];
+};
+typedef struct wl_ucode_info wl_ucode_info_t;
+
+struct wl_clm_dload_info {
+	uint32 ds_id;
+	uint32 clm_total_len;
+	uint32 num_chunks;
+	uint32 chunk_len;
+	uint32 chunk_offset;
+	uint8  data_chunk[1];
+};
+typedef struct wl_clm_dload_info wl_clm_dload_info_t;
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+typedef struct wlc_ssid {
+	uint32		SSID_len;
+	uchar		SSID[DOT11_MAX_SSID_LEN];
+} wlc_ssid_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+#define MAX_PREFERRED_AP_NUM     5
+typedef struct wlc_fastssidinfo {
+	uint32				SSID_channel[MAX_PREFERRED_AP_NUM];
+	wlc_ssid_t		SSID_info[MAX_PREFERRED_AP_NUM];
+} wlc_fastssidinfo_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wnm_url {
+	uint8   len;
+	uint8   data[1];
+} BWL_POST_PACKED_STRUCT wnm_url_t;
+
+typedef struct chan_scandata {
+	uint8		txpower;
+	uint8		pad;
+	chanspec_t	channel;	/* Channel num, bw, ctrl_sb and band */
+	uint32		channel_mintime;
+	uint32		channel_maxtime;
+} chan_scandata_t;
+
+typedef enum wl_scan_type {
+	EXTDSCAN_FOREGROUND_SCAN,
+	EXTDSCAN_BACKGROUND_SCAN,
+	EXTDSCAN_FORCEDBACKGROUND_SCAN
+} wl_scan_type_t;
+
+#define WLC_EXTDSCAN_MAX_SSID		5
+
+typedef struct wl_extdscan_params {
+	int8 		nprobes;		/* 0, passive, otherwise active */
+	int8    	split_scan;		/* split scan */
+	int8		band;			/* band */
+	int8		pad;
+	wlc_ssid_t 	ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */
+	uint32		tx_rate;		/* in 500kbps units */
+	wl_scan_type_t	scan_type;		/* enum */
+	int32 		channel_num;
+	chan_scandata_t channel_list[1];	/* list of chandata structs */
+} wl_extdscan_params_t;
+
+#define WL_EXTDSCAN_PARAMS_FIXED_SIZE 	(sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
+
+#define WL_SCAN_PARAMS_SSID_MAX 	10
+
+typedef struct wl_scan_params {
+	wlc_ssid_t ssid;		/* default: {0, ""} */
+	struct ether_addr bssid;	/* default: bcast */
+	int8 bss_type;			/* default: any,
+					 * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+					 */
+	uint8 scan_type;		/* flags, 0 use default */
+	int32 nprobes;			/* -1 use default, number of probes per channel */
+	int32 active_time;		/* -1 use default, dwell time per channel for
+					 * active scanning
+					 */
+	int32 passive_time;		/* -1 use default, dwell time per channel
+					 * for passive scanning
+					 */
+	int32 home_time;		/* -1 use default, dwell time for the home channel
+					 * between channel scans
+					 */
+	int32 channel_num;		/* count of channels and ssids that follow
+					 *
+					 * low half is count of channels in channel_list, 0
+					 * means default (use all available channels)
+					 *
+					 * high half is entries in wlc_ssid_t array that
+					 * follows channel_list, aligned for int32 (4 bytes)
+					 * meaning an odd channel count implies a 2-byte pad
+					 * between end of channel_list and first ssid
+					 *
+					 * if ssid count is zero, single ssid in the fixed
+					 * parameter portion is assumed, otherwise ssid in
+					 * the fixed portion is ignored
+					 */
+	uint16 channel_list[1];		/* list of chanspecs */
+} wl_scan_params_t;
+
+/* size of wl_scan_params not including variable length array */
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+#define WL_MAX_ROAMSCAN_DATSZ	(WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
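+
+/* Sizing sketch (illustrative, not defined by this header): a request carrying
+ * nchan chanspecs and nssid extra SSIDs after the fixed portion needs roughly
+ *
+ *	WL_SCAN_PARAMS_FIXED_SIZE
+ *		+ ROUNDUP(nchan * sizeof(uint16), sizeof(uint32))	(pad to int32)
+ *		+ nssid * sizeof(wlc_ssid_t)
+ *
+ * bytes, with channel_num set to ((nssid << 16) | nchan) to match the packing
+ * described in the channel_num comment above. ROUNDUP stands for the usual
+ * align-up helper and is shown only for illustration.
+ */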
+
+#define ISCAN_REQ_VERSION 1
+
+/* incremental scan struct */
+typedef struct wl_iscan_params {
+	uint32 version;
+	uint16 action;
+	uint16 scan_duration;
+	wl_scan_params_t params;
+} wl_iscan_params_t;
+
+/* 3 fields + size of wl_scan_params, not including variable length array */
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+typedef struct wl_scan_results {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	wl_bss_info_t bss_info[1];
+} wl_scan_results_t;
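+
+/* Access sketch (illustrative only): per the wl_bss_info comments, IEs start at
+ * ie_offset bytes from the start of a record and the next record in the vector
+ * starts 'length' bytes later, so a walker (ignoring endian conversion) might be:
+ *
+ *	wl_bss_info_t *bi = results->bss_info;
+ *	for (i = 0; i < results->count; i++) {
+ *		uint8 *ies = (uint8 *)bi + bi->ie_offset;	(ie_length bytes of IEs)
+ *		bi = (wl_bss_info_t *)((uint8 *)bi + bi->length);
+ *	}
+ */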
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* size of wl_scan_results not including variable length array */
+#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
+
+
+#define ESCAN_REQ_VERSION 1
+
+typedef struct wl_escan_params {
+	uint32 version;
+	uint16 action;
+	uint16 sync_id;
+	wl_scan_params_t params;
+} wl_escan_params_t;
+
+#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_escan_result {
+	uint32 buflen;
+	uint32 version;
+	uint16 sync_id;
+	uint16 bss_count;
+	wl_bss_info_t bss_info[1];
+} wl_escan_result_t;
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+
+/* incremental scan results struct */
+typedef struct wl_iscan_results {
+	uint32 status;
+	wl_scan_results_t results;
+} wl_iscan_results_t;
+
+/* size of wl_iscan_results not including variable length array */
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+	(WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+
+#define SCANOL_PARAMS_VERSION	1
+
+typedef struct scanol_params {
+	uint32 version;
+	uint32 flags;	/* offload scanning flags */
+	int32 active_time;	/* -1 use default, dwell time per channel for active scanning */
+	int32 passive_time;	/* -1 use default, dwell time per channel for passive scanning */
+	int32 idle_rest_time;	/* -1 use default, time idle between scan cycle */
+	int32 idle_rest_time_multiplier;
+	int32 active_rest_time;
+	int32 active_rest_time_multiplier;
+	int32 scan_cycle_idle_rest_time;
+	int32 scan_cycle_idle_rest_multiplier;
+	int32 scan_cycle_active_rest_time;
+	int32 scan_cycle_active_rest_multiplier;
+	int32 max_rest_time;
+	int32 max_scan_cycles;
+	int32 nprobes;		/* -1 use default, number of probes per channel */
+	int32 scan_start_delay;
+	uint32 nchannels;
+	uint32 ssid_count;
+	wlc_ssid_t ssidlist[1];
+} scanol_params_t;
+
+typedef struct wl_probe_params {
+	wlc_ssid_t ssid;
+	struct ether_addr bssid;
+	struct ether_addr mac;
+} wl_probe_params_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define WL_MAXRATES_IN_SET		16	/* max # of rates in a rateset */
+typedef struct wl_rateset {
+	uint32	count;			/* # rates in this set */
+	uint8	rates[WL_MAXRATES_IN_SET];	/* rates in 500kbps units w/hi bit set if basic */
+} wl_rateset_t;
+
+typedef struct wl_rateset_args {
+	uint32	count;			/* # rates in this set */
+	uint8	rates[WL_MAXRATES_IN_SET];	/* rates in 500kbps units w/hi bit set if basic */
+	uint8   mcs[MCSSET_LEN];        /* supported mcs index bit map */
+	uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
+} wl_rateset_args_t;
+
+#define TXBF_RATE_MCS_ALL		4
+#define TXBF_RATE_VHT_ALL		4
+#define TXBF_RATE_OFDM_ALL		8
+
+typedef struct wl_txbf_rateset {
+	uint8	txbf_rate_mcs[TXBF_RATE_MCS_ALL];	/* one for each stream */
+	uint8	txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL];	/* one for each stream */
+	uint16	txbf_rate_vht[TXBF_RATE_VHT_ALL];	/* one for each stream */
+	uint16	txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL];	/* one for each stream */
+	uint8	txbf_rate_ofdm[TXBF_RATE_OFDM_ALL];	/* bitmap of ofdm rates that enables txbf */
+	uint8	txbf_rate_ofdm_bcm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */
+	uint8	txbf_rate_ofdm_cnt;
+	uint8	txbf_rate_ofdm_cnt_bcm;
+} wl_txbf_rateset_t;
+
+#define OFDM_RATE_MASK			0x0000007f
+typedef uint8 ofdm_rates_t;
+
+typedef struct wl_rates_info {
+	wl_rateset_t rs_tgt;
+	uint32 phy_type;
+	int32 bandtype;
+	uint8 cck_only;
+	uint8 rate_mask;
+	uint8 mcsallow;
+	uint8 bw;
+	uint8 txstreams;
+} wl_rates_info_t;
+
+/* uint32 list */
+typedef struct wl_uint32_list {
+	/* in - # of elements, out - # of entries */
+	uint32 count;
+	/* variable length uint32 list */
+	uint32 element[1];
+} wl_uint32_list_t;
+
+/* used for association with a specific BSSID and chanspec list */
+typedef struct wl_assoc_params {
+	struct ether_addr bssid;	/* 00:00:00:00:00:00: broadcast scan */
+	uint16 bssid_cnt;		/* 0: use chanspec_num and the single bssid;
+					* otherwise count of chanspecs in chanspec_list
+					* AND of the paired bssids following chanspec_list.
+					* Also, chanspec_num has to be set to zero
+					* for the bssid list to be used.
+					*/
+	int32 chanspec_num;		/* 0: all available channels,
+					* otherwise count of chanspecs in chanspec_list
+					*/
+	chanspec_t chanspec_list[1];	/* list of chanspecs */
+} wl_assoc_params_t;
+
+#define WL_ASSOC_PARAMS_FIXED_SIZE 	OFFSETOF(wl_assoc_params_t, chanspec_list)
+
+/* used for reassociation/roam to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE	WL_ASSOC_PARAMS_FIXED_SIZE
+
+/* used for association to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_join_assoc_params_t;
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE	WL_ASSOC_PARAMS_FIXED_SIZE
+
+/* used for join with or without a specific bssid and channel list */
+typedef struct wl_join_params {
+	wlc_ssid_t ssid;
+	wl_assoc_params_t params;	/* optional field, but it must include the fixed portion
+					 * of the wl_assoc_params_t struct when it is present.
+					 */
+} wl_join_params_t;
+
+#ifndef  LINUX_POSTMOGRIFY_REMOVAL
+#define WL_JOIN_PARAMS_FIXED_SIZE 	(OFFSETOF(wl_join_params_t, params) + \
+					 WL_ASSOC_PARAMS_FIXED_SIZE)
+/* scan params for extended join */
+typedef struct wl_join_scan_params {
+	uint8 scan_type;		/* 0 use default, active or passive scan */
+	int32 nprobes;			/* -1 use default, number of probes per channel */
+	int32 active_time;		/* -1 use default, dwell time per channel for
+					 * active scanning
+					 */
+	int32 passive_time;		/* -1 use default, dwell time per channel
+					 * for passive scanning
+					 */
+	int32 home_time;		/* -1 use default, dwell time for the home channel
+					 * between channel scans
+					 */
+} wl_join_scan_params_t;
+
+/* extended join params */
+typedef struct wl_extjoin_params {
+	wlc_ssid_t ssid;		/* {0, ""}: wildcard scan */
+	wl_join_scan_params_t scan;
+	wl_join_assoc_params_t assoc;	/* optional field, but it must include the fixed portion
+					 * of the wl_join_assoc_params_t struct when it is
+					 * present.
+					 */
+} wl_extjoin_params_t;
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE 	(OFFSETOF(wl_extjoin_params_t, assoc) + \
+					 WL_JOIN_ASSOC_PARAMS_FIXED_SIZE)
+
+#define ANT_SELCFG_MAX		4	/* max number of antenna configurations */
+#define MAX_STREAMS_SUPPORTED	4	/* max number of streams supported */
+typedef struct {
+	uint8 ant_config[ANT_SELCFG_MAX];	/* antenna configuration */
+	uint8 num_antcfg;	/* number of available antenna configurations */
+} wlc_antselcfg_t;
+
+typedef struct {
+	uint32 duration;	/* millisecs spent sampling this channel */
+	uint32 congest_ibss;	/* millisecs in our bss (presumably this traffic will */
+				/*  move if cur bss moves channels) */
+	uint32 congest_obss;	/* traffic not in our bss */
+	uint32 interference;	/* millisecs detecting a non 802.11 interferer. */
+	uint32 timestamp;	/* second timestamp */
+} cca_congest_t;
+
+typedef struct {
+	chanspec_t chanspec;	/* Which channel? */
+	uint8 num_secs;		/* How many secs worth of data */
+	cca_congest_t  secs[1];	/* Data */
+} cca_congest_channel_req_t;
+
+
+/* interference sources */
+enum interference_source {
+	ITFR_NONE = 0,		/* interference */
+	ITFR_PHONE,		/* wireless phone */
+	ITFR_VIDEO_CAMERA,	/* wireless video camera */
+	ITFR_MICROWAVE_OVEN,	/* microwave oven */
+	ITFR_BABY_MONITOR,	/* wireless baby monitor */
+	ITFR_BLUETOOTH,		/* bluetooth */
+	ITFR_VIDEO_CAMERA_OR_BABY_MONITOR,	/* wireless camera or baby monitor */
+	ITFR_BLUETOOTH_OR_BABY_MONITOR,	/* bluetooth or baby monitor */
+	ITFR_VIDEO_CAMERA_OR_PHONE,	/* video camera or phone */
+	ITFR_UNIDENTIFIED	/* interference from unidentified source */
+};
+
+/* structure for interference source report */
+typedef struct {
+	uint32 flags;	/* flags.  bit definitions below */
+	uint32 source;	/* last detected interference source */
+	uint32 timestamp;	/* second timestamp on interferenced flag change */
+} interference_source_rep_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define WLC_CNTRY_BUF_SZ	4		/* Country string is 3 bytes + NUL */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+typedef struct wl_country {
+	char country_abbrev[WLC_CNTRY_BUF_SZ];	/* nul-terminated country code used in
+						 * the Country IE
+						 */
+	int32 rev;				/* revision specifier for ccode
+						 * on set, -1 indicates unspecified.
+						 * on get, rev >= 0
+						 */
+	char ccode[WLC_CNTRY_BUF_SZ];		/* nul-terminated built-in country code.
+						 * variable length, but fixed size in
+						 * struct allows simple allocation for
+						 * expected country strings <= 3 chars.
+						 */
+} wl_country_t;
+
+typedef struct wl_channels_in_country {
+	uint32 buflen;
+	uint32 band;
+	char country_abbrev[WLC_CNTRY_BUF_SZ];
+	uint32 count;
+	uint32 channel[1];
+} wl_channels_in_country_t;
+
+typedef struct wl_country_list {
+	uint32 buflen;
+	uint32 band_set;
+	uint32 band;
+	uint32 count;
+	char country_abbrev[1];
+} wl_country_list_t;
+
+typedef struct wl_rm_req_elt {
+	int8	type;
+	int8	flags;
+	chanspec_t	chanspec;
+	uint32	token;		/* token for this measurement */
+	uint32	tsf_h;		/* TSF high 32-bits of Measurement start time */
+	uint32	tsf_l;		/* TSF low 32-bits */
+	uint32	dur;		/* TUs */
+} wl_rm_req_elt_t;
+
+typedef struct wl_rm_req {
+	uint32	token;		/* overall measurement set token */
+	uint32	count;		/* number of measurement requests */
+	void	*cb;		/* completion callback function: may be NULL */
+	void	*cb_arg;	/* arg to completion callback function */
+	wl_rm_req_elt_t	req[1];	/* variable length block of requests */
+} wl_rm_req_t;
+#define WL_RM_REQ_FIXED_LEN	OFFSETOF(wl_rm_req_t, req)
+
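+/*
+ * Usage sketch (illustrative only): a measurement request set with 'count'
+ * elements is allocated as the fixed header plus a variable block of
+ * wl_rm_req_elt_t entries, per WL_RM_REQ_FIXED_LEN above.
+ *
+ *	uint count = 2;
+ *	uint len = WL_RM_REQ_FIXED_LEN + count * sizeof(wl_rm_req_elt_t);
+ *	wl_rm_req_t *rm = (wl_rm_req_t *)malloc(len);
+ *
+ *	memset(rm, 0, len);
+ *	rm->count = count;
+ */
+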
+typedef struct wl_rm_rep_elt {
+	int8	type;
+	int8	flags;
+	chanspec_t	chanspec;
+	uint32	token;		/* token for this measurement */
+	uint32	tsf_h;		/* TSF high 32-bits of Measurement start time */
+	uint32	tsf_l;		/* TSF low 32-bits */
+	uint32	dur;		/* TUs */
+	uint32	len;		/* byte length of data block */
+	uint8	data[1];	/* variable length data block */
+} wl_rm_rep_elt_t;
+#define WL_RM_REP_ELT_FIXED_LEN	24	/* length excluding data block */
+
+#define WL_RPI_REP_BIN_NUM 8
+typedef struct wl_rm_rpi_rep {
+	uint8	rpi[WL_RPI_REP_BIN_NUM];
+	int8	rpi_max[WL_RPI_REP_BIN_NUM];
+} wl_rm_rpi_rep_t;
+
+typedef struct wl_rm_rep {
+	uint32	token;		/* overall measurement set token */
+	uint32	len;		/* length of measurement report block */
+	wl_rm_rep_elt_t	rep[1];	/* variable length block of reports */
+} wl_rm_rep_t;
+#define WL_RM_REP_FIXED_LEN	8
+
+#ifdef BCMCCX
+
+#define LEAP_USER_MAX		32
+#define LEAP_DOMAIN_MAX		32
+#define LEAP_PASSWORD_MAX	32
+
+typedef struct wl_leap_info {
+	wlc_ssid_t ssid;
+	uint8 user_len;
+	uchar user[LEAP_USER_MAX];
+	uint8 password_len;
+	uchar password[LEAP_PASSWORD_MAX];
+	uint8 domain_len;
+	uchar domain[LEAP_DOMAIN_MAX];
+} wl_leap_info_t;
+
+typedef struct wl_leap_list {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	wl_leap_info_t leap_info[1];
+} wl_leap_list_t;
+#endif	/* BCMCCX */
+
+typedef enum sup_auth_status {
+	/* Basic supplicant authentication states */
+	WLC_SUP_DISCONNECTED = 0,
+	WLC_SUP_CONNECTING,
+	WLC_SUP_IDREQUIRED,
+	WLC_SUP_AUTHENTICATING,
+	WLC_SUP_AUTHENTICATED,
+	WLC_SUP_KEYXCHANGE,
+	WLC_SUP_KEYED,
+	WLC_SUP_TIMEOUT,
+	WLC_SUP_LAST_BASIC_STATE,
+
+	/* Extended supplicant authentication states */
+	/* Waiting to receive handshake msg M1 */
+	WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
+	/* Preparing to send handshake msg M2 */
+	WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+	/* Waiting to receive handshake msg M3 */
+	WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+	WLC_SUP_KEYXCHANGE_PREP_M4,	/* Preparing to send handshake msg M4 */
+	WLC_SUP_KEYXCHANGE_WAIT_G1,	/* Waiting to receive handshake msg G1 */
+	WLC_SUP_KEYXCHANGE_PREP_G2	/* Preparing to send handshake msg G2 */
+} sup_auth_status_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+typedef struct wl_wsec_key {
+	uint32		index;		/* key index */
+	uint32		len;		/* key length */
+	uint8		data[DOT11_MAX_KEY_SIZE];	/* key data */
+	uint32		pad_1[18];
+	uint32		algo;		/* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+	uint32		flags;		/* misc flags */
+	uint32		pad_2[2];
+	int		pad_3;
+	int		iv_initialized;	/* has IV been initialized already? */
+	int		pad_4;
+	/* Rx IV */
+	struct {
+		uint32	hi;		/* upper 32 bits of IV */
+		uint16	lo;		/* lower 16 bits of IV */
+	} rxiv;
+	uint32		pad_5[2];
+	struct ether_addr ea;		/* per station */
+} wl_wsec_key_t;
+
+#define WSEC_MIN_PSK_LEN	8
+#define WSEC_MAX_PSK_LEN	64
+
+/* Flag for key material that needs passphrase hashing */
+#define WSEC_PASSPHRASE		(1<<0)
+
+/* receptacle for WLC_SET_WSEC_PMK parameter */
+typedef struct {
+	ushort	key_len;		/* octets in key material */
+	ushort	flags;			/* key handling qualification */
+	uint8	key[WSEC_MAX_PSK_LEN];	/* PMK material */
+} wsec_pmk_t;
+
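+/*
+ * Usage sketch (illustrative only): handing the driver an ASCII passphrase
+ * instead of a pre-hashed PMK.  Setting WSEC_PASSPHRASE asks the driver to
+ * hash the passphrase into the PMK; key_len must fall between
+ * WSEC_MIN_PSK_LEN and WSEC_MAX_PSK_LEN.
+ *
+ *	wsec_pmk_t pmk;
+ *	const char *pass = "example-passphrase";
+ *
+ *	memset(&pmk, 0, sizeof(pmk));
+ *	pmk.key_len = (ushort)strlen(pass);
+ *	pmk.flags = WSEC_PASSPHRASE;
+ *	memcpy(pmk.key, pass, pmk.key_len);
+ */
+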
+typedef struct _pmkid {
+	struct ether_addr	BSSID;
+	uint8			PMKID[WPA2_PMKID_LEN];
+} pmkid_t;
+
+typedef struct _pmkid_list {
+	uint32	npmkid;
+	pmkid_t	pmkid[1];
+} pmkid_list_t;
+
+typedef struct _pmkid_cand {
+	struct ether_addr	BSSID;
+	uint8			preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+	uint32	npmkid_cand;
+	pmkid_cand_t	pmkid_cand[1];
+} pmkid_cand_list_t;
+
+#define WL_STA_ANT_MAX		4	/* max possible rx antennas */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+typedef struct wl_assoc_info {
+	uint32		req_len;
+	uint32		resp_len;
+	uint32		flags;
+	struct dot11_assoc_req req;
+	struct ether_addr reassoc_bssid; /* used in reassoc's */
+	struct dot11_assoc_resp resp;
+} wl_assoc_info_t;
+
+typedef struct wl_led_info {
+	uint32      index;      /* led index */
+	uint32      behavior;
+	uint8       activehi;
+} wl_led_info_t;
+
+
+/* srom read/write struct passed through ioctl */
+typedef struct {
+	uint	byteoff;	/* byte offset */
+	uint	nbytes;		/* number of bytes */
+	uint16	buf[1];
+} srom_rw_t;
+
+#define CISH_FLAG_PCIECIS	(1 << 15)	/* write CIS format bit for PCIe CIS */
+/* similar cis (srom or otp) struct [iovar: may not be aligned] */
+typedef struct {
+	uint16	source;		/* cis source */
+	uint16	flags;		/* flags */
+	uint32	byteoff;	/* byte offset */
+	uint32	nbytes;		/* number of bytes */
+	/* data follows here */
+} cis_rw_t;
+
+/* R_REG and W_REG struct passed through ioctl */
+typedef struct {
+	uint32	byteoff;	/* byte offset of the field in d11regs_t */
+	uint32	val;		/* read/write value of the field */
+	uint32	size;		/* sizeof the field */
+	uint	band;		/* band (optional) */
+} rw_reg_t;
+
+/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */
+/* PCL - Power Control Loop */
+typedef struct {
+	uint16	auto_ctrl;	/* WL_ATTEN_XX */
+	uint16	bb;		/* Baseband attenuation */
+	uint16	radio;		/* Radio attenuation */
+	uint16	txctl1;		/* Radio TX_CTL1 value */
+} atten_t;
+
+/* Per-AC retry parameters */
+struct wme_tx_params_s {
+	uint8  short_retry;
+	uint8  short_fallback;
+	uint8  long_retry;
+	uint8  long_fallback;
+	uint16 max_rate;  /* In units of 512 Kbps */
+};
+
+typedef struct wme_tx_params_s wme_tx_params_t;
+
+#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
+
+typedef struct wl_plc_nodelist {
+	uint32 count;			/* Number of nodes */
+	struct _node {
+		struct ether_addr ea;	/* Node ether address */
+		uint32 node_type;	/* Node type */
+		uint32 cost;		/* PLC affinity */
+	} node[1];
+} wl_plc_nodelist_t;
+
+typedef struct wl_plc_params {
+	uint32	cmd;			/* Command */
+	uint8	plc_failover;		/* PLC failover control/status */
+	struct	ether_addr node_ea;	/* Node ether address */
+	uint32	cost;			/* Link cost or mac cost */
+} wl_plc_params_t;
+
+/* Used to get specific link/ac parameters */
+typedef struct {
+	int32 ac;
+	uint8 val;
+	struct ether_addr ea;
+} link_val_t;
+
+
+#define WL_PM_MUTE_TX_VER 1
+
+typedef struct wl_pm_mute_tx {
+	uint16 version;		/* version */
+	uint16 len;		/* length */
+	uint16 deadline;	/* deadline timer (in milliseconds) */
+	uint8  enable;		/* set to 1 to enable mode; set to 0 to disable it */
+} wl_pm_mute_tx_t;
+
+
+typedef struct {
+	uint16			ver;		/* version of this struct */
+	uint16			len;		/* length in bytes of this structure */
+	uint16			cap;		/* sta's advertised capabilities */
+	uint32			flags;		/* flags defined below */
+	uint32			idle;		/* time since data pkt rx'd from sta */
+	struct ether_addr	ea;		/* Station address */
+	wl_rateset_t		rateset;	/* rateset in use */
+	uint32			in;		/* seconds elapsed since associated */
+	uint32			listen_interval_inms; /* Min Listen interval in ms for this STA */
+	uint32			tx_pkts;	/* # of user packets transmitted (unicast) */
+	uint32			tx_failures;	/* # of user packets failed */
+	uint32			rx_ucast_pkts;	/* # of unicast packets received */
+	uint32			rx_mcast_pkts;	/* # of multicast packets received */
+	uint32			tx_rate;	/* Rate used by last tx frame */
+	uint32			rx_rate;	/* Rate of last successful rx frame */
+	uint32			rx_decrypt_succeeds;	/* # of packets decrypted successfully */
+	uint32			rx_decrypt_failures;	/* # of packets that failed to decrypt */
+	uint32			tx_tot_pkts;	/* # of user tx pkts (ucast + mcast) */
+	uint32			rx_tot_pkts;	/* # of data packets recvd (uni + mcast) */
+	uint32			tx_mcast_pkts;	/* # of mcast pkts txed */
+	uint64			tx_tot_bytes;	/* data bytes txed (ucast + mcast) */
+	uint64			rx_tot_bytes;	/* data bytes recvd (ucast + mcast) */
+	uint64			tx_ucast_bytes;	/* data bytes txed (ucast) */
+	uint64			tx_mcast_bytes;	/* # data bytes txed (mcast) */
+	uint64			rx_ucast_bytes;	/* data bytes recvd (ucast) */
+	uint64			rx_mcast_bytes;	/* data bytes recvd (mcast) */
+	int8			rssi[WL_STA_ANT_MAX]; /* average rssi per antenna
+										   * of data frames
+										   */
+	int8			nf[WL_STA_ANT_MAX];	/* per antenna noise floor */
+	uint16			aid;		/* association ID */
+	uint16			ht_capabilities;	/* advertised ht caps */
+	uint16			vht_flags;		/* converted vht flags */
+	uint32			tx_pkts_retried;	/* # of frames where a retry was
+							 * necessary
+							 */
+	uint32			tx_pkts_retry_exhausted; /* # of user frames where a retry
+							  * was exhausted
+							  */
+	int8			rx_lastpkt_rssi[WL_STA_ANT_MAX]; /* Per antenna RSSI of last
+								  * received data frame.
+								  */
+	/* TX WLAN retry/failure statistics:
+	 * Separated for host requested frames and WLAN locally generated frames.
+	 * Include unicast frame only where the retries/failures can be counted.
+	 */
+	uint32			tx_pkts_total;		/* # user frames sent successfully */
+	uint32			tx_pkts_retries;	/* # user frames retries */
+	uint32			tx_pkts_fw_total;	/* # FW generated sent successfully */
+	uint32			tx_pkts_fw_retries;	/* # retries for FW generated frames */
+	uint32			tx_pkts_fw_retry_exhausted;	/* # FW generated where a retry
+								 * was exhausted
+								 */
+	uint32			rx_pkts_retried;	/* # rx with retry bit set */
+	uint32			tx_rate_fallback;	/* lowest fallback TX rate */
+} sta_info_t;
+
+#define WL_OLD_STAINFO_SIZE	OFFSETOF(sta_info_t, tx_tot_pkts)
+
+#define WL_STA_VER		4
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define	WLC_NUMRATES	16	/* max # of rates in a rateset */
+
+typedef struct wlc_rateset {
+	uint32	count;			/* number of rates in rates[] */
+	uint8	rates[WLC_NUMRATES];	/* rates in 500kbps units w/hi bit set if basic */
+	uint8	htphy_membership;	/* HT PHY Membership */
+	uint8	mcs[MCSSET_LEN];	/* supported mcs index bit map */
+	uint16  vht_mcsmap;		/* supported vht mcs nss bit map */
+} wlc_rateset_t;
+
+/* Used to get specific STA parameters */
+typedef struct {
+	uint32	val;
+	struct ether_addr ea;
+} scb_val_t;
+
+/* Used by iovar versions of some ioctls, i.e. WLC_SCB_AUTHORIZE et al */
+typedef struct {
+	uint32 code;
+	scb_val_t ioctl_args;
+} authops_t;
+
+/* channel encoding */
+typedef struct channel_info {
+	int hw_channel;
+	int target_channel;
+	int scan_channel;
+} channel_info_t;
+
+/* For ioctls that take a list of MAC addresses */
+typedef struct maclist {
+	uint count;			/* number of MAC addresses */
+	struct ether_addr ea[1];	/* variable length array of MAC addresses */
+} maclist_t;
+
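+/*
+ * Usage sketch (illustrative only): maclist_t is variable length, so the
+ * buffer handed to the ioctl must cover the count field plus one ether_addr
+ * per entry.
+ *
+ *	uint n = 4;
+ *	uint len = OFFSETOF(maclist_t, ea) + n * sizeof(struct ether_addr);
+ *	maclist_t *ml = (maclist_t *)malloc(len);
+ *
+ *	memset(ml, 0, len);
+ *	ml->count = n;
+ */
+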
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* get pkt count struct passed through ioctl */
+typedef struct get_pktcnt {
+	uint rx_good_pkt;
+	uint rx_bad_pkt;
+	uint tx_good_pkt;
+	uint tx_bad_pkt;
+	uint rx_ocast_good_pkt; /* unicast packets destined for others */
+} get_pktcnt_t;
+
+/* NINTENDO2 */
+#define LQ_IDX_MIN              0
+#define LQ_IDX_MAX              1
+#define LQ_IDX_AVG              2
+#define LQ_IDX_SUM              2
+#define LQ_IDX_LAST             3
+#define LQ_STOP_MONITOR         0
+#define LQ_START_MONITOR        1
+
+/* Get averaged RSSI, Rx PHY rate and SNR values */
+typedef struct {
+	int rssi[LQ_IDX_LAST];  /* Array to keep min, max, avg rssi */
+	int snr[LQ_IDX_LAST];   /* Array to keep min, max, avg snr */
+	int isvalid;            /* Flag indicating whether above data is valid */
+} wl_lq_t; /* Link Quality */
+
+typedef enum wl_wakeup_reason_type {
+	LCD_ON = 1,
+	LCD_OFF,
+	DRC1_WAKE,
+	DRC2_WAKE,
+	REASON_LAST
+} wl_wr_type_t;
+
+typedef struct {
+/* Unique filter id */
+	uint32	id;
+
+/* stores the reason for the last wake up */
+	uint8	reason;
+} wl_wr_t;
+
+/* Get MAC specific rate histogram command */
+typedef struct {
+	struct	ether_addr ea;	/* MAC Address */
+	uint8	ac_cat;	/* Access Category */
+	uint8	num_pkts;	/* Number of packet entries to be averaged */
+} wl_mac_ratehisto_cmd_t;	/* MAC Specific Rate Histogram command */
+
+/* Get MAC rate histogram response */
+typedef struct {
+	uint32	rate[DOT11_RATE_MAX + 1];	/* Rates */
+	uint32	mcs[WL_RATESET_SZ_HT_MCS * WL_TX_CHAINS_MAX];	/* MCS counts */
+	uint32	vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX];	/* VHT counts */
+	uint32	tsf_timer[2][2];	/* Start and End time for 8bytes value */
+} wl_mac_ratehisto_res_t;	/* MAC Specific Rate Histogram Response */
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* Linux network driver ioctl encoding */
+typedef struct wl_ioctl {
+	uint cmd;	/* common ioctl definition */
+	void *buf;	/* pointer to user buffer */
+	uint len;	/* length of user buffer */
+	uint8 set;		/* 1=set IOCTL; 0=query IOCTL */
+	uint used;	/* bytes read or written (optional) */
+	uint needed;	/* bytes needed (optional) */
+} wl_ioctl_t;
+
+#ifdef CONFIG_COMPAT
+typedef struct compat_wl_ioctl {
+	uint cmd;	/* common ioctl definition */
+	uint32 buf;	/* pointer to user buffer */
+	uint len;	/* length of user buffer */
+	uint8 set;		/* 1=set IOCTL; 0=query IOCTL */
+	uint used;	/* bytes read or written (optional) */
+	uint needed;	/* bytes needed (optional) */
+} compat_wl_ioctl_t;
+#endif /* CONFIG_COMPAT */
+
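+/*
+ * Usage sketch (illustrative only): a query points buf/len at a caller-owned
+ * result buffer and leaves the set flag clear.  WLC_GET_CHANNEL is assumed
+ * to be a command code defined elsewhere in this interface; the filled-in
+ * wl_ioctl_t is then passed to the driver through its private ioctl entry
+ * point.
+ *
+ *	wl_ioctl_t ioc;
+ *	channel_info_t ci;
+ *
+ *	memset(&ioc, 0, sizeof(ioc));
+ *	ioc.cmd = WLC_GET_CHANNEL;
+ *	ioc.buf = &ci;
+ *	ioc.len = sizeof(ci);
+ *	ioc.set = 0;			   query, not set
+ */
+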
+#define WL_NUM_RATES_CCK			4 /* 1, 2, 5.5, 11 Mbps */
+#define WL_NUM_RATES_OFDM			8 /* 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */
+#define WL_NUM_RATES_MCS_1STREAM	8 /* MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
+#define WL_NUM_RATES_EXTRA_VHT		2 /* Additional VHT 11AC rates */
+#define WL_NUM_RATES_VHT			10
+#define WL_NUM_RATES_MCS32			1
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+/*
+ * Structure for passing hardware and software
+ * revision info up from the driver.
+ */
+typedef struct wlc_rev_info {
+	uint		vendorid;	/* PCI vendor id */
+	uint		deviceid;	/* device id of chip */
+	uint		radiorev;	/* radio revision */
+	uint		chiprev;	/* chip revision */
+	uint		corerev;	/* core revision */
+	uint		boardid;	/* board identifier (usu. PCI sub-device id) */
+	uint		boardvendor;	/* board vendor (usu. PCI sub-vendor id) */
+	uint		boardrev;	/* board revision */
+	uint		driverrev;	/* driver version */
+	uint		ucoderev;	/* microcode version */
+	uint		bus;		/* bus type */
+	uint		chipnum;	/* chip number */
+	uint		phytype;	/* phy type */
+	uint		phyrev;		/* phy revision */
+	uint		anarev;		/* anacore rev */
+	uint		chippkg;	/* chip package info */
+	uint		nvramrev;	/* nvram revision number */
+} wlc_rev_info_t;
+
+#define WL_REV_INFO_LEGACY_LENGTH	48
+
+#define WL_BRAND_MAX 10
+typedef struct wl_instance_info {
+	uint instance;
+	char brand[WL_BRAND_MAX];
+} wl_instance_info_t;
+
+/* structure to change size of tx fifo */
+typedef struct wl_txfifo_sz {
+	uint16	magic;
+	uint16	fifo;
+	uint16	size;
+} wl_txfifo_sz_t;
+
+/* Transfer info about an IOVar from the driver */
+/* Max supported IOV name size in bytes, + 1 for nul termination */
+#define WLC_IOV_NAME_LEN 30
+typedef struct wlc_iov_trx_s {
+	uint8 module;
+	uint8 type;
+	char name[WLC_IOV_NAME_LEN];
+} wlc_iov_trx_t;
+
+/* bump this number if you change the ioctl interface */
+#define WLC_IOCTL_VERSION	2
+#define WLC_IOCTL_VERSION_LEGACY_IOTYPES	1
+
+#ifdef CONFIG_USBRNDIS_RETAIL
+/* struct passed in for WLC_NDCONFIG_ITEM */
+typedef struct {
+	char *name;
+	void *param;
+} ndconfig_item_t;
+#endif
+
+
+#define WL_PHY_PAVARS_LEN	32	/* Phy type, Band range, chain, a1[0], b0[0], b1[0] ... */
+
+#define WL_PHY_PAVAR_VER	1	/* pavars version */
+#define WL_PHY_PAVARS2_NUM	3	/* a1, b0, b1 */
+typedef struct wl_pavars2 {
+	uint16 ver;		/* version of this struct */
+	uint16 len;		/* len of this structure */
+	uint16 inuse;		/* driver returns 1 for a1, b0, b1 in the current band range */
+	uint16 phy_type;	/* phy type */
+	uint16 bandrange;
+	uint16 chain;
+	uint16 inpa[WL_PHY_PAVARS2_NUM];	/* phy pavars for one band range */
+} wl_pavars2_t;
+
+typedef struct wl_po {
+	uint16	phy_type;	/* Phy type */
+	uint16	band;
+	uint16	cckpo;
+	uint32	ofdmpo;
+	uint16	mcspo[8];
+} wl_po_t;
+
+#define WL_NUM_RPCALVARS 5	/* number of rpcal vars */
+
+typedef struct wl_rpcal {
+	uint16 value;
+	uint16 update;
+} wl_rpcal_t;
+
+typedef struct wl_aci_args {
+	int enter_aci_thresh; /* Trigger level to start detecting ACI */
+	int exit_aci_thresh; /* Trigger level to exit ACI mode */
+	int usec_spin; /* microsecs to delay between rssi samples */
+	int glitch_delay; /* interval between ACI scans when glitch count is consistently high */
+	uint16 nphy_adcpwr_enter_thresh;	/* ADC power to enter ACI mitigation mode */
+	uint16 nphy_adcpwr_exit_thresh;	/* ADC power to exit ACI mitigation mode */
+	uint16 nphy_repeat_ctr;		/* Number of tries per channel to compute power */
+	uint16 nphy_num_samples;	/* Number of samples to compute power on one channel */
+	uint16 nphy_undetect_window_sz;	/* num of undetects to exit ACI Mitigation mode */
+	uint16 nphy_b_energy_lo_aci;	/* low ACI power energy threshold for bphy */
+	uint16 nphy_b_energy_md_aci;	/* mid ACI power energy threshold for bphy */
+	uint16 nphy_b_energy_hi_aci;	/* high ACI power energy threshold for bphy */
+	uint16 nphy_noise_noassoc_glitch_th_up; /* wl interference 4 */
+	uint16 nphy_noise_noassoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_glitch_th_up;
+	uint16 nphy_noise_assoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_aci_glitch_th_up;
+	uint16 nphy_noise_assoc_aci_glitch_th_dn;
+	uint16 nphy_noise_assoc_enter_th;
+	uint16 nphy_noise_noassoc_enter_th;
+	uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th;
+	uint16 nphy_noise_noassoc_crsidx_incr;
+	uint16 nphy_noise_assoc_crsidx_incr;
+	uint16 nphy_noise_crsidx_decr;
+} wl_aci_args_t;
+
+#define WL_ACI_ARGS_LEGACY_LENGTH	16	/* bytes of pre NPHY aci args */
+#define	WL_SAMPLECOLLECT_T_VERSION	2	/* version of wl_samplecollect_args_t struct */
+typedef struct wl_samplecollect_args {
+	/* version 0 fields */
+	uint8 coll_us;
+	int cores;
+	/* add'l version 1 fields */
+	uint16 version;     /* see definition of WL_SAMPLECOLLECT_T_VERSION */
+	uint16 length;      /* length of entire structure */
+	int8 trigger;
+	uint16 timeout;
+	uint16 mode;
+	uint32 pre_dur;
+	uint32 post_dur;
+	uint8 gpio_sel;
+	uint8 downsamp;
+	uint8 be_deaf;
+	uint8 agc;		/* loop from init gain and going down */
+	uint8 filter;		/* override high pass corners to lowest */
+	/* add'l version 2 fields */
+	uint8 trigger_state;
+	uint8 module_sel1;
+	uint8 module_sel2;
+	uint16 nsamps;
+	int bitStart;
+	uint32 gpioCapMask;
+} wl_samplecollect_args_t;
+
+#define	WL_SAMPLEDATA_T_VERSION		1	/* version of wl_samplecollect_args_t struct */
+/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */
+#define	WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
+
+typedef struct wl_sampledata {
+	uint16 version;	/* structure version */
+	uint16 size;	/* size of structure */
+	uint16 tag;	/* Header/Data */
+	uint16 length;	/* data length */
+	uint32 flag;	/* bit def */
+} wl_sampledata_t;
+
+
+/* WL_OTA START */
+/* OTA Test Status */
+enum {
+	WL_OTA_TEST_IDLE = 0,	/* Default Idle state */
+	WL_OTA_TEST_ACTIVE = 1,	/* Test Running */
+	WL_OTA_TEST_SUCCESS = 2,	/* Successfully Finished Test */
+	WL_OTA_TEST_FAIL = 3	/* Test Failed in the Middle */
+};
+/* OTA SYNC Status */
+enum {
+	WL_OTA_SYNC_IDLE = 0,	/* Idle state */
+	WL_OTA_SYNC_ACTIVE = 1,	/* Waiting for Sync */
+	WL_OTA_SYNC_FAIL = 2	/* Sync pkt not received */
+};
+
+/* Various error states dut can get stuck during test */
+enum {
+	WL_OTA_SKIP_TEST_CAL_FAIL = 1,		/* Phy calibration failed */
+	WL_OTA_SKIP_TEST_SYNCH_FAIL = 2,		/* Sync Packet not received */
+	WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3,	/* Cmd flow file download failed */
+	WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4,	/* No test found in Flow file */
+	WL_OTA_SKIP_TEST_WL_NOT_UP = 5,		/* WL UP failed */
+	WL_OTA_SKIP_TEST_UNKNOWN_CALL		/* Unintentional scheduling of the OTA test */
+};
+
+/* Differentiator for ota_tx and ota_rx */
+enum {
+	WL_OTA_TEST_TX = 0,		/* ota_tx */
+	WL_OTA_TEST_RX = 1,		/* ota_rx */
+};
+
+/* Catch 3 modes of operation: 20Mhz, 40Mhz, 20 in 40 Mhz */
+enum {
+	WL_OTA_TEST_BW_20_IN_40MHZ = 0,	/* 20 in 40 operation */
+	WL_OTA_TEST_BW_20MHZ = 1,		/* 20 Mhz operation */
+	WL_OTA_TEST_BW_40MHZ = 2		/* full 40Mhz operation */
+};
+typedef struct ota_rate_info {
+	uint8 rate_cnt;					/* Total number of rates */
+	uint8 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE];	/* array of rates from 1mbps to 130mbps */
+							/* for legacy rates : rate in mbps * 2 */
+							/* for HT rates : mcs index */
+} ota_rate_info_t;
+
+typedef struct ota_power_info {
+	int8 pwr_ctrl_on;	/* power control on/off */
+	int8 start_pwr;		/* starting power/index */
+	int8 delta_pwr;		/* delta power/index */
+	int8 end_pwr;		/* end power/index */
+} ota_power_info_t;
+
+typedef struct ota_packetengine {
+	uint16 delay;           /* Inter-packet delay */
+				/* for ota_tx, delay is tx ifs in micro seconds */
+				/* for ota_rx, delay is wait time in milliseconds */
+	uint16 nframes;         /* Number of frames */
+	uint16 length;          /* Packet length */
+} ota_packetengine_t;
+
+/* Test info vector */
+typedef struct wl_ota_test_args {
+	uint8 cur_test;			/* test phase */
+	uint8 chan;			/* channel */
+	uint8 bw;			/* bandwidth */
+	uint8 control_band;		/* control band */
+	uint8 stf_mode;			/* stf mode */
+	ota_rate_info_t rt_info;	/* Rate info */
+	ota_packetengine_t pkteng;	/* packeteng info */
+	uint8 txant;			/* tx antenna */
+	uint8 rxant;			/* rx antenna */
+	ota_power_info_t pwr_info;	/* power sweep info */
+	uint8 wait_for_sync;		/* wait for sync or not */
+} wl_ota_test_args_t;
+
+typedef struct wl_ota_test_vector {
+	wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ];	/* Test argument struct */
+	uint16 test_cnt;					/* Total number of tests */
+	uint8 file_dwnld_valid;					/* File successfully downloaded */
+	uint8 sync_timeout;					/* sync packet timeout */
+	int8 sync_fail_action;					/* sync fail action */
+	struct ether_addr sync_mac;				/* macaddress for sync pkt */
+	struct ether_addr tx_mac;				/* macaddress for tx */
+	struct ether_addr rx_mac;				/* macaddress for rx */
+	int8 loop_test;					/* dbg feature to loop the test */
+} wl_ota_test_vector_t;
+
+
+/* struct copied back from dongle to host to query the status */
+typedef struct wl_ota_test_status {
+	int16 cur_test_cnt;		/* test phase */
+	int8 skip_test_reason;		/* skip test reason */
+	wl_ota_test_args_t test_arg;	/* cur test arg details */
+	uint16 test_cnt;		/* total number of tests downloaded */
+	uint8 file_dwnld_valid;		/* file successfully downloaded ? */
+	uint8 sync_timeout;		/* sync timeout */
+	int8 sync_fail_action;		/* sync fail action */
+	struct ether_addr sync_mac;	/* macaddress for sync pkt */
+	struct ether_addr tx_mac;	/* tx mac address */
+	struct ether_addr rx_mac;	/* rx mac address */
+	uint8  test_stage;		/* check the test status */
+	int8 loop_test;		/* Debug feature that puts the test engine in a loop */
+	uint8 sync_status;		/* sync status */
+} wl_ota_test_status_t;
+
+/* WL_OTA END */
+
+/* wl_radar_args_t */
+typedef struct {
+	int npulses;	/* required number of pulses at n * t_int */
+	int ncontig;	/* required number of pulses at t_int */
+	int min_pw;	/* minimum pulse width (20 MHz clocks) */
+	int max_pw;	/* maximum pulse width (20 MHz clocks) */
+	uint16 thresh0;	/* Radar detection, thresh 0 */
+	uint16 thresh1;	/* Radar detection, thresh 1 */
+	uint16 blank;	/* Radar detection, blank control */
+	uint16 fmdemodcfg;	/* Radar detection, fmdemod config */
+	int npulses_lp;  /* Radar detection, minimum long pulses */
+	int min_pw_lp; /* Minimum pulsewidth for long pulses */
+	int max_pw_lp; /* Maximum pulsewidth for long pulses */
+	int min_fm_lp; /* Minimum fm for long pulses */
+	int max_span_lp;  /* Maximum deltat for long pulses */
+	int min_deltat; /* Minimum spacing between pulses */
+	int max_deltat; /* Maximum spacing between pulses */
+	uint16 autocorr;	/* Radar detection, autocorr on or off */
+	uint16 st_level_time;	/* Radar detection, start_timing level */
+	uint16 t2_min; /* minimum clocks needed to remain in state 2 */
+	uint32 version; /* version */
+	uint32 fra_pulse_err;	/* sample error margin for detecting French radar pulses */
+	int npulses_fra;  /* Radar detection, minimum French pulses set */
+	int npulses_stg2;  /* Radar detection, minimum staggered-2 pulses set */
+	int npulses_stg3;  /* Radar detection, minimum staggered-3 pulses set */
+	uint16 percal_mask;	/* defines which period cal is masked from radar detection */
+	int quant;	/* quantization resolution to pulse positions */
+	uint32 min_burst_intv_lp;	/* minimum burst to burst interval for bin3 radar */
+	uint32 max_burst_intv_lp;	/* maximum burst to burst interval for bin3 radar */
+	int nskip_rst_lp;	/* number of skipped pulses before resetting lp buffer */
+	int max_pw_tol;	/* maximum tolerance allowed in detected pulse width for radar detection */
+	uint16 feature_mask; /* 16-bit mask to specify enabled features */
+} wl_radar_args_t;
+
+#define WL_RADAR_ARGS_VERSION 2
+
+typedef struct {
+	uint32 version; /* version */
+	uint16 thresh0_20_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh1_20_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh0_40_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */
+	uint16 thresh1_40_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */
+	uint16 thresh0_80_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh1_80_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh0_20_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh1_20_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh0_40_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh1_40_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh0_80_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */
+	uint16 thresh1_80_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */
+#ifdef WL11AC160
+	uint16 thresh0_160_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh1_160_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh0_160_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */
+	uint16 thresh1_160_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */
+#endif /* WL11AC160 */
+} wl_radar_thr_t;
+
+#define WL_RADAR_THR_VERSION	2
+
+/* RSSI per antenna */
+typedef struct {
+	uint32	version;		/* version field */
+	uint32	count;			/* number of valid antenna rssi */
+	int8 rssi_ant[WL_RSSI_ANT_MAX];	/* rssi per antenna */
+} wl_rssi_ant_t;
+
+/* data structure used in 'dfs_status' wl interface, which is used to query dfs status */
+typedef struct {
+	uint state;		/* noted by WL_DFS_CACSTATE_XX. */
+	uint duration;		/* time spent in ms in state. */
+	/* as dfs enters ISM state, it removes the operational channel from quiet channel
+	 * list and notes the channel in chanspec_cleared. Set to 0 if no channel is cleared
+	 */
+	chanspec_t chanspec_cleared;
+	/* chanspec cleared used to be a uint, add another to uint16 to maintain size */
+	uint16 pad;
+} wl_dfs_status_t;
+
+/* data structure used in 'radar_status' wl interface, which is use to query radar det status */
+typedef struct {
+	bool detected;
+	int count;
+	bool pretended;
+	uint32 radartype;
+	uint32 timenow;
+	uint32 timefromL;
+	int lp_csect_single;
+	int detected_pulse_index;
+	int nconsecq_pulses;
+	chanspec_t ch;
+	int pw[10];
+	int intv[10];
+	int fm[10];
+} wl_radar_status_t;
+
+#define NUM_PWRCTRL_RATES 12
+
+typedef struct {
+	uint8 txpwr_band_max[NUM_PWRCTRL_RATES];	/* User set target */
+	uint8 txpwr_limit[NUM_PWRCTRL_RATES];		/* reg and local power limit */
+	uint8 txpwr_local_max;				/* local max according to the AP */
+	uint8 txpwr_local_constraint;			/* local constraint according to the AP */
+	uint8 txpwr_chan_reg_max;			/* Regulatory max for this channel */
+	uint8 txpwr_target[2][NUM_PWRCTRL_RATES];	/* Latest target for 2.4 and 5 Ghz */
+	uint8 txpwr_est_Pout[2];			/* Latest estimate for 2.4 and 5 Ghz */
+	uint8 txpwr_opo[NUM_PWRCTRL_RATES];		/* On G phy, OFDM power offset */
+	uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES];	/* Max CCK power for this band (SROM) */
+	uint8 txpwr_bphy_ofdm_max;			/* Max OFDM power for this band (SROM) */
+	uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES];	/* Max power for A band (SROM) */
+	int8  txpwr_antgain[2];				/* Ant gain for each band - from SROM */
+	uint8 txpwr_est_Pout_gofdm;			/* Pwr estimate for 2.4 OFDM */
+} tx_power_legacy_t;
+
+#define WL_TX_POWER_RATES_LEGACY    45
+#define WL_TX_POWER_MCS20_FIRST         12
+#define WL_TX_POWER_MCS20_NUM           16
+#define WL_TX_POWER_MCS40_FIRST         28
+#define WL_TX_POWER_MCS40_NUM           17
+
+typedef struct {
+	uint32 flags;
+	chanspec_t chanspec;                 /* txpwr report for this channel */
+	chanspec_t local_chanspec;           /* channel on which we are associated */
+	uint8 local_max;                 /* local max according to the AP */
+	uint8 local_constraint;              /* local constraint according to the AP */
+	int8  antgain[2];                /* Ant gain for each band - from SROM */
+	uint8 rf_cores;                  /* count of RF Cores being reported */
+	uint8 est_Pout[4];                           /* Latest tx power out estimate per RF
+							  * chain without adjustment
+							  */
+	uint8 est_Pout_cck;                          /* Latest CCK tx power out estimate */
+	uint8 user_limit[WL_TX_POWER_RATES_LEGACY];  /* User limit */
+	uint8 reg_limit[WL_TX_POWER_RATES_LEGACY];   /* Regulatory power limit */
+	uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /* Max power board can support (SROM) */
+	uint8 target[WL_TX_POWER_RATES_LEGACY];      /* Latest target power */
+} tx_power_legacy2_t;
+
+/* TX Power index defines */
+#define WLC_NUM_RATES_CCK       WL_NUM_RATES_CCK
+#define WLC_NUM_RATES_OFDM      WL_NUM_RATES_OFDM
+#define WLC_NUM_RATES_MCS_1_STREAM  WL_NUM_RATES_MCS_1STREAM
+#define WLC_NUM_RATES_MCS_2_STREAM  WL_NUM_RATES_MCS_1STREAM
+#define WLC_NUM_RATES_MCS32     WL_NUM_RATES_MCS32
+#define WL_TX_POWER_CCK_NUM     WL_NUM_RATES_CCK
+#define WL_TX_POWER_OFDM_NUM        WL_NUM_RATES_OFDM
+#define WL_TX_POWER_MCS_1_STREAM_NUM    WL_NUM_RATES_MCS_1STREAM
+#define WL_TX_POWER_MCS_2_STREAM_NUM    WL_NUM_RATES_MCS_1STREAM
+#define WL_TX_POWER_MCS_32_NUM      WL_NUM_RATES_MCS32
+
+#define WL_NUM_2x2_ELEMENTS		4
+#define WL_NUM_3x3_ELEMENTS		6
+
+typedef struct {
+	uint16 ver;				/* version of this struct */
+	uint16 len;				/* length in bytes of this structure */
+	uint32 flags;
+	chanspec_t chanspec;			/* txpwr report for this channel */
+	chanspec_t local_chanspec;		/* channel on which we are associated */
+	uint32     buflen;			/* ppr buffer length */
+	uint8      pprbuf[1];			/* Latest target power buffer */
+} wl_txppr_t;
+
+#define WL_TXPPR_VERSION	1
+#define WL_TXPPR_LENGTH	(sizeof(wl_txppr_t))
+#define TX_POWER_T_VERSION	45
+/* number of ppr serialization buffers: reg, board and target */
+#define WL_TXPPR_SER_BUF_NUM	(3)
+
+typedef struct chanspec_txpwr_max {
+	chanspec_t chanspec;   /* chanspec */
+	uint8 txpwr_max;       /* max txpwr in all the rates */
+	uint8 padding;
+} chanspec_txpwr_max_t;
+
+typedef struct  wl_chanspec_txpwr_max {
+	uint16 ver;			/* version of this struct */
+	uint16 len;			/* length in bytes of this structure */
+	uint32 count;		/* number of elements of (chanspec, txpwr_max) pair */
+	chanspec_txpwr_max_t txpwr[1];	/* array of (chanspec, max_txpwr) pair */
+} wl_chanspec_txpwr_max_t;
+
+#define WL_CHANSPEC_TXPWR_MAX_VER	1
+#define WL_CHANSPEC_TXPWR_MAX_LEN	(sizeof(wl_chanspec_txpwr_max_t))
+
+typedef struct tx_inst_power {
+	uint8 txpwr_est_Pout[2];			/* Latest estimate for 2.4 and 5 Ghz */
+	uint8 txpwr_est_Pout_gofdm;			/* Pwr estimate for 2.4 OFDM */
+} tx_inst_power_t;
+
+#define WL_NUM_TXCHAIN_MAX	4
+typedef struct wl_txchain_pwr_offsets {
+	int8 offset[WL_NUM_TXCHAIN_MAX];	/* quarter dBm signed offset for each chain */
+} wl_txchain_pwr_offsets_t;
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS		64
+
+/*
+ * Join preference iovar value is an array of tuples. Each tuple has a one-byte type,
+ * a one-byte length, and a variable length value.  RSSI type tuple must be present
+ * in the array.
+ *
+ * Types are defined in "join preference types" section.
+ *
+ * Length is the value size in octets. It is reserved for WL_JOIN_PREF_WPA type tuple
+ * and must be set to zero.
+ *
+ * Values are defined below.
+ *
+ * 1. RSSI - 2 octets
+ * offset 0: reserved
+ * offset 1: reserved
+ *
+ * 2. WPA - 2 + 12 * n octets (n is # tuples defined below)
+ * offset 0: reserved
+ * offset 1: # of tuples
+ * offset 2: tuple 1
+ * offset 14: tuple 2
+ * ...
+ * offset 2 + 12 * (n - 1) octets: tuple n
+ *
+ * struct wpa_cfg_tuple {
+ *   uint8 akm[DOT11_OUI_LEN+1];     akm suite
+ *   uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite
+ *   uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite
+ * };
+ *
+ * multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY.
+ *
+ * 3. BAND - 2 octets
+ * offset 0: reserved
+ * offset 1: see "band preference" and "band types"
+ *
+ * 4. BAND RSSI - 2 octets
+ * offset 0: band types
+ * offset 1: +ve RSSI boost value in dB
+ */
+
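+/*
+ * Example encoding (illustrative only): a join preference buffer carrying
+ * the mandatory RSSI tuple plus a BAND tuple.  The WL_JOIN_PREF_* type codes
+ * and the WLC_BAND_5G value are assumed to come from the "join preference
+ * types" and "band types" sections referenced above.
+ *
+ *	uint8 join_pref[] = {
+ *		WL_JOIN_PREF_RSSI, 2, 0, 0,
+ *		WL_JOIN_PREF_BAND, 2, 0, WLC_BAND_5G,
+ *	};
+ */
+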
+struct tsinfo_arg {
+	uint8 octets[3];
+};
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define RATE_CCK_1MBPS 0
+#define RATE_CCK_2MBPS 1
+#define RATE_CCK_5_5MBPS 2
+#define RATE_CCK_11MBPS 3
+
+#define RATE_LEGACY_OFDM_6MBPS 0
+#define RATE_LEGACY_OFDM_9MBPS 1
+#define RATE_LEGACY_OFDM_12MBPS 2
+#define RATE_LEGACY_OFDM_18MBPS 3
+#define RATE_LEGACY_OFDM_24MBPS 4
+#define RATE_LEGACY_OFDM_36MBPS 5
+#define RATE_LEGACY_OFDM_48MBPS 6
+#define RATE_LEGACY_OFDM_54MBPS 7
+
+#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1
+
+typedef struct wl_bsstrans_rssi {
+	int8 rssi_2g;	/* RSSI in dbm for 2.4 G */
+	int8 rssi_5g;	/* RSSI in dbm for 5G, unused for cck */
+} wl_bsstrans_rssi_t;
+
+#define RSSI_RATE_MAP_MAX_STREAMS 4	/* max streams supported */
+
+/* RSSI to rate mapping, all 20Mhz, no SGI */
+typedef struct wl_bsstrans_rssi_rate_map {
+	uint16 ver;
+	uint16 len; /* length of entire structure */
+	wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /* 2.4G only */
+	wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /* 6 to 54mbps */
+	wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
+	wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /* MCS0-9 */
+} wl_bsstrans_rssi_rate_map_t;
+
+#define WL_BSSTRANS_ROAMTHROTTLE_VERSION 1
+
+/* Configure number of scans allowed per throttle period */
+typedef struct wl_bsstrans_roamthrottle {
+	uint16 ver;
+	uint16 period;
+	uint16 scans_allowed;
+} wl_bsstrans_roamthrottle_t;
+
+#define	NFIFO			6	/* # tx/rx fifopairs */
+#define NREINITREASONCOUNT	8
+#define REINITREASONIDX(_x)	(((_x) < NREINITREASONCOUNT) ? (_x) : 0)
+
+#define	WL_CNT_T_VERSION	10	/* current version of wl_cnt_t struct */
+
+typedef struct {
+	uint16	version;	/* see definition of WL_CNT_T_VERSION */
+	uint16	length;		/* length of entire structure */
+
+	/* transmit stat counters */
+	uint32	txframe;	/* tx data frames */
+	uint32	txbyte;		/* tx data bytes */
+	uint32	txretrans;	/* tx mac retransmits */
+	uint32	txerror;	/* tx data errors (derived: sum of others) */
+	uint32	txctl;		/* tx management frames */
+	uint32	txprshort;	/* tx short preamble frames */
+	uint32	txserr;		/* tx status errors */
+	uint32	txnobuf;	/* tx out of buffers errors */
+	uint32	txnoassoc;	/* tx discard because we're not associated */
+	uint32	txrunt;		/* tx runt frames */
+	uint32	txchit;		/* tx header cache hit (fastpath) */
+	uint32	txcmiss;	/* tx header cache miss (slowpath) */
+
+	/* transmit chip error counters */
+	uint32	txuflo;		/* tx fifo underflows */
+	uint32	txphyerr;	/* tx phy errors (indicated in tx status) */
+	uint32	txphycrs;
+
+	/* receive stat counters */
+	uint32	rxframe;	/* rx data frames */
+	uint32	rxbyte;		/* rx data bytes */
+	uint32	rxerror;	/* rx data errors (derived: sum of others) */
+	uint32	rxctl;		/* rx management frames */
+	uint32	rxnobuf;	/* rx out of buffers errors */
+	uint32	rxnondata;	/* rx non data frames in the data channel errors */
+	uint32	rxbadds;	/* rx bad DS errors */
+	uint32	rxbadcm;	/* rx bad control or management frames */
+	uint32	rxfragerr;	/* rx fragmentation errors */
+	uint32	rxrunt;		/* rx runt frames */
+	uint32	rxgiant;	/* rx giant frames */
+	uint32	rxnoscb;	/* rx no scb error */
+	uint32	rxbadproto;	/* rx invalid frames */
+	uint32	rxbadsrcmac;	/* rx frames with Invalid Src Mac */
+	uint32	rxbadda;	/* rx frames tossed for invalid da */
+	uint32	rxfilter;	/* rx frames filtered out */
+
+	/* receive chip error counters */
+	uint32	rxoflo;		/* rx fifo overflow errors */
+	uint32	rxuflo[NFIFO];	/* rx dma descriptor underflow errors */
+
+	uint32	d11cnt_txrts_off;	/* d11cnt txrts value when reset d11cnt */
+	uint32	d11cnt_rxcrc_off;	/* d11cnt rxcrc value when reset d11cnt */
+	uint32	d11cnt_txnocts_off;	/* d11cnt txnocts value when reset d11cnt */
+
+	/* misc counters */
+	uint32	dmade;		/* tx/rx dma descriptor errors */
+	uint32	dmada;		/* tx/rx dma data errors */
+	uint32	dmape;		/* tx/rx dma descriptor protocol errors */
+	uint32	reset;		/* reset count */
+	uint32	tbtt;		/* cnts the TBTT int's */
+	uint32	txdmawar;
+	uint32	pkt_callback_reg_fail;	/* callbacks register failure */
+
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32	txallfrm;	/* total number of frames sent, incl. Data, ACK, RTS, CTS,
+				 * Control Management (includes retransmissions)
+				 */
+	uint32	txrtsfrm;	/* number of RTS sent out by the MAC */
+	uint32	txctsfrm;	/* number of CTS sent out by the MAC */
+	uint32	txackfrm;	/* number of ACK frames sent out */
+	uint32	txdnlfrm;	/* Not used */
+	uint32	txbcnfrm;	/* beacons transmitted */
+	uint32	txfunfl[6];	/* per-fifo tx underflows */
+	uint32	rxtoolate;	/* receive too late */
+	uint32  txfbw;		/* transmit at fallback bw (dynamic bw) */
+	uint32	txtplunfl;	/* Template underflows (mac was too slow to transmit ACK/CTS
+				 * or BCN)
+				 */
+	uint32	txphyerror;	/* Transmit phy error, type of error is reported in tx-status for
+				 * driver enqueued frames
+				 */
+	uint32	rxfrmtoolong;	/* Received frame longer than legal limit (2346 bytes) */
+	uint32	rxfrmtooshrt;	/* Received frame did not contain enough bytes for its frame type */
+	uint32	rxinvmachdr;	/* Either the protocol version != 0 or frame type not
+				 * data/control/management
+				 */
+	uint32	rxbadfcs;	/* number of frames for which the CRC check failed in the MAC */
+	uint32	rxbadplcp;	/* parity check of the PLCP header failed */
+	uint32	rxcrsglitch;	/* PHY was able to correlate the preamble but not the header */
+	uint32	rxstrt;		/* Number of received frames with a good PLCP
+				 * (i.e. passing parity check)
+				 */
+	uint32	rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+	uint32	rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+	uint32	rxcfrmucast;	/* number of received CNTRL frames with good FCS and matching RA */
+	uint32	rxrtsucast;	/* number of unicast RTS addressed to the MAC (good FCS) */
+	uint32	rxctsucast;	/* number of unicast CTS addressed to the MAC (good FCS) */
+	uint32	rxackucast;	/* number of ucast ACKS received (good FCS) */
+	uint32	rxdfrmocast;	/* number of received DATA frames (good FCS and not matching RA) */
+	uint32	rxmfrmocast;	/* number of received MGMT frames (good FCS and not matching RA) */
+	uint32	rxcfrmocast;	/* number of received CNTRL frame (good FCS and not matching RA) */
+	uint32	rxrtsocast;	/* number of received RTS not addressed to the MAC */
+	uint32	rxctsocast;	/* number of received CTS not addressed to the MAC */
+	uint32	rxdfrmmcast;	/* number of RX Data multicast frames received by the MAC */
+	uint32	rxmfrmmcast;	/* number of RX Management multicast frames received by the MAC */
+	uint32	rxcfrmmcast;	/* number of RX Control multicast frames received by the MAC
+				 * (unlikely to see these)
+				 */
+	uint32	rxbeaconmbss;	/* beacons received from member of BSS */
+	uint32	rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
+				  * other BSS (WDS FRAME)
+				  */
+	uint32	rxbeaconobss;	/* beacons received from other BSS */
+	uint32	rxrsptmout;	/* Number of response timeouts for transmitted frames
+				 * expecting a response
+				 */
+	uint32	bcntxcancl;	/* transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32	rxf0ovfl;	/* Number of receive fifo 0 overflows */
+	uint32	rxf1ovfl;	/* Number of receive fifo 1 overflows (obsolete) */
+	uint32	rxf2ovfl;	/* Number of receive fifo 2 overflows (obsolete) */
+	uint32	txsfovfl;	/* Number of transmit status fifo overflows (obsolete) */
+	uint32	pmqovfl;	/* Number of PMQ overflows */
+	uint32	rxcgprqfrm;	/* Number of received Probe requests that made it into
+				 * the PRQ fifo
+				 */
+	uint32	rxcgprsqovfl;	/* Rx Probe Request Queue overflow in the AP */
+	uint32	txcgprsfail;	/* Tx Probe Response Fail. AP sent probe response but did
+				 * not get ACK
+				 */
+	uint32	txcgprssuc;	/* Tx Probe Response Success (ACK was received) */
+	uint32	prs_timeout;	/* Number of probe requests that were dropped from the PRQ
+				 * fifo because a probe response could not be sent out within
+				 * the time limit defined in M_PRS_MAXTIME
+				 */
+	uint32	rxnack;		/* obsolete */
+	uint32	frmscons;	/* obsolete */
+	uint32  txnack;		/* obsolete */
+	uint32	rxback;		/* blockack rxcnt */
+	uint32	txback;		/* blockack txcnt */
+
+	/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+	uint32	txfrag;		/* dot11TransmittedFragmentCount */
+	uint32	txmulti;	/* dot11MulticastTransmittedFrameCount */
+	uint32	txfail;		/* dot11FailedCount */
+	uint32	txretry;	/* dot11RetryCount */
+	uint32	txretrie;	/* dot11MultipleRetryCount */
+	uint32	rxdup;		/* dot11FrameduplicateCount */
+	uint32	txrts;		/* dot11RTSSuccessCount */
+	uint32	txnocts;	/* dot11RTSFailureCount */
+	uint32	txnoack;	/* dot11ACKFailureCount */
+	uint32	rxfrag;		/* dot11ReceivedFragmentCount */
+	uint32	rxmulti;	/* dot11MulticastReceivedFrameCount */
+	uint32	rxcrc;		/* dot11FCSErrorCount */
+	uint32	txfrmsnt;	/* dot11TransmittedFrameCount (bogus MIB?) */
+	uint32	rxundec;	/* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32	tkipmicfaill;	/* TKIPLocalMICFailures */
+	uint32	tkipcntrmsr;	/* TKIPCounterMeasuresInvoked */
+	uint32	tkipreplay;	/* TKIPReplays */
+	uint32	ccmpfmterr;	/* CCMPFormatErrors */
+	uint32	ccmpreplay;	/* CCMPReplays */
+	uint32	ccmpundec;	/* CCMPDecryptErrors */
+	uint32	fourwayfail;	/* FourWayHandshakeFailures */
+	uint32	wepundec;	/* dot11WEPUndecryptableCount */
+	uint32	wepicverr;	/* dot11WEPICVErrorCount */
+	uint32	decsuccess;	/* DecryptSuccessCount */
+	uint32	tkipicverr;	/* TKIPICVErrorCount */
+	uint32	wepexcluded;	/* dot11WEPExcludedCount */
+
+	uint32	txchanrej;	/* Tx frames suppressed due to channel rejection */
+	uint32	psmwds;		/* Count PSM watchdogs */
+	uint32	phywatchdog;	/* Count Phy watchdogs (triggered by ucode) */
+
+	/* MBSS counters, AP only */
+	uint32	prq_entries_handled;	/* PRQ entries read in */
+	uint32	prq_undirected_entries;	/*    which were bcast bss & ssid */
+	uint32	prq_bad_entries;	/*    which could not be translated to info */
+	uint32	atim_suppress_count;	/* TX suppressions on ATIM fifo */
+	uint32	bcn_template_not_ready;	/* Template marked in use on send bcn ... */
+	uint32	bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+	uint32	late_tbtt_dpc;	/* TBTT DPC did not happen in time */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;	/* packets rx at 1Mbps */
+	uint32  rx2mbps;	/* packets rx at 2Mbps */
+	uint32  rx5mbps5;	/* packets rx at 5.5Mbps */
+	uint32  rx6mbps;	/* packets rx at 6Mbps */
+	uint32  rx9mbps;	/* packets rx at 9Mbps */
+	uint32  rx11mbps;	/* packets rx at 11Mbps */
+	uint32  rx12mbps;	/* packets rx at 12Mbps */
+	uint32  rx18mbps;	/* packets rx at 18Mbps */
+	uint32  rx24mbps;	/* packets rx at 24Mbps */
+	uint32  rx36mbps;	/* packets rx at 36Mbps */
+	uint32  rx48mbps;	/* packets rx at 48Mbps */
+	uint32  rx54mbps;	/* packets rx at 54Mbps */
+	uint32  rx108mbps;	/* packets rx at 108mbps */
+	uint32  rx162mbps;	/* packets rx at 162mbps */
+	uint32  rx216mbps;	/* packets rx at 216 mbps */
+	uint32  rx270mbps;	/* packets rx at 270 mbps */
+	uint32  rx324mbps;	/* packets rx at 324 mbps */
+	uint32  rx378mbps;	/* packets rx at 378 mbps */
+	uint32  rx432mbps;	/* packets rx at 432 mbps */
+	uint32  rx486mbps;	/* packets rx at 486 mbps */
+	uint32  rx540mbps;	/* packets rx at 540 mbps */
+
+	/* pkteng rx frame stats */
+	uint32	pktengrxducast; /* unicast frames rxed by the pkteng code */
+	uint32	pktengrxdmcast; /* multicast frames rxed by the pkteng code */
+
+	uint32	rfdisable;	/* count of radio disables */
+	uint32	bphy_rxcrsglitch;	/* PHY count of bphy glitches */
+	uint32  bphy_badplcp;
+
+	uint32	txexptime;	/* Tx frames suppressed due to timer expiration */
+
+	uint32	txmpdu_sgi;	/* count for sgi transmit */
+	uint32	rxmpdu_sgi;	/* count for sgi received */
+	uint32	txmpdu_stbc;	/* count for stbc transmit */
+	uint32	rxmpdu_stbc;	/* count for stbc received */
+
+	uint32	rxundec_mcst;	/* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32	tkipmicfaill_mcst;	/* TKIPLocalMICFailures */
+	uint32	tkipcntrmsr_mcst;	/* TKIPCounterMeasuresInvoked */
+	uint32	tkipreplay_mcst;	/* TKIPReplays */
+	uint32	ccmpfmterr_mcst;	/* CCMPFormatErrors */
+	uint32	ccmpreplay_mcst;	/* CCMPReplays */
+	uint32	ccmpundec_mcst;	/* CCMPDecryptErrors */
+	uint32	fourwayfail_mcst;	/* FourWayHandshakeFailures */
+	uint32	wepundec_mcst;	/* dot11WEPUndecryptableCount */
+	uint32	wepicverr_mcst;	/* dot11WEPICVErrorCount */
+	uint32	decsuccess_mcst;	/* DecryptSuccessCount */
+	uint32	tkipicverr_mcst;	/* TKIPICVErrorCount */
+	uint32	wepexcluded_mcst;	/* dot11WEPExcludedCount */
+
+	uint32	dma_hang;	/* count for dma hang */
+	uint32	reinit;		/* count for reinit */
+
+	uint32  pstatxucast;	/* count of ucast frames xmitted on all psta assoc */
+	uint32  pstatxnoassoc;	/* count of txnoassoc frames xmitted on all psta assoc */
+	uint32  pstarxucast;	/* count of ucast frames received on all psta assoc */
+	uint32  pstarxbcmc;	/* count of bcmc frames received on all psta */
+	uint32  pstatxbcmc;	/* count of bcmc frames transmitted on all psta */
+
+	uint32  cso_passthrough; /* hw cso required but passthrough */
+	uint32	cso_normal;	/* hw cso hdr for normal process */
+	uint32	chained;	/* number of frames chained */
+	uint32	chainedsz1;	/* number of chain size 1 frames */
+	uint32	unchained;	/* number of frames not chained */
+	uint32	maxchainsz;	/* max chain size so far */
+	uint32	currchainsz;	/* current chain size */
+	uint32	rxdrop20s;	/* drop secondary cnt */
+	uint32	pciereset;	/* Secondary Bus Reset issued by driver */
+	uint32	cfgrestore;	/* configspace restore by driver */
+	uint32	reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */
+} wl_cnt_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+typedef struct {
+	uint16  version;    /* see definition of WL_CNT_T_VERSION */
+	uint16  length;     /* length of entire structure */
+
+	/* transmit stat counters */
+	uint32  txframe;    /* tx data frames */
+	uint32  txbyte;     /* tx data bytes */
+	uint32  txretrans;  /* tx mac retransmits */
+	uint32  txerror;    /* tx data errors (derived: sum of others) */
+	uint32  txctl;      /* tx management frames */
+	uint32  txprshort;  /* tx short preamble frames */
+	uint32  txserr;     /* tx status errors */
+	uint32  txnobuf;    /* tx out of buffers errors */
+	uint32  txnoassoc;  /* tx discard because we're not associated */
+	uint32  txrunt;     /* tx runt frames */
+	uint32  txchit;     /* tx header cache hit (fastpath) */
+	uint32  txcmiss;    /* tx header cache miss (slowpath) */
+
+	/* transmit chip error counters */
+	uint32  txuflo;     /* tx fifo underflows */
+	uint32  txphyerr;   /* tx phy errors (indicated in tx status) */
+	uint32  txphycrs;
+
+	/* receive stat counters */
+	uint32  rxframe;    /* rx data frames */
+	uint32  rxbyte;     /* rx data bytes */
+	uint32  rxerror;    /* rx data errors (derived: sum of others) */
+	uint32  rxctl;      /* rx management frames */
+	uint32  rxnobuf;    /* rx out of buffers errors */
+	uint32  rxnondata;  /* rx non data frames in the data channel errors */
+	uint32  rxbadds;    /* rx bad DS errors */
+	uint32  rxbadcm;    /* rx bad control or management frames */
+	uint32  rxfragerr;  /* rx fragmentation errors */
+	uint32  rxrunt;     /* rx runt frames */
+	uint32  rxgiant;    /* rx giant frames */
+	uint32  rxnoscb;    /* rx no scb error */
+	uint32  rxbadproto; /* rx invalid frames */
+	uint32  rxbadsrcmac;    /* rx frames with Invalid Src Mac */
+	uint32  rxbadda;    /* rx frames tossed for invalid da */
+	uint32  rxfilter;   /* rx frames filtered out */
+
+	/* receive chip error counters */
+	uint32  rxoflo;     /* rx fifo overflow errors */
+	uint32  rxuflo[NFIFO];  /* rx dma descriptor underflow errors */
+
+	uint32  d11cnt_txrts_off;   /* d11cnt txrts value when reset d11cnt */
+	uint32  d11cnt_rxcrc_off;   /* d11cnt rxcrc value when reset d11cnt */
+	uint32  d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */
+
+	/* misc counters */
+	uint32  dmade;      /* tx/rx dma descriptor errors */
+	uint32  dmada;      /* tx/rx dma data errors */
+	uint32  dmape;      /* tx/rx dma descriptor protocol errors */
+	uint32  reset;      /* reset count */
+	uint32  tbtt;       /* cnts the TBTT int's */
+	uint32  txdmawar;
+	uint32  pkt_callback_reg_fail;  /* callbacks register failure */
+
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32  txallfrm;   /* total number of frames sent, incl. Data, ACK, RTS, CTS,
+			     * Control Management (includes retransmissions)
+			     */
+	uint32  txrtsfrm;   /* number of RTS sent out by the MAC */
+	uint32  txctsfrm;   /* number of CTS sent out by the MAC */
+	uint32  txackfrm;   /* number of ACK frames sent out */
+	uint32  txdnlfrm;   /* Not used */
+	uint32  txbcnfrm;   /* beacons transmitted */
+	uint32  txfunfl[6]; /* per-fifo tx underflows */
+	uint32	rxtoolate;	/* receive too late */
+	uint32  txfbw;	    /* transmit at fallback bw (dynamic bw) */
+	uint32  txtplunfl;  /* Template underflows (mac was too slow to transmit ACK/CTS
+			     * or BCN)
+			     */
+	uint32  txphyerror; /* Transmit phy error, type of error is reported in tx-status for
+			     * driver enqueued frames
+			     */
+	uint32  rxfrmtoolong;   /* Received frame longer than legal limit (2346 bytes) */
+	uint32  rxfrmtooshrt;   /* Received frame did not contain enough bytes for its frame type */
+	uint32  rxinvmachdr;    /* Either the protocol version != 0 or frame type not
+				 * data/control/management
+			   */
+	uint32  rxbadfcs;   /* number of frames for which the CRC check failed in the MAC */
+	uint32  rxbadplcp;  /* parity check of the PLCP header failed */
+	uint32  rxcrsglitch;    /* PHY was able to correlate the preamble but not the header */
+	uint32  rxstrt;     /* Number of received frames with a good PLCP
+			     * (i.e. passing parity check)
+			     */
+	uint32  rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+	uint32  rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+	uint32  rxcfrmucast;    /* number of received CNTRL frames with good FCS and matching RA */
+	uint32  rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */
+	uint32  rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */
+	uint32  rxackucast; /* number of ucast ACKS received (good FCS) */
+	uint32  rxdfrmocast;    /* number of received DATA frames (good FCS and not matching RA) */
+	uint32  rxmfrmocast;    /* number of received MGMT frames (good FCS and not matching RA) */
+	uint32  rxcfrmocast;    /* number of received CNTRL frame (good FCS and not matching RA) */
+	uint32  rxrtsocast; /* number of received RTS not addressed to the MAC */
+	uint32  rxctsocast; /* number of received CTS not addressed to the MAC */
+	uint32  rxdfrmmcast;    /* number of RX Data multicast frames received by the MAC */
+	uint32  rxmfrmmcast;    /* number of RX Management multicast frames received by the MAC */
+	uint32  rxcfrmmcast;    /* number of RX Control multicast frames received by the MAC
+				 * (unlikely to see these)
+				 */
+	uint32  rxbeaconmbss;   /* beacons received from member of BSS */
+	uint32  rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
+				  * other BSS (WDS FRAME)
+				  */
+	uint32  rxbeaconobss;   /* beacons received from other BSS */
+	uint32  rxrsptmout; /* Number of response timeouts for transmitted frames
+			     * expecting a response
+			     */
+	uint32  bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32  rxf0ovfl;   /* Number of receive fifo 0 overflows */
+	uint32  rxf1ovfl;   /* Number of receive fifo 1 overflows (obsolete) */
+	uint32  rxf2ovfl;   /* Number of receive fifo 2 overflows (obsolete) */
+	uint32  txsfovfl;   /* Number of transmit status fifo overflows (obsolete) */
+	uint32  pmqovfl;    /* Number of PMQ overflows */
+	uint32  rxcgprqfrm; /* Number of received Probe requests that made it into
+			     * the PRQ fifo
+			     */
+	uint32  rxcgprsqovfl;   /* Rx Probe Request Queue overflow in the AP */
+	uint32  txcgprsfail;    /* Tx Probe Response Fail. AP sent probe response but did
+				 * not get ACK
+				 */
+	uint32  txcgprssuc; /* Tx Probe Response Success (ACK was received) */
+	uint32  prs_timeout;    /* Number of probe requests that were dropped from the PRQ
+				 * fifo because a probe response could not be sent out within
+				 * the time limit defined in M_PRS_MAXTIME
+				 */
+	uint32  rxnack;
+	uint32  frmscons;
+	uint32  txnack;		/* obsolete */
+	uint32	rxback;		/* blockack rxcnt */
+	uint32	txback;		/* blockack txcnt */
+
+	/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+	uint32  txfrag;     /* dot11TransmittedFragmentCount */
+	uint32  txmulti;    /* dot11MulticastTransmittedFrameCount */
+	uint32  txfail;     /* dot11FailedCount */
+	uint32  txretry;    /* dot11RetryCount */
+	uint32  txretrie;   /* dot11MultipleRetryCount */
+	uint32  rxdup;      /* dot11FrameduplicateCount */
+	uint32  txrts;      /* dot11RTSSuccessCount */
+	uint32  txnocts;    /* dot11RTSFailureCount */
+	uint32  txnoack;    /* dot11ACKFailureCount */
+	uint32  rxfrag;     /* dot11ReceivedFragmentCount */
+	uint32  rxmulti;    /* dot11MulticastReceivedFrameCount */
+	uint32  rxcrc;      /* dot11FCSErrorCount */
+	uint32  txfrmsnt;   /* dot11TransmittedFrameCount (bogus MIB?) */
+	uint32  rxundec;    /* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32  tkipmicfaill;   /* TKIPLocalMICFailures */
+	uint32  tkipcntrmsr;    /* TKIPCounterMeasuresInvoked */
+	uint32  tkipreplay; /* TKIPReplays */
+	uint32  ccmpfmterr; /* CCMPFormatErrors */
+	uint32  ccmpreplay; /* CCMPReplays */
+	uint32  ccmpundec;  /* CCMPDecryptErrors */
+	uint32  fourwayfail;    /* FourWayHandshakeFailures */
+	uint32  wepundec;   /* dot11WEPUndecryptableCount */
+	uint32  wepicverr;  /* dot11WEPICVErrorCount */
+	uint32  decsuccess; /* DecryptSuccessCount */
+	uint32  tkipicverr; /* TKIPICVErrorCount */
+	uint32  wepexcluded;    /* dot11WEPExcludedCount */
+
+	uint32  rxundec_mcst;   /* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32  tkipmicfaill_mcst;  /* TKIPLocalMICFailures */
+	uint32  tkipcntrmsr_mcst;   /* TKIPCounterMeasuresInvoked */
+	uint32  tkipreplay_mcst;    /* TKIPReplays */
+	uint32  ccmpfmterr_mcst;    /* CCMPFormatErrors */
+	uint32  ccmpreplay_mcst;    /* CCMPReplays */
+	uint32  ccmpundec_mcst; /* CCMPDecryptErrors */
+	uint32  fourwayfail_mcst;   /* FourWayHandshakeFailures */
+	uint32  wepundec_mcst;  /* dot11WEPUndecryptableCount */
+	uint32  wepicverr_mcst; /* dot11WEPICVErrorCount */
+	uint32  decsuccess_mcst;    /* DecryptSuccessCount */
+	uint32  tkipicverr_mcst;    /* TKIPICVErrorCount */
+	uint32  wepexcluded_mcst;   /* dot11WEPExcludedCount */
+
+	uint32  txchanrej;  /* Tx frames suppressed due to channel rejection */
+	uint32  txexptime;  /* Tx frames suppressed due to timer expiration */
+	uint32  psmwds;     /* Count PSM watchdogs */
+	uint32  phywatchdog;    /* Count Phy watchdogs (triggered by ucode) */
+
+	/* MBSS counters, AP only */
+	uint32  prq_entries_handled;    /* PRQ entries read in */
+	uint32  prq_undirected_entries; /*    which were bcast bss & ssid */
+	uint32  prq_bad_entries;    /*    which could not be translated to info */
+	uint32  atim_suppress_count;    /* TX suppressions on ATIM fifo */
+	uint32  bcn_template_not_ready; /* Template marked in use on send bcn ... */
+	uint32  bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+	uint32  late_tbtt_dpc;  /* TBTT DPC did not happen in time */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;    /* packets rx at 1Mbps */
+	uint32  rx2mbps;    /* packets rx at 2Mbps */
+	uint32  rx5mbps5;   /* packets rx at 5.5Mbps */
+	uint32  rx6mbps;    /* packets rx at 6Mbps */
+	uint32  rx9mbps;    /* packets rx at 9Mbps */
+	uint32  rx11mbps;   /* packets rx at 11Mbps */
+	uint32  rx12mbps;   /* packets rx at 12Mbps */
+	uint32  rx18mbps;   /* packets rx at 18Mbps */
+	uint32  rx24mbps;   /* packets rx at 24Mbps */
+	uint32  rx36mbps;   /* packets rx at 36Mbps */
+	uint32  rx48mbps;   /* packets rx at 48Mbps */
+	uint32  rx54mbps;   /* packets rx at 54Mbps */
+	uint32  rx108mbps;  /* packets rx at 108 Mbps */
+	uint32  rx162mbps;  /* packets rx at 162 Mbps */
+	uint32  rx216mbps;  /* packets rx at 216 Mbps */
+	uint32  rx270mbps;  /* packets rx at 270 Mbps */
+	uint32  rx324mbps;  /* packets rx at 324 Mbps */
+	uint32  rx378mbps;  /* packets rx at 378 Mbps */
+	uint32  rx432mbps;  /* packets rx at 432 Mbps */
+	uint32  rx486mbps;  /* packets rx at 486 Mbps */
+	uint32  rx540mbps;  /* packets rx at 540 Mbps */
+
+	/* pkteng rx frame stats */
+	uint32  pktengrxducast; /* unicast frames rxed by the pkteng code */
+	uint32  pktengrxdmcast; /* multicast frames rxed by the pkteng code */
+
+	uint32  rfdisable;  /* count of radio disables */
+	uint32  bphy_rxcrsglitch;   /* PHY count of bphy glitches */
+	uint32  bphy_badplcp;
+
+	uint32  txmpdu_sgi; /* count for sgi transmit */
+	uint32  rxmpdu_sgi; /* count for sgi received */
+	uint32  txmpdu_stbc;    /* count for stbc transmit */
+	uint32  rxmpdu_stbc;    /* count for stbc received */
+
+	uint32	rxdrop20s;	/* drop secondary cnt */
+
+} wl_cnt_ver_six_t;
+
+#define	WL_DELTA_STATS_T_VERSION	2	/* current version of wl_delta_stats_t struct */
+
+typedef struct {
+	uint16 version;     /* see definition of WL_DELTA_STATS_T_VERSION */
+	uint16 length;      /* length of entire structure */
+
+	/* transmit stat counters */
+	uint32 txframe;     /* tx data frames */
+	uint32 txbyte;      /* tx data bytes */
+	uint32 txretrans;   /* tx mac retransmits */
+	uint32 txfail;      /* tx failures */
+
+	/* receive stat counters */
+	uint32 rxframe;     /* rx data frames */
+	uint32 rxbyte;      /* rx data bytes */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;	/* packets rx at 1Mbps */
+	uint32  rx2mbps;	/* packets rx at 2Mbps */
+	uint32  rx5mbps5;	/* packets rx at 5.5Mbps */
+	uint32  rx6mbps;	/* packets rx at 6Mbps */
+	uint32  rx9mbps;	/* packets rx at 9Mbps */
+	uint32  rx11mbps;	/* packets rx at 11Mbps */
+	uint32  rx12mbps;	/* packets rx at 12Mbps */
+	uint32  rx18mbps;	/* packets rx at 18Mbps */
+	uint32  rx24mbps;	/* packets rx at 24Mbps */
+	uint32  rx36mbps;	/* packets rx at 36Mbps */
+	uint32  rx48mbps;	/* packets rx at 48Mbps */
+	uint32  rx54mbps;	/* packets rx at 54Mbps */
+	uint32  rx108mbps;	/* packets rx at 108 Mbps */
+	uint32  rx162mbps;	/* packets rx at 162 Mbps */
+	uint32  rx216mbps;	/* packets rx at 216 Mbps */
+	uint32  rx270mbps;	/* packets rx at 270 Mbps */
+	uint32  rx324mbps;	/* packets rx at 324 Mbps */
+	uint32  rx378mbps;	/* packets rx at 378 Mbps */
+	uint32  rx432mbps;	/* packets rx at 432 Mbps */
+	uint32  rx486mbps;	/* packets rx at 486 Mbps */
+	uint32  rx540mbps;	/* packets rx at 540 Mbps */
+
+	/* phy stats */
+	uint32 rxbadplcp;
+	uint32 rxcrsglitch;
+	uint32 bphy_rxcrsglitch;
+	uint32 bphy_badplcp;
+
+} wl_delta_stats_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+typedef struct {
+	uint32 packets;
+	uint32 bytes;
+} wl_traffic_stats_t;
+
+typedef struct {
+	uint16	version;	/* see definition of WL_WME_CNT_VERSION */
+	uint16	length;		/* length of entire structure */
+
+	wl_traffic_stats_t tx[AC_COUNT];	/* Packets transmitted */
+	wl_traffic_stats_t tx_failed[AC_COUNT];	/* Packets dropped or failed to transmit */
+	wl_traffic_stats_t rx[AC_COUNT];	/* Packets received */
+	wl_traffic_stats_t rx_failed[AC_COUNT];	/* Packets failed to receive */
+
+	wl_traffic_stats_t forward[AC_COUNT];	/* Packets forwarded by AP */
+
+	wl_traffic_stats_t tx_expired[AC_COUNT];	/* packets dropped due to lifetime expiry */
+
+} wl_wme_cnt_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+struct wl_msglevel2 {
+	uint32 low;
+	uint32 high;
+};
+
+typedef struct wl_mkeep_alive_pkt {
+	uint16	version; /* Version for mkeep_alive */
+	uint16	length; /* length of fixed parameters in the structure */
+	uint32	period_msec;
+	uint16	len_bytes;
+	uint8	keep_alive_id; /* 0 - 3 for N = 4 */
+	uint8	data[1];
+} wl_mkeep_alive_pkt_t;
+
+#define WL_MKEEP_ALIVE_VERSION		1
+#define WL_MKEEP_ALIVE_FIXED_LEN	OFFSETOF(wl_mkeep_alive_pkt_t, data)
+#define WL_MKEEP_ALIVE_PRECISION	500
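+
+/* Example (for illustration only; derived from the field comments above): the
+ * request buffer is the fixed portion plus the variable payload, so a host
+ * would typically size it as
+ *
+ *   uint16 buf_len = WL_MKEEP_ALIVE_FIXED_LEN + pkt->len_bytes;
+ *
+ * with pkt->version set to WL_MKEEP_ALIVE_VERSION and keep_alive_id in the
+ * 0..3 range noted above.
+ */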
+
+/* TCP Keep-Alive conn struct */
+typedef struct wl_mtcpkeep_alive_conn_pkt {
+	struct ether_addr saddr;		/* src mac address */
+	struct ether_addr daddr;		/* dst mac address */
+	struct ipv4_addr sipaddr;		/* source IP addr */
+	struct ipv4_addr dipaddr;		/* dest IP addr */
+	uint16 sport;				/* src port */
+	uint16 dport;				/* dest port */
+	uint32 seq;				/* seq number */
+	uint32 ack;				/* ACK number */
+	uint16 tcpwin;				/* TCP window */
+} wl_mtcpkeep_alive_conn_pkt_t;
+
+/* TCP Keep-Alive interval struct */
+typedef struct wl_mtcpkeep_alive_timers_pkt {
+	uint16 interval;		/* interval timer */
+	uint16 retry_interval;		/* retry_interval timer */
+	uint16 retry_count;		/* retry_count */
+} wl_mtcpkeep_alive_timers_pkt_t;
+
+typedef struct wake_info {
+	uint32 wake_reason;
+	uint32 wake_info_len;		/* size of packet */
+	uchar  packet[1];
+} wake_info_t;
+
+typedef struct wake_pkt {
+	uint32 wake_pkt_len;		/* size of packet */
+	uchar  packet[1];
+} wake_pkt_t;
+
+
+#define WL_MTCPKEEP_ALIVE_VERSION		1
+
+#ifdef WLBA
+
+#define WLC_BA_CNT_VERSION  1   /* current version of wlc_ba_cnt_t */
+
+/* block ack related stats */
+typedef struct wlc_ba_cnt {
+	uint16  version;    /* WLC_BA_CNT_VERSION */
+	uint16  length;     /* length of entire structure */
+
+	/* transmit stat counters */
+	uint32 txpdu;       /* pdus sent */
+	uint32 txsdu;       /* sdus sent */
+	uint32 txfc;        /* tx side flow controlled packets */
+	uint32 txfci;       /* tx side flow control initiated */
+	uint32 txretrans;   /* retransmitted pdus */
+	uint32 txbatimer;   /* ba resend due to timer */
+	uint32 txdrop;      /* dropped packets */
+	uint32 txaddbareq;  /* addba req sent */
+	uint32 txaddbaresp; /* addba resp sent */
+	uint32 txdelba;     /* delba sent */
+	uint32 txba;        /* ba sent */
+	uint32 txbar;       /* bar sent */
+	uint32 txpad[4];    /* future */
+
+	/* receive side counters */
+	uint32 rxpdu;       /* pdus recd */
+	uint32 rxqed;       /* pdus buffered before sending up */
+	uint32 rxdup;       /* duplicate pdus */
+	uint32 rxnobuf;     /* pdus discarded due to no buf */
+	uint32 rxaddbareq;  /* addba req recd */
+	uint32 rxaddbaresp; /* addba resp recd */
+	uint32 rxdelba;     /* delba recd */
+	uint32 rxba;        /* ba recd */
+	uint32 rxbar;       /* bar recd */
+	uint32 rxinvba;     /* invalid ba recd */
+	uint32 rxbaholes;   /* ba recd with holes */
+	uint32 rxunexp;     /* unexpected packets */
+	uint32 rxpad[4];    /* future */
+} wlc_ba_cnt_t;
+#endif /* WLBA */
+
+/* structure for per-tid ampdu control */
+struct ampdu_tid_control {
+	uint8 tid;			/* tid */
+	uint8 enable;			/* enable/disable */
+};
+
+/* struct for ampdu tx/rx aggregation control */
+struct ampdu_aggr {
+	int8 aggr_override;	/* aggr overridden by dongle. Not to be set by host. */
+	uint16 conf_TID_bmap;	/* bitmap of TIDs to configure */
+	uint16 enab_TID_bmap;	/* enable/disable per TID */
+};
+
+/* structure for identifying ea/tid for sending addba/delba */
+struct ampdu_ea_tid {
+	struct ether_addr ea;		/* Station address */
+	uint8 tid;			/* tid */
+	uint8 initiator;	/* 0 is recipient, 1 is originator */
+};
+/* structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */
+struct ampdu_retry_tid {
+	uint8 tid;	/* tid */
+	uint8 retry;	/* retry value */
+};
+
+#define BDD_FNAME_LEN       32  /* Max length of friendly name */
+typedef struct bdd_fname {
+	uint8 len;          /* length of friendly name */
+	uchar name[BDD_FNAME_LEN];  /* friendly name */
+} bdd_fname_t;
+
+/* structure for addts arguments */
+/* For ioctls that take a list of TSPEC */
+struct tslist {
+	int count;			/* number of tspecs */
+	struct tsinfo_arg tsinfo[1];	/* variable length array of tsinfo */
+};
+
+#ifdef WLTDLS
+/* structure for tdls iovars */
+typedef struct tdls_iovar {
+	struct ether_addr ea;		/* Station address */
+	uint8 mode;			/* mode: depends on iovar */
+	chanspec_t chanspec;
+	uint32 pad;			/* future */
+} tdls_iovar_t;
+
+#define TDLS_WFD_IE_SIZE		512
+/* structure for tdls wfd ie */
+typedef struct tdls_wfd_ie_iovar {
+	struct ether_addr ea;		/* Station address */
+	uint8 mode;
+	uint16 length;
+	uint8 data[TDLS_WFD_IE_SIZE];
+} tdls_wfd_ie_iovar_t;
+#endif /* WLTDLS */
+
+/* structure for addts/delts arguments */
+typedef struct tspec_arg {
+	uint16 version;			/* see definition of TSPEC_ARG_VERSION */
+	uint16 length;			/* length of entire structure */
+	uint flag;			/* bit field */
+	/* TSPEC Arguments */
+	struct tsinfo_arg tsinfo;	/* TS Info bit field */
+	uint16 nom_msdu_size;		/* (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size;		/* Maximum MSDU Size (bytes) */
+	uint min_srv_interval;		/* Minimum Service Interval (us) */
+	uint max_srv_interval;		/* Maximum Service Interval (us) */
+	uint inactivity_interval;	/* Inactivity Interval (us) */
+	uint suspension_interval;	/* Suspension Interval (us) */
+	uint srv_start_time;		/* Service Start Time (us) */
+	uint min_data_rate;		/* Minimum Data Rate (bps) */
+	uint mean_data_rate;		/* Mean Data Rate (bps) */
+	uint peak_data_rate;		/* Peak Data Rate (bps) */
+	uint max_burst_size;		/* Maximum Burst Size (bytes) */
+	uint delay_bound;		/* Delay Bound (us) */
+	uint min_phy_rate;		/* Minimum PHY Rate (bps) */
+	uint16 surplus_bw;		/* Surplus Bandwidth Allowance (range 1.0 to 8.0) */
+	uint16 medium_time;		/* Medium Time (32 us/s periods) */
+	uint8 dialog_token;		/* dialog token */
+} tspec_arg_t;
+
+/* tspec arg for desired station */
+typedef	struct tspec_per_sta_arg {
+	struct ether_addr ea;
+	struct tspec_arg ts;
+} tspec_per_sta_arg_t;
+
+/* structure for max bandwidth for each access category */
+typedef	struct wme_max_bandwidth {
+	uint32	ac[AC_COUNT];	/* max bandwidth for each access category */
+} wme_max_bandwidth_t;
+
+#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t))
+
+#define	TSPEC_ARG_VERSION		2	/* current version of wl_tspec_arg_t struct */
+#define TSPEC_ARG_LENGTH		55	/* argument length from tsinfo to medium_time */
+#define TSPEC_DEFAULT_DIALOG_TOKEN	42	/* default dialog token */
+#define TSPEC_DEFAULT_SBW_FACTOR	0x3000	/* default surplus bw */
+
+
+#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE  80
+#define WLC_WOWL_MAX_KEEPALIVE	2
+
+/* Packet lifetime configuration per ac */
+typedef struct wl_lifetime {
+	uint32 ac;	        /* access class */
+	uint32 lifetime;    /* Packet lifetime value in ms */
+} wl_lifetime_t;
+
+/* Channel Switch Announcement param */
+typedef struct wl_chan_switch {
+	uint8 mode;		/* value 0 or 1 */
+	uint8 count;		/* count # of beacons before switching */
+	chanspec_t chspec;	/* chanspec */
+	uint8 reg;		/* regulatory class */
+	uint8 frame_type;		/* csa frame type, unicast or broadcast */
+} wl_chan_switch_t;
+
+enum {
+	PFN_LIST_ORDER,
+	PFN_RSSI
+};
+
+enum {
+	DISABLE,
+	ENABLE
+};
+
+enum {
+	OFF_ADAPT,
+	SMART_ADAPT,
+	STRICT_ADAPT,
+	SLOW_ADAPT
+};
+
+#define SORT_CRITERIA_BIT		0
+#define AUTO_NET_SWITCH_BIT		1
+#define ENABLE_BKGRD_SCAN_BIT		2
+#define IMMEDIATE_SCAN_BIT		3
+#define	AUTO_CONNECT_BIT		4
+#define	ENABLE_BD_SCAN_BIT		5
+#define ENABLE_ADAPTSCAN_BIT		6
+#define IMMEDIATE_EVENT_BIT		8
+#define SUPPRESS_SSID_BIT		9
+#define ENABLE_NET_OFFLOAD_BIT		10
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_BIT		11
+
+#define SORT_CRITERIA_MASK	0x0001
+#define AUTO_NET_SWITCH_MASK	0x0002
+#define ENABLE_BKGRD_SCAN_MASK	0x0004
+#define IMMEDIATE_SCAN_MASK	0x0008
+#define AUTO_CONNECT_MASK	0x0010
+
+#define ENABLE_BD_SCAN_MASK	0x0020
+#define ENABLE_ADAPTSCAN_MASK	0x00c0
+#define IMMEDIATE_EVENT_MASK	0x0100
+#define SUPPRESS_SSID_MASK	0x0200
+#define ENABLE_NET_OFFLOAD_MASK	0x0400
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_MASK	0x0800
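+
+/* Example (illustrative only): the PFN 'flags' bit field (see wl_pfn_param_t
+ * below) is composed from the bit/mask definitions above, e.g.
+ *
+ *   int16 flags = ENABLE_BKGRD_SCAN_MASK | IMMEDIATE_SCAN_MASK;
+ *   flags |= (SMART_ADAPT << ENABLE_ADAPTSCAN_BIT) & ENABLE_ADAPTSCAN_MASK;
+ *
+ * The adaptive-scan field spans two bits (mask 0x00c0); mapping the
+ * OFF/SMART/STRICT/SLOW_ADAPT enum into it as shown is an assumption based on
+ * the mask width, not something stated elsewhere in this header.
+ */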
+
+#define PFN_VERSION			2
+#define PFN_SCANRESULT_VERSION		1
+#define MAX_PFN_LIST_COUNT		16
+
+#define PFN_COMPLETE			1
+#define PFN_INCOMPLETE			0
+
+#define DEFAULT_BESTN			2
+#define DEFAULT_MSCAN			0
+#define DEFAULT_REPEAT			10
+#define DEFAULT_EXP			2
+
+#define PFN_PARTIAL_SCAN_BIT		0
+#define PFN_PARTIAL_SCAN_MASK		1
+
+/* PFN network info structure */
+typedef struct wl_pfn_subnet_info {
+	struct ether_addr BSSID;
+	uint8	channel; /* channel number only */
+	uint8	SSID_len;
+	uint8	SSID[32];
+} wl_pfn_subnet_info_t;
+
+typedef struct wl_pfn_net_info {
+	wl_pfn_subnet_info_t pfnsubnet;
+	int16	RSSI; /* receive signal strength (in dBm) */
+	uint16	timestamp; /* age in seconds */
+} wl_pfn_net_info_t;
+
+typedef struct wl_pfn_lnet_info {
+	wl_pfn_subnet_info_t pfnsubnet; /* BSSID + channel + SSID len + SSID */
+	uint16	flags; /* partial scan, etc */
+	int16	RSSI; /* receive signal strength (in dBm) */
+	uint32	timestamp; /* age in milliseconds */
+	uint16	rtt0; /* estimated distance to this AP in centimeters */
+	uint16	rtt1; /* standard deviation of the distance to this AP in centimeters */
+} wl_pfn_lnet_info_t;
+
+typedef struct wl_pfn_lscanresults {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_lnet_info_t netinfo[1];
+} wl_pfn_lscanresults_t;
+
+/* this is used to report on 1-* pfn scan results */
+typedef struct wl_pfn_scanresults {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_net_info_t netinfo[1];
+} wl_pfn_scanresults_t;
+
+/* used to report exactly one scan result */
+/* plus reports detailed scan info in bss_info */
+typedef struct wl_pfn_scanresult {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_net_info_t netinfo;
+	wl_bss_info_t bss_info;
+} wl_pfn_scanresult_t;
+
+/* PFN data structure */
+typedef struct wl_pfn_param {
+	int32 version;			/* PNO parameters version */
+	int32 scan_freq;		/* Scan frequency */
+	int32 lost_network_timeout;	/* Timeout in sec. to declare
+								* discovered network as lost
+								*/
+	int16 flags;			/* Bit field to control features
+							* of PFN such as sort criteria auto
+							* enable switch and background scan
+							*/
+	int16 rssi_margin;		/* Margin to avoid jitter for choosing a
+							* PFN based on RSSI sort criteria
+							*/
+	uint8 bestn; /* number of best networks in each scan */
+	uint8 mscan; /* number of scans recorded */
+	uint8 repeat; /* Minimum number of scan intervals
+				     * before scan frequency changes in adaptive scan
+				     */
+	uint8 exp; /* Exponent of 2 for maximum scan interval */
+	int32 slow_freq; /* slow scan period */
+} wl_pfn_param_t;
+
+typedef struct wl_pfn_bssid {
+	struct ether_addr  macaddr;
+	/* Bit4: suppress_lost, Bit3: suppress_found */
+	uint16             flags;
+} wl_pfn_bssid_t;
+#define WL_PFN_SUPPRESSFOUND_MASK	0x08
+#define WL_PFN_SUPPRESSLOST_MASK	0x10
+#define WL_PFN_RSSI_MASK		0xff00
+#define WL_PFN_RSSI_SHIFT		8
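+
+/* Illustrative assumption: WL_PFN_RSSI_MASK/SHIFT suggest the upper byte of
+ * wl_pfn_bssid_t.flags can carry a per-BSSID RSSI value, e.g.
+ *
+ *   bssid->flags = WL_PFN_SUPPRESSLOST_MASK |
+ *                  (((uint16)(uint8)rssi << WL_PFN_RSSI_SHIFT) & WL_PFN_RSSI_MASK);
+ *
+ * Only the suppress_lost/suppress_found bits are documented in the structure
+ * comment above; the RSSI encoding here is inferred from the mask and shift.
+ */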
+
+typedef struct wl_pfn_cfg {
+	uint32	reporttype;
+	int32	channel_num;
+	uint16	channel_list[WL_NUMCHANNELS];
+	uint32	flags;
+} wl_pfn_cfg_t;
+#define WL_PFN_REPORT_ALLNET    0
+#define WL_PFN_REPORT_SSIDNET   1
+#define WL_PFN_REPORT_BSSIDNET  2
+
+#define WL_PFN_CFG_FLAGS_PROHIBITED	0x00000001	/* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_RESERVED	0xfffffffe	/* Remaining reserved for future use */
+
+typedef struct wl_pfn {
+	wlc_ssid_t		ssid;			/* ssid name and its length */
+	int32			flags;			/* bit2: hidden */
+	int32			infra;			/* BSS Vs IBSS */
+	int32			auth;			/* Open Vs Closed */
+	int32			wpa_auth;		/* WPA type */
+	int32			wsec;			/* wsec value */
+} wl_pfn_t;
+
+typedef struct wl_pfn_list {
+	uint32		version;
+	uint32		enabled;
+	uint32		count;
+	wl_pfn_t	pfn[1];
+} wl_pfn_list_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct pfn_olmsg_params_t {
+	wlc_ssid_t ssid;
+	uint32	cipher_type;
+	uint32	auth_type;
+	uint8	channels[4];
+} BWL_POST_PACKED_STRUCT pfn_olmsg_params;
+
+#define WL_PFN_HIDDEN_BIT		2
+#define WL_PFN_HIDDEN_MASK		0x4
+
+#ifndef BESTN_MAX
+#define BESTN_MAX			3
+#endif
+
+#ifndef MSCAN_MAX
+#define MSCAN_MAX			90
+#endif
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* Service discovery */
+typedef struct {
+	uint8	transaction_id;	/* Transaction id */
+	uint8	protocol;	/* Service protocol type */
+	uint16	query_len;	/* Length of query */
+	uint16	response_len;	/* Length of response */
+	uint8	qrbuf[1];
+} wl_p2po_qr_t;
+
+typedef struct {
+	uint16			period;			/* extended listen period */
+	uint16			interval;		/* extended listen interval */
+} wl_p2po_listen_t;
+
+/* GAS state machine tunable parameters.  A structure field value of 0 means use the default. */
+typedef struct wl_gas_config {
+	uint16 max_retransmit;		/* Max # of firmware/driver retransmits on no Ack
+					 * from peer (on top of the ucode retries).
+					 */
+	uint16 response_timeout;	/* Max time to wait for a GAS-level response
+					 * after sending a packet.
+					 */
+	uint16 max_comeback_delay;	/* Max GAS response comeback delay.
+					 * Exceeding this fails the GAS exchange.
+					 */
+	uint16 max_retries;		/* Max # of GAS state machine retries on failure
+					 * of a GAS frame exchange.
+					 */
+} wl_gas_config_t;
+
+/* P2P Find Offload parameters */
+typedef BWL_PRE_PACKED_STRUCT struct wl_p2po_find_config {
+	uint16 version;			/* Version of this struct */
+	uint16 length;			/* sizeof(wl_p2po_find_config_t) */
+	int32 search_home_time;		/* P2P search state home time when concurrent
+					 * connection exists.  -1 for default.
+					 */
+	uint8 num_social_channels;
+			/* Number of social channels up to WL_P2P_SOCIAL_CHANNELS_MAX.
+			 * 0 means use default social channels.
+			 */
+	uint8 flags;
+	uint16 social_channels[1];	/* Variable length array of social channels */
+} BWL_POST_PACKED_STRUCT wl_p2po_find_config_t;
+#define WL_P2PO_FIND_CONFIG_VERSION 2	/* value for version field */
+
+/* wl_p2po_find_config_t flags */
+#define P2PO_FIND_FLAG_SCAN_ALL_APS 0x01	/* Whether to scan for all APs in the p2po_find
+						 * periodic scans of all channels.
+						 * 0 means scan for only P2P devices.
+						 * 1 means scan for P2P devices plus non-P2P APs.
+						 */
+
+
+/* For adding a WFDS service to seek */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 seek_hdl;		/* unique id chosen by host */
+	uint8 addr[6];			/* Seek service from a specific device with this
+					 * MAC address, all 1's for any device.
+					 */
+	uint8 service_hash[P2P_WFDS_HASH_LEN];
+	uint8 service_name_len;
+	uint8 service_name[MAX_WFDS_SEEK_SVC_NAME_LEN];
+					/* Service name to seek, not null terminated */
+	uint8 service_info_req_len;
+	uint8 service_info_req[1];	/* Service info request, not null terminated.
+					 * Variable length specified by service_info_req_len.
+					 * Maximum length is MAX_WFDS_SEEK_SVC_INFO_LEN.
+					 */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_add_t;
+
+/* For deleting a WFDS service to seek */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 seek_hdl;		/* delete service specified by id */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_del_t;
+
+
+/* For adding a WFDS service to advertise */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 advertise_hdl;		/* unique id chosen by host */
+	uint8 service_hash[P2P_WFDS_HASH_LEN];
+	uint32 advertisement_id;
+	uint16 service_config_method;
+	uint8 service_name_len;
+	uint8 service_name[MAX_WFDS_SVC_NAME_LEN];
+					/* Service name, not null terminated */
+	uint8 service_status;
+	uint16 service_info_len;
+	uint8 service_info[1];		/* Service info, not null terminated.
+					 * Variable length specified by service_info_len.
+					 * Maximum length is MAX_WFDS_ADV_SVC_INFO_LEN.
+					 */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_add_t;
+
+/* For deleting a WFDS service to advertise */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 advertise_hdl;	/* delete service specified by hdl */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_del_t;
+
+/* P2P Offload discovery mode for the p2po_state iovar */
+typedef enum {
+	WL_P2PO_DISC_STOP,
+	WL_P2PO_DISC_LISTEN,
+	WL_P2PO_DISC_DISCOVERY
+} disc_mode_t;
+
+/* ANQP offload */
+
+#define ANQPO_MAX_QUERY_SIZE		256
+typedef struct {
+	uint16 max_retransmit;		/* ~0 use default, max retransmit on no ACK from peer */
+	uint16 response_timeout;	/* ~0 use default, msec to wait for resp after tx packet */
+	uint16 max_comeback_delay;	/* ~0 use default, max comeback delay in resp else fail */
+	uint16 max_retries;			/* ~0 use default, max retries on failure */
+	uint16 query_len;			/* length of ANQP query */
+	uint8 query_data[1];		/* ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */
+} wl_anqpo_set_t;
+
+typedef struct {
+	uint16 channel;				/* channel of the peer */
+	struct ether_addr addr;		/* addr of the peer */
+} wl_anqpo_peer_t;
+
+#define ANQPO_MAX_PEER_LIST			64
+typedef struct {
+	uint16 count;				/* number of peers in list */
+	wl_anqpo_peer_t peer[1];	/* max ANQPO_MAX_PEER_LIST */
+} wl_anqpo_peer_list_t;
+
+#define ANQPO_MAX_IGNORE_SSID		64
+typedef struct {
+	bool is_clear;				/* set to clear list (not used on GET) */
+	uint16 count;				/* number of SSID in list */
+	wlc_ssid_t ssid[1];			/* max ANQPO_MAX_IGNORE_SSID */
+} wl_anqpo_ignore_ssid_list_t;
+
+#define ANQPO_MAX_IGNORE_BSSID		64
+typedef struct {
+	bool is_clear;				/* set to clear list (not used on GET) */
+	uint16 count;				/* number of addr in list */
+	struct ether_addr bssid[1];	/* max ANQPO_MAX_IGNORE_BSSID */
+} wl_anqpo_ignore_bssid_list_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+struct toe_ol_stats_t {
+	/* Num of tx packets that don't need to be checksummed */
+	uint32 tx_summed;
+
+	/* Num of tx packets where checksum is filled by offload engine */
+	uint32 tx_iph_fill;
+	uint32 tx_tcp_fill;
+	uint32 tx_udp_fill;
+	uint32 tx_icmp_fill;
+
+	/*  Num of rx packets where toe finds out if checksum is good or bad */
+	uint32 rx_iph_good;
+	uint32 rx_iph_bad;
+	uint32 rx_tcp_good;
+	uint32 rx_tcp_bad;
+	uint32 rx_udp_good;
+	uint32 rx_udp_bad;
+	uint32 rx_icmp_good;
+	uint32 rx_icmp_bad;
+
+	/* Num of tx packets in which csum error is injected */
+	uint32 tx_tcp_errinj;
+	uint32 tx_udp_errinj;
+	uint32 tx_icmp_errinj;
+
+	/* Num of rx packets in which csum error is injected */
+	uint32 rx_tcp_errinj;
+	uint32 rx_udp_errinj;
+	uint32 rx_icmp_errinj;
+};
+
+/* Arp offload statistic counts */
+struct arp_ol_stats_t {
+	uint32  host_ip_entries;	/* Host IP table addresses (more than one if multihomed) */
+	uint32  host_ip_overflow;	/* Host IP table additions skipped due to overflow */
+
+	uint32  arp_table_entries;	/* ARP table entries */
+	uint32  arp_table_overflow;	/* ARP table additions skipped due to overflow */
+
+	uint32  host_request;		/* ARP requests from host */
+	uint32  host_reply;		/* ARP replies from host */
+	uint32  host_service;		/* ARP requests from host serviced by ARP Agent */
+
+	uint32  peer_request;		/* ARP requests received from network */
+	uint32  peer_request_drop;	/* ARP requests from network that were dropped */
+	uint32  peer_reply;		/* ARP replies received from network */
+	uint32  peer_reply_drop;	/* ARP replies from network that were dropped */
+	uint32  peer_service;		/* ARP request from host serviced by ARP Agent */
+};
+
+/* NS offload statistic counts */
+struct nd_ol_stats_t {
+	uint32  host_ip_entries;    /* Host IP table addresses (more than one if multihomed) */
+	uint32  host_ip_overflow;   /* Host IP table additions skipped due to overflow */
+	uint32  peer_request;       /* NS requests received from network */
+	uint32  peer_request_drop;  /* NS requests from network that were dropped */
+	uint32  peer_reply_drop;    /* NA replies from network that were dropped */
+	uint32  peer_service;       /* NS request from host serviced by firmware */
+};
+
+/*
+ * Keep-alive packet offloading.
+ */
+
+/* NAT keep-alive packets format: specifies the re-transmission period, the packet
+ * length, and packet contents.
+ */
+typedef struct wl_keep_alive_pkt {
+	uint32	period_msec;	/* Retransmission period (0 to disable packet re-transmits) */
+	uint16	len_bytes;	/* Size of packet to transmit (0 to disable packet re-transmits) */
+	uint8	data[1];	/* Variable length packet to transmit.  Contents should include
+				 * entire ethernet packet (enet header, IP header, UDP header,
+				 * and UDP payload) in network byte order.
+				 */
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN		OFFSETOF(wl_keep_alive_pkt_t, data)
+
+
+/*
+ * Dongle pattern matching filter.
+ */
+
+#define MAX_WAKE_PACKET_CACHE_BYTES 128 /* Maximum cached wake packet */
+
+#define MAX_WAKE_PACKET_BYTES	    (DOT11_A3_HDR_LEN +			    \
+				     DOT11_QOS_LEN +			    \
+				     sizeof(struct dot11_llc_snap_header) + \
+				     ETHER_MAX_DATA)
+
+typedef struct pm_wake_packet {
+	uint32	status;		/* Whether the wake reason is a packet (if so, the other fields are valid) */
+	uint32	pattern_id;	/* Pattern ID that matched */
+	uint32	original_packet_size;
+	uint32	saved_packet_size;
+	uchar	packet[MAX_WAKE_PACKET_CACHE_BYTES];
+} pm_wake_packet_t;
+
+/* Packet filter types. Currently, only pattern matching is supported. */
+typedef enum wl_pkt_filter_type {
+	WL_PKT_FILTER_TYPE_PATTERN_MATCH=0,	/* Pattern matching filter */
+	WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH=1, /* Magic packet match */
+	WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH=2, /* A pattern list (match all to match filter) */
+	WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH=3, /* SECURE WOWL magic / net pattern match */
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+
+/* String mapping for types that may be used by applications or debug */
+#define WL_PKT_FILTER_TYPE_NAMES \
+	{ "PATTERN", WL_PKT_FILTER_TYPE_PATTERN_MATCH },       \
+	{ "MAGIC",   WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH }, \
+	{ "PATLIST", WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH }
+
+/* Secure WOWL packets arrive encrypted and must be decrypted before checking for a filter match */
+typedef struct wl_pkt_decrypter {
+		uint8* (*dec_cb)(void* dec_ctx, const void *sdu, int sending);
+		void*  dec_ctx;
+} wl_pkt_decrypter_t;
+
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+typedef struct wl_pkt_filter_pattern {
+	union {
+		uint32	offset;		/* Offset within received packet to start pattern matching.
+				 * Offset '0' is the first byte of the ethernet header.
+				 */
+		wl_pkt_decrypter_t*	decrypt_ctx;	/* Decrypt context */
+	};
+	uint32	size_bytes;	/* Size of the pattern.  Bitmask must be the same size. */
+	uint8   mask_and_pattern[1]; /* Variable length mask and pattern data.  mask starts
+				      * at offset 0.  Pattern immediately follows mask.
+				      */
+} wl_pkt_filter_pattern_t;
+
+/* A pattern list is a numerically specified list of modified pattern structures. */
+typedef struct wl_pkt_filter_pattern_listel {
+	uint16 rel_offs;	/* Offset to begin match (relative to 'base' below) */
+	uint16 base_offs;	/* Base for offset (defined below) */
+	uint16 size_bytes;	/* Size of mask/pattern */
+	uint16 match_flags;	/* Additional flags controlling the match */
+	uint8  mask_and_data[1]; /* Variable length mask followed by data, each size_bytes */
+} wl_pkt_filter_pattern_listel_t;
+
+typedef struct wl_pkt_filter_pattern_list {
+	uint8 list_cnt;		/* Number of elements in the list */
+	uint8 PAD1[1];		/* Reserved (possible version: reserved) */
+	uint16 totsize;		/* Total size of this pattern list (includes this struct) */
+	wl_pkt_filter_pattern_listel_t patterns[1]; /* Variable number of list elements */
+} wl_pkt_filter_pattern_list_t;
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+typedef struct wl_pkt_filter {
+	uint32	id;		/* Unique filter id, specified by app. */
+	uint32	type;		/* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+	uint32	negate_match;	/* Negate the result of filter matches */
+	union {			/* Filter definitions */
+		wl_pkt_filter_pattern_t pattern;	/* Pattern matching filter */
+		wl_pkt_filter_pattern_list_t patlist; /* List of patterns to match */
+	} u;
+} wl_pkt_filter_t;
+
+/* IOVAR "tcp_keep_set" parameter. Used to configure TCP keep-alive parameters. */
+typedef struct wl_tcp_keep_set {
+	uint32	val1;
+	uint32	val2;
+} wl_tcp_keep_set_t;
+
+#define WL_PKT_FILTER_FIXED_LEN		  OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN	  OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
+#define WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_list_t, patterns)
+#define WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN	\
+			OFFSETOF(wl_pkt_filter_pattern_listel_t, mask_and_data)
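+
+/* Example (for illustration, following the comments above): mask and pattern
+ * are both size_bytes long and are stored back to back, so the full
+ * "pkt_filter_add" buffer for a pattern filter would be sized as
+ *
+ *   uint32 buf_len = WL_PKT_FILTER_FIXED_LEN +
+ *                    WL_PKT_FILTER_PATTERN_FIXED_LEN +
+ *                    2 * pattern_size_bytes;
+ *
+ * with the mask copied in at mask_and_pattern[0] and the pattern immediately
+ * after it at mask_and_pattern[pattern_size_bytes].
+ */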
+
+/* IOVAR "pkt_filter_enable" parameter. */
+typedef struct wl_pkt_filter_enable {
+	uint32	id;		/* Unique filter id */
+	uint32	enable;		/* Enable/disable bool */
+} wl_pkt_filter_enable_t;
+
+/* IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */
+typedef struct wl_pkt_filter_list {
+	uint32	num;		/* Number of installed packet filters */
+	wl_pkt_filter_t	filter[1];	/* Variable array of packet filters. */
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN	  OFFSETOF(wl_pkt_filter_list_t, filter)
+
+/* IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */
+typedef struct wl_pkt_filter_stats {
+	uint32	num_pkts_matched;	/* # filter matches for specified filter id */
+	uint32	num_pkts_forwarded;	/* # packets fwded from dongle to host for all filters */
+	uint32	num_pkts_discarded;	/* # packets discarded by dongle for all filters */
+} wl_pkt_filter_stats_t;
+
+/* IOVAR "pkt_filter_ports" parameter.  Configure TCP/UDP port filters. */
+typedef struct wl_pkt_filter_ports {
+	uint8 version;		/* Be proper */
+	uint8 reserved;		/* Be really proper */
+	uint16 count;		/* Number of ports following */
+	/* End of fixed data */
+	uint16 ports[1];	/* Placeholder for ports[<count>] */
+} wl_pkt_filter_ports_t;
+
+#define WL_PKT_FILTER_PORTS_FIXED_LEN	OFFSETOF(wl_pkt_filter_ports_t, ports)
+
+#define WL_PKT_FILTER_PORTS_VERSION	0
+#define WL_PKT_FILTER_PORTS_MAX		128
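+
+/* Example (illustrative): a "pkt_filter_ports" buffer carrying N ports
+ * (N <= WL_PKT_FILTER_PORTS_MAX) could be built as
+ *
+ *   ports->version = WL_PKT_FILTER_PORTS_VERSION;
+ *   ports->count   = N;
+ *   buf_len = WL_PKT_FILTER_PORTS_FIXED_LEN + N * sizeof(uint16);
+ */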
+
+#define RSN_KCK_LENGTH 16
+#define RSN_KEK_LENGTH 16
+#define RSN_REPLAY_LEN 8
+typedef struct _gtkrefresh {
+	uchar	KCK[RSN_KCK_LENGTH];
+	uchar	KEK[RSN_KEK_LENGTH];
+	uchar	ReplayCounter[RSN_REPLAY_LEN];
+} gtk_keyinfo_t, *pgtk_keyinfo_t;
+
+/* Sequential Commands ioctl */
+typedef struct wl_seq_cmd_ioctl {
+	uint32 cmd;		/* common ioctl definition */
+	uint32 len;		/* length of user buffer */
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES	4
+
+/* These are the set of get IOCTLs that should be allowed when using
+ * IOCTL sequence commands. These are issued implicitly by wl.exe each time
+ * it is invoked. We never want to buffer these, or else wl.exe will stop working.
+ */
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+	(((cmd) == WLC_GET_MAGIC)		|| \
+	 ((cmd) == WLC_GET_VERSION)		|| \
+	 ((cmd) == WLC_GET_AP)			|| \
+	 ((cmd) == WLC_GET_INSTANCE))
+
+typedef struct wl_pkteng {
+	uint32 flags;
+	uint32 delay;			/* Inter-packet delay */
+	uint32 nframes;			/* Number of frames */
+	uint32 length;			/* Packet length */
+	uint8  seqno;			/* Enable/disable sequence no. */
+	struct ether_addr dest;		/* Destination address */
+	struct ether_addr src;		/* Source address */
+} wl_pkteng_t;
+
+typedef struct wl_pkteng_stats {
+	uint32 lostfrmcnt;		/* RX PER test: no of frames lost (skip seqno) */
+	int32 rssi;			/* RSSI */
+	int32 snr;			/* signal to noise ratio */
+	uint16 rxpktcnt[NUM_80211_RATES+1];
+	uint8 rssi_qdb;			/* qdB portion of the computed rssi */
+} wl_pkteng_stats_t;
+
+typedef struct wl_txcal_params {
+	wl_pkteng_t pkteng;
+	uint8 gidx_start;
+	int8 gidx_step;
+	uint8 gidx_stop;
+} wl_txcal_params_t;
+
+
+typedef enum {
+	wowl_pattern_type_bitmap = 0,
+	wowl_pattern_type_arp,
+	wowl_pattern_type_na
+} wowl_pattern_type_t;
+
+typedef struct wl_wowl_pattern {
+	uint32		    masksize;		/* Size of the mask in #of bytes */
+	uint32		    offset;		/* Pattern byte offset in packet */
+	uint32		    patternoffset;	/* Offset of start of pattern in the structure */
+	uint32		    patternsize;	/* Size of the pattern itself in #of bytes */
+	uint32		    id;			/* id */
+	uint32		    reasonsize;		/* Size of the wakeup reason code */
+	wowl_pattern_type_t type;		/* Type of pattern */
+	/* Mask follows the structure above */
+	/* Pattern follows the mask is at 'patternoffset' from the start */
+} wl_wowl_pattern_t;
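+
+/* Layout sketch (illustrative, per the comments above): the mask immediately
+ * follows the fixed structure and the pattern starts at 'patternoffset', so a
+ * host filling in the structure would typically use
+ *
+ *   p->patternoffset = sizeof(wl_wowl_pattern_t) + p->masksize;
+ *   total_len        = sizeof(wl_wowl_pattern_t) + p->masksize + p->patternsize;
+ *
+ * Whether any alignment padding is required between mask and pattern is not
+ * stated here, so the expressions above are an assumption.
+ */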
+
+typedef struct wl_wowl_pattern_list {
+	uint			count;
+	wl_wowl_pattern_t	pattern[1];
+} wl_wowl_pattern_list_t;
+
+typedef struct wl_wowl_wakeind {
+	uint8	pci_wakeind;	/* Whether PCI PMECSR PMEStatus bit was set */
+	uint32	ucode_wakeind;	/* What wakeup-event indication was set by ucode */
+} wl_wowl_wakeind_t;
+
+typedef struct {
+	uint32		pktlen;		    /* size of packet */
+	void		*sdu;
+} tcp_keepalive_wake_pkt_infop_t;
+
+/* per AC rate control related data structure */
+typedef struct wl_txrate_class {
+	uint8		init_rate;
+	uint8		min_rate;
+	uint8		max_rate;
+} wl_txrate_class_t;
+
+/* structure for Overlap BSS scan arguments */
+typedef struct wl_obss_scan_arg {
+	int16	passive_dwell;
+	int16	active_dwell;
+	int16	bss_widthscan_interval;
+	int16	passive_total;
+	int16	active_total;
+	int16	chanwidth_transition_delay;
+	int16	activity_threshold;
+} wl_obss_scan_arg_t;
+
+#define WL_OBSS_SCAN_PARAM_LEN	sizeof(wl_obss_scan_arg_t)
+
+/* RSSI event notification configuration. */
+typedef struct wl_rssi_event {
+	uint32 rate_limit_msec;		/* # of events posted to application will be limited to
+					 * one per specified period (0 to disable rate limit).
+					 */
+	uint8 num_rssi_levels;		/* Number of entries in rssi_levels[] below */
+	int8 rssi_levels[MAX_RSSI_LEVELS];	/* Variable number of RSSI levels. An event
+						 * will be posted each time the RSSI of received
+						 * beacons/packets crosses a level.
+						 */
+} wl_rssi_event_t;
+
+typedef struct wl_action_obss_coex_req {
+	uint8 info;
+	uint8 num;
+	uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+
+/* IOVar parameter block for small MAC address array with type indicator */
+#define WL_IOV_MAC_PARAM_LEN  4
+
+#define WL_IOV_PKTQ_LOG_PRECS 16
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 num_addrs;
+	char   addr_type[WL_IOV_MAC_PARAM_LEN];
+	struct ether_addr ea[WL_IOV_MAC_PARAM_LEN];
+} BWL_POST_PACKED_STRUCT wl_iov_mac_params_t;
+
+/* This is extra info that follows wl_iov_mac_params_t */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 addr_info[WL_IOV_MAC_PARAM_LEN];
+} BWL_POST_PACKED_STRUCT wl_iov_mac_extra_params_t;
+
+/* Combined structure */
+typedef struct {
+	wl_iov_mac_params_t params;
+	wl_iov_mac_extra_params_t extra_params;
+} wl_iov_mac_full_params_t;
+
+/* Parameter block for PKTQ_LOG statistics */
+#define PKTQ_LOG_COUNTERS_V4 \
+	/* packets requested to be stored */ \
+	uint32 requested; \
+	/* packets stored */ \
+	uint32 stored; \
+	/* packets saved, because a lowest priority queue has given away one packet */ \
+	uint32 saved; \
+	/* packets saved, because an older packet from the same queue has been dropped */ \
+	uint32 selfsaved; \
+	/* packets dropped, because pktq is full with higher precedence packets */ \
+	uint32 full_dropped; \
+	 /* packets dropped because pktq per that precedence is full */ \
+	uint32 dropped; \
+	/* packets dropped, in order to save one from a queue of a highest priority */ \
+	uint32 sacrificed; \
+	/* packets dropped because of hardware/transmission error */ \
+	uint32 busy; \
+	/* packets re-sent because they were not received */ \
+	uint32 retry; \
+	/* packets retried again (ps pretend) prior to moving to power save mode */ \
+	uint32 ps_retry; \
+	 /* suppressed packet count */ \
+	uint32 suppress; \
+	/* packets finally dropped after retry limit */ \
+	uint32 retry_drop; \
+	/* the high-water mark of the queue capacity for packets - goes to zero as queue fills */ \
+	uint32 max_avail; \
+	/* the high-water mark of the queue utilisation for packets - ('inverse' of max_avail) */ \
+	uint32 max_used; \
+	 /* the maximum capacity of the queue */ \
+	uint32 queue_capacity; \
+	/* count of rts attempts that failed to receive cts */ \
+	uint32 rtsfail; \
+	/* count of packets sent (acked) successfully */ \
+	uint32 acked; \
+	/* running total of phy rate of packets sent successfully */ \
+	uint32 txrate_succ; \
+	/* running total of phy 'main' rate */ \
+	uint32 txrate_main; \
+	/* actual data transferred successfully */ \
+	uint32 throughput; \
+	/* time difference since last pktq_stats */ \
+	uint32 time_delta;
+
+typedef struct {
+	PKTQ_LOG_COUNTERS_V4
+} pktq_log_counters_v04_t;
+
+/* v5 is the same as V4 with extra parameter */
+typedef struct {
+	PKTQ_LOG_COUNTERS_V4
+	/* cumulative time to transmit */
+	uint32 airtime;
+} pktq_log_counters_v05_t;
+
+typedef struct {
+	uint8                num_prec[WL_IOV_MAC_PARAM_LEN];
+	pktq_log_counters_v04_t  counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+	uint32               counter_info[WL_IOV_MAC_PARAM_LEN];
+	uint32               pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
+	char                 headings[1];
+} pktq_log_format_v04_t;
+
+typedef struct {
+	uint8                num_prec[WL_IOV_MAC_PARAM_LEN];
+	pktq_log_counters_v05_t  counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+	uint32               counter_info[WL_IOV_MAC_PARAM_LEN];
+	uint32               pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
+	char                 headings[1];
+} pktq_log_format_v05_t;
+
+
+typedef struct {
+	uint32               version;
+	wl_iov_mac_params_t  params;
+	union {
+		pktq_log_format_v04_t v04;
+		pktq_log_format_v05_t v05;
+	} pktq_log;
+} wl_iov_pktq_log_t;
+
+/* PKTQ_LOG_AUTO, PKTQ_LOG_DEF_PREC flags introduced in v05, they are ignored by v04 */
+#define PKTQ_LOG_AUTO     (1 << 31)
+#define PKTQ_LOG_DEF_PREC (1 << 30)
+
+/*
+ * SCB_BS_DATA iovar definitions start.
+ */
+#define SCB_BS_DATA_STRUCT_VERSION	1
+
+/* The actual counters maintained for each station */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	/* The following counters are a subset of what pktq_stats provides per precedence. */
+	uint32 retry;          /* packets re-sent because they were not received */
+	uint32 retry_drop;     /* packets finally dropped after retry limit */
+	uint32 rtsfail;        /* count of rts attempts that failed to receive cts */
+	uint32 acked;          /* count of packets sent (acked) successfully */
+	uint32 txrate_succ;    /* running total of phy rate of packets sent successfully */
+	uint32 txrate_main;    /* running total of phy 'main' rate */
+	uint32 throughput;     /* actual data transferred successfully */
+	uint32 time_delta;     /* time difference since last pktq_stats */
+	uint32 airtime;        /* cumulative total medium access delay in useconds */
+} BWL_POST_PACKED_STRUCT iov_bs_data_counters_t;
+
+/* The structure for individual station information. */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ether_addr	station_address;	/* The station MAC address */
+	uint16			station_flags;		/* Bit mask of flags, for future use. */
+	iov_bs_data_counters_t	station_counters;	/* The actual counter values */
+} BWL_POST_PACKED_STRUCT iov_bs_data_record_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16	structure_version;	/* Structure version number (for wl/wlu matching) */
+	uint16	structure_count;	/* Number of iov_bs_data_record_t records following */
+	iov_bs_data_record_t	structure_record[1];	/* 0 - structure_count records */
+} BWL_POST_PACKED_STRUCT iov_bs_data_struct_t;
+
+/* Bitmask of options that can be passed in to the iovar. */
+enum {
+	SCB_BS_DATA_FLAG_NO_RESET = (1<<0)	/* Do not clear the counters after reading */
+};
+/*
+ * SCB_BS_DATA iovar definitions end.
+ */
+
+typedef struct wlc_extlog_cfg {
+	int max_number;
+	uint16 module;	/* bitmap */
+	uint8 level;
+	uint8 flag;
+	uint16 version;
+} wlc_extlog_cfg_t;
+
+typedef struct log_record {
+	uint32 time;
+	uint16 module;
+	uint16 id;
+	uint8 level;
+	uint8 sub_unit;
+	uint8 seq_num;
+	int32 arg;
+	char str[MAX_ARGSTR_LEN];
+} log_record_t;
+
+typedef struct wlc_extlog_req {
+	uint32 from_last;
+	uint32 num;
+} wlc_extlog_req_t;
+
+typedef struct wlc_extlog_results {
+	uint16 version;
+	uint16 record_len;
+	uint32 num;
+	log_record_t logs[1];
+} wlc_extlog_results_t;
+
+typedef struct log_idstr {
+	uint16	id;
+	uint16	flag;
+	uint8	arg_type;
+	const char	*fmt_str;
+} log_idstr_t;
+
+#define FMTSTRF_USER		1
+
+/* flat ID definitions
+ * New definitions HAVE TO BE ADDED at the end of the table. Otherwise, it will
+ * affect backward compatibility with pre-existing apps
+ */
+typedef enum {
+	FMTSTR_DRIVER_UP_ID = 0,
+	FMTSTR_DRIVER_DOWN_ID = 1,
+	FMTSTR_SUSPEND_MAC_FAIL_ID = 2,
+	FMTSTR_NO_PROGRESS_ID = 3,
+	FMTSTR_RFDISABLE_ID = 4,
+	FMTSTR_REG_PRINT_ID = 5,
+	FMTSTR_EXPTIME_ID = 6,
+	FMTSTR_JOIN_START_ID = 7,
+	FMTSTR_JOIN_COMPLETE_ID = 8,
+	FMTSTR_NO_NETWORKS_ID = 9,
+	FMTSTR_SECURITY_MISMATCH_ID = 10,
+	FMTSTR_RATE_MISMATCH_ID = 11,
+	FMTSTR_AP_PRUNED_ID = 12,
+	FMTSTR_KEY_INSERTED_ID = 13,
+	FMTSTR_DEAUTH_ID = 14,
+	FMTSTR_DISASSOC_ID = 15,
+	FMTSTR_LINK_UP_ID = 16,
+	FMTSTR_LINK_DOWN_ID = 17,
+	FMTSTR_RADIO_HW_OFF_ID = 18,
+	FMTSTR_RADIO_HW_ON_ID = 19,
+	FMTSTR_EVENT_DESC_ID = 20,
+	FMTSTR_PNP_SET_POWER_ID = 21,
+	FMTSTR_RADIO_SW_OFF_ID = 22,
+	FMTSTR_RADIO_SW_ON_ID = 23,
+	FMTSTR_PWD_MISMATCH_ID = 24,
+	FMTSTR_FATAL_ERROR_ID = 25,
+	FMTSTR_AUTH_FAIL_ID = 26,
+	FMTSTR_ASSOC_FAIL_ID = 27,
+	FMTSTR_IBSS_FAIL_ID = 28,
+	FMTSTR_EXTAP_FAIL_ID = 29,
+	FMTSTR_MAX_ID
+} log_fmtstr_id_t;
+
+#ifdef DONGLEOVERLAYS
+typedef struct {
+	uint32 flags_idx;	/* lower 8 bits: overlay index; upper 24 bits: flags */
+	uint32 offset;		/* offset into overlay region to write code */
+	uint32 len;			/* overlay code len */
+	/* overlay code follows this struct */
+} wl_ioctl_overlay_t;
+#endif /* DONGLEOVERLAYS */
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* 11k Neighbor Report element */
+typedef struct nbr_element {
+	uint8 id;
+	uint8 len;
+	struct ether_addr bssid;
+	uint32 bssid_info;
+	uint8 reg;
+	uint8 channel;
+	uint8 phytype;
+	uint8 pad;
+} nbr_element_t;
+
+
+typedef enum event_msgs_ext_command {
+	EVENTMSGS_NONE		=	0,
+	EVENTMSGS_SET_BIT	=	1,
+	EVENTMSGS_RESET_BIT	=	2,
+	EVENTMSGS_SET_MASK	=	3
+} event_msgs_ext_command_t;
+
+#define EVENTMSGS_VER 1
+#define EVENTMSGS_EXT_STRUCT_SIZE	OFFSETOF(eventmsgs_ext_t, mask[0])
+
+/* len - for SET, the mask size passed from the application to the firmware;
+ *       for GET, the actual firmware mask size.
+ * maxgetsize - used only for GET; indicates the max mask size that the
+ *              application can read from the firmware.
+ */
+typedef struct eventmsgs_ext
+{
+	uint8	ver;
+	uint8	command;
+	uint8	len;
+	uint8	maxgetsize;
+	uint8	mask[1];
+} eventmsgs_ext_t;
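+
+/* Usage sketch (illustrative, based on the comments above): to set one event
+ * bit, the host would send a buffer of EVENTMSGS_EXT_STRUCT_SIZE + mask_len
+ * bytes with
+ *
+ *   msgs->ver     = EVENTMSGS_VER;
+ *   msgs->command = EVENTMSGS_SET_BIT;
+ *   msgs->len     = mask_len;
+ *   msgs->mask[event_id / 8] |= 1 << (event_id % 8);
+ *
+ * The per-byte bit ordering of the mask is an assumption, not documented in
+ * this header.
+ */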
+
+typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_params {
+	/* no of host dma descriptors programmed by the firmware before a commit */
+	uint16		max_dma_descriptors;
+
+	uint16		host_buf_len; /* length of host buffer */
+	dmaaddr_t	host_buf_addr; /* physical address for bus_throughput_buf */
+} BWL_POST_PACKED_STRUCT pcie_bus_tput_params_t;
+typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_stats {
+	uint16		time_taken; /* no of secs the test is run */
+	uint16		nbytes_per_descriptor; /* no of bytes of data DMA'ed per descriptor */
+
+	/* no of descriptors for which dma is successfully completed within the test time */
+	uint32		count;
+} BWL_POST_PACKED_STRUCT pcie_bus_tput_stats_t;
+
+/* no default structure packing */
+#include <packed_section_end.h>
+
+typedef struct keepalives_max_idle {
+	uint16  keepalive_count;        /* number of keepalives per bss_max_idle period */
+	uint8   mkeepalive_index;       /* mkeepalive_index for keepalive frame to be used */
+	uint8   PAD;			/* to align next field */
+	uint16  max_interval;           /* seconds */
+} keepalives_max_idle_t;
+
+#define PM_IGNORE_BCMC_PROXY_ARP (1 << 0)
+#define PM_IGNORE_BCMC_ALL_DMS_ACCEPTED (1 << 1)
+
+/* require strict packing */
+#include <packed_section_start.h>
+
+/* ##### Power Stats section ##### */
+
+#define WL_PWRSTATS_VERSION	2
+
+/* Input structure for pwrstats IOVAR */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats_query {
+	uint16 length;		/* Number of entries in type array. */
+	uint16 type[1];		/* Types (tags) to retrieve.
+				 * Length 0 (no types) means get all.
+				 */
+} BWL_POST_PACKED_STRUCT wl_pwrstats_query_t;
+
+/* This structure is for version 2; version 1 will be deprecated by FW */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats {
+	uint16 version;		      /* Version = 2 is TLV format */
+	uint16 length;		      /* Length of entire structure */
+	uint8 data[1];		      /* TLV data, a series of structures,
+				       * each starting with type and length.
+				       *
+				       * Padded as necessary so each section
+				       * starts on a 4-byte boundary.
+				       *
+				       * Both type and len are uint16, but the
+				       * upper nibble of length is reserved so
+				       * valid len values are 0-4095.
+				       */
+} BWL_POST_PACKED_STRUCT wl_pwrstats_t;
+#define WL_PWR_STATS_HDRLEN	OFFSETOF(wl_pwrstats_t, data)
+
+/* Type values for the data section */
+#define WL_PWRSTATS_TYPE_PHY		0 /* struct wl_pwr_phy_stats */
+#define WL_PWRSTATS_TYPE_SCAN		1 /* struct wl_pwr_scan_stats */
+#define WL_PWRSTATS_TYPE_USB_HSIC	2 /* struct wl_pwr_usb_hsic_stats */
+#define WL_PWRSTATS_TYPE_PM_AWAKE	3 /* struct wl_pwr_pm_awake_stats */
+#define WL_PWRSTATS_TYPE_CONNECTION	4 /* struct wl_pwr_connect_stats; assoc and key-exch time */
+#define WL_PWRSTATS_TYPE_PCIE		6 /* struct wl_pwr_pcie_stats */
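+
+/* Parsing sketch (illustrative): the data[] area is a sequence of sections,
+ * each beginning with uint16 type and uint16 len and padded so the next
+ * section starts on a 4-byte boundary, e.g.
+ *
+ *   uint8 *p   = pwrstats->data;
+ *   uint8 *end = (uint8 *)pwrstats + pwrstats->length;
+ *   while (p + 4 <= end) {
+ *       uint16 type = ((uint16 *)p)[0];
+ *       uint16 len  = ((uint16 *)p)[1] & 0x0fff;
+ *       ...dispatch on type (WL_PWRSTATS_TYPE_*)...
+ *       p += (len + 3) & ~3u;
+ *   }
+ *
+ * This assumes 'len' covers the entire section including its type/len header;
+ * if it covers only the payload, the advance would instead need to add the
+ * 4-byte header. That detail is not spelled out in this header.
+ */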
+
+/* Bits for wake reasons */
+#define WLC_PMD_WAKE_SET		0x1
+#define WLC_PMD_PM_AWAKE_BCN		0x2
+#define WLC_PMD_BTA_ACTIVE		0x4
+#define WLC_PMD_SCAN_IN_PROGRESS	0x8
+#define WLC_PMD_RM_IN_PROGRESS		0x10
+#define WLC_PMD_AS_IN_PROGRESS		0x20
+#define WLC_PMD_PM_PEND			0x40
+#define WLC_PMD_PS_POLL			0x80
+#define WLC_PMD_CHK_UNALIGN_TBTT	0x100
+#define WLC_PMD_APSD_STA_UP		0x200
+#define WLC_PMD_TX_PEND_WAR		0x400
+#define WLC_PMD_GPTIMER_STAY_AWAKE	0x800
+#define WLC_PMD_PM2_RADIO_SOFF_PEND	0x2000
+#define WLC_PMD_NON_PRIM_STA_UP		0x4000
+#define WLC_PMD_AP_UP			0x8000
+
+typedef BWL_PRE_PACKED_STRUCT struct wlc_pm_debug {
+	uint32 timestamp;	     /* timestamp in millisecond */
+	uint32 reason;		     /* reason(s) for staying awake */
+} BWL_POST_PACKED_STRUCT wlc_pm_debug_t;
+
+/* Data sent as part of pwrstats IOVAR */
+typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data {
+	uint32 curr_time;	/* ms */
+	uint32 hw_macc;		/* HW maccontrol */
+	uint32 sw_macc;		/* SW maccontrol */
+	uint32 pm_dur;		/* Total sleep time in PM, usecs */
+	uint32 mpc_dur;		/* Total sleep time in MPC, usecs */
+
+	/* int32 drifts = remote - local; +ve drift => local-clk slow */
+	int32 last_drift;	/* Most recent TSF drift from beacon */
+	int32 min_drift;	/* Min TSF drift from beacon in magnitude */
+	int32 max_drift;	/* Max TSF drift from beacon in magnitude */
+
+	uint32 avg_drift;	/* Avg TSF drift from beacon */
+
+	/* Wake history tracking */
+
+	/* pmstate array (type wlc_pm_debug_t) start offset */
+	uint16 pm_state_offset;
+	/* pmstate number of array entries */
+	uint16 pm_state_len;
+
+	/* array (type uint32) start offset */
+	uint16 pmd_event_wake_dur_offset;
+	/* pmd_event_wake_dur number of array entries */
+	uint16 pmd_event_wake_dur_len;
+
+	uint32 drift_cnt;	/* Count of drift readings over which avg_drift was computed */
+	uint8  pmwake_idx;	/* for stepping through pm_state */
+	uint8  pad[3];
+	uint32 frts_time;	/* Cumulative ms spent in frts since driver load */
+	uint32 frts_end_cnt;	/* No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT pm_awake_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_PM_AWAKE */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	pm_awake_data_t awake_data;
+} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_t;
+
+/* Original bus structure is for HSIC */
+typedef BWL_PRE_PACKED_STRUCT struct bus_metrics {
+	uint32 suspend_ct;	/* suspend count */
+	uint32 resume_ct;	/* resume count */
+	uint32 disconnect_ct;	/* disconnect count */
+	uint32 reconnect_ct;	/* reconnect count */
+	uint32 active_dur;	/* msecs in bus, usecs for user */
+	uint32 suspend_dur;	/* msecs in bus, usecs for user */
+	uint32 disconnect_dur;	/* msecs in bus, usecs for user */
+} BWL_POST_PACKED_STRUCT bus_metrics_t;
+
+/* Bus interface info for USB/HSIC */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_usb_hsic_stats {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_USB_HSIC */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	bus_metrics_t hsic;	/* stats from hsic bus driver */
+} BWL_POST_PACKED_STRUCT wl_pwr_usb_hsic_stats_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_metrics {
+	uint32 d3_suspend_ct;	/* suspend count */
+	uint32 d0_resume_ct;	/* resume count */
+	uint32 perst_assrt_ct;	/* PERST# assert count */
+	uint32 perst_deassrt_ct;	/* PERST# de-assert count */
+	uint32 active_dur;	/* msecs */
+	uint32 d3_suspend_dur;	/* msecs */
+	uint32 perst_dur;	/* msecs */
+	uint32 l0_cnt;		/* L0 entry count */
+	uint32 l0_usecs;	/* L0 duration in usecs */
+	uint32 l1_cnt;		/* L1 entry count */
+	uint32 l1_usecs;	/* L1 duration in usecs */
+	uint32 l1_1_cnt;	/* L1_1ss entry count */
+	uint32 l1_1_usecs;	/* L1_1ss duration in usecs */
+	uint32 l1_2_cnt;	/* L1_2ss entry count */
+	uint32 l1_2_usecs;	/* L1_2ss duration in usecs */
+	uint32 l2_cnt;		/* L2 entry count */
+	uint32 l2_usecs;	/* L2 duration in usecs */
+} BWL_POST_PACKED_STRUCT pcie_bus_metrics_t;
+
+/* Bus interface info for PCIE */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pcie_stats {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_PCIE */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+	pcie_bus_metrics_t pcie;	/* stats from pcie bus driver */
+} BWL_POST_PACKED_STRUCT wl_pwr_pcie_stats_t;
+
+/* Scan information history per category */
+typedef BWL_PRE_PACKED_STRUCT struct scan_data {
+	uint32 count;		/* Number of scans performed */
+	uint32 dur;		/* Total time (in us) used */
+} BWL_POST_PACKED_STRUCT scan_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_scan_stats {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_SCAN */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	/* Scan history */
+	scan_data_t user_scans;	  /* User-requested scans: (i/e/p)scan */
+	scan_data_t assoc_scans;  /* Scans initiated by association requests */
+	scan_data_t roam_scans;	  /* Scans initiated by the roam engine */
+	scan_data_t pno_scans[8]; /* For future PNO bucketing (BSSID, SSID, etc) */
+	scan_data_t other_scans;  /* Scan engine usage not assigned to the above */
+} BWL_POST_PACKED_STRUCT wl_pwr_scan_stats_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_connect_stats {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_SCAN */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	/* Connection (Association + Key exchange) data */
+	uint32 count;	/* Number of connections performed */
+	uint32 dur;		/* Total time (in ms) used */
+} BWL_POST_PACKED_STRUCT wl_pwr_connect_stats_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_phy_stats {
+	uint16 type;	    /* WL_PWRSTATS_TYPE_PHY */
+	uint16 len;	    /* Up to 4K-1, top 4 bits are reserved */
+	uint32 tx_dur;	    /* TX Active duration in us */
+	uint32 rx_dur;	    /* RX Active duration in us */
+} BWL_POST_PACKED_STRUCT wl_pwr_phy_stats_t;
+
+
+/* ##### End of Power Stats section ##### */
+
+/* IPV4 Arp offloads for ndis context */
+BWL_PRE_PACKED_STRUCT struct hostip_id {
+	struct ipv4_addr ipa;
+	uint8 id;
+} BWL_POST_PACKED_STRUCT;
+
+#if 0 && (NDISVER >= 0x0600)
+/* Return values */
+#define ND_REPLY_PEER		0x1	/* Reply was sent to service NS request from peer */
+#define ND_REQ_SINK			0x2	/* Input packet should be discarded */
+#define ND_FORCE_FORWARD	0x3	/* For the dongle to forward req to HOST */
+
+
+/* Neighbor Solicitation Response Offload IOVAR param */
+typedef BWL_PRE_PACKED_STRUCT struct nd_param {
+	struct ipv6_addr	host_ip[2];
+	struct ipv6_addr	solicit_ip;
+	struct ipv6_addr	remote_ip;
+	uint8	host_mac[ETHER_ADDR_LEN];
+	uint32	offload_id;
+} BWL_POST_PACKED_STRUCT nd_param_t;
+#endif 
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pfn_roam_thresh {
+	uint32 pfn_alert_thresh; /* time in ms */
+	uint32 roam_alert_thresh; /* time in ms */
+} BWL_POST_PACKED_STRUCT wl_pfn_roam_thresh_t;
+
+
+/* Reasons for wl_pmalert_t */
+#define PM_DUR_EXCEEDED			(1<<0)
+#define MPC_DUR_EXCEEDED		(1<<1)
+#define ROAM_ALERT_THRESH_EXCEEDED	(1<<2)
+#define PFN_ALERT_THRESH_EXCEEDED	(1<<3)
+#define CONST_AWAKE_DUR_ALERT		(1<<4)
+#define CONST_AWAKE_DUR_RECOVERY	(1<<5)
+
+#define MIN_PM_ALERT_LEN 9
+
+/* Data sent in EXCESS_PM_WAKE event */
+#define WL_PM_ALERT_VERSION 3
+
+#define MAX_P2P_BSS_DTIM_PRD 4
+
+/* This structure is for version 3; version 2 will be deprecated by FW */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert {
+	uint16 version;		/* Version = 3 is TLV format */
+	uint16 length;		/* Length of entire structure */
+	uint32 reasons;		/* reason(s) for pm_alert */
+	uint8 data[1];		/* TLV data, a series of structures,
+				 * each starting with type and length.
+				 *
+				 * Padded as necessary so each section
+				 * starts on a 4-byte boundary.
+				 *
+				 * Both type and len are uint16, but the
+				 * upper nibble of length is reserved so
+				 * valid len values are 0-4095.
+				*/
+} BWL_POST_PACKED_STRUCT wl_pmalert_t;
+
+/* Type values for the data section */
+#define WL_PMALERT_FIXED	0 /* struct wl_pmalert_fixed_t, fixed fields */
+#define WL_PMALERT_PMSTATE	1 /* struct wl_pmalert_pmstate_t, variable */
+#define WL_PMALERT_EVENT_DUR	2 /* struct wl_pmalert_event_dur_t, variable */
+#define WL_PMALERT_UCODE_DBG	3 /* struct wl_pmalert_ucode_dbg_t, variable */
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_fixed {
+	uint16 type;	     /* WL_PMALERT_FIXED */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+	uint32 prev_stats_time;	/* msecs */
+	uint32 curr_time;	/* ms */
+	uint32 prev_pm_dur;	/* usecs */
+	uint32 pm_dur;		/* Total sleep time in PM, usecs */
+	uint32 prev_mpc_dur;	/* usecs */
+	uint32 mpc_dur;		/* Total sleep time in MPC, usecs */
+	uint32 hw_macc;		/* HW maccontrol */
+	uint32 sw_macc;		/* SW maccontrol */
+
+	/* int32 drifts = remote - local; +ve drift -> local-clk slow */
+	int32 last_drift;	/* Most recent TSF drift from beacon */
+	int32 min_drift;	/* Min TSF drift from beacon in magnitude */
+	int32 max_drift;	/* Max TSF drift from beacon in magnitude */
+
+	uint32 avg_drift;	/* Avg TSF drift from beacon */
+	uint32 drift_cnt;	/* Count of drift readings over which avg_drift was computed */
+	uint32 frts_time;	/* Cumulative ms spent in frts since driver load */
+	uint32 frts_end_cnt;	/* No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT wl_pmalert_fixed_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_pmstate {
+	uint16 type;	     /* WL_PMALERT_PMSTATE */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	uint8 pmwake_idx;   /* for stepping through pm_state */
+	uint8 pad[3];
+	/* Array of pmstate; len of array is based on tlv len */
+	wlc_pm_debug_t pmstate[1];
+} BWL_POST_PACKED_STRUCT wl_pmalert_pmstate_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_event_dur {
+	uint16 type;	     /* WL_PMALERT_EVENT_DUR */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	/* Array of event_dur, len of array is based on tlv len */
+	uint32 event_dur[1];
+} BWL_POST_PACKED_STRUCT wl_pmalert_event_dur_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg {
+	uint16 type;	     /* WL_PMALERT_UCODE_DBG */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+	uint32 macctrl;
+	uint16 m_p2p_hps;
+	uint32 psm_brc;
+	uint32 ifsstat;
+	uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD];
+	uint32 psmdebug[20];
+	uint32 phydebug[20];
+} BWL_POST_PACKED_STRUCT wl_pmalert_ucode_dbg_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+/* Structures and constants used for "vndr_ie" IOVar interface */
+#define VNDR_IE_CMD_LEN		4	/* length of the set command string:
+					 * "add", "del" (+ NUL)
+					 */
+
+#define VNDR_IE_INFO_HDR_LEN	(sizeof(uint32))
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;			/* bitmask indicating which packet(s) contain this IE */
+	vndr_ie_t vndr_ie_data;		/* vendor IE data */
+} BWL_POST_PACKED_STRUCT vndr_ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int iecount;			/* number of entries in the vndr_ie_list[] array */
+	vndr_ie_info_t vndr_ie_list[1];	/* variable size list of vndr_ie_info_t structs */
+} BWL_POST_PACKED_STRUCT vndr_ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char cmd[VNDR_IE_CMD_LEN];	/* vndr_ie IOVar set command : "add", "del" + NUL */
+	vndr_ie_buf_t vndr_ie_buffer;	/* buffer containing Vendor IE list information */
+} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t;
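+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): filling a
+ * vndr_ie_setbuf_t for an "add" request with one pre-built vendor IE.
+ * 'raw_ie'/'raw_ie_len' are hypothetical caller-supplied bytes holding the
+ * complete IE; the caller is assumed to have allocated the setbuf large
+ * enough for the IE to extend past vndr_ie_data.
+ */
+#if 0	/* example only */
+static void
+vndr_ie_setbuf_fill_example(vndr_ie_setbuf_t *setbuf, uint32 pktflag,
+	const uint8 *raw_ie, int raw_ie_len)
+{
+	int i;
+	uint8 *dst = (uint8 *)&setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data;
+
+	setbuf->cmd[0] = 'a';	/* "add" + NUL fits in VNDR_IE_CMD_LEN */
+	setbuf->cmd[1] = 'd';
+	setbuf->cmd[2] = 'd';
+	setbuf->cmd[3] = '\0';
+	setbuf->vndr_ie_buffer.iecount = 1;
+	setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag = pktflag;
+
+	for (i = 0; i < raw_ie_len; i++)
+		dst[i] = raw_ie[i];	/* copy the pre-built vendor IE in place */
+}
+#endif	/* example only */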
+
+/* tag_ID/length/value_buffer tuple */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT tlv_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;			/* bitmask indicating which packet(s) contain this IE */
+	tlv_t ie_data;		/* IE data */
+} BWL_POST_PACKED_STRUCT ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int iecount;			/* number of entries in the ie_list[] array */
+	ie_info_t ie_list[1];	/* variable size list of ie_info_t structs */
+} BWL_POST_PACKED_STRUCT ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char cmd[VNDR_IE_CMD_LEN];	/* ie IOVar set command : "add" + NUL */
+	ie_buf_t ie_buffer;	/* buffer containing IE list information */
+} BWL_POST_PACKED_STRUCT ie_setbuf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;		/* bitmask indicating which packet(s) contain this IE */
+	uint8 id;		/* IE type */
+} BWL_POST_PACKED_STRUCT ie_getbuf_t;
+
+/* structures used to define format of wps ie data from probe requests */
+/* passed up to applications via iovar "prbreq_wpsie" */
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr {
+	struct ether_addr staAddr;
+	uint16 ieLen;
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data {
+	sta_prbreq_wps_ie_hdr_t hdr;
+	uint8 ieData[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list {
+	uint32 totLen;
+	uint8 ieDataList[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t;
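+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): walking the
+ * records returned by the "prbreq_wpsie" iovar.  It assumes totLen counts the
+ * bytes in ieDataList[] and that each record is a sta_prbreq_wps_ie_data_t
+ * (header followed by ieLen bytes of IE data) packed back to back.
+ */
+#if 0	/* example only */
+static void
+prbreq_wpsie_walk_example(const sta_prbreq_wps_ie_list_t *list)
+{
+	const uint8 *p = list->ieDataList;
+	const uint8 *end = p + list->totLen;
+
+	while (p + sizeof(sta_prbreq_wps_ie_hdr_t) <= end) {
+		const sta_prbreq_wps_ie_data_t *rec = (const sta_prbreq_wps_ie_data_t *)p;
+
+		/* rec->hdr.staAddr identifies the STA, rec->ieData holds hdr.ieLen bytes */
+		p += sizeof(sta_prbreq_wps_ie_hdr_t) + rec->hdr.ieLen;
+	}
+}
+#endif	/* example only */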
+
+
+#ifdef WLMEDIA_TXFAILEVENT
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char   dest[ETHER_ADDR_LEN]; /* destination MAC */
+	uint8  prio;            /* Packet Priority */
+	uint8  flags;           /* Flags           */
+	uint32 tsf_l;           /* TSF timer low   */
+	uint32 tsf_h;           /* TSF timer high  */
+	uint16 rates;           /* Main Rates      */
+	uint16 txstatus;        /* TX Status       */
+} BWL_POST_PACKED_STRUCT txfailinfo_t;
+#endif /* WLMEDIA_TXFAILEVENT */
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 flags;
+	chanspec_t chanspec;			/* txpwr report for this channel */
+	chanspec_t local_chanspec;		/* channel on which we are associated */
+	uint8 local_max;			/* local max according to the AP */
+	uint8 local_constraint;			/* local constraint according to the AP */
+	int8  antgain[2];			/* Ant gain for each band - from SROM */
+	uint8 rf_cores;				/* count of RF Cores being reported */
+	uint8 est_Pout[4];			/* Latest tx power out estimate per RF chain */
+	uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain w/o adjustment */
+	uint8 est_Pout_cck;			/* Latest CCK tx power out estimate */
+	uint8 tx_power_max[4];		/* Maximum target power among all rates */
+	uint tx_power_max_rate_ind[4];		/* Index of the rate with the max target power */
+	int8 sar;					/* SAR limit for display by wl executable */
+	int8 channel_bandwidth;		/* 20, 40 or 80 MHz bandwidth? */
+	uint8 version;				/* Version of the data format wlu <--> driver */
+	uint8 display_core;			/* Displayed curpower core */
+	int8 target_offsets[4];		/* Target power offsets for current rate per core */
+	uint32 last_tx_ratespec;	/* Ratespec for last transmission */
+	uint   user_target;		/* user limit */
+	uint32 ppr_len;		/* length of each ppr serialization buffer */
+	int8 SARLIMIT[MAX_STREAMS_SUPPORTED];
+	uint8  pprdata[1];		/* ppr serialization buffer */
+} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ipv4_addr	ipv4_addr;
+	struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT ibss_route_entry_t;
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 num_entry;
+	ibss_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT ibss_route_tbl_t;
+
+#define MAX_IBSS_ROUTE_TBL_ENTRY	64
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define TXPWR_TARGET_VERSION  0
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int32 version;		/* version number */
+	chanspec_t chanspec;	/* txpwr report for this channel */
+	int8 txpwr[WL_STA_ANT_MAX]; /* Max tx target power, in qdb */
+	uint8 rf_cores;		/* count of RF Cores being reported */
+} BWL_POST_PACKED_STRUCT txpwr_target_max_t;
+
+#define BSS_PEER_INFO_PARAM_CUR_VER	0
+/* Input structure for IOV_BSS_PEER_INFO */
+typedef BWL_PRE_PACKED_STRUCT	struct {
+	uint16			version;
+	struct	ether_addr ea;	/* peer MAC address */
+} BWL_POST_PACKED_STRUCT bss_peer_info_param_t;
+
+#define BSS_PEER_INFO_CUR_VER		0
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16			version;
+	struct ether_addr	ea;
+	int32			rssi;
+	uint32			tx_rate;	/* current tx rate */
+	uint32			rx_rate;	/* current rx rate */
+	wl_rateset_t		rateset;	/* rateset in use */
+	uint32			age;		/* age in seconds */
+} BWL_POST_PACKED_STRUCT bss_peer_info_t;
+
+#define BSS_PEER_LIST_INFO_CUR_VER	0
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16			version;
+	uint16			bss_peer_info_len;	/* length of bss_peer_info_t */
+	uint32			count;			/* number of peer info */
+	bss_peer_info_t		peer_info[1];		/* peer info */
+} BWL_POST_PACKED_STRUCT bss_peer_list_info_t;
+
+#define BSS_PEER_LIST_INFO_FIXED_LEN OFFSETOF(bss_peer_list_info_t, peer_info)
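+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): iterating the
+ * peers returned for IOV_BSS_PEER_INFO, stepping by the bss_peer_info_len the
+ * driver reports so the walk stays correct if the record ever grows.
+ */
+#if 0	/* example only */
+static void
+bss_peer_list_walk_example(const bss_peer_list_info_t *list)
+{
+	uint32 i;
+	const uint8 *p = (const uint8 *)list->peer_info;
+
+	for (i = 0; i < list->count; i++) {
+		const bss_peer_info_t *peer = (const bss_peer_info_t *)p;
+
+		/* peer->ea, peer->rssi, peer->tx_rate, peer->rx_rate, peer->age, ... */
+		(void)peer;
+		p += list->bss_peer_info_len;
+	}
+}
+#endif	/* example only */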
+
+#define AIBSS_BCN_FORCE_CONFIG_VER_0	0
+
+/* structure used to configure AIBSS beacon force xmit */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16  version;
+	uint16	len;
+	uint32 initial_min_bcn_dur;	/* dur in ms to check a bcn in bcn_flood period */
+	uint32 min_bcn_dur;	/* dur in ms to check a bcn after bcn_flood period */
+	uint32 bcn_flood_dur; /* Initial bcn xmit period in ms */
+} BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t;
+
+#define AIBSS_TXFAIL_CONFIG_VER_0    0
+
+/* structure used to configure aibss tx fail event */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16  version;
+	uint16  len;
+	uint32 bcn_timeout;     /* dur in seconds to receive 1 bcn */
+	uint32 max_tx_retry;     /* no of consecutive no acks to send txfail event */
+} BWL_POST_PACKED_STRUCT aibss_txfail_config_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if {
+	uint16 version;
+	uint16 len;
+	uint32 flags;
+	struct ether_addr addr;
+	chanspec_t chspec;
+} BWL_POST_PACKED_STRUCT wl_aibss_if_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_entry {
+	struct ipv4_addr ip_addr;
+	struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT wlc_ipfo_route_entry_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl {
+	uint32 num_entry;
+	wlc_ipfo_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT wlc_ipfo_route_tbl_t;
+
+#define WL_IPFO_ROUTE_TBL_FIXED_LEN 4
+#define WL_MAX_IPFO_ROUTE_TBL_ENTRY	64
+
+/* no strict structure packing */
+#include <packed_section_end.h>
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+	/* Global ASSERT Logging */
+#define ASSERTLOG_CUR_VER	0x0100
+#define MAX_ASSRTSTR_LEN	64
+
+	typedef struct assert_record {
+		uint32 time;
+		uint8 seq_num;
+		char str[MAX_ASSRTSTR_LEN];
+	} assert_record_t;
+
+	typedef struct assertlog_results {
+		uint16 version;
+		uint16 record_len;
+		uint32 num;
+		assert_record_t logs[1];
+	} assertlog_results_t;
+
+#define LOGRRC_FIX_LEN	8
+#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
+
+#ifdef BCMWAPI_WAI
+#define IV_LEN 16
+	struct wapi_sta_msg_t
+	{
+		uint16	msg_type;
+		uint16	datalen;
+		uint8	vap_mac[6];
+		uint8	reserve_data1[2];
+		uint8	sta_mac[6];
+		uint8	reserve_data2[2];
+		uint8	gsn[IV_LEN];
+		uint8	wie[256];
+	};
+#endif /* BCMWAPI_WAI */
+
+	/* chanim acs record */
+	typedef struct {
+		bool valid;
+		uint8 trigger;
+		chanspec_t selected_chspc;
+		int8 bgnoise;
+		uint32 glitch_cnt;
+		uint8 ccastats;
+		uint timestamp;
+	} chanim_acs_record_t;
+
+	typedef struct {
+		chanim_acs_record_t acs_record[CHANIM_ACS_RECORD];
+		uint8 count;
+		uint timestamp;
+	} wl_acs_record_t;
+
+	typedef struct chanim_stats {
+		uint32 glitchcnt;               /* normalized as per second count */
+		uint32 badplcp;                 /* normalized as per second count */
+		uint8 ccastats[CCASTATS_MAX];   /* normalized as 0-255 */
+		int8 bgnoise;			/* background noise level (in dBm) */
+		chanspec_t chanspec;
+		uint32 timestamp;
+		uint32 bphy_glitchcnt;          /* normalized as per second count */
+		uint32 bphy_badplcp;            /* normalized as per second count */
+		uint8 chan_idle;                /* normalized as 0~255 */
+	} chanim_stats_t;
+
+#define WL_CHANIM_STATS_VERSION 2
+
+typedef struct {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	chanim_stats_t stats[1];
+} wl_chanim_stats_t;
+
+#define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats)
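+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): sizing the buffer
+ * handed to the chanim stats request for 'count' channel entries.  The caller
+ * would also fill buflen, version (WL_CHANIM_STATS_VERSION) and count in the
+ * structure before issuing the request.
+ */
+#if 0	/* example only */
+static uint32
+chanim_stats_buflen_example(uint32 count)
+{
+	return WL_CHANIM_STATS_FIXED_LEN + count * sizeof(chanim_stats_t);
+}
+#endif	/* example only */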
+
+/* Noise measurement metrics. */
+#define NOISE_MEASURE_KNOISE	0x1
+
+/* scb probe parameter */
+typedef struct {
+	uint32 scb_timeout;
+	uint32 scb_activity_time;
+	uint32 scb_max_probe;
+} wl_scb_probe_t;
+
+/* structure/defines for selective mgmt frame (smf) stats support */
+
+#define SMFS_VERSION 1
+/* selected mgmt frame (smf) stats element */
+typedef struct wl_smfs_elem {
+	uint32 count;
+	uint16 code;  /* SC or RC code */
+} wl_smfs_elem_t;
+
+typedef struct wl_smf_stats {
+	uint32 version;
+	uint16 length;	/* reserved for future usage */
+	uint8 type;
+	uint8 codetype;
+	uint32 ignored_cnt;
+	uint32 malformed_cnt;
+	uint32 count_total; /* count included the interested group */
+	wl_smfs_elem_t elem[1];
+} wl_smf_stats_t;
+
+#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem)
+
+enum {
+	SMFS_CODETYPE_SC,
+	SMFS_CODETYPE_RC
+};
+
+typedef enum smfs_type {
+	SMFS_TYPE_AUTH,
+	SMFS_TYPE_ASSOC,
+	SMFS_TYPE_REASSOC,
+	SMFS_TYPE_DISASSOC_TX,
+	SMFS_TYPE_DISASSOC_RX,
+	SMFS_TYPE_DEAUTH_TX,
+	SMFS_TYPE_DEAUTH_RX,
+	SMFS_TYPE_MAX
+} smfs_type_t;
+
+#ifdef PHYMON
+
+#define PHYMON_VERSION 1
+
+typedef struct wl_phycal_core_state {
+	/* Tx IQ/LO calibration coeffs */
+	int16 tx_iqlocal_a;
+	int16 tx_iqlocal_b;
+	int8 tx_iqlocal_ci;
+	int8 tx_iqlocal_cq;
+	int8 tx_iqlocal_di;
+	int8 tx_iqlocal_dq;
+	int8 tx_iqlocal_ei;
+	int8 tx_iqlocal_eq;
+	int8 tx_iqlocal_fi;
+	int8 tx_iqlocal_fq;
+
+	/* Rx IQ calibration coeffs */
+	int16 rx_iqcal_a;
+	int16 rx_iqcal_b;
+
+	uint8 tx_iqlocal_pwridx; /* Tx Power Index for Tx IQ/LO calibration */
+	uint32 papd_epsilon_table[64]; /* PAPD epsilon table */
+	int16 papd_epsilon_offset; /* PAPD epsilon offset */
+	uint8 curr_tx_pwrindex; /* Tx power index */
+	int8 idle_tssi; /* Idle TSSI */
+	int8 est_tx_pwr; /* Estimated Tx Power (dB) */
+	int8 est_rx_pwr; /* Estimated Rx Power (dB) from RSSI */
+	uint16 rx_gaininfo; /* Rx gain applied on last Rx pkt */
+	uint16 init_gaincode; /* initgain required for ACI */
+	int8 estirr_tx;
+	int8 estirr_rx;
+
+} wl_phycal_core_state_t;
+
+typedef struct wl_phycal_state {
+	int version;
+	int8 num_phy_cores; /* number of cores */
+	int8 curr_temperature; /* on-chip temperature sensor reading */
+	chanspec_t chspec; /* channspec for this state */
+	bool aci_state; /* ACI state: ON/OFF */
+	uint16 crsminpower; /* crsminpower required for ACI */
+	uint16 crsminpowerl; /* crsminpowerl required for ACI */
+	uint16 crsminpoweru; /* crsminpoweru required for ACI */
+	wl_phycal_core_state_t phycal_core[1];
+} wl_phycal_state_t;
+
+#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core)
+#endif /* PHYMON */
+
+/* discovery state */
+typedef struct wl_p2p_disc_st {
+	uint8 state;	/* see state */
+	chanspec_t chspec;	/* valid in listen state */
+	uint16 dwell;	/* valid in listen state, in ms */
+} wl_p2p_disc_st_t;
+
+/* scan request */
+typedef struct wl_p2p_scan {
+	uint8 type;		/* 'S' for WLC_SCAN, 'E' for "escan" */
+	uint8 reserved[3];
+	/* scan or escan parms... */
+} wl_p2p_scan_t;
+
+/* i/f request */
+typedef struct wl_p2p_if {
+	struct ether_addr addr;
+	uint8 type;	/* see i/f type */
+	chanspec_t chspec;	/* for p2p_ifadd GO */
+} wl_p2p_if_t;
+
+/* i/f query */
+typedef struct wl_p2p_ifq {
+	uint bsscfgidx;
+	char ifname[BCM_MSG_IFNAME_MAX];
+} wl_p2p_ifq_t;
+
+/* OppPS & CTWindow */
+typedef struct wl_p2p_ops {
+	uint8 ops;	/* 0: disable 1: enable */
+	uint8 ctw;	/* >= 10 */
+} wl_p2p_ops_t;
+
+/* absence and presence request */
+typedef struct wl_p2p_sched_desc {
+	uint32 start;
+	uint32 interval;
+	uint32 duration;
+	uint32 count;	/* see count */
+} wl_p2p_sched_desc_t;
+
+typedef struct wl_p2p_sched {
+	uint8 type;	/* see schedule type */
+	uint8 action;	/* see schedule action */
+	uint8 option;	/* see schedule option */
+	wl_p2p_sched_desc_t desc[1];
+} wl_p2p_sched_t;
+
+typedef struct wl_p2p_wfds_hash {
+	uint32	advt_id;
+	uint16	nw_cfg_method;
+	uint8	wfds_hash[6];
+	uint8	name_len;
+	uint8	service_name[MAX_WFDS_SVC_NAME_LEN];
+} wl_p2p_wfds_hash_t;
+
+typedef struct wl_bcmdcs_data {
+	uint reason;
+	chanspec_t chspec;
+} wl_bcmdcs_data_t;
+
+
+/* NAT configuration */
+typedef struct {
+	uint32 ipaddr;		/* interface ip address */
+	uint32 ipaddr_mask;	/* interface ip address mask */
+	uint32 ipaddr_gateway;	/* gateway ip address */
+	uint8 mac_gateway[6];	/* gateway mac address */
+	uint32 ipaddr_dns;	/* DNS server ip address, valid only for public if */
+	uint8 mac_dns[6];	/* DNS server mac address,  valid only for public if */
+	uint8 GUID[38];		/* interface GUID */
+} nat_if_info_t;
+
+typedef struct {
+	uint op;		/* operation code */
+	bool pub_if;		/* set for public if, clear for private if */
+	nat_if_info_t if_info;	/* interface info */
+} nat_cfg_t;
+
+typedef struct {
+	int state;	/* NAT state returned */
+} nat_state_t;
+
+
+#define BTA_STATE_LOG_SZ	64
+
+/* BTAMP Statemachine states */
+enum {
+	HCIReset = 1,
+	HCIReadLocalAMPInfo,
+	HCIReadLocalAMPASSOC,
+	HCIWriteRemoteAMPASSOC,
+	HCICreatePhysicalLink,
+	HCIAcceptPhysicalLinkRequest,
+	HCIDisconnectPhysicalLink,
+	HCICreateLogicalLink,
+	HCIAcceptLogicalLink,
+	HCIDisconnectLogicalLink,
+	HCILogicalLinkCancel,
+	HCIAmpStateChange,
+	HCIWriteLogicalLinkAcceptTimeout
+};
+
+typedef struct flush_txfifo {
+	uint32 txfifobmp;
+	uint32 hwtxfifoflush;
+	struct ether_addr ea;
+} flush_txfifo_t;
+
+enum {
+	SPATIAL_MODE_2G_IDX = 0,
+	SPATIAL_MODE_5G_LOW_IDX,
+	SPATIAL_MODE_5G_MID_IDX,
+	SPATIAL_MODE_5G_HIGH_IDX,
+	SPATIAL_MODE_5G_UPPER_IDX,
+	SPATIAL_MODE_MAX_IDX
+};
+
+#define WLC_TXCORE_MAX	4	/* max number of txcore supports */
+#define WLC_SUBBAND_MAX	4	/* max number of sub-band supports */
+typedef struct {
+	uint8	band2g[WLC_TXCORE_MAX];
+	uint8	band5g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX];
+} sar_limit_t;
+
+#define WLC_TXCAL_CORE_MAX 2	/* max number of txcore supports for txcal */
+#define MAX_NUM_TXCAL_MEAS 128
+
+typedef struct wl_txcal_meas {
+	uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+	int16 pwr[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+	uint8 valid_cnt;
+} wl_txcal_meas_t;
+
+typedef struct wl_txcal_power_tssi {
+	uint8 set_core;
+	uint8 channel;
+	int16 pwr_start[WLC_TXCAL_CORE_MAX];
+	uint8 num_entries[WLC_TXCAL_CORE_MAX];
+	uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+	bool gen_tbl;
+} wl_txcal_power_tssi_t;
+
+/* IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */
+typedef struct wl_mempool_stats {
+	int	num;		/* Number of memory pools */
+	bcm_mp_stats_t s[1];	/* Variable array of memory pool stats. */
+} wl_mempool_stats_t;
+
+typedef struct {
+	uint32 ipaddr;
+	uint32 ipaddr_netmask;
+	uint32 ipaddr_gateway;
+} nwoe_ifconfig_t;
+
+/* Traffic management priority classes */
+typedef enum trf_mgmt_priority_class {
+	trf_mgmt_priority_low           = 0,        /* Maps to 802.1p BK */
+	trf_mgmt_priority_medium        = 1,        /* Maps to 802.1p BE */
+	trf_mgmt_priority_high          = 2,        /* Maps to 802.1p VI */
+	trf_mgmt_priority_nochange	= 3,	    /* do not update the priority */
+	trf_mgmt_priority_invalid       = (trf_mgmt_priority_nochange + 1)
+} trf_mgmt_priority_class_t;
+
+/* Traffic management configuration parameters */
+typedef struct trf_mgmt_config {
+	uint32  trf_mgmt_enabled;                           /* 0 - disabled, 1 - enabled */
+	uint32  flags;                                      /* See TRF_MGMT_FLAG_xxx defines */
+	uint32  host_ip_addr;                               /* My IP address to determine subnet */
+	uint32  host_subnet_mask;                           /* My subnet mask */
+	uint32  downlink_bandwidth;                         /* In units of kbps */
+	uint32  uplink_bandwidth;                           /* In units of kbps */
+	uint32  min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES];  /* Minimum guaranteed tx bandwidth */
+	uint32  min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES];  /* Minimum guaranteed rx bandwidth */
+} trf_mgmt_config_t;
+
+/* Traffic management filter */
+typedef struct trf_mgmt_filter {
+	struct ether_addr           dst_ether_addr;         /* His L2 address */
+	uint32                      dst_ip_addr;            /* His IP address */
+	uint16                      dst_port;               /* His L4 port */
+	uint16                      src_port;               /* My L4 port */
+	uint16                      prot;                   /* L4 protocol (only TCP or UDP) */
+	uint16                      flags;                  /* TBD. For now, this must be zero. */
+	trf_mgmt_priority_class_t   priority;               /* Priority for filtered packets */
+	uint32                      dscp;                   /* DSCP */
+} trf_mgmt_filter_t;
+
+/* Traffic management filter list (variable length) */
+typedef struct trf_mgmt_filter_list     {
+	uint32              num_filters;
+	trf_mgmt_filter_t   filter[1];
+} trf_mgmt_filter_list_t;
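+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): number of bytes
+ * needed for a trf_mgmt_filter_list_t carrying 'n' filters, accounting for the
+ * one-element array embedded in the fixed part.
+ */
+#if 0	/* example only */
+static uint32
+trf_mgmt_filter_list_len_example(uint32 n)
+{
+	return OFFSETOF(trf_mgmt_filter_list_t, filter) + n * sizeof(trf_mgmt_filter_t);
+}
+#endif	/* example only */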
+
+/* Traffic management global info used for all queues */
+typedef struct trf_mgmt_global_info {
+	uint32  maximum_bytes_per_second;
+	uint32  maximum_bytes_per_sampling_period;
+	uint32  total_bytes_consumed_per_second;
+	uint32  total_bytes_consumed_per_sampling_period;
+	uint32  total_unused_bytes_per_sampling_period;
+} trf_mgmt_global_info_t;
+
+/* Traffic management shaping info per priority queue */
+typedef struct trf_mgmt_shaping_info {
+	uint32  gauranteed_bandwidth_percentage;
+	uint32  guaranteed_bytes_per_second;
+	uint32  guaranteed_bytes_per_sampling_period;
+	uint32  num_bytes_produced_per_second;
+	uint32  num_bytes_consumed_per_second;
+	uint32  num_queued_packets;                         /* Number of packets in queue */
+	uint32  num_queued_bytes;                           /* Number of bytes in queue */
+} trf_mgmt_shaping_info_t;
+
+/* Traffic management shaping info array */
+typedef struct trf_mgmt_shaping_info_array {
+	trf_mgmt_global_info_t   tx_global_shaping_info;
+	trf_mgmt_shaping_info_t  tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+	trf_mgmt_global_info_t   rx_global_shaping_info;
+	trf_mgmt_shaping_info_t  rx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_shaping_info_array_t;
+
+
+/* Traffic management statistical counters */
+typedef struct trf_mgmt_stats {
+	uint32  num_processed_packets;      /* Number of packets processed */
+	uint32  num_processed_bytes;        /* Number of bytes processed */
+	uint32  num_discarded_packets;      /* Number of packets discarded from queue */
+} trf_mgmt_stats_t;
+
+/* Traffic management statistics array */
+typedef struct trf_mgmt_stats_array {
+	trf_mgmt_stats_t  tx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+	trf_mgmt_stats_t  rx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_stats_array_t;
+
+typedef struct powersel_params {
+	/* LPC Params exposed via IOVAR */
+	int32		tp_ratio_thresh;  /* Throughput ratio threshold */
+	uint8		rate_stab_thresh; /* Thresh for rate stability based on nupd */
+	uint8		pwr_stab_thresh; /* Number of successes before power step down */
+	uint8		pwr_sel_exp_time; /* Time lapse for expiry of database */
+} powersel_params_t;
+
+typedef struct lpc_params {
+	/* LPC Params exposed via IOVAR */
+	uint8		rate_stab_thresh; /* Thresh for rate stability based on nupd */
+	uint8		pwr_stab_thresh; /* Number of successes before power step down */
+	uint8		lpc_exp_time; /* Time lapse for expiry of database */
+	uint8		pwrup_slow_step; /* Step size for slow step up */
+	uint8		pwrup_fast_step; /* Step size for fast step up */
+	uint8		pwrdn_slow_step; /* Step size for slow step down */
+} lpc_params_t;
+
+/* tx pkt delay statistics */
+#define	SCB_RETRY_SHORT_DEF	7	/* Default Short retry Limit */
+#define WLPKTDLY_HIST_NBINS	16	/* number of bins used in the Delay histogram */
+
+/* structure to store per-AC delay statistics */
+typedef struct scb_delay_stats {
+	uint32 txmpdu_lost;	/* number of MPDUs lost */
+	uint32 txmpdu_cnt[SCB_RETRY_SHORT_DEF]; /* retry times histogram */
+	uint32 delay_sum[SCB_RETRY_SHORT_DEF]; /* cumulative packet latency */
+	uint32 delay_min;	/* minimum packet latency observed */
+	uint32 delay_max;	/* maximum packet latency observed */
+	uint32 delay_avg;	/* packet latency average */
+	uint32 delay_hist[WLPKTDLY_HIST_NBINS];	/* delay histogram */
+} scb_delay_stats_t;
+
+/* structure for txdelay event */
+typedef struct txdelay_event {
+	uint8	status;
+	int		rssi;
+	chanim_stats_t		chanim_stats;
+	scb_delay_stats_t	delay_stats[AC_COUNT];
+} txdelay_event_t;
+
+/* structure for txdelay parameters */
+typedef struct txdelay_params {
+	uint16	ratio;	/* Avg Txdelay Delta */
+	uint8	cnt;	/* Sample cnt */
+	uint8	period;	/* Sample period */
+	uint8	tune;	/* Debug */
+} txdelay_params_t;
+
+enum {
+	WNM_SERVICE_DMS = 1,
+	WNM_SERVICE_FMS = 2,
+	WNM_SERVICE_TFS = 3
+};
+
+/* Definitions for WNM/NPS TCLAS */
+typedef struct wl_tclas {
+	uint8 user_priority;
+	uint8 fc_len;
+	dot11_tclas_fc_t fc;
+} wl_tclas_t;
+
+#define WL_TCLAS_FIXED_SIZE	OFFSETOF(wl_tclas_t, fc)
+
+typedef struct wl_tclas_list {
+	uint32 num;
+	wl_tclas_t tclas[1];
+} wl_tclas_list_t;
+
+/* Definitions for WNM/NPS Traffic Filter Service */
+typedef struct wl_tfs_req {
+	uint8 tfs_id;
+	uint8 tfs_actcode;
+	uint8 tfs_subelem_id;
+	uint8 send;
+} wl_tfs_req_t;
+
+typedef struct wl_tfs_filter {
+	uint8 status;			/* Status returned by the AP */
+	uint8 tclas_proc;		/* TCLAS processing value (0:and, 1:or)  */
+	uint8 tclas_cnt;		/* count of all wl_tclas_t in tclas array */
+	uint8 tclas[1];			/* VLA of wl_tclas_t */
+} wl_tfs_filter_t;
+#define WL_TFS_FILTER_FIXED_SIZE	OFFSETOF(wl_tfs_filter_t, tclas)
+
+typedef struct wl_tfs_fset {
+	struct ether_addr ea;		/* Address of AP/STA involved with this filter set */
+	uint8 tfs_id;			/* TFS ID field chosen by STA host */
+	uint8 status;			/* Internal status TFS_STATUS_xxx */
+	uint8 actcode;			/* Action code DOT11_TFS_ACTCODE_xxx */
+	uint8 token;			/* Token used in last request frame */
+	uint8 notify;			/* Notify frame sent/received because of this set */
+	uint8 filter_cnt;		/* count of all wl_tfs_filter_t in filter array */
+	uint8 filter[1];		/* VLA of wl_tfs_filter_t */
+} wl_tfs_fset_t;
+#define WL_TFS_FSET_FIXED_SIZE		OFFSETOF(wl_tfs_fset_t, filter)
+
+enum {
+	TFS_STATUS_DISABLED = 0,	/* TFS filter set disabled by user */
+	TFS_STATUS_DISABLING = 1,	/* Empty request just sent to AP */
+	TFS_STATUS_VALIDATED = 2,	/* Filter set validated by AP (but maybe not enabled!) */
+	TFS_STATUS_VALIDATING = 3,	/* Filter set just sent to AP */
+	TFS_STATUS_NOT_ASSOC = 4,	/* STA not associated */
+	TFS_STATUS_NOT_SUPPORT = 5,	/* TFS not supported by AP */
+	TFS_STATUS_DENIED = 6,		/* Filter set refused by AP (=> all sets are disabled!) */
+};
+
+typedef struct wl_tfs_status {
+	uint8 fset_cnt;			/* count of all wl_tfs_fset_t in fset array */
+	wl_tfs_fset_t fset[1];		/* VLA of wl_tfs_fset_t */
+} wl_tfs_status_t;
+
+typedef struct wl_tfs_set {
+	uint8 send;			/* Immediately register registered sets on AP side */
+	uint8 tfs_id;			/* ID of a specific set (existing or new), or nul for all */
+	uint8 actcode;			/* Action code for this filter set */
+	uint8 tclas_proc;		/* TCLAS processing operator for this filter set */
+} wl_tfs_set_t;
+
+typedef struct wl_tfs_term {
+	uint8 del;			/* Delete internal set once confirmation received */
+	uint8 tfs_id;			/* ID of a specific set (existing), or nul for all */
+} wl_tfs_term_t;
+
+
+#define DMS_DEP_PROXY_ARP (1 << 0)
+
+/* Definitions for WNM/NPS Directed Multicast Service */
+enum {
+	DMS_STATUS_DISABLED = 0,	/* DMS desc disabled by user */
+	DMS_STATUS_ACCEPTED = 1,	/* Request accepted by AP */
+	DMS_STATUS_NOT_ASSOC = 2,	/* STA not associated */
+	DMS_STATUS_NOT_SUPPORT = 3,	/* DMS not supported by AP */
+	DMS_STATUS_DENIED = 4,		/* Request denied by AP */
+	DMS_STATUS_TERM = 5,		/* Request terminated by AP */
+	DMS_STATUS_REMOVING = 6,	/* Remove request just sent */
+	DMS_STATUS_ADDING = 7,		/* Add request just sent */
+	DMS_STATUS_ERROR = 8,		/* Non-compliant AP behavior */
+	DMS_STATUS_IN_PROGRESS = 9, /* Request just sent */
+	DMS_STATUS_REQ_MISMATCH = 10 /* Conditions for sending DMS req not met */
+};
+
+typedef struct wl_dms_desc {
+	uint8 user_id;
+	uint8 status;
+	uint8 token;
+	uint8 dms_id;
+	uint8 tclas_proc;
+	uint8 mac_len;		/* length of all ether_addr in data array, 0 if STA */
+	uint8 tclas_len;	/* length of all wl_tclas_t in data array */
+	uint8 data[1];		/* VLA of 'ether_addr' and 'wl_tclas_t' (in this order ) */
+} wl_dms_desc_t;
+
+#define WL_DMS_DESC_FIXED_SIZE	OFFSETOF(wl_dms_desc_t, data)
+
+typedef struct wl_dms_status {
+	uint32 cnt;
+	wl_dms_desc_t desc[1];
+} wl_dms_status_t;
+
+typedef struct wl_dms_set {
+	uint8 send;
+	uint8 user_id;
+	uint8 tclas_proc;
+} wl_dms_set_t;
+
+typedef struct wl_dms_term {
+	uint8 del;
+	uint8 user_id;
+} wl_dms_term_t;
+
+typedef struct wl_service_term {
+	uint8 service;
+	union {
+		wl_dms_term_t dms;
+	} u;
+} wl_service_term_t;
+
+/* Definitions for WNM/NPS BSS Transition */
+typedef struct wl_bsstrans_req {
+	uint16 tbtt;			/* time of BSS to end of life, in unit of TBTT */
+	uint16 dur;			/* time of BSS to keep off, in unit of minute */
+	uint8 reqmode;			/* request mode of BSS transition request */
+	uint8 unicast;			/* request by unicast or by broadcast */
+} wl_bsstrans_req_t;
+
+enum {
+	BSSTRANS_RESP_AUTO = 0,		/* Currently equivalent to ENABLE */
+	BSSTRANS_RESP_DISABLE = 1,	/* Never answer BSS Trans Req frames */
+	BSSTRANS_RESP_ENABLE = 2,	/* Always answer Req frames with preset data */
+	BSSTRANS_RESP_WAIT = 3,		/* Send ind, wait and/or send preset data (NOT IMPL) */
+	BSSTRANS_RESP_IMMEDIATE = 4	/* After an ind, set data and send resp (NOT IMPL) */
+};
+
+typedef struct wl_bsstrans_resp {
+	uint8 policy;
+	uint8 status;
+	uint8 delay;
+	struct ether_addr target;
+} wl_bsstrans_resp_t;
+
+/* "wnm_bsstrans_policy" argument programs behavior after BSSTRANS Req reception.
+ * BSS-Transition feature is used by multiple programs such as NPS-PF, VE-PF,
+ * Band-steering, Hotspot 2.0 and customer requirements. Each PF and its test plan
+ * mandates different behavior on receiving BSS-transition request. To accommodate
+ * such divergent behaviors these policies have been created.
+ */
+enum {
+	WL_BSSTRANS_POLICY_ROAM_ALWAYS = 0,	/* Roam (or disassociate) in all cases */
+	WL_BSSTRANS_POLICY_ROAM_IF_MODE = 1,	/* Roam only if requested by Request Mode field */
+	WL_BSSTRANS_POLICY_ROAM_IF_PREF = 2,	/* Roam only if Preferred BSS provided */
+	WL_BSSTRANS_POLICY_WAIT = 3,		/* Wait for deauth and send Accepted status */
+	WL_BSSTRANS_POLICY_PRODUCT = 4,		/* Policy for real product use cases (non-pf) */
+};
+
+/* Definitions for WNM/NPS TIM Broadcast */
+typedef struct wl_timbc_offset {
+	int16 offset;		/* offset in us */
+	uint16 fix_intv;	/* override interval sent from STA */
+	uint16 rate_override;	/* use rate override to send high rate TIM broadcast frame */
+	uint8 tsf_present;	/* show timestamp in TIM broadcast frame */
+} wl_timbc_offset_t;
+
+typedef struct wl_timbc_set {
+	uint8 interval;		/* Interval in DTIM wished or required. */
+	uint8 flags;		/* Bitfield described below */
+	uint16 rate_min;	/* Minimum rate required for High/Low TIM frames. Optional */
+	uint16 rate_max;	/* Maximum rate required for High/Low TIM frames. Optional */
+} wl_timbc_set_t;
+
+enum {
+	WL_TIMBC_SET_TSF_REQUIRED = 1,	/* Enable TIMBC only if TSF in TIM frames */
+	WL_TIMBC_SET_NO_OVERRIDE = 2,	/* ... if AP does not override interval */
+	WL_TIMBC_SET_PROXY_ARP = 4,	/* ... if AP support Proxy ARP */
+	WL_TIMBC_SET_DMS_ACCEPTED = 8	/* ... if all DMS desc have been accepted */
+};
+
+typedef struct wl_timbc_status {
+	uint8 status_sta;		/* Status from internal state machine (check below) */
+	uint8 status_ap;		/* From AP response frame (check 8.4.2.86 from 802.11) */
+	uint8 interval;
+	uint8 pad;
+	int32 offset;
+	uint16 rate_high;
+	uint16 rate_low;
+} wl_timbc_status_t;
+
+enum {
+	WL_TIMBC_STATUS_DISABLE = 0,		/* TIMBC disabled by user */
+	WL_TIMBC_STATUS_REQ_MISMATCH = 1,	/* AP settings do not match user requirements */
+	WL_TIMBC_STATUS_NOT_ASSOC = 2,		/* STA not associated */
+	WL_TIMBC_STATUS_NOT_SUPPORT = 3,	/* TIMBC not supported by AP */
+	WL_TIMBC_STATUS_DENIED = 4,		/* Req to disable TIMBC sent to AP */
+	WL_TIMBC_STATUS_ENABLE = 5		/* TIMBC enabled */
+};
+
+/* Definitions for PM2 Dynamic Fast Return To Sleep */
+typedef struct wl_pm2_sleep_ret_ext {
+	uint8 logic;			/* DFRTS logic: see WL_DFRTS_LOGIC_* below */
+	uint16 low_ms;			/* Low FRTS timeout */
+	uint16 high_ms;			/* High FRTS timeout */
+	uint16 rx_pkts_threshold;	/* switching threshold: # rx pkts */
+	uint16 tx_pkts_threshold;	/* switching threshold: # tx pkts */
+	uint16 txrx_pkts_threshold;	/* switching threshold: # (tx+rx) pkts */
+	uint32 rx_bytes_threshold;	/* switching threshold: # rx bytes */
+	uint32 tx_bytes_threshold;	/* switching threshold: # tx bytes */
+	uint32 txrx_bytes_threshold;	/* switching threshold: # (tx+rx) bytes */
+} wl_pm2_sleep_ret_ext_t;
+
+#define WL_DFRTS_LOGIC_OFF	0	/* Feature is disabled */
+#define WL_DFRTS_LOGIC_OR	1	/* OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND	2	/* AND all non-zero threshold conditions */
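+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): one way to fill
+ * wl_pm2_sleep_ret_ext_t for an OR policy.  The timeout and threshold values
+ * below are arbitrary example numbers, not recommendations.
+ */
+#if 0	/* example only */
+static void
+dfrts_or_config_example(wl_pm2_sleep_ret_ext_t *cfg)
+{
+	cfg->logic = WL_DFRTS_LOGIC_OR;	/* switch when any non-zero threshold trips */
+	cfg->low_ms = 50;		/* FRTS timeout used below the thresholds */
+	cfg->high_ms = 200;		/* FRTS timeout used once a threshold trips */
+	cfg->rx_pkts_threshold = 10;	/* thresholds left at 0 are ignored */
+	cfg->tx_pkts_threshold = 0;
+	cfg->txrx_pkts_threshold = 0;
+	cfg->rx_bytes_threshold = 0;
+	cfg->tx_bytes_threshold = 0;
+	cfg->txrx_bytes_threshold = 0;
+}
+#endif	/* example only */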
+
+/* Values for the passive_on_restricted_mode iovar.  When set to non-zero, this iovar
+ * disables automatic conversions of a channel from passively scanned to
+ * actively scanned.  These values only have an effect for country codes such
+ * as XZ where some 5 GHz channels are defined to be passively scanned.
+ */
+#define WL_PASSACTCONV_DISABLE_NONE	0	/* Enable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_ALL	1	/* Disable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_PERM	2	/* Disable only permanent conversions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RMC_CNT_VERSION	   1
+#define WL_RMC_TR_VERSION	   1
+#define WL_RMC_MAX_CLIENT	   32
+#define WL_RMC_FLAG_INBLACKLIST	   1
+#define WL_RMC_FLAG_ACTIVEACKER	   2
+#define WL_RMC_FLAG_RELMCAST	   4
+#define WL_RMC_MAX_TABLE_ENTRY     4
+
+#define WL_RMC_VER		   1
+#define WL_RMC_INDEX_ACK_ALL       255
+#define WL_RMC_NUM_OF_MC_STREAMS   4
+#define WL_RMC_MAX_TRS_PER_GROUP   1
+#define WL_RMC_MAX_TRS_IN_ACKALL   1
+#define WL_RMC_ACK_MCAST0          0x02
+#define WL_RMC_ACK_MCAST_ALL       0x01
+#define WL_RMC_ACTF_TIME_MIN       300	 /* time in ms */
+#define WL_RMC_ACTF_TIME_MAX       20000 /* time in ms */
+#define WL_RMC_MAX_NUM_TRS	   32	 /* maximum transmitters allowed */
+#define WL_RMC_ARTMO_MIN           350	 /* time in ms */
+#define WL_RMC_ARTMO_MAX           40000	 /* time in ms */
+
+/* RMC events in action frames */
+enum rmc_opcodes {
+	RELMCAST_ENTRY_OP_DISABLE = 0,   /* Disable multi-cast group */
+	RELMCAST_ENTRY_OP_DELETE  = 1,   /* Delete multi-cast group */
+	RELMCAST_ENTRY_OP_ENABLE  = 2,   /* Enable multi-cast group */
+	RELMCAST_ENTRY_OP_ACK_ALL = 3    /* Enable ACK ALL bit in AMT */
+};
+
+/* RMC operational modes */
+enum rmc_modes {
+	WL_RMC_MODE_RECEIVER    = 0,	 /* Receiver mode by default */
+	WL_RMC_MODE_TRANSMITTER = 1,	 /* Transmitter mode using wl ackreq */
+	WL_RMC_MODE_INITIATOR   = 2	 /* Initiator mode using wl ackreq */
+};
+
+/* Each RMC mcast client info */
+typedef struct wl_relmcast_client {
+	uint8 flag;			/* status of client such as AR, R, or blacklisted */
+	int16 rssi;			/* rssi value of RMC client */
+	struct ether_addr addr;		/* mac address of RMC client */
+} wl_relmcast_client_t;
+
+/* RMC Counters */
+typedef struct wl_rmc_cnts {
+	uint16  version;		/* see definition of WL_CNT_T_VERSION */
+	uint16  length;			/* length of entire structure */
+	uint16	dupcnt;			/* counter for duplicate rmc MPDU */
+	uint16	ackreq_err;		/* counter for wl ackreq error    */
+	uint16	af_tx_err;		/* error count for action frame transmit   */
+	uint16	null_tx_err;		/* error count for rmc null frame transmit */
+	uint16	af_unicast_tx_err;	/* error count for rmc unicast frame transmit */
+	uint16	mc_no_amt_slot;		/* No mcast AMT entry available */
+	/* Unused. Keep for rom compatibility */
+	uint16	mc_no_glb_slot;		/* No mcast entry available in global table */
+	uint16	mc_not_mirrored;	/* mcast group is not mirrored */
+	uint16	mc_existing_tr;		/* mcast group is already taken by transmitter */
+	uint16	mc_exist_in_amt;	/* mcast group is already programmed in amt */
+	/* Unused. Keep for rom compatibility */
+	uint16	mc_not_exist_in_gbl;	/* mcast group is not in global table */
+	uint16	mc_not_exist_in_amt;	/* mcast group is not in AMT table */
+	uint16	mc_utilized;		/* mcast address is already taken */
+	uint16	mc_taken_other_tr;	/* mcast address already taken by another transmitter */
+	uint32	rmc_rx_frames_mac;      /* no of mc frames received from mac */
+	uint32	rmc_tx_frames_mac;      /* no of mc frames transmitted to mac */
+	uint32	mc_null_ar_cnt;         /* no. of times NULL AR is received */
+	uint32	mc_ar_role_selected;	/* no. of times took AR role */
+	uint32	mc_ar_role_deleted;	/* no. of times AR role cancelled */
+	uint32	mc_noacktimer_expired;  /* no. of times noack timer expired */
+	uint16  mc_no_wl_clk;           /* no wl clk detected when trying to access amt */
+	uint16  mc_tr_cnt_exceeded;     /* No of transmitters in the network exceeded */
+} wl_rmc_cnts_t;
+
+/* RMC Status */
+typedef struct wl_relmcast_st {
+	uint8         ver;		/* version of RMC */
+	uint8         num;		/* number of clients detected by transmitter */
+	wl_relmcast_client_t clients[WL_RMC_MAX_CLIENT];
+	uint16        err;		/* error status (used in infra) */
+	uint16        actf_time;	/* action frame time period */
+} wl_relmcast_status_t;
+
+/* Entry for each STA/node */
+typedef struct wl_rmc_entry {
+	/* operation on multi-cast entry such as add,
+	 * delete, ack-all
+	 */
+	int8    flag;
+	struct ether_addr addr;		/* multi-cast group mac address */
+} wl_rmc_entry_t;
+
+/* RMC table */
+typedef struct wl_rmc_entry_table {
+	uint8   index;			/* index to a particular mac entry in table */
+	uint8   opcode;			/* opcodes or operation on entry */
+	wl_rmc_entry_t entry[WL_RMC_MAX_TABLE_ENTRY];
+} wl_rmc_entry_table_t;
+
+typedef struct wl_rmc_trans_elem {
+	struct ether_addr tr_mac;	/* transmitter mac */
+	struct ether_addr ar_mac;	/* ar mac */
+	uint16 artmo;			/* AR timeout */
+	uint8 amt_idx;			/* amt table entry */
+	uint16 flag;			/* entry will be acked, not acked, programmed, full etc */
+} wl_rmc_trans_elem_t;
+
+/* RMC transmitters */
+typedef struct wl_rmc_trans_in_network {
+	uint8         ver;		/* version of RMC */
+	uint8         num_tr;		/* number of transmitters in the network */
+	wl_rmc_trans_elem_t trs[WL_RMC_MAX_NUM_TRS];
+} wl_rmc_trans_in_network_t;
+
+/* To update vendor specific ie for RMC */
+typedef struct wl_rmc_vsie {
+	uint8	oui[DOT11_OUI_LEN];
+	uint16	payload;	/* IE Data Payload */
+} wl_rmc_vsie_t;
+
+
+/* structures  & defines for proximity detection  */
+enum proxd_method {
+	PROXD_UNDEFINED_METHOD = 0,
+	PROXD_RSSI_METHOD = 1,
+	PROXD_TOF_METHOD = 2
+};
+
+/* structures for proximity detection device role */
+#define WL_PROXD_MODE_DISABLE	0
+#define WL_PROXD_MODE_NEUTRAL	1
+#define WL_PROXD_MODE_INITIATOR	2
+#define WL_PROXD_MODE_TARGET	3
+
+#define WL_PROXD_ACTION_STOP		0
+#define WL_PROXD_ACTION_START		1
+
+#define WL_PROXD_FLAG_TARGET_REPORT	0x1
+#define WL_PROXD_FLAG_REPORT_FAILURE	0x2
+#define WL_PROXD_FLAG_INITIATOR_REPORT	0x4
+#define WL_PROXD_FLAG_NOCHANSWT		0x8
+#define WL_PROXD_FLAG_NETRUAL		0x10
+#define WL_PROXD_FLAG_INITIATOR_RPTRTT	0x20
+#define WL_PROXD_FLAG_ONEWAY		0x40
+#define WL_PROXD_FLAG_SEQ_EN		0x80
+
+#define WL_PROXD_RANDOM_WAKEUP	0x8000
+
+typedef struct wl_proxd_iovar {
+	uint16	method;		/* Proximity Detection method */
+	uint16	mode;		/* Mode (neutral, initiator, target) */
+} wl_proxd_iovar_t;
+
+/*
+ * structures for proximity detection parameters
+ * consists of two parts, common and method specific params
+ * common params should be placed at the beginning
+ */
+
+/* require strict packing */
+#include <packed_section_start.h>
+
+typedef	BWL_PRE_PACKED_STRUCT struct	wl_proxd_params_common	{
+	chanspec_t	chanspec;	/* channel spec */
+	int16		tx_power;	/* tx power of Proximity Detection(PD) frames (in dBm) */
+	uint16		tx_rate;	/* tx rate of PD frames (in 500kbps units) */
+	uint16		timeout;	/* timeout value */
+	uint16		interval;	/* interval between neighbor finding attempts (in TU) */
+	uint16		duration;	/* duration of neighbor finding attempts (in ms) */
+} BWL_POST_PACKED_STRUCT wl_proxd_params_common_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_rssi_method {
+	chanspec_t	chanspec;	/* chanspec for home channel */
+	int16		tx_power;	/* tx power of Proximity Detection frames (in dBm) */
+	uint16		tx_rate;	/* tx rate of PD frames, 500kbps units */
+	uint16		timeout;	/* state machine wait timeout of the frames (in ms) */
+	uint16		interval;	/* interval between neighbor finding attempts (in TU) */
+	uint16		duration;	/* duration of neighbor finding attempts (in ms) */
+					/* method specific ones go after this line */
+	int16		rssi_thresh;	/* RSSI threshold (in dBm) */
+	uint16		maxconvergtmo;	/* max wait converge timeout (in ms) */
+} wl_proxd_params_rssi_method_t;
+
+#define Q1_NS			25	/* Q1 time units */
+
+#define TOF_BW_NUM		3	/* number of bandwidth that the TOF can support */
+#define TOF_BW_SEQ_NUM		(TOF_BW_NUM+2)	/* number of total index */
+enum tof_bw_index {
+	TOF_BW_20MHZ_INDEX = 0,
+	TOF_BW_40MHZ_INDEX = 1,
+	TOF_BW_80MHZ_INDEX = 2,
+	TOF_BW_SEQTX_INDEX = 3,
+	TOF_BW_SEQRX_INDEX = 4
+};
+
+#define BANDWIDTH_BASE	20	/* base value of bandwidth */
+#define TOF_BW_20MHZ    (BANDWIDTH_BASE << TOF_BW_20MHZ_INDEX)
+#define TOF_BW_40MHZ    (BANDWIDTH_BASE << TOF_BW_40MHZ_INDEX)
+#define TOF_BW_80MHZ    (BANDWIDTH_BASE << TOF_BW_80MHZ_INDEX)
+#define TOF_BW_10MHZ    10
+
+#define NFFT_BASE		64	/* base size of fft */
+#define TOF_NFFT_20MHZ  (NFFT_BASE << TOF_BW_20MHZ_INDEX)
+#define TOF_NFFT_40MHZ  (NFFT_BASE << TOF_BW_40MHZ_INDEX)
+#define TOF_NFFT_80MHZ  (NFFT_BASE << TOF_BW_80MHZ_INDEX)
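+
+/*
+ * Worked values for the defines above: with BANDWIDTH_BASE = 20 and
+ * NFFT_BASE = 64 shifted by the per-bandwidth index, TOF_BW_20/40/80MHZ
+ * evaluate to 20, 40 and 80 (MHz), and TOF_NFFT_20/40/80MHZ to 64, 128 and
+ * 256 samples respectively.
+ */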
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_method {
+	chanspec_t	chanspec;	/* chanspec for home channel */
+	int16		tx_power;	/* tx power of Proximity Detection(PD) frames (in dBm) */
+	uint16		tx_rate;	/* tx rate of PD frames (in 500kbps units) */
+	uint16		timeout;	/* state machine wait timeout of the frames (in ms) */
+	uint16		interval;	/* interval between neighbor finding attempts (in TU) */
+	uint16		duration;	/* duration of neighbor finding attempts (in ms) */
+	/* specific for the method go after this line */
+	struct ether_addr tgt_mac;	/* target mac addr for TOF method */
+	uint16		ftm_cnt;	/* number of the frames txed by initiator */
+	uint16		retry_cnt;	/* number of retransmit attempts for ftm frames */
+	int16		vht_rate;	/* ht or vht rate */
+	/* params required for other methods can be added here */
+} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_method_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune {
+	uint32		Ki;			/* h/w delay K factor for initiator */
+	uint32		Kt;			/* h/w delay K factor for target */
+	int16		vhtack;			/* enable/disable VHT ACK */
+	int16		N_log2[TOF_BW_SEQ_NUM]; /* simple threshold crossing */
+	int16		w_offset[TOF_BW_NUM];	/* offset of threshold crossing window(per BW) */
+	int16		w_len[TOF_BW_NUM];	/* length of threshold crossing window(per BW) */
+	int32		maxDT;			/* max time difference of T4/T1 or T3/T2 */
+	int32		minDT;			/* min time difference of T4/T1 or T3/T2 */
+	uint8		totalfrmcnt;	/* total count of transferred measurement frames */
+	uint16		rsv_media;		/* reserve media value for TOF */
+	uint32		flags;			/* flags */
+	uint8		core;			/* core to use for tx */
+	uint8		force_K;		/* set to force value of K  */
+	int16		N_scale[TOF_BW_SEQ_NUM]; /* simple threshold crossing */
+	uint8		sw_adj;			/* enable sw assisted timestamp adjustment */
+	uint8		hw_adj;			/* enable hw assisted timestamp adjustment */
+	uint8		seq_en;			/* enable ranging sequence */
+	uint8		ftm_cnt[TOF_BW_SEQ_NUM]; /* number of ftm frames based on bandwidth */
+} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_t;
+
+typedef struct wl_proxd_params_iovar {
+	uint16	method;			/* Proximity Detection method */
+	union {
+		/* common params for pdsvc */
+		wl_proxd_params_common_t	cmn_params;	/* common parameters */
+		/*  method specific */
+		wl_proxd_params_rssi_method_t	rssi_params;	/* RSSI method parameters */
+		wl_proxd_params_tof_method_t	tof_params;	/* TOF method parameters */
+		/* tune parameters */
+		wl_proxd_params_tof_tune_t	tof_tune;	/* TOF tune parameters */
+	} u;				/* Method specific optional parameters */
+} wl_proxd_params_iovar_t;
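+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): selecting the TOF
+ * branch of the parameter union.  The frame and retry counts are arbitrary
+ * example values.
+ */
+#if 0	/* example only */
+static void
+proxd_tof_params_example(wl_proxd_params_iovar_t *iov, const struct ether_addr *tgt)
+{
+	iov->method = PROXD_TOF_METHOD;		/* selects u.tof_params */
+	iov->u.tof_params.tgt_mac = *tgt;	/* peer to range against */
+	iov->u.tof_params.ftm_cnt = 10;		/* frames txed by the initiator */
+	iov->u.tof_params.retry_cnt = 3;	/* retransmit attempts per ftm frame */
+}
+#endif	/* example only */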
+
+#define PROXD_COLLECT_GET_STATUS	0
+#define PROXD_COLLECT_SET_STATUS	1
+#define PROXD_COLLECT_QUERY_HEADER	2
+#define PROXD_COLLECT_QUERY_DATA	3
+#define PROXD_COLLECT_QUERY_DEBUG	4
+#define PROXD_COLLECT_REMOTE_REQUEST	5
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_query {
+	uint32		method;		/* method */
+	uint8		request;	/* Query request. */
+	uint8		status;		/* 0 -- disable, 1 -- enable collection, */
+					/* 2 -- enable collection & debug */
+	uint16		index;		/* The current frame index [0 to total_frames - 1]. */
+	uint16		mode;		/* Initiator or Target */
+	bool		busy;		/* tof sm is busy */
+	bool		remote;		/* Remote collect data */
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_query_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_header {
+	uint16		total_frames;			/* The total frames for this collect. */
+	uint16		nfft;				/* nfft value */
+	uint16		bandwidth;			/* bandwidth */
+	uint16		channel;			/* channel number */
+	uint32		chanspec;			/* channel spec */
+	uint32		fpfactor;			/* avb timer value factor */
+	uint16		fpfactor_shift;			/* avb timer value shift bits */
+	int32		distance;			/* distance calculated by fw */
+	uint32		meanrtt;			/* mean of RTTs */
+	uint32		modertt;			/* mode of RTTs */
+	uint32		medianrtt;			/* median of RTTs */
+	uint32		sdrtt;				/* standard deviation of RTTs */
+	uint32		clkdivisor;			/* clock divisor */
+	uint16		chipnum;			/* chip type */
+	uint8		chiprev;			/* chip revision */
+	uint8		phyver;				/* phy version */
+	struct ether_addr	loaclMacAddr;		/* local mac address */
+	struct ether_addr	remoteMacAddr;		/* remote mac address */
+	wl_proxd_params_tof_tune_t params;
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t;
+
+
+/*  ********************** NAN wl interface struct types and defs ******************** */
+
+#define WL_NAN_IOCTL_VERSION	0x1
+
+/*   wl_nan_sub_cmd may also be used in dhd  */
+typedef struct wl_nan_sub_cmd wl_nan_sub_cmd_t;
+typedef int (cmd_handler_t)(void *wl, const wl_nan_sub_cmd_t *cmd, char **argv);
+/* nan cmd list entry  */
+struct wl_nan_sub_cmd {
+	char *name;
+	uint8  version;		/* cmd  version */
+	uint16 id;			/* id for the dongle f/w switch/case  */
+	uint16 type;		/* base type of argument */
+	cmd_handler_t *handler; /* cmd handler  */
+};
+
+/* container for NAN ioctls & events */
+typedef BWL_PRE_PACKED_STRUCT struct wl_nan_ioc {
+	uint16	version;	/* interface command or event version */
+	uint16	id;			/* nan ioctl cmd  ID  */
+	uint16	len;		/* total length of all tlv records in data[]  */
+	uint8	data [1];	/* var len payload of bcm_xtlv_t type */
+} BWL_POST_PACKED_STRUCT wl_nan_ioc_t;
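+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): filling the
+ * wl_nan_ioc_t container.  'xtlv_blob' stands for a hypothetical buffer of
+ * already-packed bcm_xtlv_t records, and the caller is assumed to have
+ * allocated the container with room for xtlv_len bytes past data[].
+ */
+#if 0	/* example only */
+static void
+nan_ioc_fill_example(wl_nan_ioc_t *ioc, uint16 cmd_id,
+	const uint8 *xtlv_blob, uint16 xtlv_len)
+{
+	uint16 i;
+
+	ioc->version = WL_NAN_IOCTL_VERSION;
+	ioc->id = cmd_id;	/* one of the wl_nan_cmds values below */
+	ioc->len = xtlv_len;	/* total length of the tlv records in data[] */
+	for (i = 0; i < xtlv_len; i++)
+		ioc->data[i] = xtlv_blob[i];
+}
+#endif	/* example only */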
+
+typedef struct wl_nan_status {
+	uint8 inited;
+	uint8 joined;
+	uint8 role;
+	uint8 hop_count;
+	uint32 chspec;
+	uint8 amr[8];			/* Anchor Master Rank */
+	uint32 cnt_pend_txfrm;		/* pending TX frames */
+	uint32 cnt_bcn_tx;		/* TX disc/sync beacon count */
+	uint32 cnt_bcn_rx;		/* RX disc/sync beacon count */
+	uint32 cnt_svc_disc_tx;		/* TX svc disc frame count */
+	uint32 cnt_svc_disc_rx;		/* RX svc disc frame count */
+	struct ether_addr cid;
+} wl_nan_status_t;
+
+/* various params and ctl switches for nan_debug instance */
+typedef struct nan_debug_params {
+	uint8	enabled; /* runtime debugging enabled */
+	uint8	collect; /* enables debug svc sdf monitor mode  */
+	uint16	cmd;	/* debug cmd to perform a debug action */
+	uint32	msglevel; /* msg level if enabled */
+	uint16	status;
+} nan_debug_params_t;
+
+
+/* nan passive scan params */
+#define NAN_SCAN_MAX_CHCNT 8
+typedef BWL_PRE_PACKED_STRUCT struct nan_scan_params {
+	uint16 scan_time;
+	uint16 home_time;
+	uint16 chspec_num;
+	chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /* 3 actually used, 5 reserved for future use */
+} BWL_POST_PACKED_STRUCT nan_scan_params_t;
+
+enum wl_nan_role {
+	WL_NAN_ROLE_AUTO = 0,
+	WL_NAN_ROLE_NON_MASTER_NON_SYNC = 1,
+	WL_NAN_ROLE_NON_MASTER_SYNC = 2,
+	WL_NAN_ROLE_MASTER = 3,
+	WL_NAN_ROLE_ANCHOR_MASTER = 4
+};
+#define NAN_MASTER_RANK_LEN 8
+/* nan cmd IDs */
+enum wl_nan_cmds {
+	 /* nan cfg /disc & dbg ioctls */
+	WL_NAN_CMD_ENABLE = 1,
+	WL_NAN_CMD_ATTR = 2,
+	WL_NAN_CMD_NAN_JOIN = 3,
+	WL_NAN_CMD_LEAVE = 4,
+	WL_NAN_CMD_MERGE = 5,
+	WL_NAN_CMD_STATUS = 6,
+	/*  discovery engine commands */
+	WL_NAN_CMD_PUBLISH = 20,
+	WL_NAN_CMD_SUBSCRIBE = 21,
+	WL_NAN_CMD_CANCEL_PUBLISH = 22,
+	WL_NAN_CMD_CANCEL_SUBSCRIBE = 23,
+	WL_NAN_CMD_TRANSMIT = 24,
+	WL_NAN_CMD_CONNECTION = 25,
+	WL_NAN_CMD_SHOW = 26,
+	WL_NAN_CMD_STOP = 27,	/* stop nan for a given cluster ID  */
+	/*  nan debug iovars & cmds  */
+	WL_NAN_CMD_SCAN_PARAMS = 46,
+	WL_NAN_CMD_SCAN = 47,
+	WL_NAN_CMD_SCAN_RESULTS = 48,
+	WL_NAN_CMD_EVENT_MASK = 49,
+	WL_NAN_CMD_EVENT_CHECK = 50,
+
+	WL_NAN_CMD_DEBUG = 60,
+	WL_NAN_CMD_TEST1 = 61,
+	WL_NAN_CMD_TEST2 = 62,
+	WL_NAN_CMD_TEST3 = 63
+};
+
+/*
+ * tlv IDs uniquely identifies  cmd parameters
+ * packed into wl_nan_ioc_t container
+ */
+enum wl_nan_cmd_xtlv_id {
+	/* 0x00 ~ 0xFF: standard TLV ID whose data format is the same as NAN attribute TLV */
+	WL_NAN_XTLV_ZERO = 0,		/* used as tlv buf end marker */
+#ifdef NAN_STD_TLV 				/* rfu, don't use yet */
+	WL_NAN_XTLV_MASTER_IND = 1, /* == NAN_ATTR_MASTER_IND, */
+	WL_NAN_XTLV_CLUSTER = 2,	/* == NAN_ATTR_CLUSTER, */
+	WL_NAN_XTLV_VENDOR = 221,	/* == NAN_ATTR_VENDOR, */
+#endif
+	/* 0x02 ~ 0xFF: reserved. In case to use with the same data format as NAN attribute TLV */
+	/* 0x100 ~ : private TLV ID defined just for NAN command */
+	/* common types */
+	WL_NAN_XTLV_BUFFER = 0x101, /* generic type, function depends on cmd context */
+	WL_NAN_XTLV_MAC_ADDR = 0x102,	/* used in various cmds */
+	WL_NAN_XTLV_REASON = 0x103,
+	WL_NAN_XTLV_ENABLE = 0x104,
+	/* explicit types, primarily for discovery engine iovars  */
+	WL_NAN_XTLV_SVC_PARAMS = 0x120,     /* Contains required params: wl_nan_disc_params_t */
+	WL_NAN_XTLV_MATCH_RX = 0x121,       /* Matching filter to evaluate on receive */
+	WL_NAN_XTLV_MATCH_TX = 0x122,       /* Matching filter to send */
+	WL_NAN_XTLV_SVC_INFO = 0x123,       /* Service specific info */
+	WL_NAN_XTLV_SVC_NAME = 0x124,       /* Optional UTF-8 service name, for debugging. */
+	WL_NAN_XTLV_INSTANCE_ID = 0x125,    /* Identifies unique publish or subscribe instance */
+	WL_NAN_XTLV_PRIORITY = 0x126,       /* used in transmit cmd context */
+	WL_NAN_XTLV_REQUESTOR_ID = 0x127,	/* Requestor instance ID */
+	WL_NAN_XTLV_VNDR = 0x128,		/* Vendor specific attribute */
+	/* explicit types, primarily for NAN MAC iovars   */
+	WL_NAN_XTLV_DW_LEN = 0x140,            /* discovery win length */
+	WL_NAN_XTLV_BCN_INTERVAL = 0x141,      /* beacon interval, both sync and discovery bcns?  */
+	WL_NAN_XTLV_CLUSTER_ID = 0x142,
+	WL_NAN_XTLV_IF_ADDR = 0x143,
+	WL_NAN_XTLV_MC_ADDR = 0x144,
+	WL_NAN_XTLV_ROLE = 0x145,
+	WL_NAN_XTLV_START = 0x146,
+
+	WL_NAN_XTLV_MASTER_PREF = 0x147,
+	WL_NAN_XTLV_DW_INTERVAL = 0x148,
+	WL_NAN_XTLV_PTBTT_OVERRIDE = 0x149,
+	/*  nan status command xtlvs  */
+	WL_NAN_XTLV_MAC_INITED = 0x14a,
+	WL_NAN_XTLV_MAC_ENABLED = 0x14b,
+	WL_NAN_XTLV_MAC_CHANSPEC = 0x14c,
+	WL_NAN_XTLV_MAC_AMR = 0x14d,	/* anchormaster rank u8 amr[8] */
+	WL_NAN_XTLV_MAC_HOPCNT = 0x14e,
+	WL_NAN_XTLV_MAC_AMBTT = 0x14f,
+	WL_NAN_XTLV_MAC_TXRATE = 0x150,
+	WL_NAN_XTLV_MAC_STATUS = 0x151,  /* xtlv payload is nan_status_t */
+	WL_NAN_XTLV_NAN_SCANPARAMS = 0x152,  /* payload is nan_scan_params_t */
+	WL_NAN_XTLV_DEBUGPARAMS = 0x153,  /* payload is nan_debug_params_t */
+	WL_NAN_XTLV_SUBSCR_ID = 0x154,   /* subscriber id  */
+	WL_NAN_XTLV_PUBLR_ID = 0x155,	/* publisher id */
+	WL_NAN_XTLV_EVENT_MASK = 0x156,
+	WL_NAN_XTLV_MERGE = 0x157
+};
+
+/* Flag bits for Publish and Subscribe (wl_nan_disc_params_t flags) */
+#define WL_NAN_RANGE_LIMITED           0x0040
+/* Bits specific to Publish */
+/* Unsolicited transmissions */
+#define WL_NAN_PUB_UNSOLICIT           0x1000
+/* Solicited transmissions */
+#define WL_NAN_PUB_SOLICIT             0x2000
+#define WL_NAN_PUB_BOTH                0x3000
+/* Set for broadcast solicited transmission
+ * Do not set for unicast solicited transmission
+ */
+#define WL_NAN_PUB_BCAST               0x4000
+/* Generate event on each solicited transmission */
+#define WL_NAN_PUB_EVENT               0x8000
+/* Used for one-time solicited Publish functions to indicate transmission occurred */
+#define WL_NAN_PUB_SOLICIT_PENDING	0x10000
+/* Follow-up frames */
+#define WL_NAN_FOLLOWUP			0x20000
+/* Bits specific to Subscribe */
+/* Active subscribe mode (Leave unset for passive) */
+#define WL_NAN_SUB_ACTIVE              0x1000
+
+/* Special values for time to live (ttl) parameter */
+#define WL_NAN_TTL_UNTIL_CANCEL	0xFFFFFFFF
+/* Publish -  runs until first transmission
+ * Subscribe - runs until first  DiscoveryResult event
+ */
+#define WL_NAN_TTL_FIRST	0
+
+/* The service hash (service id) is exactly this many bytes. */
+#define WL_NAN_SVC_HASH_LEN	6
+
+/* Instance ID type (unique identifier) */
+typedef uint8 wl_nan_instance_id_t;
+
+/* Mandatory parameters for publish/subscribe iovars - NAN_TLV_SVC_PARAMS */
+typedef struct wl_nan_disc_params_s {
+	/* Periodicity of unsolicited/query transmissions, in DWs */
+	uint32 period;
+	/* Time to live in DWs */
+	uint32 ttl;
+	/* Flag bits */
+	uint32 flags;
+	/* Publish or subscribe service id, i.e. hash of the service name */
+	uint8 svc_hash[WL_NAN_SVC_HASH_LEN];
+	/* Publish or subscribe id */
+	wl_nan_instance_id_t instance_id;
+} wl_nan_disc_params_t;
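+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): filling
+ * wl_nan_disc_params_t for an unsolicited broadcast publish that runs until
+ * cancelled.  The period of 1 DW is an arbitrary example value.
+ */
+#if 0	/* example only */
+static void
+nan_publish_params_example(wl_nan_disc_params_t *p,
+	const uint8 *svc_hash, wl_nan_instance_id_t id)
+{
+	int i;
+
+	p->period = 1;				/* transmit every discovery window */
+	p->ttl = WL_NAN_TTL_UNTIL_CANCEL;	/* run until explicitly cancelled */
+	p->flags = WL_NAN_PUB_UNSOLICIT | WL_NAN_PUB_BCAST;
+	for (i = 0; i < WL_NAN_SVC_HASH_LEN; i++)
+		p->svc_hash[i] = svc_hash[i];	/* hash of the service name */
+	p->instance_id = id;
+}
+#endif	/* example only */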
+
+/* discovery interface event structures */
+
+/* NAN Ranging */
+
+/* Bit defines for global flags */
+#define WL_NAN_RANGING_ENABLE		1 /* enable RTT */
+#define WL_NAN_RANGING_RANGED		2 /* Report to host if ranged as target */
+typedef struct nan_ranging_config {
+	uint32 chanspec;		/* Ranging chanspec */
+	uint16 timeslot;		/* NAN RTT start time slot  1-511 */
+	uint16 duration;		/* NAN RTT duration in ms */
+	struct ether_addr allow_mac;	/* peer initiated ranging: the allowed peer mac
+					 * address, a unicast (for one peer) or
+					 * a broadcast for all. Setting it to all zeros
+					 * means responding to none, same as not setting
+					 * the flag bit NAN_RANGING_RESPOND
+					 */
+	uint16 flags;
+} wl_nan_ranging_config_t;
+
+/* list of peers for self initiated ranging */
+/* Bit defines for per peer flags */
+#define WL_NAN_RANGING_REPORT (1<<0)	/* Enable reporting range to target */
+typedef struct nan_ranging_peer {
+	uint32 chanspec;		/* desired chanspec for this peer */
+	uint32 abitmap;			/* available bitmap */
+	struct ether_addr ea;		/* peer MAC address */
+	uint8 frmcnt;			/* frame count */
+	uint8 retrycnt;			/* retry count */
+	uint16 flags;			/* per peer flags, report or not */
+} wl_nan_ranging_peer_t;
+typedef struct nan_ranging_list {
+	uint8 count;			/* number of MAC addresses */
+	uint8 num_peers_done;		/* host set to 0, when read, shows number of peers
+					 * completed, success or fail
+					 */
+	uint8 num_dws;			/* time period to do the ranging, specified in dws */
+	uint8 reserve;			/* reserved field */
+	wl_nan_ranging_peer_t rp[1];	/* variable length array of peers */
+} wl_nan_ranging_list_t;
+
+/* ranging results, a list for self initiated ranging and one for peer initiated ranging */
+/* There will be one structure for each peer */
+#define WL_NAN_RANGING_STATUS_SUCCESS		1
+#define WL_NAN_RANGING_STATUS_FAIL			2
+#define WL_NAN_RANGING_STATUS_TIMEOUT		3
+#define WL_NAN_RANGING_STATUS_ABORT		4 /* with partial results if sounding count > 0 */
+typedef struct nan_ranging_result {
+	uint8 status;			/* 1: Success, 2: Fail, 3: Timeout, 4: Aborted */
+	uint8 sounding_count;		/* number of measurements completed (0 = failure) */
+	struct ether_addr ea;		/* initiator MAC address */
+	uint32 chanspec;		/* Chanspec where the ranging was done */
+	uint32 timestamp;		/* 32bits of the TSF timestamp ranging was completed at */
+	uint32 distance;		/* mean distance in meters expressed as Q4 number.
+					 * Only valid when sounding_count > 0. Examples:
+					 * 0x08 = 0.5m
+					 * 0x10 = 1m
+					 * 0x18 = 1.5m
+					 * set to 0xffffffff to indicate invalid number
+					 */
+	int32 rtt_var;			/* standard deviation in 10th of ns of RTTs measured.
+					 * Only valid when sounding_count > 0
+					 */
+	struct ether_addr tgtea;	/* target MAC address */
+} wl_nan_ranging_result_t;
+typedef struct nan_ranging_event_data {
+	uint8 mode;			/* 1: Result of host initiated ranging */
+					/* 2: Result of peer initiated ranging */
+	uint8 reserved;
+	uint8 success_count;		/* number of peers completed successfully */
+	uint8 count;			/* number of peers in the list */
+	wl_nan_ranging_result_t rr[1];	/* variable array of ranging peers */
+} wl_nan_ranging_event_data_t;
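+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): walking a ranging
+ * event and converting the Q4 fixed-point distance (4 fractional bits) to
+ * centimeters, e.g. 0x18 -> 150 cm (1.5 m).
+ */
+#if 0	/* example only */
+static void
+nan_ranging_event_walk_example(const wl_nan_ranging_event_data_t *ev)
+{
+	uint8 i;
+
+	for (i = 0; i < ev->count; i++) {
+		const wl_nan_ranging_result_t *r = &ev->rr[i];
+
+		if (r->status == WL_NAN_RANGING_STATUS_SUCCESS &&
+		    r->sounding_count > 0 && r->distance != 0xffffffff) {
+			uint32 cm = (r->distance * 100) >> 4;
+			(void)cm;
+		}
+	}
+}
+#endif	/* example only */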
+
+/* ********************* end of NAN section ******************************** */
+
+
+#define RSSI_THRESHOLD_SIZE 16
+#define MAX_IMP_RESP_SIZE 256
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias {
+	int32		version;			/* version */
+	int32		threshold[RSSI_THRESHOLD_SIZE];	/* threshold */
+	int32		peak_offset;		/* peak offset */
+	int32		bias;				/* rssi bias */
+	int32		gd_delta;			/* GD - GD_ADJ */
+	int32		imp_resp[MAX_IMP_RESP_SIZE];	/* (Hi*Hi)+(Hr*Hr) */
+} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias_avg {
+	int32		avg_threshold[RSSI_THRESHOLD_SIZE];	/* avg threshold */
+	int32		avg_peak_offset;			/* avg peak offset */
+	int32		avg_rssi;				/* avg rssi */
+	int32		avg_bias;				/* avg bias */
+} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_avg_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_info {
+	uint16		type;	 /* type: 0 channel table, 1 channel smoothing table, 2 and 3 seq */
+	uint16		index;		/* The current frame index, from 1 to total_frames. */
+	uint16		tof_cmd;	/* M_TOF_CMD      */
+	uint16		tof_rsp;	/* M_TOF_RSP      */
+	uint16		tof_avb_rxl;	/* M_TOF_AVB_RX_L */
+	uint16		tof_avb_rxh;	/* M_TOF_AVB_RX_H */
+	uint16		tof_avb_txl;	/* M_TOF_AVB_TX_L */
+	uint16		tof_avb_txh;	/* M_TOF_AVB_TX_H */
+	uint16		tof_id;		/* M_TOF_ID */
+	uint8		tof_frame_type;
+	uint8		tof_frame_bw;
+	int8		tof_rssi;
+	int32		tof_cfo;
+	int32		gd_adj_ns;	/* group delay */
+	int32		gd_h_adj_ns;	/* group delay + threshold crossing */
+#ifdef RSSI_REFINE
+	wl_proxd_rssi_bias_t rssi_bias; /* RSSI refinement info */
+#endif
+	int16		nfft;		/* number of samples stored in H */
+
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_info_t;
+
+#define k_tof_collect_H_pad  1
+#define k_tof_collect_H_size (256+16+k_tof_collect_H_pad)
+#define k_tof_collect_Hraw_size (2*k_tof_collect_H_size)
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data {
+	wl_proxd_collect_info_t  info;
+	uint32	H[k_tof_collect_H_size]; /* raw data read from phy used to adjust timestamps */
+
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_debug_data {
+	uint8		count;		/* number of packets */
+	uint8		stage;		/* state machine stage */
+	uint8		received;	/* received or txed */
+	uint8		paket_type;	/* packet type */
+	uint8		category;	/* category field */
+	uint8		action;		/* action field */
+	uint8		token;		/* token number */
+	uint8		follow_token;	/* following token number */
+	uint16		index;		/* index of the packet */
+	uint16		tof_cmd;	/* M_TOF_CMD */
+	uint16		tof_rsp;	/* M_TOF_RSP */
+	uint16		tof_avb_rxl;	/* M_TOF_AVB_RX_L */
+	uint16		tof_avb_rxh;	/* M_TOF_AVB_RX_H */
+	uint16		tof_avb_txl;	/* M_TOF_AVB_TX_L */
+	uint16		tof_avb_txh;	/* M_TOF_AVB_TX_H */
+	uint16		tof_id;		/* M_TOF_ID */
+	uint16		tof_status0;	/* M_TOF_STATUS_0 */
+	uint16		tof_status2;	/* M_TOF_STATUS_2 */
+	uint16		tof_chsm0;	/* M_TOF_CHNSM_0 */
+	uint16		tof_phyctl0;	/* M_TOF_PHYCTL0 */
+	uint16		tof_phyctl1;	/* M_TOF_PHYCTL1 */
+	uint16		tof_phyctl2;	/* M_TOF_PHYCTL2 */
+	uint16		tof_lsig;	/* M_TOF_LSIG */
+	uint16		tof_vhta0;	/* M_TOF_VHTA0 */
+	uint16		tof_vhta1;	/* M_TOF_VHTA1 */
+	uint16		tof_vhta2;	/* M_TOF_VHTA2 */
+	uint16		tof_vhtb0;	/* M_TOF_VHTB0 */
+	uint16		tof_vhtb1;	/* M_TOF_VHTB1 */
+	uint16		tof_apmductl;	/* M_TOF_AMPDU_CTL */
+	uint16		tof_apmdudlim;	/* M_TOF_AMPDU_DLIM */
+	uint16		tof_apmdulen;	/* M_TOF_AMPDU_LEN */
+} BWL_POST_PACKED_STRUCT wl_proxd_debug_data_t;
+
+/* version of the wl_wsec_info structure */
+#define WL_WSEC_INFO_VERSION 0x01
+
+/* start enum value for BSS properties */
+#define WL_WSEC_INFO_BSS_BASE 0x0100
+
+/* size of len and type fields of wl_wsec_info_tlv_t struct */
+#define WL_WSEC_INFO_TLV_HDR_LEN OFFSETOF(wl_wsec_info_tlv_t, data)
+
+/* Allowed wl_wsec_info properties; not all of them may be supported. */
+typedef enum {
+	WL_WSEC_INFO_NONE = 0,
+	WL_WSEC_INFO_MAX_KEYS = 1,
+	WL_WSEC_INFO_NUM_KEYS = 2,
+	WL_WSEC_INFO_NUM_HW_KEYS = 3,
+	WL_WSEC_INFO_MAX_KEY_IDX = 4,
+	WL_WSEC_INFO_NUM_REPLAY_CNTRS = 5,
+	WL_WSEC_INFO_SUPPORTED_ALGOS = 6,
+	WL_WSEC_INFO_MAX_KEY_LEN = 7,
+	WL_WSEC_INFO_FLAGS = 8,
+	/* add global/per-wlc properties above */
+	WL_WSEC_INFO_BSS_FLAGS = (WL_WSEC_INFO_BSS_BASE + 1),
+	WL_WSEC_INFO_BSS_WSEC = (WL_WSEC_INFO_BSS_BASE + 2),
+	WL_WSEC_INFO_BSS_TX_KEY_ID = (WL_WSEC_INFO_BSS_BASE + 3),
+	WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4),
+	WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5),
+	/* add per-BSS properties above */
+	WL_WSEC_INFO_MAX = 0xffff
+} wl_wsec_info_type_t;
+
+/* tlv used to return wl_wsec_info properties */
+typedef struct {
+	uint16 type;
+	uint16 len;		/* data length */
+	uint8 data[1];	/* data follows */
+} wl_wsec_info_tlv_t;
+
+/* input/output data type for wsec_info iovar */
+typedef struct wl_wsec_info {
+	uint8 version; /* structure version */
+	uint8 pad[2];
+	uint8 num_tlvs;
+	wl_wsec_info_tlv_t tlvs[1]; /* tlv data follows */
+} wl_wsec_info_t;
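+/* Minimal sketch (an assumption, not defined by this header) of walking the
+ * TLVs returned by the wsec_info iovar:
+ *
+ *	wl_wsec_info_tlv_t *tlv = info->tlvs;
+ *	uint8 i;
+ *	for (i = 0; i < info->num_tlvs; i++) {
+ *		// tlv->type is a wl_wsec_info_type_t; tlv->data holds tlv->len bytes
+ *		tlv = (wl_wsec_info_tlv_t *)((uint8 *)tlv +
+ *			WL_WSEC_INFO_TLV_HDR_LEN + tlv->len);
+ *	}
+ */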
+
+/* no default structure packing */
+#include <packed_section_end.h>
+
+enum rssi_reason {
+	RSSI_REASON_UNKNOW = 0,
+	RSSI_REASON_LOWRSSI = 1,
+	RSSI_REASON_NSYC = 2,
+	RSSI_REASON_TIMEOUT = 3
+};
+
+enum tof_reason {
+	TOF_REASON_OK = 0,
+	TOF_REASON_REQEND = 1,
+	TOF_REASON_TIMEOUT = 2,
+	TOF_REASON_NOACK = 3,
+	TOF_REASON_INVALIDAVB = 4,
+	TOF_REASON_INITIAL = 5,
+	TOF_REASON_ABORT = 6
+};
+
+enum rssi_state {
+	RSSI_STATE_POLL = 0,
+	RSSI_STATE_TPAIRING = 1,
+	RSSI_STATE_IPAIRING = 2,
+	RSSI_STATE_THANDSHAKE = 3,
+	RSSI_STATE_IHANDSHAKE = 4,
+	RSSI_STATE_CONFIRMED = 5,
+	RSSI_STATE_PIPELINE = 6,
+	RSSI_STATE_NEGMODE = 7,
+	RSSI_STATE_MONITOR = 8,
+	RSSI_STATE_LAST = 9
+};
+
+enum tof_state {
+	TOF_STATE_IDLE	 = 0,
+	TOF_STATE_IWAITM = 1,
+	TOF_STATE_TWAITM = 2,
+	TOF_STATE_ILEGACY = 3,
+	TOF_STATE_IWAITCL = 4,
+	TOF_STATE_TWAITCL = 5,
+	TOF_STATE_ICONFIRM = 6,
+	TOF_STATE_IREPORT = 7
+};
+
+enum tof_mode_type {
+	TOF_LEGACY_UNKNOWN	= 0,
+	TOF_LEGACY_AP		= 1,
+	TOF_NONLEGACY_AP	= 2
+};
+
+enum tof_way_type {
+	TOF_TYPE_ONE_WAY = 0,
+	TOF_TYPE_TWO_WAY = 1,
+	TOF_TYPE_REPORT = 2
+};
+
+enum tof_rate_type {
+	TOF_FRAME_RATE_VHT = 0,
+	TOF_FRAME_RATE_LEGACY = 1
+};
+
+#define TOF_ADJ_TYPE_NUM	4	/* number of timestamp adjustment types */
+enum tof_adj_mode {
+	TOF_ADJ_SOFTWARE = 0,
+	TOF_ADJ_HARDWARE = 1,
+	TOF_ADJ_SEQ = 2,
+	TOF_ADJ_NONE = 3
+};
+
+#define FRAME_TYPE_NUM		4	/* number of frame types */
+enum frame_type {
+	FRAME_TYPE_CCK	= 0,
+	FRAME_TYPE_OFDM	= 1,
+	FRAME_TYPE_11N	= 2,
+	FRAME_TYPE_11AC	= 3
+};
+
+typedef struct wl_proxd_status_iovar {
+	uint16			method;				/* method */
+	uint8			mode;				/* mode */
+	uint8			peermode;			/* peer mode */
+	uint8			state;				/* state */
+	uint8			reason;				/* reason code */
+	uint32			distance;			/* distance */
+	uint32			txcnt;				/* tx pkt counter */
+	uint32			rxcnt;				/* rx pkt counter */
+	struct ether_addr	peer;				/* peer mac address */
+	int8			avg_rssi;			/* average rssi */
+	int8			hi_rssi;			/* highest rssi */
+	int8			low_rssi;			/* lowest rssi */
+	uint32			dbgstatus;			/* debug status */
+	uint16			frame_type_cnt[FRAME_TYPE_NUM];	/* frame types */
+	uint8			adj_type_cnt[TOF_ADJ_TYPE_NUM];	/* adj types HW/SW */
+} wl_proxd_status_iovar_t;
+
+#ifdef NET_DETECT
+typedef struct net_detect_adapter_features {
+	bool	wowl_enabled;
+	bool	net_detect_enabled;
+	bool	nlo_enabled;
+} net_detect_adapter_features_t;
+
+typedef enum net_detect_bss_type {
+	nd_bss_any = 0,
+	nd_ibss,
+	nd_ess
+} net_detect_bss_type_t;
+
+typedef struct net_detect_profile {
+	wlc_ssid_t		ssid;
+	net_detect_bss_type_t   bss_type;	/* Ignore for now since Phase 1 is only for ESS */
+	uint32			cipher_type;	/* DOT11_CIPHER_ALGORITHM enumeration values */
+	uint32			auth_type;	/* DOT11_AUTH_ALGORITHM enumeration values */
+} net_detect_profile_t;
+
+typedef struct net_detect_profile_list {
+	uint32			num_nd_profiles;
+	net_detect_profile_t	nd_profile[0];
+} net_detect_profile_list_t;
+
+typedef struct net_detect_config {
+	bool			    nd_enabled;
+	uint32			    scan_interval;
+	uint32			    wait_period;
+	bool			    wake_if_connected;
+	bool			    wake_if_disconnected;
+	net_detect_profile_list_t   nd_profile_list;
+} net_detect_config_t;
+
+typedef enum net_detect_wake_reason {
+	nd_reason_unknown,
+	nd_net_detected,
+	nd_wowl_event,
+	nd_ucode_error
+} net_detect_wake_reason_t;
+
+typedef struct net_detect_wake_data {
+	net_detect_wake_reason_t    nd_wake_reason;
+	uint32			    nd_wake_date_length;
+	uint8			    nd_wake_data[0];	    /* Wake data (currently unused) */
+} net_detect_wake_data_t;
+
+#endif /* NET_DETECT */
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+typedef struct bcnreq {
+	uint8 bcn_mode;
+	int dur;
+	int channel;
+	struct ether_addr da;
+	uint16 random_int;
+	wlc_ssid_t ssid;
+	uint16 reps;
+} bcnreq_t;
+
+typedef struct rrmreq {
+	struct ether_addr da;
+	uint8 reg;
+	uint8 chan;
+	uint16 random_int;
+	uint16 dur;
+	uint16 reps;
+} rrmreq_t;
+
+typedef struct framereq {
+	struct ether_addr da;
+	uint8 reg;
+	uint8 chan;
+	uint16 random_int;
+	uint16 dur;
+	struct ether_addr ta;
+	uint16 reps;
+} framereq_t;
+
+typedef struct statreq {
+	struct ether_addr da;
+	struct ether_addr peer;
+	uint16 random_int;
+	uint16 dur;
+	uint8 group_id;
+	uint16 reps;
+} statreq_t;
+
+#define WL_RRM_RPT_VER		0
+#define WL_RRM_RPT_MAX_PAYLOAD	64
+#define WL_RRM_RPT_MIN_PAYLOAD	7
+#define WL_RRM_RPT_FALG_ERR	0
+#define WL_RRM_RPT_FALG_OK	1
+typedef struct {
+	uint16 ver;		/* version */
+	struct ether_addr addr;	/* STA MAC addr */
+	uint32 timestamp;	/* timestamp of the report */
+	uint16 flag;		/* flag */
+	uint16 len;		/* length of payload data */
+	unsigned char data[WL_RRM_RPT_MAX_PAYLOAD];
+} statrpt_t;
+
+typedef struct wlc_l2keepalive_ol_params {
+	uint8	flags;
+	uint8	prio;
+	uint16	period_ms;
+} wlc_l2keepalive_ol_params_t;
+
+typedef struct wlc_dwds_config {
+	uint32		enable;
+	uint32		mode; /* STA/AP interface */
+	struct ether_addr ea;
+} wlc_dwds_config_t;
+
+typedef struct wl_el_set_params_s {
+	uint8 set;	/* Set number */
+	uint32 size;	/* Size to make/expand */
+} wl_el_set_params_t;
+
+typedef struct wl_el_tag_params_s {
+	uint16 tag;
+	uint8 set;
+	uint8 flags;
+} wl_el_tag_params_t;
+
+/* Video Traffic Interference Monitor config */
+#define INTFER_VERSION		1
+typedef struct wl_intfer_params {
+	uint16 version;			/* version */
+	uint8 period;			/* sample period */
+	uint8 cnt;			/* sample cnt */
+	uint8 txfail_thresh;	/* non-TCP txfail threshold */
+	uint8 tcptxfail_thresh;	/* tcptxfail threshold */
+} wl_intfer_params_t;
+
+typedef struct wl_staprio_cfg {
+	struct ether_addr ea;	/* mac addr */
+	uint8 prio;		/* scb priority */
+} wl_staprio_cfg_t;
+
+typedef enum wl_stamon_cfg_cmd_type {
+	STAMON_CFG_CMD_DEL = 0,
+	STAMON_CFG_CMD_ADD = 1
+} wl_stamon_cfg_cmd_type_t;
+
+typedef struct wlc_stamon_sta_config {
+	wl_stamon_cfg_cmd_type_t cmd; /* 0 - delete, 1 - add */
+	struct ether_addr ea;
+} wlc_stamon_sta_config_t;
+
+#ifdef SR_DEBUG
+typedef struct /* pmu_reg */{
+	uint32  pmu_control;
+	uint32  pmu_capabilities;
+	uint32  pmu_status;
+	uint32  res_state;
+	uint32  res_pending;
+	uint32  pmu_timer1;
+	uint32  min_res_mask;
+	uint32  max_res_mask;
+	uint32  pmu_chipcontrol1[4];
+	uint32  pmu_regcontrol[5];
+	uint32  pmu_pllcontrol[5];
+	uint32  pmu_rsrc_up_down_timer[31];
+	uint32  rsrc_dep_mask[31];
+} pmu_reg_t;
+#endif /* SR_DEBUG */
+
+typedef struct wl_taf_define {
+	struct ether_addr ea;	/* STA MAC or 0xFF... */
+	uint16 version;         /* version */
+	uint32 sch;             /* method index */
+	uint32 prio;            /* priority */
+	uint32 misc;            /* used for return value */
+	char   text[1];         /* used to pass and return ascii text */
+} wl_taf_define_t;
+
+/* Received beacon length information */
+#define WL_LAST_BCNS_INFO_FIXED_LEN		OFFSETOF(wlc_bcn_len_hist_t, bcnlen_ring)
+typedef struct wlc_bcn_len_hist {
+	uint16	ver;				/* version field */
+	uint16	cur_index;			/* current pointed index in ring buffer */
+	uint32	max_bcnlen;		/* Max beacon length received */
+	uint32	min_bcnlen;		/* Min beacon length received */
+	uint32	ringbuff_len;		/* Length of the ring buffer 'bcnlen_ring' */
+	uint32	bcnlen_ring[1];	/* ring buffer storing received beacon lengths */
+} wlc_bcn_len_hist_t;
+
+/* WDS net interface types */
+#define WL_WDSIFTYPE_NONE  0x0 /* The interface type is neither WDS nor DWDS. */
+#define WL_WDSIFTYPE_WDS   0x1 /* The interface is WDS type. */
+#define WL_WDSIFTYPE_DWDS  0x2 /* The interface is DWDS type. */
+
+typedef struct wl_bssload_static {
+	bool is_static;
+	uint16 sta_count;
+	uint8 chan_util;
+	uint16 aac;
+} wl_bssload_static_t;
+
+
+/* LTE coex info */
+/* Analogue of HCI Set MWS Signaling cmd */
+typedef struct {
+	uint16	mws_rx_assert_offset;
+	uint16	mws_rx_assert_jitter;
+	uint16	mws_rx_deassert_offset;
+	uint16	mws_rx_deassert_jitter;
+	uint16	mws_tx_assert_offset;
+	uint16	mws_tx_assert_jitter;
+	uint16	mws_tx_deassert_offset;
+	uint16	mws_tx_deassert_jitter;
+	uint16	mws_pattern_assert_offset;
+	uint16	mws_pattern_assert_jitter;
+	uint16	mws_inact_dur_assert_offset;
+	uint16	mws_inact_dur_assert_jitter;
+	uint16	mws_scan_freq_assert_offset;
+	uint16	mws_scan_freq_assert_jitter;
+	uint16	mws_prio_assert_offset_req;
+} wci2_config_t;
+
+/* Analogue of HCI MWS Channel Params */
+typedef struct {
+	uint16	mws_rx_center_freq; /* MHz */
+	uint16	mws_tx_center_freq;
+	uint16	mws_rx_channel_bw;  /* kHz */
+	uint16	mws_tx_channel_bw;
+	uint8	mws_channel_en;
+	uint8	mws_channel_type;   /* Don't care for WLAN? */
+} mws_params_t;
+
+/* MWS wci2 message */
+typedef struct {
+	uint8	mws_wci2_data; /* BT-SIG msg */
+	uint16	mws_wci2_interval; /* Interval in us */
+	uint16	mws_wci2_repeat; /* No of msgs to send */
+} mws_wci2_msg_t;
+
+typedef struct {
+	uint32 config;	/* MODE: AUTO (-1), Disable (0), Enable (1) */
+	uint32 status;	/* Current state: Disabled (0), Enabled (1) */
+} wl_config_t;
+
+#define WLC_RSDB_MODE_AUTO_MASK 0x80
+#define WLC_RSDB_EXTRACT_MODE(val) ((int8)((val) & (~(WLC_RSDB_MODE_AUTO_MASK))))
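+/* Example (illustrative): a raw value of 0x81 has the AUTO bit set and
+ * WLC_RSDB_EXTRACT_MODE(0x81) evaluates to 1, i.e. the value encodes mode 1
+ * with automatic selection enabled.
+ */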
+
+#define	WL_IF_STATS_T_VERSION 1	/* current version of wl_if_stats structure */
+
+/* per interface counters */
+typedef struct wl_if_stats {
+	uint16	version;		/* version of the structure */
+	uint16	length;			/* length of the entire structure */
+	uint32	PAD;			/* padding */
+
+	/* transmit stat counters */
+	uint64	txframe;		/* tx data frames */
+	uint64	txbyte;			/* tx data bytes */
+	uint64	txerror;		/* tx data errors (derived: sum of others) */
+	uint64  txnobuf;		/* tx out of buffer errors */
+	uint64  txrunt;			/* tx runt frames */
+	uint64  txfail;			/* tx failed frames */
+	uint64	txretry;		/* tx retry frames */
+	uint64	txretrie;		/* tx multiple retry frames */
+	uint64	txfrmsnt;		/* tx sent frames */
+	uint64	txmulti;		/* tx multicast sent frames */
+	uint64	txfrag;			/* tx fragments sent */
+
+	/* receive stat counters */
+	uint64	rxframe;		/* rx data frames */
+	uint64	rxbyte;			/* rx data bytes */
+	uint64	rxerror;		/* rx data errors (derived: sum of others) */
+	uint64	rxnobuf;		/* rx out of buffer errors */
+	uint64  rxrunt;			/* rx runt frames */
+	uint64  rxfragerr;		/* rx fragment errors */
+	uint64	rxmulti;		/* rx multicast frames */
+}
+wl_if_stats_t;
+
+typedef struct wl_band {
+	uint16		bandtype;		/* WL_BAND_2G, WL_BAND_5G */
+	uint16		bandunit;		/* bandstate[] index */
+	uint16		phytype;		/* phytype */
+	uint16		phyrev;
+}
+wl_band_t;
+
+#define	WL_WLC_VERSION_T_VERSION 1 /* current version of wlc_version structure */
+
+/* wlc interface version */
+typedef struct wl_wlc_version {
+	uint16	version;		/* version of the structure */
+	uint16	length;			/* length of the entire structure */
+
+	/* epi version numbers */
+	uint16	epi_ver_major;		/* epi major version number */
+	uint16	epi_ver_minor;		/* epi minor version number */
+	uint16	epi_rc_num;		/* epi RC number */
+	uint16	epi_incr_num;		/* epi increment number */
+
+	/* wlc interface version numbers */
+	uint16	wlc_ver_major;		/* wlc interface major version number */
+	uint16	wlc_ver_minor;		/* wlc interface minor version number */
+}
+wl_wlc_version_t;
+
+/* Version of the WLC interface returned as part of the wl_wlc_version structure.
+ * For a discussion of the version update policy, refer to
+ * http://hwnbu-twiki.broadcom.com/bin/view/Mwgroup/WlShimAbstractionLayer
+ * For now the policy is to increment WLC_VERSION_MAJOR each time there is a
+ * change that involves both the WLC layer and the per-port layer.
+ * WLC_VERSION_MINOR is currently not in use.
+ */
+#define WLC_VERSION_MAJOR	3
+#define WLC_VERSION_MINOR	0
+
+
+/* require strict packing */
+#include <packed_section_start.h>
+/* Data returned by the bssload_report iovar.
+ * This is also the WLC_E_BSS_LOAD event data.
+ */
+typedef BWL_PRE_PACKED_STRUCT struct wl_bssload {
+	uint16 sta_count;		/* station count */
+	uint16 aac;			/* available admission capacity */
+	uint8 chan_util;		/* channel utilization */
+} BWL_POST_PACKED_STRUCT wl_bssload_t;
+
+/* Maximum number of configurable BSS Load levels.  The number of BSS Load
+ * ranges is always 1 more than the number of configured levels.  eg. if
+ * 3 levels of 10, 20, 30 are configured then this defines 4 load ranges:
+ * 0-10, 11-20, 21-30, 31-255.  A WLC_E_BSS_LOAD event is generated each time
+ * the utilization level crosses into another range, subject to the rate limit.
+ */
+#define MAX_BSSLOAD_LEVELS 8
+#define MAX_BSSLOAD_RANGES (MAX_BSSLOAD_LEVELS + 1)
+
+/* BSS Load event notification configuration. */
+typedef struct wl_bssload_cfg {
+	uint32 rate_limit_msec;	/* # of events posted to application will be limited to
+				 * one per specified period (0 to disable rate limit).
+				 */
+	uint8 num_util_levels;	/* Number of entries in util_levels[] below */
+	uint8 util_levels[MAX_BSSLOAD_LEVELS];
+				/* Variable number of BSS Load utilization levels in
+				 * low to high order.  An event will be posted each time
+				 * a received beacon's BSS Load IE channel utilization
+				 * value crosses a level.
+				 */
+} wl_bssload_cfg_t;
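+/* Illustrative fill of the structure above, matching the three-level example
+ * in the MAX_BSSLOAD_LEVELS comment (a sketch, not mandated by this header):
+ *
+ *	wl_bssload_cfg_t cfg;
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.rate_limit_msec = 1000;	// at most one event per second
+ *	cfg.num_util_levels = 3;
+ *	cfg.util_levels[0] = 10;
+ *	cfg.util_levels[1] = 20;
+ *	cfg.util_levels[2] = 30;	// ranges: 0-10, 11-20, 21-30, 31-255
+ */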
+
+/* Multiple roaming profile support */
+#define WL_MAX_ROAM_PROF_BRACKETS	4
+
+#define WL_MAX_ROAM_PROF_VER	0
+
+#define WL_ROAM_PROF_NONE	(0 << 0)
+#define WL_ROAM_PROF_LAZY	(1 << 0)
+#define WL_ROAM_PROF_NO_CI	(1 << 1)
+#define WL_ROAM_PROF_SUSPEND	(1 << 2)
+#define WL_ROAM_PROF_SYNC_DTIM	(1 << 6)
+#define WL_ROAM_PROF_DEFAULT	(1 << 7)	/* backward compatible single default profile */
+
+typedef struct wl_roam_prof {
+	int8	roam_flags;		/* bit flags */
+	int8	roam_trigger;		/* RSSI trigger level per profile/RSSI bracket */
+	int8	rssi_lower;
+	int8	roam_delta;
+	int8	rssi_boost_thresh;	/* Min RSSI to qualify for RSSI boost */
+	int8	rssi_boost_delta;	/* RSSI boost for AP in the other band */
+	uint16	nfscan;			/* number of full scans to start with */
+	uint16	fullscan_period;
+	uint16	init_scan_period;
+	uint16	backoff_multiplier;
+	uint16	max_scan_period;
+} wl_roam_prof_t;
+
+typedef struct wl_roam_prof_band {
+	uint32	band;			/* Must be just one band */
+	uint16	ver;			/* version of this struct */
+	uint16	len;			/* length in bytes of this structure */
+	wl_roam_prof_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
+} wl_roam_prof_band_t;
+
+/* no default structure packing */
+#include <packed_section_end.h>
+
+#endif /* _wlioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/net/wireless/bcmdhd/linux_osl.c
new file mode 100644
index 0000000000000000000000000000000000000000..8224ef5c89840f06f5a9fbbf089173b4799a005f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/linux_osl.c
@@ -0,0 +1,1680 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: linux_osl.c 490846 2014-07-12 13:08:59Z $
+ */
+
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+
+#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
+#include <asm/cacheflush.h>
+#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */
+
+#include <linux/random.h>
+
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#include <pcicfg.h>
+
+
+
+#include <linux/fs.h>
+
+#ifdef BCM47XX_ACP_WAR
+#include <linux/spinlock.h>
+extern spinlock_t l2x0_reg_lock;
+#endif
+
+#define PCI_CFG_RETRY		10
+
+#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN	24		/* Mem. filename length */
+#define DUMPBUFSZ 1024
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define DHD_SKB_HDRSIZE		336
+#define DHD_SKB_1PAGE_BUFSIZE	((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_2PAGE_BUFSIZE	((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_4PAGE_BUFSIZE	((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
+
+#define STATIC_BUF_MAX_NUM	16
+#define STATIC_BUF_SIZE	(PAGE_SIZE*2)
+#define STATIC_BUF_TOTAL_LEN	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+
+typedef struct bcm_static_buf {
+	struct semaphore static_sem;
+	unsigned char *buf_ptr;
+	unsigned char buf_use[STATIC_BUF_MAX_NUM];
+} bcm_static_buf_t;
+
+static bcm_static_buf_t *bcm_static_buf = 0;
+
+#define STATIC_PKT_MAX_NUM	8
+#if defined(ENHANCED_STATIC_BUF)
+#define STATIC_PKT_4PAGE_NUM	1
+#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_4PAGE_BUFSIZE
+#else
+#define STATIC_PKT_4PAGE_NUM	0
+#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
+#endif /* ENHANCED_STATIC_BUF */
+
+typedef struct bcm_static_pkt {
+	struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
+	struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
+#ifdef ENHANCED_STATIC_BUF
+	struct sk_buff *skb_16k;
+#endif
+	struct semaphore osl_pkt_sem;
+	unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM];
+} bcm_static_pkt_t;
+
+static bcm_static_pkt_t *bcm_static_skb = 0;
+
+void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+typedef struct bcm_mem_link {
+	struct bcm_mem_link *prev;
+	struct bcm_mem_link *next;
+	uint	size;
+	int	line;
+	void 	*osh;
+	char	file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_cmn_info {
+	atomic_t malloced;
+	atomic_t pktalloced;    /* Number of allocated packet buffers */
+	spinlock_t dbgmem_lock;
+	bcm_mem_link_t *dbgmem_list;
+	spinlock_t pktalloc_lock;
+	atomic_t refcount; /* Number of references to this shared structure. */
+};
+typedef struct osl_cmn_info osl_cmn_t;
+
+struct osl_info {
+	osl_pubinfo_t pub;
+#ifdef CTFPOOL
+	ctfpool_t *ctfpool;
+#endif /* CTFPOOL */
+	uint magic;
+	void *pdev;
+	uint failed;
+	uint bustype;
+	osl_cmn_t *cmn; /* Common OSL-related data shared between two OSHs */
+
+	void *bus_handle;
+#ifdef BCMDBG_CTRACE
+	spinlock_t ctrace_lock;
+	struct list_head ctrace_list;
+	int ctrace_num;
+#endif /* BCMDBG_CTRACE */
+	uint32  flags;		/* If specific cases to be handled in the OSL */
+};
+
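+/* Zero the 32-byte packet tag stored in skb->cb (OSL_PKTTAG_SZ bytes). */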
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+	struct sk_buff *s = (struct sk_buff *)(p); \
+	ASSERT(OSL_PKTTAG_SZ == 32); \
+	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
+	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+
+/* PCMCIA attribute space access macros */
+
+/* Global ASSERT type flag */
+uint32 g_assert_type = 0;
+module_param(g_assert_type, int, 0);
+
+static int16 linuxbcmerrormap[] =
+{	0, 			/* 0 */
+	-EINVAL,		/* BCME_ERROR */
+	-EINVAL,		/* BCME_BADARG */
+	-EINVAL,		/* BCME_BADOPTION */
+	-EINVAL,		/* BCME_NOTUP */
+	-EINVAL,		/* BCME_NOTDOWN */
+	-EINVAL,		/* BCME_NOTAP */
+	-EINVAL,		/* BCME_NOTSTA */
+	-EINVAL,		/* BCME_BADKEYIDX */
+	-EINVAL,		/* BCME_RADIOOFF */
+	-EINVAL,		/* BCME_NOTBANDLOCKED */
+	-EINVAL, 		/* BCME_NOCLK */
+	-EINVAL, 		/* BCME_BADRATESET */
+	-EINVAL, 		/* BCME_BADBAND */
+	-E2BIG,			/* BCME_BUFTOOSHORT */
+	-E2BIG,			/* BCME_BUFTOOLONG */
+	-EBUSY, 		/* BCME_BUSY */
+	-EINVAL, 		/* BCME_NOTASSOCIATED */
+	-EINVAL, 		/* BCME_BADSSIDLEN */
+	-EINVAL, 		/* BCME_OUTOFRANGECHAN */
+	-EINVAL, 		/* BCME_BADCHAN */
+	-EFAULT, 		/* BCME_BADADDR */
+	-ENOMEM, 		/* BCME_NORESOURCE */
+	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
+	-EMSGSIZE,		/* BCME_BADLENGTH */
+	-EINVAL,		/* BCME_NOTREADY */
+	-EPERM,			/* BCME_EPERM */
+	-ENOMEM, 		/* BCME_NOMEM */
+	-EINVAL, 		/* BCME_ASSOCIATED */
+	-ERANGE, 		/* BCME_RANGE */
+	-EINVAL, 		/* BCME_NOTFOUND */
+	-EINVAL, 		/* BCME_WME_NOT_ENABLED */
+	-EINVAL, 		/* BCME_TSPEC_NOTFOUND */
+	-EINVAL, 		/* BCME_ACM_NOTSUPPORTED */
+	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
+	-EIO,			/* BCME_SDIO_ERROR */
+	-ENODEV,		/* BCME_DONGLE_DOWN */
+	-EINVAL,		/* BCME_VERSION */
+	-EIO,			/* BCME_TXFAIL */
+	-EIO,			/* BCME_RXFAIL */
+	-ENODEV,		/* BCME_NODEVICE */
+	-EINVAL,		/* BCME_NMODE_DISABLED */
+	-ENODATA,		/* BCME_NONRESIDENT */
+	-EINVAL,		/* BCME_SCANREJECT */
+	-EINVAL,		/* BCME_USAGE_ERROR */
+	-EIO,     		/* BCME_IOCTL_ERROR */
+	-EIO,			/* BCME_SERIAL_PORT_ERR */
+	-EOPNOTSUPP,	/* BCME_DISABLED, BCME_NOTENABLED */
+	-EIO,			/* BCME_DECERR */
+	-EIO,			/* BCME_ENCERR */
+	-EIO,			/* BCME_MICERR */
+	-ERANGE,		/* BCME_REPLAY */
+	-EINVAL,		/* BCME_IE_NOTFOUND */
+
+/* When a new error code is added to bcmutils.h, add an OS-specific
+ * error translation here as well.
+ */
+/* check if BCME_LAST changed since the last time this function was updated */
+#if BCME_LAST != -52
+#error "You need to add a OS error translation in the linuxbcmerrormap \
+	for new error code defined in bcmutils.h"
+#endif
+};
+
+/* translate bcmerrors into linux errors */
+int
+osl_error(int bcmerror)
+{
+	if (bcmerror > 0)
+		bcmerror = 0;
+	else if (bcmerror < BCME_LAST)
+		bcmerror = BCME_ERROR;
+
+	/* Array bounds covered by ASSERT in osl_attach */
+	return linuxbcmerrormap[-bcmerror];
+}
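+/* Typical use (illustrative sketch): a caller holding a BCME_xxx status from
+ * the dongle converts it before returning to the kernel, e.g.
+ *
+ *	int bcmerror = dongle_op();	// hypothetical helper returning a BCME_* status
+ *	return osl_error(bcmerror);	// e.g. BCME_NOMEM maps to -ENOMEM
+ */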
+#ifdef SHARED_OSL_CMN
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
+{
+#else
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag)
+{
+	void **osl_cmn = NULL;
+#endif /* SHARED_OSL_CMN */
+	osl_t *osh;
+	gfp_t flags;
+
+	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+	if (!(osh = kmalloc(sizeof(osl_t), flags)))
+		return osh;
+
+	ASSERT(osh);
+
+	bzero(osh, sizeof(osl_t));
+
+	if (osl_cmn == NULL || *osl_cmn == NULL) {
+		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
+			kfree(osh);
+			return NULL;
+		}
+		bzero(osh->cmn, sizeof(osl_cmn_t));
+		if (osl_cmn)
+			*osl_cmn = osh->cmn;
+		atomic_set(&osh->cmn->malloced, 0);
+		osh->cmn->dbgmem_list = NULL;
+		spin_lock_init(&(osh->cmn->dbgmem_lock));
+
+		spin_lock_init(&(osh->cmn->pktalloc_lock));
+
+	} else {
+		osh->cmn = *osl_cmn;
+	}
+	atomic_add(1, &osh->cmn->refcount);
+
+	/* Check that error map has the right number of entries in it */
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+	osh->failed = 0;
+	osh->pdev = pdev;
+	osh->pub.pkttag = pkttag;
+	osh->bustype = bustype;
+	osh->magic = OS_HANDLE_MAGIC;
+
+	switch (bustype) {
+		case PCI_BUS:
+		case SI_BUS:
+		case PCMCIA_BUS:
+			osh->pub.mmbus = TRUE;
+			break;
+		case JTAG_BUS:
+		case SDIO_BUS:
+		case USB_BUS:
+		case SPI_BUS:
+		case RPC_BUS:
+			osh->pub.mmbus = FALSE;
+			break;
+		default:
+			ASSERT(FALSE);
+			break;
+	}
+
+#ifdef BCMDBG_CTRACE
+	spin_lock_init(&osh->ctrace_lock);
+	INIT_LIST_HEAD(&osh->ctrace_list);
+	osh->ctrace_num = 0;
+#endif /* BCMDBG_CTRACE */
+
+
+	return osh;
+}
+
+int osl_static_mem_init(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+		if (!bcm_static_buf && adapter) {
+			if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
+				3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
+				printf("can not alloc static buf!\n");
+				bcm_static_skb = NULL;
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				kfree(osh);
+				return -ENOMEM;
+			}
+			else
+				printf("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
+
+
+			sema_init(&bcm_static_buf->static_sem, 1);
+
+			bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+		}
+
+#ifdef BCMSDIO
+		if (!bcm_static_skb && adapter) {
+			int i;
+			void *skb_buff_ptr = 0;
+			bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+			skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
+			if (!skb_buff_ptr) {
+				printf("cannot alloc static buf!\n");
+				bcm_static_buf = NULL;
+				bcm_static_skb = NULL;
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				kfree(osh);
+				return -ENOMEM;
+			}
+
+			bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
+				(STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM));
+			for (i = 0; i < STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM; i++)
+				bcm_static_skb->pkt_use[i] = 0;
+
+			sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+		}
+#endif /* BCMSDIO */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	return 0;
+}
+
+void osl_set_bus_handle(osl_t *osh, void *bus_handle)
+{
+	osh->bus_handle = bus_handle;
+}
+
+void* osl_get_bus_handle(osl_t *osh)
+{
+	return osh->bus_handle;
+}
+
+void
+osl_detach(osl_t *osh)
+{
+	if (osh == NULL)
+		return;
+
+	ASSERT(osh->magic == OS_HANDLE_MAGIC);
+	atomic_sub(1, &osh->cmn->refcount);
+	if (atomic_read(&osh->cmn->refcount) == 0) {
+			kfree(osh->cmn);
+	}
+	kfree(osh);
+}
+
+int osl_static_mem_deinit(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf) {
+		bcm_static_buf = 0;
+	}
+#ifdef BCMSDIO
+	if (bcm_static_skb) {
+		bcm_static_skb = 0;
+	}
+#endif /* BCMSDIO */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	return 0;
+}
+
+static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
+{
+	struct sk_buff *skb;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
+#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
+	flags |= GFP_ATOMIC;
+#endif
+	skb = __dev_alloc_skb(len, flags);
+#else
+	skb = dev_alloc_skb(len);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
+	return skb;
+}
+
+#ifdef CTFPOOL
+
+#ifdef CTFPOOL_SPINLOCK
+#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
+#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
+#else
+#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
+#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
+#endif /* CTFPOOL_SPINLOCK */
+/*
+ * Allocate and add an object to packet pool.
+ */
+void *
+osl_ctfpool_add(osl_t *osh)
+{
+	struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return NULL;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
+
+	/* No need to allocate more objects */
+	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	/* Allocate a new skb and add it to the ctfpool */
+	skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
+	if (skb == NULL) {
+		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
+		       osh->ctfpool->obj_size);
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	/* Add to ctfpool */
+	skb->next = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = skb;
+	osh->ctfpool->fast_frees++;
+	osh->ctfpool->curr_obj++;
+
+	/* Hijack a skb member to store ptr to ctfpool */
+	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
+
+	/* Use bit flag to indicate skb from fast ctfpool */
+	PKTFAST(osh, skb) = FASTBUF;
+
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	return skb;
+}
+
+/*
+ * Add new objects to the pool.
+ */
+void
+osl_ctfpool_replenish(osl_t *osh, uint thresh)
+{
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	/* Do nothing if no refills are required */
+	while ((osh->ctfpool->refills > 0) && (thresh--)) {
+		osl_ctfpool_add(osh);
+		osh->ctfpool->refills--;
+	}
+}
+
+/*
+ * Initialize the packet pool with specified number of objects.
+ */
+int32
+osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
+{
+	gfp_t flags;
+
+	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+	osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
+	ASSERT(osh->ctfpool);
+
+	osh->ctfpool->max_obj = numobj;
+	osh->ctfpool->obj_size = size;
+
+	spin_lock_init(&osh->ctfpool->lock);
+
+	while (numobj--) {
+		if (!osl_ctfpool_add(osh))
+			return -1;
+		osh->ctfpool->fast_frees--;
+	}
+
+	return 0;
+}
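+/* Illustrative lifecycle of the pool routines above (a sketch; the sizes are
+ * placeholders, not values required by this file):
+ *
+ *	osl_ctfpool_init(osh, 128, 2048);	// pre-allocate 128 skbs of 2048 bytes
+ *	...
+ *	osl_ctfpool_replenish(osh, 16);		// allocate at most 16 pending refills
+ *	...
+ *	osl_ctfpool_cleanup(osh);		// free all pooled skbs on detach
+ */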
+
+/*
+ * Cleanup the packet pool objects.
+ */
+void
+osl_ctfpool_cleanup(osl_t *osh)
+{
+	struct sk_buff *skb, *nskb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+
+	skb = osh->ctfpool->head;
+
+	while (skb != NULL) {
+		nskb = skb->next;
+		dev_kfree_skb(skb);
+		skb = nskb;
+		osh->ctfpool->curr_obj--;
+	}
+
+	ASSERT(osh->ctfpool->curr_obj == 0);
+	osh->ctfpool->head = NULL;
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	kfree(osh->ctfpool);
+	osh->ctfpool = NULL;
+}
+
+void
+osl_ctfpool_stats(osl_t *osh, void *b)
+{
+	struct bcmstrbuf *bb;
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf) {
+		bcm_static_buf = 0;
+	}
+#ifdef BCMSDIO
+	if (bcm_static_skb) {
+		bcm_static_skb = 0;
+	}
+#endif /* BCMSDIO */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	bb = b;
+
+	ASSERT((osh != NULL) && (bb != NULL));
+
+	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
+	            osh->ctfpool->max_obj, osh->ctfpool->obj_size,
+	            osh->ctfpool->curr_obj, osh->ctfpool->refills);
+	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
+	            osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
+	            osh->ctfpool->slow_allocs);
+}
+
+static inline struct sk_buff *
+osl_pktfastget(osl_t *osh, uint len)
+{
+	struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	/* Try to do fast allocate. Return null if ctfpool is not in use
+	 * or if there are no items in the ctfpool.
+	 */
+	if (osh->ctfpool == NULL)
+		return NULL;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+	if (osh->ctfpool->head == NULL) {
+		ASSERT(osh->ctfpool->curr_obj == 0);
+		osh->ctfpool->slow_allocs++;
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	if (len > osh->ctfpool->obj_size) {
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	ASSERT(len <= osh->ctfpool->obj_size);
+
+	/* Get an object from ctfpool */
+	skb = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = (void *)skb->next;
+
+	osh->ctfpool->fast_allocs++;
+	osh->ctfpool->curr_obj--;
+	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	/* Init skb struct */
+	skb->next = skb->prev = NULL;
+#if defined(__ARM_ARCH_7A__)
+	skb->data = skb->head + NET_SKB_PAD;
+	skb->tail = skb->head + NET_SKB_PAD;
+#else
+	skb->data = skb->head + 16;
+	skb->tail = skb->head + 16;
+#endif /* __ARM_ARCH_7A__ */
+	skb->len = 0;
+	skb->cloned = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+	skb->list = NULL;
+#endif
+	atomic_set(&skb->users, 1);
+
+	PKTSETCLINK(skb, NULL);
+	PKTCCLRATTR(skb);
+	PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);
+
+	return skb;
+}
+#endif /* CTFPOOL */
+
+#if defined(BCM_GMAC3)
+/* Account for a packet delivered to downstream forwarder.
+ * Decrement a GMAC forwarder interface's pktalloced count.
+ */
+void BCMFASTPATH
+osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt)
+{
+
+	atomic_sub(skb_cnt, &osh->cmn->pktalloced);
+}
+
+/* Account for a downstream forwarder delivered packet to a WL/DHD driver.
+ * Increment a GMAC forwarder interface's pktalloced count.
+ */
+#ifdef BCMDBG_CTRACE
+void BCMFASTPATH
+osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, int line, char *file)
+#else
+void BCMFASTPATH
+osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt)
+#endif /* BCMDBG_CTRACE */
+{
+#if defined(BCMDBG_CTRACE)
+	int i;
+	struct sk_buff *skb;
+#endif 
+
+#if defined(BCMDBG_CTRACE)
+	if (skb_cnt > 1) {
+		struct sk_buff **skb_array = (struct sk_buff **)skbs;
+		for (i = 0; i < skb_cnt; i++) {
+			skb = skb_array[i];
+#if defined(BCMDBG_CTRACE)
+			ASSERT(!PKTISCHAINED(skb));
+			ADD_CTRACE(osh, skb, file, line);
+#endif /* BCMDBG_CTRACE */
+		}
+	} else {
+		skb = (struct sk_buff *)skbs;
+#if defined(BCMDBG_CTRACE)
+		ASSERT(!PKTISCHAINED(skb));
+		ADD_CTRACE(osh, skb, file, line);
+#endif /* BCMDBG_CTRACE */
+	}
+#endif 
+
+	atomic_add(skb_cnt, &osh->cmn->pktalloced);
+}
+
+#endif /* BCM_GMAC3 */
+
+/* Convert a driver packet to native(OS) packet
+ * In the process, packettag is zeroed out before sending up
+ * IP code depends on skb->cb to be setup correctly with various options
+ * In our case, that means it should be 0
+ */
+struct sk_buff * BCMFASTPATH
+osl_pkt_tonative(osl_t *osh, void *pkt)
+{
+	struct sk_buff *nskb;
+#ifdef BCMDBG_CTRACE
+	struct sk_buff *nskb1, *nskb2;
+#endif
+
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(pkt);
+
+	/* Decrement the packet counter */
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
+
+#ifdef BCMDBG_CTRACE
+		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
+			if (PKTISCHAINED(nskb1)) {
+				nskb2 = PKTCLINK(nskb1);
+			}
+			else
+				nskb2 = NULL;
+
+			DEL_CTRACE(osh, nskb1);
+		}
+#endif /* BCMDBG_CTRACE */
+	}
+	return (struct sk_buff *)pkt;
+}
+
+/* Convert a native(OS) packet to driver packet.
+ * In the process, native packet is destroyed, there is no copying
+ * Also, a packettag is zeroed out
+ */
+#ifdef BCMDBG_CTRACE
+void * BCMFASTPATH
+osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
+#else
+void * BCMFASTPATH
+osl_pkt_frmnative(osl_t *osh, void *pkt)
+#endif /* BCMDBG_CTRACE */
+{
+	struct sk_buff *nskb;
+#ifdef BCMDBG_CTRACE
+	struct sk_buff *nskb1, *nskb2;
+#endif
+
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(pkt);
+
+	/* Increment the packet counter */
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
+
+#ifdef BCMDBG_CTRACE
+		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
+			if (PKTISCHAINED(nskb1)) {
+				nskb2 = PKTCLINK(nskb1);
+			}
+			else
+				nskb2 = NULL;
+
+			ADD_CTRACE(osh, nskb1, file, line);
+		}
+#endif /* BCMDBG_CTRACE */
+	}
+	return (void *)pkt;
+}
+
+/* Return a new packet. zero out pkttag */
+#ifdef BCMDBG_CTRACE
+void * BCMFASTPATH
+osl_pktget(osl_t *osh, uint len, int line, char *file)
+#else
+void * BCMFASTPATH
+osl_pktget(osl_t *osh, uint len)
+#endif /* BCMDBG_CTRACE */
+{
+	struct sk_buff *skb;
+
+#ifdef CTFPOOL
+	/* Allocate from local pool */
+	skb = osl_pktfastget(osh, len);
+	if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) {
+#else /* CTFPOOL */
+	if ((skb = osl_alloc_skb(osh, len))) {
+#endif /* CTFPOOL */
+		skb->tail += len;
+		skb->len  += len;
+		skb->priority = 0;
+
+#ifdef BCMDBG_CTRACE
+		ADD_CTRACE(osh, skb, file, line);
+#endif
+		atomic_inc(&osh->cmn->pktalloced);
+	}
+
+	return ((void*) skb);
+}
+
+#ifdef CTFPOOL
+static inline void
+osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
+{
+	ctfpool_t *ctfpool;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+	skb->tstamp.tv.sec = 0;
+#else
+	skb->stamp.tv_sec = 0;
+#endif
+
+	/* We only need to init the fields that we change */
+	skb->dev = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+	skb->dst = NULL;
+#endif
+	OSL_PKTTAG_CLEAR(skb);
+	skb->ip_summed = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	skb_orphan(skb);
+#else
+	skb->destructor = NULL;
+#endif
+
+	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+	ASSERT(ctfpool != NULL);
+
+	/* Add object to the ctfpool */
+	CTFPOOL_LOCK(ctfpool, flags);
+	skb->next = (struct sk_buff *)ctfpool->head;
+	ctfpool->head = (void *)skb;
+
+	ctfpool->fast_frees++;
+	ctfpool->curr_obj++;
+
+	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
+	CTFPOOL_UNLOCK(ctfpool, flags);
+}
+#endif /* CTFPOOL */
+
+/* Free the driver packet. Free the tag if present */
+void BCMFASTPATH
+osl_pktfree(osl_t *osh, void *p, bool send)
+{
+	struct sk_buff *skb, *nskb;
+	if (osh == NULL)
+		return;
+
+	skb = (struct sk_buff*) p;
+
+	if (send && osh->pub.tx_fn)
+		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
+
+	/* perversion: we use skb->next to chain multi-skb packets */
+	while (skb) {
+		nskb = skb->next;
+		skb->next = NULL;
+
+#ifdef BCMDBG_CTRACE
+		DEL_CTRACE(osh, skb);
+#endif
+
+
+#ifdef CTFPOOL
+		if (PKTISFAST(osh, skb)) {
+			if (atomic_read(&skb->users) == 1)
+				smp_rmb();
+			else if (!atomic_dec_and_test(&skb->users))
+				goto next_skb;
+			osl_pktfastfree(osh, skb);
+		} else
+#endif
+		{
+			dev_kfree_skb_any(skb);
+		}
+#ifdef CTFPOOL
+next_skb:
+#endif
+		atomic_dec(&osh->cmn->pktalloced);
+		skb = nskb;
+	}
+}
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+void*
+osl_pktget_static(osl_t *osh, uint len)
+{
+	int i = 0;
+	struct sk_buff *skb;
+
+	if (len > DHD_SKB_MAX_BUFSIZE) {
+		printf("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
+		return osl_pktget(osh, len);
+	}
+
+	down(&bcm_static_skb->osl_pkt_sem);
+
+	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
+		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+			if (bcm_static_skb->pkt_use[i] == 0)
+				break;
+		}
+
+		if (i != STATIC_PKT_MAX_NUM) {
+			bcm_static_skb->pkt_use[i] = 1;
+
+			skb = bcm_static_skb->skb_4k[i];
+			skb->tail = skb->data + len;
+			skb->len = len;
+
+			up(&bcm_static_skb->osl_pkt_sem);
+			return skb;
+		}
+	}
+
+	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
+		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+			if (bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM]
+				== 0)
+				break;
+		}
+
+		if (i != STATIC_PKT_MAX_NUM) {
+			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1;
+			skb = bcm_static_skb->skb_8k[i];
+			skb->tail = skb->data + len;
+			skb->len = len;
+
+			up(&bcm_static_skb->osl_pkt_sem);
+			return skb;
+		}
+	}
+
+#if defined(ENHANCED_STATIC_BUF)
+	if (bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] == 0) {
+		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 1;
+
+		skb = bcm_static_skb->skb_16k;
+		skb->tail = skb->data + len;
+		skb->len = len;
+
+		up(&bcm_static_skb->osl_pkt_sem);
+		return skb;
+	}
+#endif
+
+	up(&bcm_static_skb->osl_pkt_sem);
+	printf("%s: all static pkt in use!\n", __FUNCTION__);
+	return osl_pktget(osh, len);
+}
+
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
+{
+	int i;
+	if (!bcm_static_skb) {
+		osl_pktfree(osh, p, send);
+		return;
+	}
+
+	down(&bcm_static_skb->osl_pkt_sem);
+	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+		if (p == bcm_static_skb->skb_4k[i]) {
+			bcm_static_skb->pkt_use[i] = 0;
+			up(&bcm_static_skb->osl_pkt_sem);
+			return;
+		}
+	}
+
+	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+		if (p == bcm_static_skb->skb_8k[i]) {
+			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
+			up(&bcm_static_skb->osl_pkt_sem);
+			return;
+		}
+	}
+#ifdef ENHANCED_STATIC_BUF
+	if (p == bcm_static_skb->skb_16k) {
+		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 0;
+		up(&bcm_static_skb->osl_pkt_sem);
+		return;
+	}
+#endif
+	up(&bcm_static_skb->osl_pkt_sem);
+	osl_pktfree(osh, p, send);
+}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+uint32
+osl_pci_read_config(osl_t *osh, uint offset, uint size)
+{
+	uint val = 0;
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	/* only 4byte access supported */
+	ASSERT(size == 4);
+
+	do {
+		pci_read_config_dword(osh->pdev, offset, &val);
+		if (val != 0xffffffff)
+			break;
+	} while (retry--);
+
+
+	return (val);
+}
+
+void
+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
+{
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	/* only 4byte access supported */
+	ASSERT(size == 4);
+
+	do {
+		pci_write_config_dword(osh->pdev, offset, val);
+		if (offset != PCI_BAR0_WIN)
+			break;
+		if (osl_pci_read_config(osh, offset, size) == val)
+			break;
+	} while (retry--);
+
+}
+
+/* return bus # for the pci device pointed by osh->pdev */
+uint
+osl_pci_bus(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
+#else
+	return ((struct pci_dev *)osh->pdev)->bus->number;
+#endif
+}
+
+/* return slot # for the pci device pointed by osh->pdev */
+uint
+osl_pci_slot(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
+#else
+	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
+#endif
+}
+
+/* return domain # for the pci device pointed by osh->pdev */
+uint
+osl_pcie_domain(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
+}
+
+/* return bus # for the pci device pointed by osh->pdev */
+uint
+osl_pcie_bus(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+
+/* return the pci device pointed by osh->pdev */
+struct pci_dev *
+osl_pci_device(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return osh->pdev;
+}
+
+static void
+osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
+{
+}
+
+void
+osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
+}
+
+void
+osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
+}
+
+void *
+osl_malloc(osl_t *osh, uint size)
+{
+	void *addr;
+	gfp_t flags;
+
+	/* only ASSERT if osh is defined */
+	if (osh)
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf)
+	{
+		int i = 0;
+		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
+		{
+			down(&bcm_static_buf->static_sem);
+
+			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
+			{
+				if (bcm_static_buf->buf_use[i] == 0)
+					break;
+			}
+
+			if (i == STATIC_BUF_MAX_NUM)
+			{
+				up(&bcm_static_buf->static_sem);
+				printf("all static buff in use!\n");
+				goto original;
+			}
+
+			bcm_static_buf->buf_use[i] = 1;
+			up(&bcm_static_buf->static_sem);
+
+			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
+			if (osh)
+				atomic_add(size, &osh->cmn->malloced);
+
+			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
+		}
+	}
+original:
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+	if ((addr = kmalloc(size, flags)) == NULL) {
+		if (osh)
+			osh->failed++;
+		return (NULL);
+	}
+	if (osh && osh->cmn)
+		atomic_add(size, &osh->cmn->malloced);
+
+	return (addr);
+}
+
+void *
+osl_mallocz(osl_t *osh, uint size)
+{
+	void *ptr;
+
+	ptr = osl_malloc(osh, size);
+
+	if (ptr != NULL) {
+		bzero(ptr, size);
+	}
+
+	return ptr;
+}
+
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf)
+	{
+		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
+			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
+		{
+			int buf_idx = 0;
+
+			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
+
+			down(&bcm_static_buf->static_sem);
+			bcm_static_buf->buf_use[buf_idx] = 0;
+			up(&bcm_static_buf->static_sem);
+
+			if (osh && osh->cmn) {
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				atomic_sub(size, &osh->cmn->malloced);
+			}
+			return;
+		}
+	}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	if (osh && osh->cmn) {
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+		ASSERT(size <= osl_malloced(osh));
+
+		atomic_sub(size, &osh->cmn->malloced);
+	}
+	kfree(addr);
+}
+
+uint
+osl_check_memleak(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	if (atomic_read(&osh->cmn->refcount) == 1)
+		return (atomic_read(&osh->cmn->malloced));
+	else
+		return 0;
+}
+
+uint
+osl_malloced(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	return (atomic_read(&osh->cmn->malloced));
+}
+
+uint
+osl_malloc_failed(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	return (osh->failed);
+}
+
+
+uint
+osl_dma_consistent_align(void)
+{
+	return (PAGE_SIZE);
+}
+
+void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
+{
+	void *va;
+	uint16 align = (1 << align_bits);
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
+		size += align;
+	*alloced = size;
+
+#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
+	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
+	if (va)
+		*pap = (ulong)__virt_to_phys((ulong)va);
+#else
+	{
+		dma_addr_t pap_lin;
+		va = pci_alloc_consistent(osh->pdev, size, &pap_lin);
+		*pap = (dmaaddr_t)pap_lin;
+	}
+#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */
+	return va;
+}
+
+void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
+	kfree(va);
+#else
+	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */
+}
+
+dmaaddr_t BCMFASTPATH
+osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
+{
+	int dir;
+#ifdef BCM47XX_ACP_WAR
+	uint pa;
+#endif
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+
+#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
+	if (dmah != NULL) {
+		int32 nsegs, i, totsegs = 0, totlen = 0;
+		struct scatterlist *sg, _sg[MAX_DMA_SEGS * 2];
+#ifdef BCM47XX_ACP_WAR
+		struct scatterlist *s;
+#endif
+		struct sk_buff *skb;
+		for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
+			sg = &_sg[totsegs];
+			if (skb_is_nonlinear(skb)) {
+				nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
+				ASSERT((nsegs > 0) && (totsegs + nsegs <= MAX_DMA_SEGS));
+#ifdef BCM47XX_ACP_WAR
+				for_each_sg(sg, s, nsegs, i) {
+					if (sg_phys(s) >= ACP_WIN_LIMIT) {
+						dma_map_page(&((struct pci_dev *)osh->pdev)->dev,
+							sg_page(s), s->offset, s->length, dir);
+					}
+				}
+#else
+				pci_map_sg(osh->pdev, sg, nsegs, dir);
+#endif
+			} else {
+				nsegs = 1;
+				ASSERT(totsegs + nsegs <= MAX_DMA_SEGS);
+				sg->page_link = 0;
+				sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));
+#ifdef BCM47XX_ACP_WAR
+				if (virt_to_phys(PKTDATA(osh, skb)) >= ACP_WIN_LIMIT)
+#endif
+				pci_map_single(osh->pdev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir);
+			}
+			totsegs += nsegs;
+			totlen += PKTLEN(osh, skb);
+		}
+		dmah->nsegs = totsegs;
+		dmah->origsize = totlen;
+		for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
+			dmah->segs[i].addr = sg_phys(sg);
+			dmah->segs[i].length = sg->length;
+		}
+		return dmah->segs[0].addr;
+	}
+#endif /* __ARM_ARCH_7A__ && BCMDMASGLISTOSL */
+
+#ifdef BCM47XX_ACP_WAR
+	pa = virt_to_phys(va);
+	if (pa < ACP_WIN_LIMIT)
+		return (pa);
+#endif
+	return (pci_map_single(osh->pdev, va, size, dir));
+}
+
+void BCMFASTPATH
+osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+{
+	int dir;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+#ifdef BCM47XX_ACP_WAR
+	if (pa < ACP_WIN_LIMIT)
+		return;
+#endif
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+}
+
+
+#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
+
+inline void BCMFASTPATH
+osl_cache_flush(void *va, uint size)
+{
+#ifdef BCM47XX_ACP_WAR
+	if (virt_to_phys(va) < ACP_WIN_LIMIT)
+		return;
+#endif
+	if (size > 0)
+		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX);
+}
+
+inline void BCMFASTPATH
+osl_cache_inv(void *va, uint size)
+{
+#ifdef BCM47XX_ACP_WAR
+	if (virt_to_phys(va) < ACP_WIN_LIMIT)
+		return;
+#endif
+	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX);
+}
+
+inline void osl_prefetch(const void *ptr)
+{
+	/* Borrowed from linux/linux-2.6/include/asm-arm/processor.h */
+	__asm__ __volatile__(
+		"pld\t%0"
+		:
+		: "o" (*(char *)ptr)
+		: "cc");
+}
+
+int osl_arch_is_coherent(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+	return 0;
+#else
+	return arch_is_coherent();
+#endif
+}
+#endif 
+
+#if defined(BCMASSERT_LOG)
+void
+osl_assert(const char *exp, const char *file, int line)
+{
+	char tempbuf[256];
+	const char *basename;
+
+	basename = strrchr(file, '/');
+	/* skip the '/' */
+	if (basename)
+		basename++;
+
+	if (!basename)
+		basename = file;
+
+#ifdef BCMASSERT_LOG
+	snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
+		exp, basename, line);
+	printf("%s", tempbuf);
+#endif /* BCMASSERT_LOG */
+
+
+}
+#endif 
+
+void
+osl_delay(uint usec)
+{
+	uint d;
+
+	while (usec > 0) {
+		d = MIN(usec, 1000);
+		udelay(d);
+		usec -= d;
+	}
+}
+
+void
+osl_sleep(uint ms)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	if (ms < 20)
+		usleep_range(ms*1000, ms*1000 + 1000);
+	else
+#endif
+	msleep(ms);
+}
+
+
+
+/* Clone a packet.
+ * The pkttag contents are NOT cloned.
+ */
+#ifdef BCMDBG_CTRACE
+void *
+osl_pktdup(osl_t *osh, void *skb, int line, char *file)
+#else
+void *
+osl_pktdup(osl_t *osh, void *skb)
+#endif /* BCMDBG_CTRACE */
+{
+	void * p;
+
+	ASSERT(!PKTISCHAINED(skb));
+
+	/* clear the CTFBUF flag if set and map the rest of the buffer
+	 * before cloning.
+	 */
+	PKTCTFMAP(osh, skb);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#else
+	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#endif
+		return NULL;
+
+#ifdef CTFPOOL
+	if (PKTISFAST(osh, skb)) {
+		ctfpool_t *ctfpool;
+
+		/* If a buffer allocated from the ctfpool is cloned, we can't be
+		 * sure when it will be freed. Since there is a chance that we
+		 * will lose a buffer from our pool, we increment the refill
+		 * count so the object is allocated again later.
+		 */
+		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+		ASSERT(ctfpool != NULL);
+		PKTCLRFAST(osh, p);
+		PKTCLRFAST(osh, skb);
+		ctfpool->refills++;
+	}
+#endif /* CTFPOOL */
+
+	/* Clear PKTC  context */
+	PKTSETCLINK(p, NULL);
+	PKTCCLRFLAGS(p);
+	PKTCSETCNT(p, 1);
+	PKTCSETLEN(p, PKTLEN(osh, skb));
+
+	/* skb_clone copies skb->cb.. we don't want that */
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(p);
+
+	/* Increment the packet counter */
+	atomic_inc(&osh->cmn->pktalloced);
+#ifdef BCMDBG_CTRACE
+	ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
+#endif
+	return (p);
+}
+
+#ifdef BCMDBG_CTRACE
+int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt)
+{
+	unsigned long flags;
+	struct sk_buff *skb;
+	int ck = FALSE;
+
+	spin_lock_irqsave(&osh->ctrace_lock, flags);
+
+	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+		if (pkt == skb) {
+			ck = TRUE;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&osh->ctrace_lock, flags);
+	return ck;
+}
+
+void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b)
+{
+	unsigned long flags;
+	struct sk_buff *skb;
+	int idx = 0;
+	int i, j;
+
+	spin_lock_irqsave(&osh->ctrace_lock, flags);
+
+	if (b != NULL)
+		bcm_bprintf(b, " Total %d sbk not free\n", osh->ctrace_num);
+	else
+		printf(" Total %d sbk not free\n", osh->ctrace_num);
+
+	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+		if (b != NULL)
+			bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb);
+		else
+			printf("[%d] skb %p:\n", ++idx, skb);
+
+		for (i = 0; i < skb->ctrace_count; i++) {
+			j = (skb->ctrace_start + i) % CTRACE_NUM;
+			if (b != NULL)
+				bcm_bprintf(b, "    [%s(%d)]\n", skb->func[j], skb->line[j]);
+			else
+				printf("    [%s(%d)]\n", skb->func[j], skb->line[j]);
+		}
+		if (b != NULL)
+			bcm_bprintf(b, "\n");
+		else
+			printf("\n");
+	}
+
+	spin_unlock_irqrestore(&osh->ctrace_lock, flags);
+
+	return;
+}
+#endif /* BCMDBG_CTRACE */
+
+
+/*
+ * OSLREGOPS selects the osl_XXX routines for register access
+ */
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ */
+
+uint
+osl_pktalloced(osl_t *osh)
+{
+	if (atomic_read(&osh->cmn->refcount) == 1)
+		return (atomic_read(&osh->cmn->pktalloced));
+	else
+		return 0;
+}
+
+uint32
+osl_rand(void)
+{
+	uint32 rand;
+
+	get_random_bytes(&rand, sizeof(rand));
+
+	return rand;
+}
+
+/* Linux Kernel: File Operations: start */
+void *
+osl_os_open_image(char *filename)
+{
+	struct file *fp;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	/*
+	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
+	 * Alternative:
+	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+	 * ???
+	 */
+	 if (IS_ERR(fp))
+		 fp = NULL;
+
+	 return fp;
+}
+
+int
+osl_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+
+	if (!image)
+		return 0;
+
+	rdlen = kernel_read(fp, fp->f_pos, buf, len);
+	if (rdlen > 0)
+		fp->f_pos += rdlen;
+
+	return rdlen;
+}
+
+void
+osl_os_close_image(void *image)
+{
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
+
+int
+osl_os_image_size(void *image)
+{
+	int len = 0, curroffset;
+
+	if (image) {
+		/* store the current offset */
+		curroffset = generic_file_llseek(image, 0, 1);
+		/* goto end of file to get length */
+		len = generic_file_llseek(image, 0, 2);
+		/* restore back the offset */
+		generic_file_llseek(image, curroffset, 0);
+	}
+	return len;
+}
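+/* Illustrative read loop for the image helpers above (a sketch only; the
+ * firmware path is a placeholder, not one defined by this file):
+ *
+ *	void *img = osl_os_open_image("/vendor/firmware/fw_bcmdhd.bin");
+ *	if (img) {
+ *		char buf[2048];
+ *		int n;
+ *		while ((n = osl_os_get_image_block(buf, sizeof(buf), img)) > 0)
+ *			;	// consume 'n' bytes of the image here
+ *		osl_os_close_image(img);
+ *	}
+ */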
+
+/* Linux Kernel: File Operations: end */
+
+#ifdef BCM47XX_ACP_WAR
+inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size)
+{
+	uint32 flags;
+	int pci_access = 0;
+
+	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
+		pci_access = 1;
+
+	if (pci_access)
+		spin_lock_irqsave(&l2x0_reg_lock, flags);
+	switch (size) {
+	case sizeof(uint8):
+		*(uint8*)v = readb((volatile uint8*)(addr));
+		break;
+	case sizeof(uint16):
+		*(uint16*)v = readw((volatile uint16*)(addr));
+		break;
+	case sizeof(uint32):
+		*(uint32*)v = readl((volatile uint32*)(addr));
+		break;
+	case sizeof(uint64):
+		*(uint64*)v = *((volatile uint64*)(addr));
+		break;
+	}
+	if (pci_access)
+		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
+}
+#endif /* BCM47XX_ACP_WAR */
+
+/* APIs to set/get specific quirks in OSL layer */
+void
+osl_flag_set(osl_t *osh, uint32 mask)
+{
+	osh->flags |= mask;
+}
+
+bool
+osl_is_flag_set(osl_t *osh, uint32 mask)
+{
+	return (osh->flags & mask);
+}
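+
+/*
+ * Minimal usage sketch for the quirk flags above (illustrative only, not part
+ * of this change; OSL_ACP_COHERENCE is assumed to be one of the mask values
+ * defined in osl.h):
+ *
+ *	osl_flag_set(osh, OSL_ACP_COHERENCE);
+ *	if (osl_is_flag_set(osh, OSL_ACP_COHERENCE))
+ *		... take the ACP-coherent DMA path ...
+ */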
diff --git a/drivers/net/wireless/bcmdhd/pcie_core.c b/drivers/net/wireless/bcmdhd/pcie_core.c
new file mode 100644
index 0000000000000000000000000000000000000000..508eccb2bcce3d5f780143169dc27cf3cdba2087
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/pcie_core.c
@@ -0,0 +1,70 @@
+/** @file pcie_core.c
+ *
+ * Contains PCIe related functions that are shared between different driver models (e.g. firmware
+ * builds, DHD builds, BMAC builds), in order to avoid code duplication.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: pcie_core.c 444841 2013-12-21 04:32:29Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+
+#include "pcie_core.h"
+
+/* local prototypes */
+
+/* local variables */
+
+/* function definitions */
+
+#ifdef BCMDRIVER
+
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs)
+{
+	uint32 val, i, lsc;
+	uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR,
+		PCIECFGREG_MSI_CAP, PCIECFGREG_MSI_ADDR_L,
+		PCIECFGREG_MSI_ADDR_H, PCIECFGREG_MSI_DATA,
+		PCIECFGREG_LINK_STATUS_CTRL2, PCIECFGREG_RBAR_CTRL,
+		PCIECFGREG_PML1_SUB_CTRL1, PCIECFGREG_REG_BAR2_CONFIG,
+		PCIECFGREG_REG_BAR3_CONFIG};
+	sbpcieregs_t *pcie = NULL;
+	uint32 origidx = si_coreidx(sih);
+
+	/* Switch to PCIE2 core */
+	pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
+	BCM_REFERENCE(pcie);
+	ASSERT(pcie != NULL);
+
+	/* Disable/restore ASPM Control to protect the watchdog reset */
+	W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+	lsc = R_REG(osh, &sbpcieregs->configdata);
+	val = lsc & (~PCIE_ASPM_ENAB);
+	W_REG(osh, &sbpcieregs->configdata, val);
+
+	si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4);
+	OSL_DELAY(100000);
+
+	W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+	W_REG(osh, &sbpcieregs->configdata, lsc);
+
+	/* Write the configuration registers back to the shadow registers,
+	 * because the shadow registers are cleared out by the watchdog reset.
+	 */
+	for (i = 0; i < ARRAYSIZE(cfg_offset); i++) {
+		W_REG(osh, &sbpcieregs->configaddr, cfg_offset[i]);
+		val = R_REG(osh, &sbpcieregs->configdata);
+		W_REG(osh, &sbpcieregs->configdata, val);
+	}
+	si_setcoreidx(sih, origidx);
+}
+
+#endif /* BCMDRIVER */
diff --git a/drivers/net/wireless/bcmdhd/sbutils.c b/drivers/net/wireless/bcmdhd/sbutils.c
new file mode 100644
index 0000000000000000000000000000000000000000..4894d9aaccceb8b24fb28b2ad947fbd5f715e153
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/sbutils.c
@@ -0,0 +1,1087 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: sbutils.c 467150 2014-04-02 17:30:43Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+
+#include "siutils_priv.h"
+
+
+/* local prototypes */
+static uint _sb_coreidx(si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
+                     uint ncores);
+static uint32 _sb_coresba(si_info_t *sii);
+static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
+#define	SET_SBREG(sii, r, mask, val)	\
+		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
+#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
+#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+static uint32
+sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint8 tmp;
+	uint32 val, intr_val = 0;
+
+
+	/*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG is OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (to access sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	val = R_REG(sii->osh, sbr);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (val);
+}
+
+static void
+sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint8 tmp;
+	volatile uint32 dummy;
+	uint32 intr_val = 0;
+
+
+	/*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG is OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (to access sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
+		dummy = R_REG(sii->osh, sbr);
+		BCM_REFERENCE(dummy);
+		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+		dummy = R_REG(sii->osh, sbr);
+		BCM_REFERENCE(dummy);
+		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+	} else
+		W_REG(sii->osh, sbr, v);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+}
+
+uint
+sb_coreid(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
+}
+
+uint
+sb_intflag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	void *corereg;
+	sbconfig_t *sb;
+	uint origidx, intflag, intr_val = 0;
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+	corereg = si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(corereg != NULL);
+	sb = REGS2SB(corereg);
+	intflag = R_SBREG(sii, &sb->sbflagst);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+
+	return intflag;
+}
+
+uint
+sb_flag(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
+}
+
+void
+sb_setint(si_t *sih, int siflag)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 vec;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	if (siflag == -1)
+		vec = 0;
+	else
+		vec = 1 << siflag;
+	W_SBREG(sii, &sb->sbintvec, vec);
+}
+
+/* return core index of the core with address 'sba' */
+static uint
+_sb_coreidx(si_info_t *sii, uint32 sba)
+{
+	uint i;
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	for (i = 0; i < sii->numcores; i ++)
+		if (sba == cores_info->coresba[i])
+			return i;
+	return BADIDX;
+}
+
+/* return core address of the current core */
+static uint32
+_sb_coresba(si_info_t *sii)
+{
+	uint32 sbaddr;
+
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS: {
+		sbconfig_t *sb = REGS2SB(sii->curmap);
+		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
+		break;
+	}
+
+	case PCI_BUS:
+		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = 0;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		sbaddr  = (uint32)tmp << 12;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		sbaddr |= (uint32)tmp << 16;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		sbaddr |= (uint32)tmp << 24;
+		break;
+	}
+
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		sbaddr = (uint32)(uintptr)sii->curmap;
+		break;
+#endif
+
+
+	default:
+		sbaddr = BADCOREADDR;
+		break;
+	}
+
+	return sbaddr;
+}
+
+uint
+sb_corevendor(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
+}
+
+uint
+sb_corerev(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint sbidh;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+	sbidh = R_SBREG(sii, &sb->sbidhigh);
+
+	return (SBCOREREV(sbidh));
+}
+
+/* set core-specific control flags */
+void
+sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+	        (val << SBTML_SICF_SHIFT);
+	W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+		        (val << SBTML_SICF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatelow, w);
+	}
+
+	/* return the new value;
+	 * for a write operation, the following readback ensures the write has completed.
+	 */
+	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+		        (val << SBTMH_SISF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatehigh, w);
+	}
+
+	/* return the new value */
+	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbtmstatelow) &
+	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are at either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13 at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		if (regoff >= SBCONFIGOFF) {
+			w = (R_SBREG(sii, r) & ~mask) | val;
+			W_SBREG(sii, r, w);
+		} else {
+			w = (R_REG(sii->osh, r) & ~mask) | val;
+			W_REG(sii->osh, r, w);
+		}
+	}
+
+	/* readback */
+	if (regoff >= SBCONFIGOFF)
+		w = R_SBREG(sii, r);
+	else {
+		if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
+		    (coreidx == SI_CC_IDX) &&
+		    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
+			w = val;
+		} else
+			w = R_REG(sii->osh, r);
+	}
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			sb_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * returns the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+uint32 *
+sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	uint32 *r = NULL;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are at either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13 at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast)
+		return 0;
+
+	return (r);
+}
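+
+/*
+ * Usage sketch for the two routines above (illustrative only, not part of
+ * this change): try the fast, mapped path first and fall back to the
+ * core-switching path when no direct mapping exists.
+ *
+ *	uint32 *wd = sb_corereg_addr(sih, SI_CC_IDX,
+ *	                             OFFSETOF(chipcregs_t, watchdog));
+ *	if (wd != NULL)
+ *		W_REG(osh, wd, ticks);
+ *	else
+ *		sb_corereg(sih, SI_CC_IDX,
+ *		           OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+ */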
+
+/* Scan the enumeration space to find all cores starting from the given
+ * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
+ * is the default core address at chip POR time and 'regs' is the virtual
+ * address that the default core is mapped at. 'ncores' is the number of
+ * cores expected on bus 'sbba'. It returns the total number of cores
+ * starting from bus 'sbba', inclusive.
+ */
+#define SB_MAXBUSES	2
+static uint
+_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
+{
+	uint next;
+	uint ncc = 0;
+	uint i;
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (bus >= SB_MAXBUSES) {
+		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
+		return 0;
+	}
+	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
+
+	/* Scan all cores on the bus starting from core 0.
+	 * Core addresses must be contiguous on each bus.
+	 */
+	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
+		cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
+
+		/* keep and reuse the initial register mapping */
+		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
+			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
+			cores_info->regs[next] = regs;
+		}
+
+		/* change core to 'next' and read its coreid */
+		sii->curmap = _sb_setcoreidx(sii, next);
+		sii->curidx = next;
+
+		cores_info->coreid[next] = sb_coreid(&sii->pub);
+
+		/* core specific processing... */
+		/* chipc provides # cores */
+		if (cores_info->coreid[next] == CC_CORE_ID) {
+			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
+			uint32 ccrev = sb_corerev(&sii->pub);
+
+			/* determine numcores - this is the total # cores in the chip */
+			if (((ccrev == 4) || (ccrev >= 6))) {
+				ASSERT(cc);
+				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
+				        CID_CC_SHIFT;
+			} else {
+				/* Older chips */
+				uint chip = CHIPID(sii->pub.chip);
+
+				if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
+					numcores = 6;
+				else if (chip == BCM4704_CHIP_ID)
+					numcores = 9;
+				else if (chip == BCM5365_CHIP_ID)
+					numcores = 7;
+				else {
+					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
+					          chip));
+					ASSERT(0);
+					numcores = 1;
+				}
+			}
+			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
+				sii->pub.issim ? "QT" : ""));
+		}
+		/* scan bridged SB(s) and add results to the end of the list */
+		else if (cores_info->coreid[next] == OCP_CORE_ID) {
+			sbconfig_t *sb = REGS2SB(sii->curmap);
+			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
+			uint nsbcc;
+
+			sii->numcores = next + 1;
+
+			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
+				continue;
+			nsbba &= 0xfffff000;
+			if (_sb_coreidx(sii, nsbba) != BADIDX)
+				continue;
+
+			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
+			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
+			if (sbba == SI_ENUM_BASE)
+				numcores -= nsbcc;
+			ncc += nsbcc;
+		}
+	}
+
+	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
+
+	sii->numcores = i + ncc;
+	return sii->numcores;
+}
+
+/* scan the sb enumerated space to identify all cores */
+void
+sb_scan(si_t *sih, void *regs, uint devid)
+{
+	uint32 origsba;
+	sbconfig_t *sb;
+	si_info_t *sii = SI_INFO(sih);
+
+	sb = REGS2SB(sii->curmap);
+
+	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
+
+	/* Save the current core info; it is validated later, once we know
+	 * for sure what is good and what is bad.
+	 */
+	origsba = _sb_coresba(sii);
+
+	/* scan all SB(s) starting from SI_ENUM_BASE */
+	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+void *
+sb_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (coreidx >= sii->numcores)
+		return (NULL);
+
+	/*
+	 * If the user has provided an interrupt mask enabled function,
+	 * then assert interrupts are disabled before switching the core.
+	 */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	sii->curmap = _sb_setcoreidx(sii, coreidx);
+	sii->curidx = coreidx;
+
+	return (sii->curmap);
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static void *
+_sb_setcoreidx(si_info_t *sii, uint coreidx)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 sbaddr = cores_info->coresba[coreidx];
+	void *regs;
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		regs = cores_info->regs[coreidx];
+		break;
+
+	case PCI_BUS:
+		/* point bar0 window */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
+		regs = sii->curmap;
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = (sbaddr >> 12) & 0x0f;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		tmp = (sbaddr >> 16) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		tmp = (sbaddr >> 24) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		regs = sii->curmap;
+		break;
+	}
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		regs = cores_info->regs[coreidx];
+		break;
+#endif	/* BCMSDIO */
+
+
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	return regs;
+}
+
+/* Return the address of sbadmatch0/1/2/3 register */
+static volatile uint32 *
+sb_admatch(si_info_t *sii, uint asidx)
+{
+	sbconfig_t *sb;
+	volatile uint32 *addrm;
+
+	sb = REGS2SB(sii->curmap);
+
+	switch (asidx) {
+	case 0:
+		addrm =  &sb->sbadmatch0;
+		break;
+
+	case 1:
+		addrm =  &sb->sbadmatch1;
+		break;
+
+	case 2:
+		addrm =  &sb->sbadmatch2;
+		break;
+
+	case 3:
+		addrm =  &sb->sbadmatch3;
+		break;
+
+	default:
+		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
+		return 0;
+	}
+
+	return (addrm);
+}
+
+/* Return the number of address spaces in current core */
+int
+sb_numaddrspaces(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	/* + 1 because of enumeration space */
+	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+sb_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+sb_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+
+/* do buffered registers update */
+void
+sb_commit(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+
+	origidx = sii->curidx;
+	ASSERT(GOODIDX(origidx));
+
+	INTR_OFF(sii, intr_val);
+
+	/* switch over to chipcommon core if there is one, else use pci */
+	if (sii->pub.ccrev != NOREV) {
+		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+		ASSERT(ccregs != NULL);
+
+		/* do the buffer registers update */
+		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
+		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
+	} else
+		ASSERT(0);
+
+	/* restore core index */
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+sb_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii;
+	volatile uint32 dummy;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/* if core is already in reset, just return */
+	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+		return;
+
+	/* if clocks are not enabled, put into reset and return */
+	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+		goto disable;
+
+	/* set target reject and spin until busy is clear (preserve core-specific bits) */
+	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
+
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+		dummy = R_SBREG(sii, &sb->sbimstate);
+		BCM_REFERENCE(dummy);
+		OSL_DELAY(1);
+		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+	}
+
+	/* set reset and reject while enabling the clocks */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_REJ | SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(10);
+
+	/* don't forget to clear the initiator reject bit */
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+	/* leave reset and reject asserted */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+	OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	volatile uint32 dummy;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/*
+	 * Must do the disable sequence first to work for arbitrary current core state.
+	 */
+	sb_core_disable(sih, (bits | resetbits));
+
+	/*
+	 * Now do the initialization sequence.
+	 */
+
+	/* set reset while enabling the clock and forcing them on throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+		W_SBREG(sii, &sb->sbtmstatehigh, 0);
+	}
+	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+	}
+
+	/* clear reset and allow it to propagate throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	/* leave clock enabled */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+}
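+
+/*
+ * Illustrative call (a sketch, not part of this change): a plain reset with
+ * no core-specific control bits simply passes zero for both arguments.
+ *
+ *	sb_core_reset(sih, 0, 0);
+ */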
+
+/*
+ * Set the initiator timeout for the "master core".
+ * The master core is defined to be the core in control
+ * of the chip, and so it issues accesses to non-memory
+ * locations (because of DMA, *any* core can access memory).
+ *
+ * The routine uses the bus to decide who is the master:
+ *	SI_BUS => mips
+ *	JTAG_BUS => chipc
+ *	PCI_BUS => pci or pcie
+ *	PCMCIA_BUS => pcmcia
+ *	SDIO_BUS => pcmcia
+ *
+ * This routine exists so callers can disable initiator
+ * timeouts so accesses to very slow devices like otp
+ * won't cause an abort. The routine allows arbitrary
+ * settings of the service and request timeouts, though.
+ *
+ * Returns the timeout state before changing it or -1
+ * on error.
+ */
+
+#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
+
+uint32
+sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 tmp, ret = 0xffffffff;
+	sbconfig_t *sb;
+
+
+	if ((to & ~TO_MASK) != 0)
+		return ret;
+
+	/* Figure out the master core */
+	if (idx == BADIDX) {
+		switch (BUSTYPE(sii->pub.bustype)) {
+		case PCI_BUS:
+			idx = sii->pub.buscoreidx;
+			break;
+		case JTAG_BUS:
+			idx = SI_CC_IDX;
+			break;
+		case PCMCIA_BUS:
+#ifdef BCMSDIO
+		case SDIO_BUS:
+#endif
+			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
+			break;
+		case SI_BUS:
+			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
+			break;
+		default:
+			ASSERT(0);
+		}
+		if (idx == BADIDX)
+			return ret;
+	}
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	sb = REGS2SB(sb_setcoreidx(sih, idx));
+
+	tmp = R_SBREG(sii, &sb->sbimconfiglow);
+	ret = tmp & TO_MASK;
+	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
+
+	sb_commit(sih);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+	return ret;
+}
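+
+/*
+ * Illustrative use of sb_set_initiator_to() (a sketch, not part of this
+ * change): disable the initiator timeouts around a slow access and then
+ * restore whatever was programmed before.
+ *
+ *	uint32 saved = sb_set_initiator_to(sih, 0, BADIDX);
+ *	... access the slow device (e.g. OTP) ...
+ *	if (saved != 0xffffffff)
+ *		sb_set_initiator_to(sih, saved, BADIDX);
+ */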
+
+uint32
+sb_base(uint32 admatch)
+{
+	uint32 base;
+	uint type;
+
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	base = 0;
+
+	if (type == 0) {
+		base = admatch & SBAM_BASE0_MASK;
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE1_MASK;
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE2_MASK;
+	}
+
+	return (base);
+}
+
+uint32
+sb_size(uint32 admatch)
+{
+	uint32 size;
+	uint type;
+
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	size = 0;
+
+	if (type == 0) {
+		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
+	}
+
+	return (size);
+}
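+
+/*
+ * Illustrative use of the two decoders above (a sketch, not part of this
+ * change): recover the base and size of a core's first address space from
+ * its admatch0 register, as sb_addrspace()/sb_addrspacesize() do below.
+ *
+ *	uint32 am = R_SBREG(sii, sb_admatch(sii, 0));
+ *	uint32 base = sb_base(am);
+ *	uint32 size = sb_size(am);
+ */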
+
+#if defined(BCMDBG_PHYDUMP)
+/* print interesting sbconfig registers */
+void
+sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
+{
+	sbconfig_t *sb;
+	uint origidx, i, intr_val = 0;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	origidx = sii->curidx;
+
+	INTR_OFF(sii, intr_val);
+
+	for (i = 0; i < sii->numcores; i++) {
+		sb = REGS2SB(sb_setcoreidx(sih, i));
+
+		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);
+
+		if (sii->pub.socirev > SONICS_2_2)
+			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
+			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
+			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));
+
+		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
+		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
+		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
+		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
+		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
+	}
+
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+}
+#endif /* BCMDBG_PHYDUMP */
diff --git a/drivers/net/wireless/bcmdhd/siutils.c b/drivers/net/wireless/bcmdhd/siutils.c
new file mode 100644
index 0000000000000000000000000000000000000000..c01ab441f598c32ac5cc0c8d34ad4b0215db3759
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils.c
@@ -0,0 +1,3004 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: siutils.c 481602 2014-05-29 22:43:34Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+#include <sbsocram.h>
+#ifdef BCMSDIO
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#endif /* BCMSDIO */
+#include <hndpmu.h>
+
+#ifdef BCM_SDRBL
+#include <hndcpu.h>
+#endif /* BCM_SDRBL */
+#ifdef HNDGCI
+#include <hndgci.h>
+#endif /* HNDGCI */
+
+#include "siutils_priv.h"
+
+/**
+ * A set of PMU registers is clocked in the ILP domain, which has an implication on register write
+ * behavior: if such a register is written, it takes multiple ILP clocks for the PMU block to absorb
+ * the write. During that time the 'SlowWritePending' bit in the PMUStatus register is set.
+ */
+#define PMUREGS_ILP_SENSITIVE(regoff) \
+	((regoff) == OFFSETOF(pmuregs_t, pmutimer) || \
+	 (regoff) == OFFSETOF(pmuregs_t, pmuwatchdog) || \
+	 (regoff) == OFFSETOF(pmuregs_t, res_req_timer))
+
+#define CHIPCREGS_ILP_SENSITIVE(regoff) \
+	((regoff) == OFFSETOF(chipcregs_t, pmutimer) || \
+	 (regoff) == OFFSETOF(chipcregs_t, pmuwatchdog) || \
+	 (regoff) == OFFSETOF(chipcregs_t, res_req_timer))
+
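+/*
+ * Illustrative consequence of the above (a sketch, not part of this change):
+ * after writing an ILP-sensitive register, a caller typically waits for the
+ * write to be absorbed before issuing another one. The pmustatus register and
+ * the PST_SLOW_WR_PENDING bit are assumed to be declared in sbchipc.h:
+ *
+ *	W_REG(osh, &pmu->pmuwatchdog, ticks);
+ *	SPINWAIT(R_REG(osh, &pmu->pmustatus) & PST_SLOW_WR_PENDING,
+ *	         PMU_MAX_TRANSITION_DLY);
+ */
+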
+/* local prototypes */
+static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+                              uint bustype, void *sdh, char **vars, uint *varsz);
+static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
+static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, void *regs);
+
+
+static bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff);
+
+#ifdef BCMLTECOEX
+static void si_config_gcigpio(si_t *sih, uint32 gci_pos, uint8 gcigpio,
+	uint8 gpioctl_mask, uint8 gpioctl_val);
+#endif /* BCMLTECOEX */
+
+
+/* global variable to indicate reservation/release of gpio's */
+static uint32 si_gpioreservation = 0;
+
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+#ifdef SR_DEBUG
+static const uint32 si_power_island_test_array[] = {
+	0x0000, 0x0001, 0x0010, 0x0011,
+	0x0100, 0x0101, 0x0110, 0x0111,
+	0x1000, 0x1001, 0x1010, 0x1011,
+	0x1100, 0x1101, 0x1110, 0x1111
+};
+#endif /* SR_DEBUG */
+
+int do_4360_pcie2_war = 0;
+
+/* global kernel resource */
+static si_info_t ksii;
+static si_cores_info_t ksii_cores_info;
+
+/**
+ * Allocate an si handle. This function may be called multiple times.
+ *
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/pcmcia/sb/sdio/etc
+ * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
+ *        function set 'vars' to NULL, making dereferencing of this parameter undesired.
+ * varsz - pointer to int to return the size of the vars
+ */
+si_t *
+si_attach(uint devid, osl_t *osh, void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	si_info_t *sii;
+	si_cores_info_t *cores_info;
+	/* alloc si_info_t */
+	if ((sii = MALLOCZ(osh, sizeof (si_info_t))) == NULL) {
+		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+		return (NULL);
+	}
+
+	/* alloc si_cores_info_t */
+	if ((cores_info = (si_cores_info_t *)MALLOCZ(osh, sizeof (si_cores_info_t))) == NULL) {
+		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+		MFREE(osh, sii, sizeof(si_info_t));
+		return (NULL);
+	}
+	sii->cores_info = cores_info;
+
+	if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
+		MFREE(osh, sii, sizeof(si_info_t));
+		MFREE(osh, cores_info, sizeof(si_cores_info_t));
+		return (NULL);
+	}
+	sii->vars = vars ? *vars : NULL;
+	sii->varsz = varsz ? *varsz : 0;
+
+	return (si_t *)sii;
+}
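+
+/*
+ * Typical host-side usage (an illustrative sketch only, not part of this
+ * change; the real callers live in the DHD bus layers and pass their own
+ * register mapping and SDIO/PCIe handle, and BCME_ERROR is assumed to be the
+ * generic error code from bcmutils.h):
+ *
+ *	si_t *sih = si_attach(devid, osh, regsva, SDIO_BUS, sdh, &vars, &varsz);
+ *	if (sih == NULL)
+ *		return BCME_ERROR;
+ *	...
+ *	si_detach(sih);
+ */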
+
+
+static uint32	wd_msticks;		/* watchdog timer ticks normalized to ms */
+
+/** generic kernel variant of si_attach() */
+si_t *
+si_kattach(osl_t *osh)
+{
+	static bool ksii_attached = FALSE;
+	si_cores_info_t *cores_info;
+
+	if (!ksii_attached) {
+		void *regs = NULL;
+		regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+		cores_info = (si_cores_info_t *)&ksii_cores_info;
+		ksii.cores_info = cores_info;
+
+		ASSERT(osh);
+		if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
+		                SI_BUS, NULL,
+		                osh != SI_OSH ? &(ksii.vars) : NULL,
+		                osh != SI_OSH ? &(ksii.varsz) : NULL) == NULL) {
+			SI_ERROR(("si_kattach: si_doattach failed\n"));
+			REG_UNMAP(regs);
+			return NULL;
+		}
+		REG_UNMAP(regs);
+
+		/* save ticks normalized to ms for si_watchdog_ms() */
+		if (PMUCTL_ENAB(&ksii.pub)) {
+				/* based on 32KHz ILP clock */
+				wd_msticks = 32;
+		} else {
+			wd_msticks = ALP_CLOCK / 1000;
+		}
+
+		ksii_attached = TRUE;
+		SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+		        ksii.pub.ccrev, wd_msticks));
+	}
+
+	return &ksii.pub;
+}
+
+
+static bool
+si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+	/* need to set memseg flag for CF card first before any sb registers access */
+	if (BUSTYPE(bustype) == PCMCIA_BUS)
+		sii->memseg = TRUE;
+
+
+#if defined(BCMSDIO)
+	if (BUSTYPE(bustype) == SDIO_BUS) {
+		int err;
+		uint8 clkset;
+
+		/* Try forcing SDIO core to do ALPAvail request only */
+		clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+		if (!err) {
+			uint8 clkval;
+
+			/* If register supported, wait for ALPAvail and then force ALP */
+			clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+			if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+				SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+					PMU_MAX_TRANSITION_DLY);
+				if (!SBSDIO_ALPAV(clkval)) {
+					SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+						clkval));
+					return FALSE;
+				}
+				clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					clkset, &err);
+				OSL_DELAY(65);
+			}
+		}
+
+		/* Also, disable the extra SDIO pull-ups */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+	}
+
+#endif /* BCMSDIO */
+
+	return TRUE;
+}
+
+static bool
+si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, void *regs)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	bool pci, pcie, pcie_gen2 = FALSE;
+	uint i;
+	uint pciidx, pcieidx, pcirev, pcierev;
+
+	cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	/* get chipcommon rev */
+	sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+	/* get chipcommon chipstatus */
+	if (sii->pub.ccrev >= 11)
+		sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+	/* get chipcommon capabilities */
+	sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+	/* get chipcommon extended capabilities */
+
+	if (sii->pub.ccrev >= 35)
+		sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
+
+	/* get pmu rev and caps */
+	if (sii->pub.cccaps & CC_CAP_PMU) {
+		if (AOB_ENAB(&sii->pub)) {
+			uint pmucoreidx;
+			pmuregs_t *pmu;
+			pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0);
+			pmu = si_setcoreidx(&sii->pub, pmucoreidx);
+			sii->pub.pmucaps = R_REG(sii->osh, &pmu->pmucapabilities);
+			si_setcoreidx(&sii->pub, SI_CC_IDX);
+		} else
+			sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+
+		sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+	}
+
+	SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+		sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+		sii->pub.pmucaps));
+
+	/* figure out bus/original core idx */
+	sii->pub.buscoretype = NODEV_CORE_ID;
+	sii->pub.buscorerev = (uint)NOREV;
+	sii->pub.buscoreidx = BADIDX;
+
+	pci = pcie = FALSE;
+	pcirev = pcierev = (uint)NOREV;
+	pciidx = pcieidx = BADIDX;
+
+	for (i = 0; i < sii->numcores; i++) {
+		uint cid, crev;
+
+		si_setcoreidx(&sii->pub, i);
+		cid = si_coreid(&sii->pub);
+		crev = si_corerev(&sii->pub);
+
+		/* Display cores found */
+		SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+		        i, cid, crev, cores_info->coresba[i], cores_info->regs[i]));
+
+		if (BUSTYPE(bustype) == SI_BUS) {
+			/* now look at the chipstatus register to figure out the package */
+			/* for SDIO but downloaded on PCIE dev */
+			if (cid == PCIE2_CORE_ID) {
+				if ((CHIPID(sii->pub.chip) == BCM43602_CHIP_ID) ||
+					((CHIPID(sii->pub.chip) == BCM4345_CHIP_ID) &&
+					CST4345_CHIPMODE_PCIE(sii->pub.chipst))) {
+					pcieidx = i;
+					pcierev = crev;
+					pcie = TRUE;
+					pcie_gen2 = TRUE;
+				}
+			}
+
+		}
+		else if (BUSTYPE(bustype) == PCI_BUS) {
+			if (cid == PCI_CORE_ID) {
+				pciidx = i;
+				pcirev = crev;
+				pci = TRUE;
+			} else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) {
+				pcieidx = i;
+				pcierev = crev;
+				pcie = TRUE;
+				if (cid == PCIE2_CORE_ID)
+					pcie_gen2 = TRUE;
+			}
+		} else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
+		           (cid == PCMCIA_CORE_ID)) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+#ifdef BCMSDIO
+		else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+		          (BUSTYPE(bustype) == SPI_BUS)) &&
+		         ((cid == PCMCIA_CORE_ID) ||
+		          (cid == SDIOD_CORE_ID))) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+#endif /* BCMSDIO */
+
+		/* find the core idx before entering this func. */
+		if ((savewin && (savewin == cores_info->coresba[i])) ||
+		    (regs == cores_info->regs[i]))
+			*origidx = i;
+	}
+
+#if defined(PCIE_FULL_DONGLE)
+	pci = FALSE;
+#endif
+	if (pci) {
+		sii->pub.buscoretype = PCI_CORE_ID;
+		sii->pub.buscorerev = pcirev;
+		sii->pub.buscoreidx = pciidx;
+	} else if (pcie) {
+		if (pcie_gen2)
+			sii->pub.buscoretype = PCIE2_CORE_ID;
+		else
+			sii->pub.buscoretype = PCIE_CORE_ID;
+		sii->pub.buscorerev = pcierev;
+		sii->pub.buscoreidx = pcieidx;
+	}
+
+	SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+	         sii->pub.buscorerev));
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
+	    (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3))
+		OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
+
+
+#if defined(BCMSDIO)
+	/* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was
+	 * already running.
+	 */
+	if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+		if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+		    si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+			si_core_disable(&sii->pub, 0);
+	}
+#endif /* BCMSDIO */
+
+	/* return to the original core */
+	si_setcoreidx(&sii->pub, *origidx);
+
+	return TRUE;
+}
+
+
+
+
+uint16
+si_chipid(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	return (sii->chipnew) ? sii->chipnew : sih->chip;
+}
+
+static void
+si_chipid_fixup(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	ASSERT(sii->chipnew == 0);
+	switch (sih->chip) {
+		case BCM43570_CHIP_ID:
+		case BCM4358_CHIP_ID:
+			sii->chipnew = sih->chip; /* save it */
+			sii->pub.chip = BCM43569_CHIP_ID; /* chip class */
+		break;
+		case BCM4356_CHIP_ID:
+			sii->chipnew = sih->chip; /* save it */
+			sii->pub.chip = BCM4354_CHIP_ID; /* chip class */
+		break;
+		default:
+		ASSERT(0);
+		break;
+	}
+}
+
+/**
+ * Allocate an si handle. This function may be called multiple times.
+ *
+ * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
+ *        function set 'vars' to NULL.
+ */
+static si_info_t *
+si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	struct si_pub *sih = &sii->pub;
+	uint32 w, savewin;
+	chipcregs_t *cc;
+	char *pvars = NULL;
+	uint origidx;
+#if !defined(_CFEZ_) || defined(CFG_WL)
+#endif 
+
+	ASSERT(GOODREGS(regs));
+
+	savewin = 0;
+
+	sih->buscoreidx = BADIDX;
+
+	sii->curmap = regs;
+	sii->sdh = sdh;
+	sii->osh = osh;
+
+
+	/* check to see if we are a si core mimic'ing a pci core */
+	if ((bustype == PCI_BUS) &&
+	    (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff)) {
+		SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI "
+		          "devid:0x%x\n", __FUNCTION__, devid));
+		bustype = SI_BUS;
+	}
+
+	/* find Chipcommon address */
+	if (bustype == PCI_BUS) {
+		savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+			savewin = SI_ENUM_BASE;
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+		if (!regs)
+			return NULL;
+		cc = (chipcregs_t *)regs;
+#ifdef BCMSDIO
+	} else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
+		cc = (chipcregs_t *)sii->curmap;
+#endif
+	} else {
+		cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+	}
+
+	sih->bustype = bustype;
+	if (bustype != BUSTYPE(bustype)) {
+		SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
+			bustype, BUSTYPE(bustype)));
+		return NULL;
+	}
+
+	/* bus/core/clk setup for register access */
+	if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+		SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype));
+		return NULL;
+	}
+
+	/* ChipID recognition.
+	*   We assume we can read chipid at offset 0 from the regs arg.
+	*   If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+	*   some way of recognizing them needs to be added here.
+	*/
+	if (!cc) {
+		SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__));
+		return NULL;
+	}
+	w = R_REG(osh, &cc->chipid);
+	if ((w & 0xfffff) == 148277) w -= 65532;
+	sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+	/* Might as well fill in chip id, rev & pkg */
+	sih->chip = w & CID_ID_MASK;
+	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+
+	if ((sih->chip == BCM4358_CHIP_ID) ||
+		(sih->chip == BCM43570_CHIP_ID) ||
+		(sih->chip == BCM4356_CHIP_ID)) {
+		si_chipid_fixup(sih);
+	}
+
+	if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) &&
+		(sih->chippkg != BCM4329_289PIN_PKG_ID)) {
+		sih->chippkg = BCM4329_182PIN_PKG_ID;
+	}
+	sih->issim = IS_SIM(sih->chippkg);
+
+	/* scan for cores */
+	if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+		SI_MSG(("Found chip type SB (0x%08x)\n", w));
+		sb_scan(&sii->pub, regs, devid);
+	} else if ((CHIPTYPE(sii->pub.socitype) == SOCI_AI) ||
+		(CHIPTYPE(sii->pub.socitype) == SOCI_NAI)) {
+		if (CHIPTYPE(sii->pub.socitype) == SOCI_AI)
+			SI_MSG(("Found chip type AI (0x%08x)\n", w));
+		else
+			SI_MSG(("Found chip type NAI (0x%08x)\n", w));
+		/* pass chipc address instead of original core base */
+		ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
+	} else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
+		SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
+		/* pass chipc address instead of original core base */
+		ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
+	} else {
+		SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+		return NULL;
+	}
+	/* no cores found, bail out */
+	if (sii->numcores == 0) {
+		SI_ERROR(("si_doattach: could not find any cores\n"));
+		return NULL;
+	}
+	/* bus/core/clk setup */
+	origidx = SI_CC_IDX;
+	if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+		SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+		goto exit;
+	}
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+	if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK)
+		>> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT |
+		CST4322_SPROM_PRESENT))) {
+		SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__));
+		return NULL;
+	}
+
+	/* assume current core is CC */
+	if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43235_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43234_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43238_CHIP_ID) &&
+	                                 (CHIPREV(sii->pub.chiprev) <= 2))) {
+
+		if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
+			uint clkdiv;
+			clkdiv = R_REG(osh, &cc->clkdiv);
+			/* otp_clk_div is even number, 120/14 < 9mhz */
+			clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
+			W_REG(osh, &cc->clkdiv, clkdiv);
+			SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv));
+		}
+		OSL_DELAY(10);
+	}
+
+	if (bustype == PCI_BUS) {
+
+	}
+#endif 
+#ifdef BCM_SDRBL
+	/* 4360 ROM bootloader, PCIe case: if SDR is enabled but protection is
+	 * not turned on, we want to hold the ARM in reset.
+	 * Bottom line: in the sdrenable case, we allow the ARM to boot only when
+	 * protection is turned on.
+	 */
+	if (CHIP_HOSTIF_PCIE(&(sii->pub))) {
+		uint32 sflags = si_arm_sflags(&(sii->pub));
+
+		/* If SDR is enabled but protection is not turned on
+		* then we want to force arm to WFI.
+		*/
+		if ((sflags & (SISF_SDRENABLE | SISF_TCMPROT)) == SISF_SDRENABLE) {
+			disable_arm_irq();
+			while (1) {
+				hnd_cpu_wait(sih);
+			}
+		}
+	}
+#endif /* BCM_SDRBL */
+
+	pvars = NULL;
+	BCM_REFERENCE(pvars);
+
+
+
+		if (sii->pub.ccrev >= 20) {
+			uint32 gpiopullup = 0, gpiopulldown = 0;
+			cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+			ASSERT(cc != NULL);
+
+			/* 4314/43142 has pin muxing, don't clear gpio bits */
+			if ((CHIPID(sih->chip) == BCM4314_CHIP_ID) ||
+				(CHIPID(sih->chip) == BCM43142_CHIP_ID)) {
+				gpiopullup |= 0x402e0;
+				gpiopulldown |= 0x20500;
+			}
+
+			W_REG(osh, &cc->gpiopullup, gpiopullup);
+			W_REG(osh, &cc->gpiopulldown, gpiopulldown);
+			si_setcoreidx(sih, origidx);
+		}
+
+
+	/* clear any previous epidiag-induced target abort */
+	ASSERT(!si_taclear(sih, FALSE));
+
+
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+	/* Enable console prints */
+	si_muxenab(sii, 3);
+#endif
+
+	return (sii);
+
+exit:
+
+	return NULL;
+}
+
+/** may be called with core in reset */
+void
+si_detach(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint idx;
+
+
+	if (BUSTYPE(sih->bustype) == SI_BUS)
+		for (idx = 0; idx < SI_MAXCORES; idx++)
+			if (cores_info->regs[idx]) {
+				REG_UNMAP(cores_info->regs[idx]);
+				cores_info->regs[idx] = NULL;
+			}
+
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+	if (cores_info != &ksii_cores_info)
+#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+		MFREE(sii->osh, cores_info, sizeof(si_cores_info_t));
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+	if (sii != &ksii)
+#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+		MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+void *
+si_osh(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->osh;
+}
+
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	if (sii->osh != NULL) {
+		SI_ERROR(("osh is already set....\n"));
+		ASSERT(!sii->osh);
+	}
+	sii->osh = osh;
+}
+
+/** register driver interrupt disabling and restoring callback functions */
+void
+si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+                          void *intrsenabled_fn, void *intr_arg)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	sii->intr_arg = intr_arg;
+	sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+	sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+	sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+	/* Save the current core id. When this function is called, the current core
+	 * must be the core which provides the driver functions (il, et, wl, etc.).
+	 */
+	sii->dev_coreid = cores_info->coreid[sii->curidx];
+}
+
+void
+si_deregister_intr_callback(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	sii->intrsoff_fn = NULL;
+	sii->intrsrestore_fn = NULL;
+	sii->intrsenabled_fn = NULL;
+}
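+
+/*
+ * Illustrative registration of the interrupt callbacks above (a sketch, not
+ * part of this change; the callback names are hypothetical, the real ones
+ * come from the bus/driver layer):
+ *
+ *	si_register_intr_callback(sih, (void *)my_intrsoff,
+ *	                          (void *)my_intrsrestore,
+ *	                          (void *)my_intrsenabled, my_ctx);
+ *	...
+ *	si_deregister_intr_callback(sih);
+ */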
+
+uint
+si_intflag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_intflag(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return R_REG(sii->osh, ((uint32 *)(uintptr)
+			    (sii->oob_router + OOB_STATUSA)));
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint
+si_flag(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_flag(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_flag(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_flag(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint
+si_flag_alt(si_t *sih)
+{
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_flag_alt(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_setint(si_t *sih, int siflag)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_setint(sih, siflag);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_setint(sih, siflag);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_setint(sih, siflag);
+	else
+		ASSERT(0);
+}
+
+uint
+si_coreid(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	return cores_info->coreid[sii->curidx];
+}
+
+uint
+si_coreidx(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->curidx;
+}
+
+void *
+si_d11_switch_addrbase(si_t *sih, uint coreunit)
+{
+	return si_setcore(sih,  D11_CORE_ID, coreunit);
+}
+
+/** return the core-type instantiation # of the current core */
+uint
+si_coreunit(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint idx;
+	uint coreid;
+	uint coreunit;
+	uint i;
+
+	coreunit = 0;
+
+	idx = sii->curidx;
+
+	ASSERT(GOODREGS(sii->curmap));
+	coreid = si_coreid(sih);
+
+	/* count the cores of our type */
+	for (i = 0; i < idx; i++)
+		if (cores_info->coreid[i] == coreid)
+			coreunit++;
+
+	return (coreunit);
+}
+
+uint
+si_corevendor(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corevendor(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corevendor(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corevendor(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+bool
+si_backplane64(si_t *sih)
+{
+	return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+uint
+si_corerev(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corerev(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corerev(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corerev(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+
+/* return index of coreid or BADIDX if not found */
+uint
+si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint found;
+	uint i;
+
+
+	found = 0;
+
+	for (i = 0; i < sii->numcores; i++)
+		if (cores_info->coreid[i] == coreid) {
+			if (found == coreunit)
+				return (i);
+			found++;
+		}
+
+	return (BADIDX);
+}
+
+/** return total coreunit of coreid or zero if not found */
+uint
+si_numcoreunits(si_t *sih, uint coreid)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint found = 0;
+	uint i;
+
+	for (i = 0; i < sii->numcores; i++) {
+		if (cores_info->coreid[i] == coreid) {
+			found++;
+		}
+	}
+
+	return found;
+}
+
+/** return total D11 coreunits */
+uint
+BCMRAMFN(si_numd11coreunits)(si_t *sih)
+{
+	uint found = 0;
+
+	found = si_numcoreunits(sih, D11_CORE_ID);
+
+#if defined(WLRSDB) && defined(WLRSDB_DISABLED)
+	/* If RSDB functionality is compiled out,
+	 * then ignore any D11 cores beyond the first.
+	 * Used in no-RSDB dongle build variants for RSDB chips.
+	 */
+	found = 1;
+#endif /* defined(WLRSDB) && defined(WLRSDB_DISABLED) */
+
+	return found;
+}
+
+/** return list of found cores */
+uint
+si_corelist(si_t *sih, uint coreid[])
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	bcopy((uchar*)cores_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
+	return (sii->numcores);
+}
+
+/** return current wrapper mapping */
+void *
+si_wrapperregs(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+
+	return (sii->curwrap);
+}
+
+/** return current register mapping */
+void *
+si_coreregs(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+
+	return (sii->curmap);
+}
+
+/**
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
+ */
+void *
+si_setcore(si_t *sih, uint coreid, uint coreunit)
+{
+	uint idx;
+
+	idx = si_findcoreidx(sih, coreid, coreunit);
+	if (!GOODIDX(idx))
+		return (NULL);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, idx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_setcoreidx(sih, idx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_setcoreidx(sih, idx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+void *
+si_setcoreidx(si_t *sih, uint coreidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, coreidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_setcoreidx(sih, coreidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_setcoreidx(sih, coreidx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+/** Turn off interrupts, as required by sb_setcore, before switching cores */
+void *
+si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
+{
+	void *cc;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (SI_FAST(sii)) {
+		/* Overload the origidx variable to remember the coreid;
+		 * this works because core ids cannot be confused with
+		 * core indices.
+		 */
+		*origidx = coreid;
+		if (coreid == CC_CORE_ID)
+			return (void *)CCREGS_FAST(sii);
+		else if (coreid == sih->buscoretype)
+			return (void *)PCIEREGS(sii);
+	}
+	INTR_OFF(sii, *intr_val);
+	*origidx = sii->curidx;
+	cc = si_setcore(sih, coreid, 0);
+	ASSERT(cc != NULL);
+
+	return cc;
+}
+
+/* restore coreidx and restore interrupt */
+void
+si_restore_core(si_t *sih, uint coreid, uint intr_val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
+		return;
+
+	si_setcoreidx(sih, coreid);
+	INTR_RESTORE(sii, intr_val);
+}
+
+int
+si_numaddrspaces(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_numaddrspaces(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_numaddrspaces(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_numaddrspaces(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspace(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspace(sih, asidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_addrspace(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_addrspace(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspacesize(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspacesize(sih, asidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_addrspacesize(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_addrspacesize(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+	/* Only supported for SOCI_AI */
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_coreaddrspaceX(sih, asidx, addr, size);
+	else
+		*size = 0;
+}
+
+uint32
+si_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_cflags(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_core_cflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_core_cflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_cflags_wo(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_cflags_wo(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_cflags_wo(sih, mask, val);
+	else
+		ASSERT(0);
+}
+
+uint32
+si_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_sflags(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_core_sflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_core_sflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+bool
+si_iscoreup(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_iscoreup(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_iscoreup(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_iscoreup(sih);
+	else {
+		ASSERT(0);
+		return FALSE;
+	}
+}
+
+uint
+si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	/* only for AI back plane chips */
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return (ai_wrap_reg(sih, offset, mask, val));
+	return 0;
+}
+
+uint
+si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corereg(sih, coreidx, regoff, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corereg(sih, coreidx, regoff, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corereg(sih, coreidx, regoff, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+/** ILP sensitive register access needs special treatment to avoid backplane stalls */
+bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff)
+{
+	if (idx == SI_CC_IDX) {
+		if (CHIPCREGS_ILP_SENSITIVE(regoff))
+			return TRUE;
+	} else if (PMUREGS_ILP_SENSITIVE(regoff)) {
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/** 'idx' should refer either to the chipcommon core or the PMU core */
+uint
+si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val)
+{
+	int pmustatus_offset;
+
+	/* prevent backplane stall on double write to 'ILP domain' registers in the PMU */
+	if (mask != 0 && sih->pmurev >= 22 &&
+	    si_pmu_is_ilp_sensitive(idx, regoff)) {
+		pmustatus_offset = AOB_ENAB(sih) ? OFFSETOF(pmuregs_t, pmustatus) :
+			OFFSETOF(chipcregs_t, pmustatus);
+
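+		/* poll pmustatus until any previously issued slow (ILP) write has drained */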
+		while (si_corereg(sih, idx, pmustatus_offset, 0, 0) & PST_SLOW_WR_PENDING)
+			{};
+	}
+
+	return si_corereg(sih, idx, regoff, mask, val);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * returns the mapped address of the register, which can be used with
+ * W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+uint32 *
+si_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corereg_addr(sih, coreidx, regoff);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corereg_addr(sih, coreidx, regoff);
+	else {
+		return NULL;
+	}
+}
+
+void
+si_core_disable(si_t *sih, uint32 bits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_disable(sih, bits);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_disable(sih, bits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_disable(sih, bits);
+}
+
+void
+si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_reset(sih, bits, resetbits);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_reset(sih, bits, resetbits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_reset(sih, bits, resetbits);
+}
+
+/** Run bist on current core. Caller needs to take care of core-specific bist hazards */
+int
+si_corebist(si_t *sih)
+{
+	uint32 cflags;
+	int result = 0;
+
+	/* Read core control flags */
+	cflags = si_core_cflags(sih, 0, 0);
+
+	/* Set bist & fgc */
+	si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));
+
+	/* Wait for bist done */
+	SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+	if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+		result = BCME_ERROR;
+
+	/* Reset core control flags */
+	si_core_cflags(sih, 0xffff, cflags);
+
+	return result;
+}
+
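+/** decode the divider field encodings used by PLL types 1/3/4/7; returns 0 for reserved values */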
+static uint32
+factor6(uint32 x)
+{
+	switch (x) {
+	case CC_F6_2:	return 2;
+	case CC_F6_3:	return 3;
+	case CC_F6_4:	return 4;
+	case CC_F6_5:	return 5;
+	case CC_F6_6:	return 6;
+	case CC_F6_7:	return 7;
+	default:	return 0;
+	}
+}
+
+/** calculate the speed the SI would run at given a set of clockcontrol values */
+uint32
+si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
+{
+	uint32 n1, n2, clock, m1, m2, m3, mc;
+
+	n1 = n & CN_N1_MASK;
+	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+	if (pll_type == PLL_TYPE6) {
+		if (m & CC_T6_MMASK)
+			return CC_T6_M1;
+		else
+			return CC_T6_M0;
+	} else if ((pll_type == PLL_TYPE1) ||
+	           (pll_type == PLL_TYPE3) ||
+	           (pll_type == PLL_TYPE4) ||
+	           (pll_type == PLL_TYPE7)) {
+		n1 = factor6(n1);
+		n2 += CC_F5_BIAS;
+	} else if (pll_type == PLL_TYPE2) {
+		n1 += CC_T2_BIAS;
+		n2 += CC_T2_BIAS;
+		ASSERT((n1 >= 2) && (n1 <= 7));
+		ASSERT((n2 >= 5) && (n2 <= 23));
+	} else if (pll_type == PLL_TYPE5) {
+		return (100000000);
+	} else
+		ASSERT(0);
+	/* PLL types 3 and 7 use BASE2 (25 MHz) */
+	if ((pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE7)) {
+		clock = CC_CLOCK_BASE2 * n1 * n2;
+	} else
+		clock = CC_CLOCK_BASE1 * n1 * n2;
+
+	if (clock == 0)
+		return 0;
+
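+	/* extract the individual m-dividers and the clock-control (mc) field */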
+	m1 = m & CC_M1_MASK;
+	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+	if ((pll_type == PLL_TYPE1) ||
+	    (pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE4) ||
+	    (pll_type == PLL_TYPE7)) {
+		m1 = factor6(m1);
+		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+			m2 += CC_F5_BIAS;
+		else
+			m2 = factor6(m2);
+		m3 = factor6(m3);
+
+		switch (mc) {
+		case CC_MC_BYPASS:	return (clock);
+		case CC_MC_M1:		return (clock / m1);
+		case CC_MC_M1M2:	return (clock / (m1 * m2));
+		case CC_MC_M1M2M3:	return (clock / (m1 * m2 * m3));
+		case CC_MC_M1M3:	return (clock / (m1 * m3));
+		default:		return (0);
+		}
+	} else {
+		ASSERT(pll_type == PLL_TYPE2);
+
+		m1 += CC_T2_BIAS;
+		m2 += CC_T2M2_BIAS;
+		m3 += CC_T2_BIAS;
+		ASSERT((m1 >= 2) && (m1 <= 7));
+		ASSERT((m2 >= 3) && (m2 <= 10));
+		ASSERT((m3 >= 2) && (m3 <= 7));
+
+		if ((mc & CC_T2MC_M1BYP) == 0)
+			clock /= m1;
+		if ((mc & CC_T2MC_M2BYP) == 0)
+			clock /= m2;
+		if ((mc & CC_T2MC_M3BYP) == 0)
+			clock /= m3;
+
+		return (clock);
+	}
+}
+
+/**
+ * Some chips can have multiple host interfaces, but only one will be active at a time.
+ * For a given chip, return the active host interface based on the package option and
+ * the chipcommon chip status (cc_chipst).
+ */
+uint
+si_chip_hostif(si_t *sih)
+{
+	uint hosti = 0;
+
+	switch (CHIPID(sih->chip)) {
+
+	case BCM43602_CHIP_ID:
+		hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4360_CHIP_ID:
+		/* chippkg bit-0 == 0 means PCIE-only packages;
+		 * chippkg bit-0 == 1 means both PCIE and USB cores are enabled
+		 */
+		if ((sih->chippkg & 0x1) && (sih->chipst & CST4360_MODE_USB))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else
+			hosti = CHIP_HOSTIF_PCIEMODE;
+
+		break;
+
+	case BCM4335_CHIP_ID:
+		/* TBD: like in 4360, do we need to check pkg? */
+		if (CST4335_CHIPMODE_USB20D(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4335_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4345_CHIP_ID:
+		if (CST4345_CHIPMODE_USB20D(sih->chipst) || CST4345_CHIPMODE_HSIC(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4345_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else if (CST4345_CHIPMODE_PCIE(sih->chipst))
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4349_CHIP_GRPID:
+		if (CST4349_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else if (CST4349_CHIPMODE_PCIE(sih->chipst))
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4350_CHIP_ID:
+	case BCM4354_CHIP_ID:
+	case BCM4356_CHIP_ID:
+	case BCM43556_CHIP_ID:
+	case BCM43558_CHIP_ID:
+	case BCM43566_CHIP_ID:
+	case BCM43568_CHIP_ID:
+	case BCM43569_CHIP_ID:
+	case BCM43570_CHIP_ID:
+	case BCM4358_CHIP_ID:
+		if (CST4350_CHIPMODE_USB20D(sih->chipst) ||
+		    CST4350_CHIPMODE_HSIC20D(sih->chipst) ||
+		    CST4350_CHIPMODE_USB30D(sih->chipst) ||
+		    CST4350_CHIPMODE_USB30D_WL(sih->chipst) ||
+		    CST4350_CHIPMODE_HSIC30D(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4350_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else if (CST4350_CHIPMODE_PCIE(sih->chipst))
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	default:
+		break;
+	}
+
+	return hosti;
+}
+
+
+/** set chip watchdog reset timer to fire in 'ticks' */
+void
+si_watchdog(si_t *sih, uint ticks)
+{
+	uint nb, maxt;
+
+	if (PMUCTL_ENAB(sih)) {
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+		if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) &&
+		    (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) {
+			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
+			si_setcore(sih, USB20D_CORE_ID, 0);
+			si_core_disable(sih, 1);
+			si_setcore(sih, CC_CORE_ID, 0);
+		}
+#endif 
+
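+		/* PMU watchdog counter width depends on chipcommon rev: 16 bits (<26), 24 bits, or 32 bits (>=37) */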
+		nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24);
+		/* The mips compiler uses the sllv instruction,
+		 * so we specially handle the 32-bit case.
+		 */
+		if (nb == 32)
+			maxt = 0xffffffff;
+		else
+			maxt = ((1 << nb) - 1);
+
+		if (ticks == 1)
+			ticks = 2;
+		else if (ticks > maxt)
+			ticks = maxt;
+
+		pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, ~0, ticks);
+	} else {
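+		/* PMU watchdog not used: fall back to the 28-bit chipcommon watchdog */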
+		maxt = (1 << 28) - 1;
+		if (ticks > maxt)
+			ticks = maxt;
+
+		si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+	}
+}
+
+/** trigger watchdog reset after ms milliseconds */
+void
+si_watchdog_ms(si_t *sih, uint32 ms)
+{
+	si_watchdog(sih, wd_msticks * ms);
+}
+
+uint32 si_watchdog_msticks(void)
+{
+	return wd_msticks;
+}
+
+bool
+si_taclear(si_t *sih, bool details)
+{
+	return FALSE;
+}
+
+
+
+/** return the slow clock source - LPO, XTAL, or PCI */
+static uint
+si_slowclk_src(si_info_t *sii)
+{
+	chipcregs_t *cc;
+
+	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+	if (sii->pub.ccrev < 6) {
+		if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
+		    (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) &
+		     PCI_CFG_GPIO_SCS))
+			return (SCC_SS_PCI);
+		else
+			return (SCC_SS_XTAL);
+	} else if (sii->pub.ccrev < 10) {
+		cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx);
+		ASSERT(cc);
+		return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
+	} else	/* Insta-clock */
+		return (SCC_SS_XTAL);
+}
+
+/** return the ILP (slowclock) min or max frequency */
+static uint
+si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
+{
+	uint32 slowclk;
+	uint div;
+
+	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+	/* shouldn't be here unless we've established the chip has dynamic clk control */
+	ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
+
+	slowclk = si_slowclk_src(sii);
+	if (sii->pub.ccrev < 6) {
+		if (slowclk == SCC_SS_PCI)
+			return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
+		else
+			return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
+	} else if (sii->pub.ccrev < 10) {
+		div = 4 *
+		        (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
+		if (slowclk == SCC_SS_LPO)
+			return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
+		else if (slowclk == SCC_SS_XTAL)
+			return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
+		else if (slowclk == SCC_SS_PCI)
+			return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
+		else
+			ASSERT(0);
+	} else {
+		/* Chipc rev 10 is InstaClock */
+		div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
+		div = 4 * (div + 1);
+		return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
+	}
+	return (0);
+}
+
+static void
+si_clkctl_setdelay(si_info_t *sii, void *chipcregs)
+{
+	chipcregs_t *cc = (chipcregs_t *)chipcregs;
+	uint slowmaxfreq, pll_delay, slowclk;
+	uint pll_on_delay, fref_sel_delay;
+
+	pll_delay = PLL_DELAY;
+
+	/* If the slow clock is not sourced by the xtal then add the xtal_on_delay
+	 * since the xtal will also be powered down by dynamic clk control logic.
+	 */
+
+	slowclk = si_slowclk_src(sii);
+	if (slowclk != SCC_SS_XTAL)
+		pll_delay += XTAL_ON_DELAY;
+
+	/* Starting with 4318 it is ILP that is used for the delays */
+	slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? FALSE : TRUE, cc);
+
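+	/* convert the microsecond delays into slow-clock ticks, rounding up */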
+	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
+	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
+
+	W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay);
+	W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay);
+}
+
+/** initialize power control delay registers */
+void
+si_clkctl_init(si_t *sih)
+{
+	si_info_t *sii;
+	uint origidx = 0;
+	chipcregs_t *cc;
+	bool fast;
+
+	if (!CCCTL_ENAB(sih))
+		return;
+
+	sii = SI_INFO(sih);
+	fast = SI_FAST(sii);
+	if (!fast) {
+		origidx = sii->curidx;
+		if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+			return;
+	} else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
+		return;
+	ASSERT(cc != NULL);
+
+	/* set all Instaclk chip ILP to 1 MHz */
+	if (sih->ccrev >= 10)
+		SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
+		        (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+
+	si_clkctl_setdelay(sii, (void *)(uintptr)cc);
+
+	OSL_DELAY(20000);
+
+	if (!fast)
+		si_setcoreidx(sih, origidx);
+}
+
+
+/** change logical "focus" to the gpio core for optimized access */
+void *
+si_gpiosetcore(si_t *sih)
+{
+	return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/**
+ * mask & set gpiocontrol bits.
+ * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin.
+ * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated
+ *   to some chip-specific purpose.
+ */
+uint32
+si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** mask&set gpio output enable bits */
+uint32
+si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioouten);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** mask&set gpio output bits */
+uint32
+si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioout);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** reserve one gpio */
+uint32
+si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* only cores on SI_BUS share GPIOs and only application users need to
+	 * reserve/release GPIOs
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return 0xffffffff;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return 0xffffffff;
+	}
+
+	/* already reserved */
+	if (si_gpioreservation & gpio_bitmask)
+		return 0xffffffff;
+	/* set reservation */
+	si_gpioreservation |= gpio_bitmask;
+
+	return si_gpioreservation;
+}
+
+/**
+ * release one gpio.
+ *
+ * Releasing the gpio doesn't change the current value on the GPIO; the last written
+ * value persists until someone overwrites it.
+ */
+uint32
+si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* only cores on SI_BUS share GPIOs and only application users need to
+	 * reserve/release GPIOs
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return 0xffffffff;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return 0xffffffff;
+	}
+
+	/* already released */
+	if (!(si_gpioreservation & gpio_bitmask))
+		return 0xffffffff;
+
+	/* clear reservation */
+	si_gpioreservation &= ~gpio_bitmask;
+
+	return si_gpioreservation;
+}
+
+/* return the current gpioin register value */
+uint32
+si_gpioin(si_t *sih)
+{
+	uint regoff;
+
+	regoff = OFFSETOF(chipcregs_t, gpioin);
+	return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointmask);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an led */
+uint32
+si_gpioled(si_t *sih, uint32 mask, uint32 val)
+{
+	if (sih->ccrev < 16)
+		return 0xffffffff;
+
+	/* gpio led powersave reg */
+	return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
+}
+
+/* mask&set gpio timer val */
+uint32
+si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
+{
+	if (sih->ccrev < 16)
+		return 0xffffffff;
+
+	return (si_corereg(sih, SI_CC_IDX,
+		OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
+}
+
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+	uint offs;
+
+	if (sih->ccrev < 20)
+		return 0xffffffff;
+
+	offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+	uint offs;
+
+	if (sih->ccrev < 11)
+		return 0xffffffff;
+
+	if (regtype == GPIO_REGEVT)
+		offs = OFFSETOF(chipcregs_t, gpioevent);
+	else if (regtype == GPIO_REGEVT_INTMSK)
+		offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+	else if (regtype == GPIO_REGEVT_INTPOL)
+		offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+	else
+		return 0xffffffff;
+
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+void *
+si_gpio_handler_register(si_t *sih, uint32 event,
+	bool level, gpio_handler_t cb, void *arg)
+{
+	si_info_t *sii = SI_INFO(sih);
+	gpioh_item_t *gi;
+
+	ASSERT(event);
+	ASSERT(cb != NULL);
+
+	if (sih->ccrev < 11)
+		return NULL;
+
+	if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
+		return NULL;
+
+	bzero(gi, sizeof(gpioh_item_t));
+	gi->event = event;
+	gi->handler = cb;
+	gi->arg = arg;
+	gi->level = level;
+
+	gi->next = sii->gpioh_head;
+	sii->gpioh_head = gi;
+
+	return (void *)(gi);
+}
+
+void
+si_gpio_handler_unregister(si_t *sih, void *gpioh)
+{
+	si_info_t *sii = SI_INFO(sih);
+	gpioh_item_t *p, *n;
+
+	if (sih->ccrev < 11)
+		return;
+
+	ASSERT(sii->gpioh_head != NULL);
+	if ((void*)sii->gpioh_head == gpioh) {
+		sii->gpioh_head = sii->gpioh_head->next;
+		MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+		return;
+	} else {
+		p = sii->gpioh_head;
+		n = p->next;
+		while (n) {
+			if ((void*)n == gpioh) {
+				p->next = n->next;
+				MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+				return;
+			}
+			p = n;
+			n = n->next;
+		}
+	}
+
+	ASSERT(0); /* Not found in list */
+}
+
+void
+si_gpio_handler_process(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	gpioh_item_t *h;
+	uint32 level = si_gpioin(sih);
+	uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0);
+	uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
+	uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0);
+
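+	/* walk the handler list and invoke those whose registered events are active */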
+	for (h = sii->gpioh_head; h != NULL; h = h->next) {
+		if (h->handler) {
+			uint32 status = (h->level ? level : edge) & h->event;
+			uint32 polarity = (h->level ? levelp : edgep) & h->event;
+
+			/* polarity bitval is opposite of status bitval */
+			if ((h->level && (status ^ polarity)) || (!h->level && status))
+				h->handler(status, h->arg);
+		}
+	}
+
+	si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
+}
+
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
+{
+	uint offs;
+
+	if (sih->ccrev < 11)
+		return 0xffffffff;
+
+	offs = OFFSETOF(chipcregs_t, intmask);
+	return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
+
+
+/** Return the size of the specified SOCRAM bank */
+static uint
+socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
+{
+	uint banksize, bankinfo;
+	uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+	ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
+
+	W_REG(sii->osh, &regs->bankidx, bankidx);
+	bankinfo = R_REG(sii->osh, &regs->bankinfo);
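+	/* bank size = SOCRAM_BANKINFO_SZBASE * (size field + 1) */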
+	banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
+	return banksize;
+}
+
+void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 16) {
+		W_REG(sii->osh, &regs->bankidx, bankidx);
+		W_REG(sii->osh, &regs->bankpda, bankpda);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	if (!set)
+		*enable = *protect = *remap = 0;
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 10) {
+		uint32 extcinfo;
+		uint8 nb;
+		uint8 i;
+		uint32 bankidx, bankinfo;
+
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (set) {
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK;
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK;
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMREMAP_MASK;
+				if (*enable) {
+					bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT);
+					if (*protect)
+						bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT);
+					if ((corerev >= 16) && *remap)
+						bankinfo |=
+							(1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT);
+				}
+				W_REG(sii->osh, &regs->bankinfo, bankinfo);
+			}
+			else if (i == 0) {
+				if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) {
+					*enable = 1;
+					if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK)
+						*protect = 1;
+					if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK)
+						*remap = 1;
+				}
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+}
+
+bool
+si_socdevram_remap_isenb(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	sbsocramregs_t *regs;
+	bool wasup, remap = FALSE;
+	uint corerev;
+	uint32 extcinfo;
+	uint8 nb;
+	uint8 i;
+	uint32 bankidx, bankinfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 16) {
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+				remap = TRUE;
+				break;
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+	return remap;
+}
+
+bool
+si_socdevram_pkg(si_t *sih)
+{
+	if (si_socdevram_size(sih) > 0)
+		return TRUE;
+	else
+		return FALSE;
+}
+
+uint32
+si_socdevram_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 memsize = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 10) {
+		uint32 extcinfo;
+		uint8 nb;
+		uint8 i;
+
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+		for (i = 0; i < nb; i++)
+			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+uint32
+si_socdevram_remap_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 memsize = 0, banksz;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 extcinfo;
+	uint8 nb;
+	uint8 i;
+	uint32 bankidx, bankinfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 16) {
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+
+		/*
+		 * FIX: A0 issue: max addressable is 512KB instead of 640KB.
+		 * Only four banks are accessible to ARM
+		 */
+		if ((corerev == 16) && (nb == 5))
+			nb = 4;
+
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+				banksz = socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+				memsize += banksz;
+			} else {
+				/* Account only consecutive banks for now */
+				break;
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+/** Return the RAM size of the SOCRAM core */
+uint32
+si_socram_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 coreinfo;
+	uint memsize = 0;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+	corerev = si_corerev(sih);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+	/* Calculate size from coreinfo based on rev */
+	if (corerev == 0)
+		memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+	else if (corerev < 3) {
+		memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+		memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+	} else if ((corerev <= 7) || (corerev == 12)) {
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+		uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+		if (lss != 0)
+			nb --;
+		memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+		if (lss != 0)
+			memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+	} else {
+		uint8 i;
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		for (i = 0; i < nb; i++)
+			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+
+/** Return the TCM-RAM size of the ARMCR4 core. */
+uint32
+si_tcm_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint8 *regs;
+	bool wasup;
+	uint32 corecap;
+	uint memsize = 0;
+	uint32 nab = 0;
+	uint32 nbb = 0;
+	uint32 totb = 0;
+	uint32 bxinfo = 0;
+	uint32 idx = 0;
+	uint32 *arm_cap_reg;
+	uint32 *arm_bidx;
+	uint32 *arm_binfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to CR4 core */
+	if (!(regs = si_setcore(sih, ARMCR4_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size. If in reset, come out of reset,
+	 * but remain in halt
+	 */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT);
+
+	arm_cap_reg = (uint32 *)(regs + SI_CR4_CAP);
+	corecap = R_REG(sii->osh, arm_cap_reg);
+
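+	/* TCM banks are split into two groups (A and B); sum the bank counts and walk them all */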
+	nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
+	nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
+	totb = nab + nbb;
+
+	arm_bidx = (uint32 *)(regs + SI_CR4_BANKIDX);
+	arm_binfo = (uint32 *)(regs + SI_CR4_BANKINFO);
+	for (idx = 0; idx < totb; idx++) {
+		W_REG(sii->osh, arm_bidx, idx);
+
+		bxinfo = R_REG(sii->osh, arm_binfo);
+		memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+bool
+si_has_flops(si_t *sih)
+{
+	uint origidx, cr4_rev;
+
+	/* Find out CR4 core revision */
+	origidx = si_coreidx(sih);
+	if (si_setcore(sih, ARMCR4_CORE_ID, 0)) {
+		cr4_rev = si_corerev(sih);
+		si_setcoreidx(sih, origidx);
+
+		if (cr4_rev == 1 || cr4_rev >= 3)
+			return TRUE;
+	}
+	return FALSE;
+}
+
+uint32
+si_socram_srmem_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 coreinfo;
+	uint memsize = 0;
+
+	if ((CHIPID(sih->chip) == BCM4334_CHIP_ID) && (CHIPREV(sih->chiprev) < 2)) {
+		return (32 * 1024);
+	}
+
+	if (CHIPID(sih->chip) == BCM43430_CHIP_ID) {
+		return (64 * 1024);
+	}
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+	corerev = si_corerev(sih);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+	/* Calculate size from coreinfo based on rev */
+	if (corerev >= 16) {
+		uint8 i;
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		for (i = 0; i < nb; i++) {
+			W_REG(sii->osh, &regs->bankidx, i);
+			if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK)
+				memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+void
+si_btcgpiowar(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	chipcregs_t *cc;
+
+	/* Make sure that there is a ChipCommon core present &&
+	 * UART_TX is strapped to 1
+	 */
+	if (!(sih->cccaps & CC_CAP_UARTGPIO))
+		return;
+
+	/* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+	INTR_OFF(sii, intr_val);
+
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(cc != NULL);
+
+	W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+	/* restore the original index */
+	si_setcoreidx(sih, origidx);
+
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_chipcontrl_btshd0_4331(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	chipcregs_t *cc;
+	uint origidx;
+	uint32 val;
+	uint intr_val = 0;
+
+	INTR_OFF(sii, intr_val);
+
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	/* bt_shd0 controls are the same for 4331 chiprevs 0 and 1, packages 12x9 and 12x12 */
+	if (on) {
+		/* Enable bt_shd0 on gpio4: */
+		val |= (CCTRL4331_BT_SHD0_ON_GPIO4);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	} else {
+		val &= ~(CCTRL4331_BT_SHD0_ON_GPIO4);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+
+	/* restore the original index */
+	si_setcoreidx(sih, origidx);
+
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_chipcontrl_restore(si_t *sih, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	W_REG(sii->osh, &cc->chipcontrol, val);
+	si_setcoreidx(sih, origidx);
+}
+
+uint32
+si_chipcontrl_read(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	val = R_REG(sii->osh, &cc->chipcontrol);
+	si_setcoreidx(sih, origidx);
+	return val;
+}
+
+void
+si_chipcontrl_epa4331(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (on) {
+		if (sih->chippkg == 9 || sih->chippkg == 0xb) {
+			val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+			/* Ext PA Controls for 4331 12x9 Package */
+			W_REG(sii->osh, &cc->chipcontrol, val);
+		} else {
+			/* Ext PA Controls for 4331 12x12 Package */
+			if (sih->chiprev > 0) {
+				W_REG(sii->osh, &cc->chipcontrol, val |
+				      (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2));
+			} else {
+				W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN));
+			}
+		}
+	} else {
+		val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_EN2 | CCTRL4331_EXTPA_ON_GPIO2_5);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+
+	si_setcoreidx(sih, origidx);
+}
+
+/** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. */
+void
+si_chipcontrl_srom4360(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (on) {
+		val &= ~(CCTRL4360_SECI_MODE |
+			CCTRL4360_BTSWCTRL_MODE |
+			CCTRL4360_EXTRA_FEMCTRL_MODE |
+			CCTRL4360_BT_LGCY_MODE |
+			CCTRL4360_CORE2FEMCTRL4_ON);
+
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	} else {
+	}
+
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl)
+{
+	si_info_t *sii;
+	chipcregs_t *cc;
+	uint origidx;
+	uint32 val;
+	bool sel_chip;
+
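+	/* this WAR only applies to 4331/43431 parts in the 12x9 (chippkg 9/0xb) package */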
+	sel_chip = (CHIPID(sih->chip) == BCM4331_CHIP_ID) ||
+		(CHIPID(sih->chip) == BCM43431_CHIP_ID);
+	sel_chip &= ((sih->chippkg == 9 || sih->chippkg == 0xb));
+
+	if (!sel_chip)
+		return;
+
+	sii = SI_INFO(sih);
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (enter_wowl) {
+		val |= CCTRL4331_EXTPA_EN;
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	} else {
+		val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+	si_setcoreidx(sih, origidx);
+}
+#endif 
+
+uint
+si_pll_reset(si_t *sih)
+{
+	uint err = 0;
+
+	return (err);
+}
+
+/** Enable BT-COEX & Ex-PA for 4313 */
+void
+si_epa_4313war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	/* EPA Fix */
+	W_REG(sii->osh, &cc->gpiocontrol,
+	R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_clk_pmu_htavail_set(si_t *sih, bool set_clear)
+{
+}
+
+/** Re-enable synth_pwrsw resource in min_res_mask for 4313 */
+void
+si_pmu_synth_pwrsw_4313_war(si_t *sih)
+{
+}
+
+/** WL/BT control for 4313 btcombo boards >= P250 */
+void
+si_btcombo_p250_4313_war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	W_REG(sii->osh, &cc->gpiocontrol,
+		R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK);
+
+	W_REG(sii->osh, &cc->gpioouten,
+		R_REG(sii->osh, &cc->gpioouten) | GPIO_CTRL_5_6_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+void
+si_btc_enable_chipcontrol(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	/* BT fix */
+	W_REG(sii->osh, &cc->chipcontrol,
+		R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+void
+si_btcombo_43228_war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK);
+	W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+
+/** check if the device is removed */
+bool
+si_deviceremoved(si_t *sih)
+{
+	uint32 w;
+
+	switch (BUSTYPE(sih->bustype)) {
+	case PCI_BUS:
+		ASSERT(SI_INFO(sih)->osh != NULL);
+		w = OSL_PCI_READ_CONFIG(SI_INFO(sih)->osh, PCI_CFG_VID, sizeof(uint32));
+		if ((w & 0xFFFF) != VENDOR_BROADCOM)
+			return TRUE;
+		break;
+	}
+	return FALSE;
+}
+
+bool
+si_is_sprom_available(si_t *sih)
+{
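+	/* on chipcommon rev >= 31, SPROM presence is reported by the sromcontrol register */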
+	if (sih->ccrev >= 31) {
+		si_info_t *sii;
+		uint origidx;
+		chipcregs_t *cc;
+		uint32 sromctrl;
+
+		if ((sih->cccaps & CC_CAP_SROM) == 0)
+			return FALSE;
+
+		sii = SI_INFO(sih);
+		origidx = sii->curidx;
+		cc = si_setcoreidx(sih, SI_CC_IDX);
+		ASSERT(cc);
+		sromctrl = R_REG(sii->osh, &cc->sromcontrol);
+		si_setcoreidx(sih, origidx);
+		return (sromctrl & SRC_PRESENT);
+	}
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4312_CHIP_ID:
+		return ((sih->chipst & CST4312_SPROM_OTP_SEL_MASK) != CST4312_OTP_SEL);
+	case BCM4325_CHIP_ID:
+		return (sih->chipst & CST4325_SPROM_SEL) != 0;
+	case BCM4322_CHIP_ID:	case BCM43221_CHIP_ID:	case BCM43231_CHIP_ID:
+	case BCM43222_CHIP_ID:	case BCM43111_CHIP_ID:	case BCM43112_CHIP_ID:
+	case BCM4342_CHIP_ID: {
+		uint32 spromotp;
+		spromotp = (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >>
+		        CST4322_SPROM_OTP_SEL_SHIFT;
+		return (spromotp & CST4322_SPROM_PRESENT) != 0;
+	}
+	case BCM4329_CHIP_ID:
+		return (sih->chipst & CST4329_SPROM_SEL) != 0;
+	case BCM4315_CHIP_ID:
+		return (sih->chipst & CST4315_SPROM_SEL) != 0;
+	case BCM4319_CHIP_ID:
+		return (sih->chipst & CST4319_SPROM_SEL) != 0;
+	case BCM4336_CHIP_ID:
+	case BCM43362_CHIP_ID:
+		return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
+	case BCM4330_CHIP_ID:
+		return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
+	case BCM4313_CHIP_ID:
+		return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+	case BCM4331_CHIP_ID:
+	case BCM43431_CHIP_ID:
+		return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
+	case BCM43239_CHIP_ID:
+		return ((sih->chipst & CST43239_SPROM_MASK) &&
+			!(sih->chipst & CST43239_SFLASH_MASK));
+	case BCM4324_CHIP_ID:
+	case BCM43242_CHIP_ID:
+		return ((sih->chipst & CST4324_SPROM_MASK) &&
+			!(sih->chipst & CST4324_SFLASH_MASK));
+	case BCM4335_CHIP_ID:
+	case BCM4345_CHIP_ID:
+		return ((sih->chipst & CST4335_SPROM_MASK) &&
+			!(sih->chipst & CST4335_SFLASH_MASK));
+	case BCM4349_CHIP_GRPID:
+		return (sih->chipst & CST4349_SPROM_PRESENT) != 0;
+	case BCM4350_CHIP_ID:
+	case BCM4354_CHIP_ID:
+	case BCM4356_CHIP_ID:
+	case BCM43556_CHIP_ID:
+	case BCM43558_CHIP_ID:
+	case BCM43566_CHIP_ID:
+	case BCM43568_CHIP_ID:
+	case BCM43569_CHIP_ID:
+	case BCM43570_CHIP_ID:
+	case BCM4358_CHIP_ID:
+		return (sih->chipst & CST4350_SPROM_PRESENT) != 0;
+	case BCM43602_CHIP_ID:
+		return (sih->chipst & CST43602_SPROM_PRESENT) != 0;
+	case BCM43131_CHIP_ID:
+	case BCM43217_CHIP_ID:
+	case BCM43227_CHIP_ID:
+	case BCM43228_CHIP_ID:
+	case BCM43428_CHIP_ID:
+		return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT;
+	default:
+		return TRUE;
+	}
+}
+
+
+uint32 si_get_sromctl(si_t *sih)
+{
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 sromctl;
+	osl_t *osh = si_osh(sih);
+
+	cc = si_setcoreidx(sih, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	sromctl = R_REG(osh, &cc->sromcontrol);
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+	return sromctl;
+}
+
+int si_set_sromctl(si_t *sih, uint32 value)
+{
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	osl_t *osh = si_osh(sih);
+
+	cc = si_setcoreidx(sih, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	/* get chipcommon rev */
+	if (si_corerev(sih) < 32) {
+		/* restore the original core before bailing out */
+		si_setcoreidx(sih, origidx);
+		return BCME_UNSUPPORTED;
+	}
+
+	W_REG(osh, &cc->sromcontrol, value);
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+	return BCME_OK;
+
+}
+
+uint
+si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val)
+{
+	uint origidx, intr_val = 0;
+	uint ret_val;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	origidx = si_coreidx(sih);
+
+	INTR_OFF(sii, intr_val);
+	si_setcoreidx(sih, coreidx);
+
+	ret_val = si_wrapperreg(sih, offset, mask, val);
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+	return ret_val;
+}
+
+
+/* Clean up the resource request timer from the host when the ARM has been halted
+ * without a chance to clean up its own resources.
+ * If left uncleared, an interrupt from a software timer can still
+ * request the HT clock while the ARM is halted.
+ */
+uint32
+si_pmu_res_req_timer_clr(si_t *sih)
+{
+	uint32 mask;
+
+	mask = PRRT_REQ_ACTIVE | PRRT_INTEN | PRRT_HT_REQ;
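+	/* on chips other than the 4328 these bits sit 14 bits higher in the register */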
+	if (CHIPID(sih->chip) != BCM4328_CHIP_ID)
+		mask <<= 14;
+	/* clear mask bits */
+	pmu_corereg(sih, SI_CC_IDX, res_req_timer, mask, 0);
+	/* readback to ensure write completes */
+	return pmu_corereg(sih, SI_CC_IDX, res_req_timer, 0, 0);
+}
+
+/** turn on/off rfldo */
+void
+si_pmu_rfldo(si_t *sih, bool on)
+{
+}
+
+
+#ifdef SURVIVE_PERST_ENAB
+static uint32
+si_pcie_survive_perst(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	if (!PCIE(sii))
+		return (0);
+
+	return pcie_survive_perst(sii->pch, mask, val);
+}
+
+static void
+si_watchdog_reset(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 i;
+
+	/* issue a watchdog reset */
+	pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, 2, 2);
+	/* do busy wait for 20ms */
+	for (i = 0; i < 2000; i++) {
+		OSL_DELAY(10);
+	}
+}
+#endif /* SURVIVE_PERST_ENAB */
+
+void
+si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 sperst_val)
+{
+#ifdef SURVIVE_PERST_ENAB
+	if (BUSTYPE(sih->bustype) != PCI_BUS)
+		  return;
+
+	if ((CHIPID(sih->chip) != BCM4360_CHIP_ID && CHIPID(sih->chip) != BCM4352_CHIP_ID) ||
+	    (CHIPREV(sih->chiprev) >= 4))
+		return;
+
+	if (reset) {
+		si_info_t *sii = SI_INFO(sih);
+		uint32 bar0win, bar0win_after;
+
+		/* save the bar0win */
+		bar0win = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+
+		si_watchdog_reset(sih);
+
+		bar0win_after = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		if (bar0win_after != bar0win) {
+			SI_ERROR(("%s: bar0win before %08x, bar0win after %08x\n",
+				__FUNCTION__, bar0win, bar0win_after));
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32), bar0win);
+		}
+	}
+	if (sperst_mask) {
+		/* enable survive perst */
+		si_pcie_survive_perst(sih, sperst_mask, sperst_val);
+	}
+#endif /* SURVIVE_PERST_ENAB */
+}
+
+void
+si_pcie_ltr_war(si_t *sih)
+{
+}
+
+void
+si_pcie_hw_LTR_war(si_t *sih)
+{
+}
+
+void
+si_pciedev_reg_pm_clk_period(si_t *sih)
+{
+}
+
+void
+si_pciedev_crwlpciegen2(si_t *sih)
+{
+}
+
+void
+si_pcie_prep_D3(si_t *sih, bool enter_D3)
+{
+}
diff --git a/drivers/net/wireless/bcmdhd/siutils_priv.h b/drivers/net/wireless/bcmdhd/siutils_priv.h
new file mode 100644
index 0000000000000000000000000000000000000000..a69fb69f5a6177efa40b62f2558c6b368d3d96d4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils_priv.h
@@ -0,0 +1,265 @@
+/*
+ * Include file private to the SOC Interconnect support files.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: siutils_priv.h 474902 2014-05-02 18:31:33Z $
+ */
+
+#ifndef	_siutils_priv_h_
+#define	_siutils_priv_h_
+
+#define	SI_ERROR(args)
+
+#define	SI_MSG(args)
+
+#ifdef BCMDBG_SI
+#define	SI_VMSG(args)	printf args
+#else
+#define	SI_VMSG(args)
+#endif
+
+#define	IS_SIM(chippkg)	((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+typedef uint32 (*si_intrsoff_t)(void *intr_arg);
+typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
+typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
+typedef struct gpioh_item {
+	void			*arg;
+	bool			level;
+	gpio_handler_t		handler;
+	uint32			event;
+	struct gpioh_item	*next;
+} gpioh_item_t;
+
+
+#define SI_GPIO_MAX		16
+
+typedef struct gci_gpio_item {
+	void			*arg;
+	uint8			gci_gpio;
+	uint8			status;
+	gci_gpio_handler_t	handler;
+	struct gci_gpio_item	*next;
+} gci_gpio_item_t;
+
+
+typedef struct si_cores_info {
+	void	*regs[SI_MAXCORES];	/* other regs va */
+
+	uint	coreid[SI_MAXCORES];	/* id of each core */
+	uint32	coresba[SI_MAXCORES];	/* backplane address of each core */
+	void	*regs2[SI_MAXCORES];	/* va of each core second register set (usbh20) */
+	uint32	coresba2[SI_MAXCORES];	/* address of each core second register set (usbh20) */
+	uint32	coresba_size[SI_MAXCORES]; /* backplane address space size */
+	uint32	coresba2_size[SI_MAXCORES]; /* second address space size */
+
+	void	*wrappers[SI_MAXCORES];	/* other cores wrapper va */
+	uint32	wrapba[SI_MAXCORES];	/* address of controlling wrapper */
+
+	uint32	cia[SI_MAXCORES];	/* erom cia entry for each core */
+	uint32	cib[SI_MAXCORES];	/* erom cib entry for each core */
+} si_cores_info_t;
+
+/* misc si info needed by some of the routines */
+typedef struct si_info {
+	struct si_pub pub;		/* back plane public state (must be first field) */
+
+	void	*osh;			/* osl os handle */
+	void	*sdh;			/* bcmsdh handle */
+
+	uint	dev_coreid;		/* the core provides driver functions */
+	void	*intr_arg;		/* interrupt callback function arg */
+	si_intrsoff_t intrsoff_fn;	/* turns chip interrupts off */
+	si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
+	si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
+
+	void *pch;			/* PCI/E core handle */
+
+	gpioh_item_t *gpioh_head; 	/* GPIO event handlers list */
+
+	bool	memseg;			/* flag to toggle MEM_SEG register */
+
+	char *vars;
+	uint varsz;
+
+	void	*curmap;		/* current regs va */
+
+	uint	curidx;			/* current core index */
+	uint	numcores;		/* # discovered cores */
+
+	void	*curwrap;		/* current wrapper va */
+
+	uint32	oob_router;		/* oob router registers for axi */
+
+	void *cores_info;
+	gci_gpio_item_t	*gci_gpio_head;	/* gci gpio interrupts head */
+	uint	chipnew;		/* new chip number */
+} si_info_t;
+
+
+#define	SI_INFO(sih)	((si_info_t *)(uintptr)sih)
+
+#define	GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+		ISALIGNED((x), SI_CORE_SIZE))
+#define	GOODREGS(regs)	((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR	0
+#define	GOODIDX(idx)	(((uint)idx) < SI_MAXCORES)
+#define	NOREV		-1		/* Invalid rev */
+
+#define PCI(si)		((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCI_CORE_ID))
+
+#define PCIE_GEN1(si)	((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCIE_CORE_ID))
+
+#define PCIE_GEN2(si)	((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCIE2_CORE_ID))
+
+#define PCIE(si)	(PCIE_GEN1(si) || PCIE_GEN2(si))
+
+#define PCMCIA(si)	((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE))
+
+/* Newer chips can access the PCI/PCIe and CC cores without having to change
+ * PCI BAR0 WIN
+ */
+#define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13)))
+
+#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
+
+/*
+ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts before/
+ * after core switching, to avoid invalid register accesses inside the ISR.
+ */
+#define INTR_OFF(si, intr_val) \
+	if ((si)->intrsoff_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
+		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
+#define INTR_RESTORE(si, intr_val) \
+	if ((si)->intrsrestore_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
+		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
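+/*
+ * Usage sketch (illustrative only, not copied from siutils.c): both macros
+ * expect a local "cores_info" pointer (si_cores_info_t *) and an intr_val
+ * variable in the calling scope, and are used to bracket a core switch:
+ *
+ *	uint intr_val = 0;
+ *	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ *	INTR_OFF(sii, intr_val);
+ *	... switch cores and access registers ...
+ *	INTR_RESTORE(sii, intr_val);
+ */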
+
+/* dynamic clock control defines */
+#define	LPOMINFREQ		25000		/* low power oscillator min */
+#define	LPOMAXFREQ		43000		/* low power oscillator max */
+#define	XTALMINFREQ		19800000	/* 20 MHz - 1% */
+#define	XTALMAXFREQ		20200000	/* 20 MHz + 1% */
+#define	PCIMINFREQ		25000000	/* 25 MHz */
+#define	PCIMAXFREQ		34000000	/* 33 MHz + fudge */
+
+#define	ILP_DIV_5MHZ		0		/* ILP = 5 MHz */
+#define	ILP_DIV_1MHZ		4		/* ILP = 1 MHz */
+
+/* Force HT (fast) clock on certain older PCI/PCIe chips */
+#define PCI_FORCEHT(si)	\
+	(((PCIE_GEN1(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \
+	((PCI(si) || PCIE_GEN1(si)) && (si->pub.chip == BCM4321_CHIP_ID)) || \
+	(PCIE_GEN1(si) && (si->pub.chip == BCM4716_CHIP_ID)) || \
+	(PCIE_GEN1(si) && (si->pub.chip == BCM4748_CHIP_ID)))
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME	10		/* Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME	90		/* Default: 90% off */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL  ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
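+/* With the defaults above, DEFAULT_GPIOTIMERVAL packs a 10% on / 90% off LED
+ * duty cycle into a single register value: on-time in the field selected by
+ * GPIO_ONTIME_SHIFT, off-time in the low bits.
+ */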
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, void *regs, uint devid);
+extern uint sb_coreid(si_t *sih);
+extern uint sb_intflag(si_t *sih);
+extern uint sb_flag(si_t *sih);
+extern void sb_setint(si_t *sih, int siflag);
+extern uint sb_corevendor(si_t *sih);
+extern uint sb_corerev(si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint32 *sb_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern bool sb_iscoreup(si_t *sih);
+extern void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_disable(si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(si_t *sih, uint asidx);
+extern int sb_numaddrspaces(si_t *sih);
+
+extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx);
+
+extern bool sb_taclear(si_t *sih, bool details);
+
+#if defined(BCMDBG_PHYDUMP)
+extern void sb_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif 
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool sb_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool sb_pci_fastpmecap(struct osl_info *osh);
+extern bool sb_pci_pmeclr(si_t *sih);
+extern void sb_pci_pmeen(si_t *sih);
+extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern uint ai_flag_alt(si_t *sih);
+extern void ai_setint(si_t *sih, int siflag);
+extern uint ai_coreidx(si_t *sih);
+extern uint ai_corevendor(si_t *sih);
+extern uint ai_corerev(si_t *sih);
+extern uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern bool ai_iscoreup(si_t *sih);
+extern void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits,
+	uint32 resetbits, void *p, void *s);
+extern void ai_core_disable(si_t *sih, uint32 bits);
+extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits,
+	aidmp_t *pmacai, aidmp_t *smacai);
+extern int ai_numaddrspaces(si_t *sih);
+extern uint32 ai_addrspace(si_t *sih, uint asidx);
+extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
+extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+
+#if defined(BCMDBG_PHYDUMP)
+extern void ai_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif 
+
+
+#define ub_scan(a, b, c) do {} while (0)
+#define ub_flag(a) (0)
+#define ub_setint(a, b) do {} while (0)
+#define ub_coreidx(a) (0)
+#define ub_corevendor(a) (0)
+#define ub_corerev(a) (0)
+#define ub_iscoreup(a) (0)
+#define ub_setcoreidx(a, b) (0)
+#define ub_core_cflags(a, b, c) (0)
+#define ub_core_cflags_wo(a, b, c) do {} while (0)
+#define ub_core_sflags(a, b, c) (0)
+#define ub_corereg(a, b, c, d, e) (0)
+#define ub_core_reset(a, b, c) do {} while (0)
+#define ub_core_disable(a, b) do {} while (0)
+#define ub_numaddrspaces(a) (0)
+#define ub_addrspace(a, b)  (0)
+#define ub_addrspacesize(a, b) (0)
+#define ub_view(a, b) do {} while (0)
+#define ub_dumpregs(a, b) do {} while (0)
+
+#endif	/* _siutils_priv_h_ */
diff --git a/drivers/net/wireless/bcmdhd/uamp_api.h b/drivers/net/wireless/bcmdhd/uamp_api.h
new file mode 100644
index 0000000000000000000000000000000000000000..e4f7e35a490c7eeb1a78d56c014427d6e5b03abd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/uamp_api.h
@@ -0,0 +1,160 @@
+/*
+ *  Name:       uamp_api.h
+ *
+ *  Description: Universal AMP API
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: uamp_api.h 467328 2014-04-03 01:23:40Z $
+ *
+ */
+
+
+#ifndef UAMP_API_H
+#define UAMP_API_H
+
+
+#include "typedefs.h"
+
+
+/*****************************************************************************
+**  Constant and Type Definitions
+******************************************************************************
+*/
+
+#define BT_API
+
+/* Types. */
+typedef bool	BOOLEAN;
+typedef uint8	UINT8;
+typedef uint16	UINT16;
+
+
+/* UAMP identifiers */
+#define UAMP_ID_1   1
+#define UAMP_ID_2   2
+typedef UINT8 tUAMP_ID;
+
+/* UAMP event ids (used by UAMP_CBACK) */
+#define UAMP_EVT_RX_READY           0   /* Data from AMP controller is ready to be read */
+#define UAMP_EVT_CTLR_REMOVED       1   /* Controller removed */
+#define UAMP_EVT_CTLR_READY         2   /* Controller added/ready */
+typedef UINT8 tUAMP_EVT;
+
+
+/* UAMP Channels */
+#define UAMP_CH_HCI_CMD            0   /* HCI Command channel */
+#define UAMP_CH_HCI_EVT            1   /* HCI Event channel */
+#define UAMP_CH_HCI_DATA           2   /* HCI ACL Data channel */
+typedef UINT8 tUAMP_CH;
+
+/* tUAMP_EVT_DATA: union for event-specific data, used by UAMP_CBACK */
+typedef union {
+    tUAMP_CH channel;       /* UAMP_EVT_RX_READY: channel for which rx occurred */
+} tUAMP_EVT_DATA;
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_CBACK
+**
+** Description: Callback for events. Register callback using UAMP_Init.
+**
+** Parameters   amp_id:         AMP device identifier that generated the event
+**              amp_evt:        event id
+**              p_amp_evt_data: pointer to event-specific data
+**
+******************************************************************************
+*/
+typedef void (*tUAMP_CBACK)(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_amp_evt_data);
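+/*
+ * Illustrative callback sketch (an assumption for documentation purposes,
+ * not part of this API header):
+ *
+ *   static void my_uamp_cback(tUAMP_ID amp_id, tUAMP_EVT amp_evt,
+ *                             tUAMP_EVT_DATA *p_amp_evt_data)
+ *   {
+ *       if (amp_evt == UAMP_EVT_RX_READY) {
+ *           // drain p_amp_evt_data->channel with UAMP_Read()
+ *       }
+ *   }
+ *
+ *   The callback is registered once at startup via UAMP_Init(my_uamp_cback).
+ */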
+
+/*****************************************************************************
+**  external function declarations
+******************************************************************************
+*/
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*****************************************************************************
+**
+** Function:    UAMP_Init
+**
+** Description: Initialize UAMP driver
+**
+** Parameters   p_cback:    Callback function for UAMP event notification
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Init(tUAMP_CBACK p_cback);
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_Open
+**
+** Description: Open connection to local AMP device.
+**
+** Parameters   amp_id: Application specific AMP identifier. This value
+**                      will be included in AMP messages sent to the
+**                      BTU task, to identify the source of the message
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Open(tUAMP_ID amp_id);
+
+/*****************************************************************************
+**
+** Function:    UAMP_Close
+**
+** Description: Close connection to local AMP device.
+**
+** Parameters   amp_id: Application specific AMP identifier.
+**
+******************************************************************************
+*/
+BT_API void UAMP_Close(tUAMP_ID amp_id);
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_Write
+**
+** Description: Send buffer to AMP device. Frees GKI buffer when done.
+**
+**
+** Parameters:  amp_id:     AMP identifier.
+**              p_buf:      pointer to buffer to write
+**              num_bytes:  number of bytes to write
+**              channel:    UAMP_CH_HCI_DATA or UAMP_CH_HCI_CMD
+**
+** Returns:     number of bytes written
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Write(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 num_bytes, tUAMP_CH channel);
+
+/*****************************************************************************
+**
+** Function:    UAMP_Read
+**
+** Description: Read incoming data from AMP. Call after receiving a
+**              UAMP_EVT_RX_READY callback event.
+**
+** Parameters:  amp_id:     AMP identifier.
+**              p_buf:      pointer to buffer for holding incoming AMP data
+**              buf_size:   size of p_buf
+**              channel:    UAMP_CH_HCI_DATA or UAMP_CH_HCI_EVT
+**
+** Returns:     number of bytes read
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Read(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 buf_size, tUAMP_CH channel);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* UAMP_API_H */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c
new file mode 100644
index 0000000000000000000000000000000000000000..336569c88959d4e958f5a35d5044d0b6efe3c5f8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.c
@@ -0,0 +1,2434 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_android.c 490852 2014-07-12 15:20:53Z $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/netlink.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#include <wl_android.h>
+#include <wldev_common.h>
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <proto/bcmip.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#ifdef BCMSDIO
+#include <bcmsdbus.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
+
+/*
+ * Android private command strings, PLEASE define new private commands here
+ * so they can be updated easily in the future (if needed)
+ */
+
+#define CMD_START		"START"
+#define CMD_STOP		"STOP"
+#define	CMD_SCAN_ACTIVE		"SCAN-ACTIVE"
+#define	CMD_SCAN_PASSIVE	"SCAN-PASSIVE"
+#define CMD_RSSI		"RSSI"
+#define CMD_LINKSPEED		"LINKSPEED"
+#define CMD_RXFILTER_START	"RXFILTER-START"
+#define CMD_RXFILTER_STOP	"RXFILTER-STOP"
+#define CMD_RXFILTER_ADD	"RXFILTER-ADD"
+#define CMD_RXFILTER_REMOVE	"RXFILTER-REMOVE"
+#define CMD_BTCOEXSCAN_START	"BTCOEXSCAN-START"
+#define CMD_BTCOEXSCAN_STOP	"BTCOEXSCAN-STOP"
+#define CMD_BTCOEXMODE		"BTCOEXMODE"
+#define CMD_SETSUSPENDOPT	"SETSUSPENDOPT"
+#define CMD_SETSUSPENDMODE      "SETSUSPENDMODE"
+#define CMD_P2P_DEV_ADDR	"P2P_DEV_ADDR"
+#define CMD_SETFWPATH		"SETFWPATH"
+#define CMD_SETBAND		"SETBAND"
+#define CMD_GETBAND		"GETBAND"
+#define CMD_COUNTRY		"COUNTRY"
+#define CMD_P2P_SET_NOA		"P2P_SET_NOA"
+#if !defined WL_ENABLE_P2P_IF
+#define CMD_P2P_GET_NOA			"P2P_GET_NOA"
+#endif /* WL_ENABLE_P2P_IF */
+#define CMD_P2P_SD_OFFLOAD		"P2P_SD_"
+#define CMD_P2P_SET_PS		"P2P_SET_PS"
+#define CMD_SET_AP_WPS_P2P_IE 		"SET_AP_WPS_P2P_IE"
+#define CMD_SETROAMMODE 	"SETROAMMODE"
+#define CMD_SETIBSSBEACONOUIDATA	"SETIBSSBEACONOUIDATA"
+#define CMD_MIRACAST		"MIRACAST"
+#define CMD_NAN		"NAN_"
+
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+#define CMD_GET_BEST_CHANNELS	"GET_BEST_CHANNELS"
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+#define CMD_KEEP_ALIVE		"KEEPALIVE"
+
+/* CCX Private Commands */
+#ifdef BCMCCX
+#define CMD_GETCCKM_RN		"get cckm_rn"
+#define CMD_SETCCKM_KRK		"set cckm_krk"
+#define CMD_GET_ASSOC_RES_IES	"get assoc_res_ies"
+#endif
+
+#ifdef PNO_SUPPORT
+#define CMD_PNOSSIDCLR_SET	"PNOSSIDCLR"
+#define CMD_PNOSETUP_SET	"PNOSETUP "
+#define CMD_PNOENABLE_SET	"PNOFORCE"
+#define CMD_PNODEBUG_SET	"PNODEBUG"
+#define CMD_WLS_BATCHING	"WLS_BATCHING"
+#endif /* PNO_SUPPORT */
+
+#define CMD_OKC_SET_PMK		"SET_PMK"
+#define CMD_OKC_ENABLE		"OKC_ENABLE"
+
+#define	CMD_HAPD_MAC_FILTER	"HAPD_MAC_FILTER"
+
+#ifdef WLFBT
+#define CMD_GET_FTKEY      "GET_FTKEY"
+#endif
+
+#ifdef WLAIBSS
+#define CMD_SETIBSSTXFAILEVENT		"SETIBSSTXFAILEVENT"
+#define CMD_GET_IBSS_PEER_INFO		"GETIBSSPEERINFO"
+#define CMD_GET_IBSS_PEER_INFO_ALL	"GETIBSSPEERINFOALL"
+#define CMD_SETIBSSROUTETABLE		"SETIBSSROUTETABLE"
+#define CMD_SETIBSSAMPDU			"SETIBSSAMPDU"
+#define CMD_SETIBSSANTENNAMODE		"SETIBSSANTENNAMODE"
+#endif /* WLAIBSS */
+
+#define CMD_ROAM_OFFLOAD			"SETROAMOFFLOAD"
+
+/* miracast related definition */
+#define MIRACAST_MODE_OFF	0
+#define MIRACAST_MODE_SOURCE	1
+#define MIRACAST_MODE_SINK	2
+
+#ifndef MIRACAST_AMPDU_SIZE
+#define MIRACAST_AMPDU_SIZE	8
+#endif
+
+#ifndef MIRACAST_MCHAN_ALGO
+#define MIRACAST_MCHAN_ALGO     1
+#endif
+
+#ifndef MIRACAST_MCHAN_BW
+#define MIRACAST_MCHAN_BW       25
+#endif
+
+static LIST_HEAD(miracast_resume_list);
+static u8 miracast_cur_mode;
+
+struct io_cfg {
+	s8 *iovar;
+	s32 param;
+	u32 ioctl;
+	void *arg;
+	u32 len;
+	struct list_head list;
+};
+
+typedef struct _android_wifi_priv_cmd {
+	char *buf;
+	int used_len;
+	int total_len;
+} android_wifi_priv_cmd;
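+/* Descriptive note: buf/total_len describe a userspace-supplied buffer that
+ * carries one of the CMD_* strings above; the driver writes its reply back
+ * into the same buffer. How the struct reaches the driver (typically via a
+ * private ioctl issued by the Android HAL) is handled elsewhere in this file.
+ */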
+
+#ifdef CONFIG_COMPAT
+typedef struct _compat_android_wifi_priv_cmd {
+	compat_caddr_t buf;
+	int used_len;
+	int total_len;
+} compat_android_wifi_priv_cmd;
+#endif /* CONFIG_COMPAT */
+
+#if defined(BCMFW_ROAM_ENABLE)
+#define CMD_SET_ROAMPREF	"SET_ROAMPREF"
+
+#define MAX_NUM_SUITES		10
+#define WIDTH_AKM_SUITE		8
+#define JOIN_PREF_RSSI_LEN		0x02
+#define JOIN_PREF_RSSI_SIZE		4	/* RSSI pref header size in bytes */
+#define JOIN_PREF_WPA_HDR_SIZE		4 /* WPA pref header size in bytes */
+#define JOIN_PREF_WPA_TUPLE_SIZE	12	/* Tuple size in bytes */
+#define JOIN_PREF_MAX_WPA_TUPLES	16
+#define MAX_BUF_SIZE		(JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +	\
+				           (JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES))
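+/* With the sizes above this evaluates to 4 + 4 + (12 * 16) = 200 bytes. */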
+#endif /* BCMFW_ROAM_ENABLE */
+
+#ifdef WL_GENL
+static s32 wl_genl_handle_msg(struct sk_buff *skb, struct genl_info *info);
+static int wl_genl_init(void);
+static int wl_genl_deinit(void);
+
+extern struct net init_net;
+/* attribute policy: defines which attribute has which type (e.g. int, char *, etc.);
+ * possible values are defined in net/netlink.h
+ */
+static struct nla_policy wl_genl_policy[BCM_GENL_ATTR_MAX + 1] = {
+	[BCM_GENL_ATTR_STRING] = { .type = NLA_NUL_STRING },
+	[BCM_GENL_ATTR_MSG] = { .type = NLA_BINARY },
+};
+
+#define WL_GENL_VER 1
+/* family definition */
+static struct genl_family wl_genl_family = {
+	.id = GENL_ID_GENERATE,    /* Genetlink would generate the ID */
+	.hdrsize = 0,
+	.name = "bcm-genl",        /* Netlink I/F for Android */
+	.version = WL_GENL_VER,     /* Version Number */
+	.maxattr = BCM_GENL_ATTR_MAX,
+};
+
+/* commands: mapping between the command enumeration and the actual function */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+struct genl_ops wl_genl_ops[] = {
+	{
+	.cmd = BCM_GENL_CMD_MSG,
+	.flags = 0,
+	.policy = wl_genl_policy,
+	.doit = wl_genl_handle_msg,
+	.dumpit = NULL,
+	},
+};
+#else
+struct genl_ops wl_genl_ops = {
+	.cmd = BCM_GENL_CMD_MSG,
+	.flags = 0,
+	.policy = wl_genl_policy,
+	.doit = wl_genl_handle_msg,
+	.dumpit = NULL,
+
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+static struct genl_multicast_group wl_genl_mcast[] = {
+	 { .name = "bcm-genl-mcast", },
+};
+#else
+static struct genl_multicast_group wl_genl_mcast = {
+	.id = GENL_ID_GENERATE,    /* Genetlink would generate the ID */
+	.name = "bcm-genl-mcast",
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
+#endif /* WL_GENL */
+
+/**
+ * Extern function declarations (TODO: move them to dhd_linux.h)
+ */
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+int dhd_dev_init_ioctl(struct net_device *dev);
+#ifdef WL_CFG80211
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command);
+#else
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{ return 0; }
+int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{ return 0; }
+#endif /* WL_CFG80211 */
+
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24;	/* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+extern bool ap_fw_loaded;
+#if defined(CUSTOMER_HW2)
+extern char iface_name[IFNAMSIZ];
+#endif 
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first
+ * time (only) in dhd_open; subsequent wifi-on requests are handled by
+ * wl_android_wifi_on
+ */
+static int g_wifi_on = TRUE;
+
+/**
+ * Local (static) function definitions
+ */
+static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
+{
+	int link_speed;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_link_speed(net, &link_speed);
+	if (error)
+		return -1;
+
+	/* Convert Kbps to Android Mbps */
+	link_speed = link_speed / 1000;
+	bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
+	DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+	return bytes_written;
+}
+
+static int wl_android_get_rssi(struct net_device *net, char *command, int total_len)
+{
+	wlc_ssid_t ssid = {0};
+	int rssi;
+	int bytes_written = 0;
+	int error;
+
+	error = wldev_get_rssi(net, &rssi);
+	if (error)
+		return -1;
+
+	error = wldev_get_ssid(net, &ssid);
+	if (error)
+		return -1;
+	if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
+		DHD_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__));
+	} else {
+		memcpy(command, ssid.SSID, ssid.SSID_len);
+		bytes_written = ssid.SSID_len;
+	}
+	bytes_written += snprintf(&command[bytes_written], total_len, " rssi %d", rssi);
+	DHD_INFO(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
+	return bytes_written;
+}
+
+static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len)
+{
+	int suspend_flag;
+	int ret_now;
+	int ret = 0;
+
+	suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0';
+
+	if (suspend_flag != 0)
+		suspend_flag = 1;
+	ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+	if (ret_now != suspend_flag) {
+		if (!(ret = net_os_set_suspend(dev, ret_now, 1)))
+			DHD_INFO(("%s: Suspend Flag %d -> %d\n",
+				__FUNCTION__, ret_now, suspend_flag));
+		else
+			DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+	}
+	return ret;
+}
+
+static int wl_android_set_suspendmode(struct net_device *dev, char *command, int total_len)
+{
+	int ret = 0;
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(DHD_USE_EARLYSUSPEND)
+	int suspend_flag;
+
+	suspend_flag = *(command + strlen(CMD_SETSUSPENDMODE) + 1) - '0';
+	if (suspend_flag != 0)
+		suspend_flag = 1;
+
+	if (!(ret = net_os_set_suspend(dev, suspend_flag, 0)))
+		DHD_INFO(("%s: Suspend Mode %d\n", __FUNCTION__, suspend_flag));
+	else
+		DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+#endif
+
+	return ret;
+}
+
+static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
+{
+	uint band;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_band(dev, &band);
+	if (error)
+		return -1;
+	bytes_written = snprintf(command, total_len, "Band %d", band);
+	return bytes_written;
+}
+
+
+#ifdef PNO_SUPPORT
+#define PNO_PARAM_SIZE 50
+#define VALUE_SIZE 50
+#define LIMIT_STR_FMT  ("%50s %50s")
+static int
+wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len)
+{
+	int err = BCME_OK;
+	uint i, tokens;
+	char *pos, *pos2, *token, *token2, *delim;
+	char param[PNO_PARAM_SIZE], value[VALUE_SIZE];
+	struct dhd_pno_batch_params batch_params;
+	DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+	if (total_len < strlen(CMD_WLS_BATCHING)) {
+		DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
+		err = BCME_ERROR;
+		goto exit;
+	}
+	pos = command + strlen(CMD_WLS_BATCHING) + 1;
+	memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params));
+
+	if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) {
+		pos += strlen(PNO_BATCHING_SET) + 1;
+		while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) {
+			memset(param, 0, sizeof(param));
+			memset(value, 0, sizeof(value));
+			if (token == NULL || !*token)
+				break;
+			if (*token == '\0')
+				continue;
+			delim = strchr(token, PNO_PARAM_VALUE_DELLIMETER);
+			if (delim != NULL)
+				*delim = ' ';
+
+			tokens = sscanf(token, LIMIT_STR_FMT, param, value);
+			if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) {
+				batch_params.scan_fr = simple_strtol(value, NULL, 0);
+				DHD_PNO(("scan_freq : %d\n", batch_params.scan_fr));
+			} else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) {
+				batch_params.bestn = simple_strtol(value, NULL, 0);
+				DHD_PNO(("bestn : %d\n", batch_params.bestn));
+			} else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) {
+				batch_params.mscan = simple_strtol(value, NULL, 0);
+				DHD_PNO(("mscan : %d\n", batch_params.mscan));
+			} else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) {
+				i = 0;
+				pos2 = value;
+				tokens = sscanf(value, "<%s>", value);
+				if (tokens != 1) {
+					err = BCME_ERROR;
+					DHD_ERROR(("%s : invalid format for channel"
+					" <> params\n", __FUNCTION__));
+					goto exit;
+				}
+					while ((token2 = strsep(&pos2,
+					PNO_PARAM_CHANNEL_DELIMETER)) != NULL) {
+					if (token2 == NULL || !*token2)
+						break;
+					if (*token2 == '\0')
+						continue;
+					if (*token2 == 'A' || *token2 == 'B') {
+						batch_params.band = (*token2 == 'A')?
+							WLC_BAND_5G : WLC_BAND_2G;
+						DHD_PNO(("band : %s\n",
+							(*token2 == 'A')? "A" : "B"));
+					} else {
+						batch_params.chan_list[i++] =
+						simple_strtol(token2, NULL, 0);
+						batch_params.nchan++;
+						DHD_PNO(("channel :%d\n",
+						batch_params.chan_list[i-1]));
+					}
+				 }
+			} else if (!strncmp(param, PNO_PARAM_RTT, strlen(PNO_PARAM_RTT))) {
+				batch_params.rtt = simple_strtol(value, NULL, 0);
+				DHD_PNO(("rtt : %d\n", batch_params.rtt));
+			} else {
+				DHD_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param));
+				err = BCME_ERROR;
+				goto exit;
+			}
+		}
+		err = dhd_dev_pno_set_for_batch(dev, &batch_params);
+		if (err < 0) {
+			DHD_ERROR(("failed to configure batch scan\n"));
+		} else {
+			memset(command, 0, total_len);
+			err = sprintf(command, "%d", err);
+		}
+	} else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) {
+		err = dhd_dev_pno_get_for_batch(dev, command, total_len);
+		if (err < 0) {
+			DHD_ERROR(("failed to get batching results\n"));
+		} else {
+			err = strlen(command);
+		}
+	} else if (!strncmp(pos, PNO_BATCHING_STOP, strlen(PNO_BATCHING_STOP))) {
+		err = dhd_dev_pno_stop_for_batch(dev);
+		if (err < 0) {
+			DHD_ERROR(("failed to stop batching scan\n"));
+		} else {
+			memset(command, 0, total_len);
+			err = sprintf(command, "OK");
+		}
+	} else {
+		DHD_ERROR(("%s : unknown command\n", __FUNCTION__));
+		err = BCME_ERROR;
+		goto exit;
+	}
+exit:
+	return err;
+}
+#ifndef WL_SCHED_SCAN
+static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len)
+{
+	wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+	int res = -1;
+	int nssid = 0;
+	cmd_tlv_t *cmd_tlv_temp;
+	char *str_ptr;
+	int tlv_size_left;
+	int pno_time = 0;
+	int pno_repeat = 0;
+	int pno_freq_expo_max = 0;
+
+#ifdef PNO_SET_DEBUG
+	int i;
+	char pno_in_example[] = {
+		'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ',
+		'S', '1', '2', '0',
+		'S',
+		0x05,
+		'd', 'l', 'i', 'n', 'k',
+		'S',
+		0x04,
+		'G', 'O', 'O', 'G',
+		'T',
+		'0', 'B',
+		'R',
+		'2',
+		'M',
+		'2',
+		0x00
+		};
+#endif /* PNO_SET_DEBUG */
+	DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+
+	if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
+		DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
+		goto exit_proc;
+	}
+#ifdef PNO_SET_DEBUG
+	memcpy(command, pno_in_example, sizeof(pno_in_example));
+	total_len = sizeof(pno_in_example);
+#endif
+	str_ptr = command + strlen(CMD_PNOSETUP_SET);
+	tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
+
+	cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+	memset(ssids_local, 0, sizeof(ssids_local));
+
+	if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
+		(cmd_tlv_temp->version == PNO_TLV_VERSION) &&
+		(cmd_tlv_temp->subtype == PNO_TLV_SUBTYPE_LEGACY_PNO)) {
+
+		str_ptr += sizeof(cmd_tlv_t);
+		tlv_size_left -= sizeof(cmd_tlv_t);
+
+		if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local,
+			MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) {
+			DHD_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid));
+			goto exit_proc;
+		} else {
+			if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+				DHD_ERROR(("%s scan duration corrupted field size %d\n",
+					__FUNCTION__, tlv_size_left));
+				goto exit_proc;
+			}
+			str_ptr++;
+			pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+			DHD_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+
+			if (str_ptr[0] != 0) {
+				if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+					DHD_ERROR(("%s pno repeat : corrupted field\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+				DHD_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+				if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+					DHD_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+				DHD_PNO(("%s: pno_freq_expo_max=%d\n",
+					__FUNCTION__, pno_freq_expo_max));
+			}
+		}
+	} else {
+		DHD_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+		goto exit_proc;
+	}
+
+	res = dhd_dev_pno_set_for_ssid(dev, ssids_local, nssid, pno_time, pno_repeat,
+		pno_freq_expo_max, NULL, 0);
+exit_proc:
+	return res;
+}
+#endif /* !WL_SCHED_SCAN */
+#endif /* PNO_SUPPORT  */
+
+static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len)
+{
+	int ret;
+	int bytes_written = 0;
+
+	ret = wl_cfg80211_get_p2p_dev_addr(ndev, (struct ether_addr*)command);
+	if (ret)
+		return 0;
+	bytes_written = sizeof(struct ether_addr);
+	return bytes_written;
+}
+
+#ifdef BCMCCX
+static int wl_android_get_cckm_rn(struct net_device *dev, char *command)
+{
+	int error, rn;
+
+	WL_TRACE(("%s:wl_android_get_cckm_rn\n", dev->name));
+
+	error = wldev_iovar_getint(dev, "cckm_rn", &rn);
+	if (unlikely(error)) {
+		WL_ERR(("wl_android_get_cckm_rn error (%d)\n", error));
+		return -1;
+	}
+	memcpy(command, &rn, sizeof(int));
+
+	return sizeof(int);
+}
+
+static int wl_android_set_cckm_krk(struct net_device *dev, char *command)
+{
+	int error;
+	unsigned char key[16];
+	static char iovar_buf[WLC_IOCTL_MEDLEN];
+
+	WL_TRACE(("%s: wl_iw_set_cckm_krk\n", dev->name));
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	memcpy(key, command+strlen("set cckm_krk")+1, 16);
+
+	error = wldev_iovar_setbuf(dev, "cckm_krk", key, sizeof(key),
+		iovar_buf, WLC_IOCTL_MEDLEN, NULL);
+	if (unlikely(error))
+	{
+		WL_ERR((" cckm_krk set error (%d)\n", error));
+		return -1;
+	}
+	return 0;
+}
+
+static int wl_android_get_assoc_res_ies(struct net_device *dev, char *command)
+{
+	int error;
+	u8 buf[WL_ASSOC_INFO_MAX];
+	wl_assoc_info_t assoc_info;
+	u32 resp_ies_len = 0;
+	int bytes_written = 0;
+
+	WL_TRACE(("%s: wl_iw_get_assoc_res_ies\n", dev->name));
+
+	error = wldev_iovar_getbuf(dev, "assoc_info", NULL, 0, buf, WL_ASSOC_INFO_MAX, NULL);
+	if (unlikely(error)) {
+		WL_ERR(("could not get assoc info (%d)\n", error));
+		return -1;
+	}
+
+	memcpy(&assoc_info, buf, sizeof(wl_assoc_info_t));
+	assoc_info.req_len = htod32(assoc_info.req_len);
+	assoc_info.resp_len = htod32(assoc_info.resp_len);
+	assoc_info.flags = htod32(assoc_info.flags);
+
+	if (assoc_info.resp_len) {
+		resp_ies_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
+	}
+
+	/* first 4 bytes are ie len */
+	memcpy(command, &resp_ies_len, sizeof(u32));
+	bytes_written = sizeof(u32);
+
+	/* get the association resp IE's if there are any */
+	if (resp_ies_len) {
+		error = wldev_iovar_getbuf(dev, "assoc_resp_ies", NULL, 0,
+			buf, WL_ASSOC_INFO_MAX, NULL);
+		if (unlikely(error)) {
+			WL_ERR(("could not get assoc resp_ies (%d)\n", error));
+			return -1;
+		}
+
+		memcpy(command+sizeof(u32), buf, resp_ies_len);
+		bytes_written += resp_ies_len;
+	}
+	return bytes_written;
+}
+
+#endif /* BCMCCX */
+
+int
+wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist)
+{
+	int i, j, match;
+	int ret	= 0;
+	char mac_buf[MAX_NUM_OF_ASSOCLIST *
+		sizeof(struct ether_addr) + sizeof(uint)] = {0};
+	struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+	/* set filtering mode */
+	if ((ret = wldev_ioctl(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode), true)) != 0) {
+		DHD_ERROR(("%s : WLC_SET_MACMODE error=%d\n", __FUNCTION__, ret));
+		return ret;
+	}
+	if (macmode != MACLIST_MODE_DISABLED) {
+		/* set the MAC filter list */
+		if ((ret = wldev_ioctl(dev, WLC_SET_MACLIST, maclist,
+			sizeof(int) + sizeof(struct ether_addr) * maclist->count, true)) != 0) {
+			DHD_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		/* get the current list of associated STAs */
+		assoc_maclist->count = MAX_NUM_OF_ASSOCLIST;
+		if ((ret = wldev_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist,
+			sizeof(mac_buf), false)) != 0) {
+			DHD_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		/* do we have any STA associated?  */
+		if (assoc_maclist->count) {
+			/* iterate each associated STA */
+			for (i = 0; i < assoc_maclist->count; i++) {
+				match = 0;
+				/* compare with each entry */
+				for (j = 0; j < maclist->count; j++) {
+					DHD_INFO(("%s : associated="MACDBG " list="MACDBG "\n",
+					__FUNCTION__, MAC2STRDBG(assoc_maclist->ea[i].octet),
+					MAC2STRDBG(maclist->ea[j].octet)));
+					if (memcmp(assoc_maclist->ea[i].octet,
+						maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) {
+						match = 1;
+						break;
+					}
+				}
+				/* do conditional deauth */
+				/*   "if not in the allow list" or "if in the deny list" */
+				if ((macmode == MACLIST_MODE_ALLOW && !match) ||
+					(macmode == MACLIST_MODE_DENY && match)) {
+					scb_val_t scbval;
+
+					scbval.val = htod32(1);
+					memcpy(&scbval.ea, &assoc_maclist->ea[i],
+						ETHER_ADDR_LEN);
+					if ((ret = wldev_ioctl(dev,
+						WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+						&scbval, sizeof(scb_val_t), true)) != 0)
+						DHD_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n",
+							__FUNCTION__, ret));
+				}
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ * HAPD_MAC_FILTER mac_mode mac_cnt mac_addr1 mac_addr2
+ *
+ */
+static int
+wl_android_set_mac_address_filter(struct net_device *dev, const char* str)
+{
+	int i;
+	int ret = 0;
+	int macnum = 0;
+	int macmode = MACLIST_MODE_DISABLED;
+	struct maclist *list;
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	/* string should look like below (macmode/macnum/maclist) */
+	/*   1 2 00:11:22:33:44:55 00:11:22:33:44:ff  */
+
+	/* get the MAC filter mode */
+	macmode = bcm_atoi(strsep((char**)&str, " "));
+
+	if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) {
+		DHD_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode));
+		return -1;
+	}
+
+	macnum = bcm_atoi(strsep((char**)&str, " "));
+	if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+		DHD_ERROR(("%s : invalid number of MAC address entries %d\n",
+			__FUNCTION__, macnum));
+		return -1;
+	}
+	/* allocate memory for the MAC list */
+	list = (struct maclist*)kmalloc(sizeof(int) +
+		sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+	if (!list) {
+		DHD_ERROR(("%s : failed to allocate memory\n", __FUNCTION__));
+		return -1;
+	}
+	/* prepare the MAC list */
+	list->count = htod32(macnum);
+	bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+	for (i = 0; i < list->count; i++) {
+		strncpy(eabuf, strsep((char**)&str, " "), ETHER_ADDR_STR_LEN - 1);
+		if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) {
+			DHD_ERROR(("%s : mac parsing err index=%d, addr=%s\n",
+				__FUNCTION__, i, eabuf));
+			list->count--;
+			break;
+		}
+		DHD_INFO(("%s : %d/%d MACADDR=%s", __FUNCTION__, i, list->count, eabuf));
+	}
+	/* set the list */
+	if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0)
+		DHD_ERROR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+	kfree(list);
+
+	return 0;
+}
+
+/**
+ * Global function definitions (declared in wl_android.h)
+ */
+
+int wl_android_wifi_on(struct net_device *dev)
+{
+	int ret = 0;
+	int retry = POWERUP_MAX_RETRY;
+
+	DHD_ERROR(("%s in\n", __FUNCTION__));
+	if (!dev) {
+		DHD_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd_net_if_lock(dev);
+	if (!g_wifi_on) {
+		do {
+			dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY);
+#ifdef BCMSDIO
+			ret = dhd_net_bus_resume(dev, 0);
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+			ret = dhd_net_bus_devreset(dev, FALSE);
+#endif /* BCMPCIE */
+			if (ret == 0)
+				break;
+			DHD_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n",
+				retry));
+#ifdef BCMPCIE
+			dhd_net_bus_devreset(dev, TRUE);
+#endif /* BCMPCIE */
+			dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+		} while (retry-- > 0);
+		if (ret != 0) {
+			DHD_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n"));
+			goto exit;
+		}
+#ifdef BCMSDIO
+		ret = dhd_net_bus_devreset(dev, FALSE);
+		dhd_net_bus_resume(dev, 1);
+#endif /* BCMSDIO */
+
+#ifndef BCMPCIE
+		if (!ret) {
+			if (dhd_dev_init_ioctl(dev) < 0)
+				ret = -EFAULT;
+		}
+#endif /* !BCMPCIE */
+		g_wifi_on = TRUE;
+	}
+
+exit:
+	dhd_net_if_unlock(dev);
+
+	return ret;
+}
+
+int wl_android_wifi_off(struct net_device *dev)
+{
+	int ret = 0;
+
+	DHD_ERROR(("%s in\n", __FUNCTION__));
+	if (!dev) {
+		DHD_TRACE(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd_net_if_lock(dev);
+	if (g_wifi_on) {
+#if defined(BCMSDIO) || defined(BCMPCIE)
+		ret = dhd_net_bus_devreset(dev, TRUE);
+#ifdef BCMSDIO
+		dhd_net_bus_suspend(dev);
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMPCIE */
+		dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+		g_wifi_on = FALSE;
+	}
+	dhd_net_if_unlock(dev);
+
+	return ret;
+}
+
+static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len)
+{
+	if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN)
+		return -1;
+	return dhd_net_set_fw_path(net, command + strlen(CMD_SETFWPATH) + 1);
+}
+
+
+static int
+wl_android_set_pmk(struct net_device *dev, char *command, int total_len)
+{
+	uchar pmk[33];
+	int error = 0;
+	char smbuf[WLC_IOCTL_SMLEN];
+#ifdef OKC_DEBUG
+	int i = 0;
+#endif
+
+	bzero(pmk, sizeof(pmk));
+	memcpy((char *)pmk, command + strlen("SET_PMK "), 32);
+	error = wldev_iovar_setbuf(dev, "okc_info_pmk", pmk, 32, smbuf, sizeof(smbuf), NULL);
+	if (error) {
+		DHD_ERROR(("Failed to set PMK for OKC, error = %d\n", error));
+	}
+#ifdef OKC_DEBUG
+	DHD_ERROR(("PMK is "));
+	for (i = 0; i < 32; i++)
+		DHD_ERROR(("%02X ", pmk[i]));
+
+	DHD_ERROR(("\n"));
+#endif
+	return error;
+}
+
+static int
+wl_android_okc_enable(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	char okc_enable = 0;
+
+	okc_enable = command[strlen(CMD_OKC_ENABLE) + 1] - '0';
+	error = wldev_iovar_setint(dev, "okc_enable", okc_enable);
+	if (error) {
+		DHD_ERROR(("Failed to %s OKC, error = %d\n",
+			okc_enable ? "enable" : "disable", error));
+	}
+
+	wldev_iovar_setint(dev, "ccx_enable", 0);
+
+	return error;
+}
+
+
+
+int wl_android_set_roam_mode(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	int mode = 0;
+
+	if (sscanf(command, "%*s %d", &mode) != 1) {
+		DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+
+	error = wldev_iovar_setint(dev, "roam_off", mode);
+	if (error) {
+		DHD_ERROR(("%s: Failed to set roaming Mode %d, error = %d\n",
+		__FUNCTION__, mode, error));
+		return -1;
+	}
+	else
+		DHD_ERROR(("%s: successfully set roaming mode %d, error = %d\n",
+		__FUNCTION__, mode, error));
+	return 0;
+}
+
+int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, int total_len)
+{
+	char ie_buf[VNDR_IE_MAX_LEN];
+	char *ioctl_buf = NULL;
+	char hex[] = "XX";
+	char *pcmd = NULL;
+	int ielen = 0, datalen = 0, idx = 0, tot_len = 0;
+	vndr_ie_setbuf_t *vndr_ie = NULL;
+	s32 iecount;
+	uint32 pktflag;
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	s32 err = BCME_OK;
+
+	/* Check for a previously added VSIE (Vendor Specific IE).
+	 * If one exists, send an IOVAR to delete it.
+	 */
+	if (wl_cfg80211_ibss_vsie_delete(dev) != BCME_OK) {
+		return -EINVAL;
+	}
+
+	pcmd = command + strlen(CMD_SETIBSSBEACONOUIDATA) + 1;
+	for (idx = 0; idx < DOT11_OUI_LEN; idx++) {
+		hex[0] = *pcmd++;
+		hex[1] = *pcmd++;
+		ie_buf[idx] =  (uint8)simple_strtoul(hex, NULL, 16);
+	}
+	pcmd++;
+	while ((*pcmd != '\0') && (idx < VNDR_IE_MAX_LEN)) {
+		hex[0] = *pcmd++;
+		hex[1] = *pcmd++;
+		ie_buf[idx++] =  (uint8)simple_strtoul(hex, NULL, 16);
+		datalen++;
+	}
+	tot_len = sizeof(vndr_ie_setbuf_t) + (datalen - 1);
+	vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags);
+	if (!vndr_ie) {
+		WL_ERR(("IE memory alloc failed\n"));
+		return -ENOMEM;
+	}
+	/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+	strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1);
+	vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	/* Set the IE count - the buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+	/* Set packet flag to indicate that BEACONs will contain this IE */
+	pktflag = htod32(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG);
+	memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+		sizeof(u32));
+	/* Set the IE ID */
+	vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar) DOT11_MNG_PROPR_ID;
+
+	memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, &ie_buf,
+		DOT11_OUI_LEN);
+	memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data,
+		&ie_buf[DOT11_OUI_LEN], datalen);
+
+	ielen = DOT11_OUI_LEN + datalen;
+	vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar) ielen;
+
+	ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+	if (!ioctl_buf) {
+		WL_ERR(("ioctl memory alloc failed\n"));
+		if (vndr_ie) {
+			kfree(vndr_ie);
+		}
+		return -ENOMEM;
+	}
+	memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN);	/* init the buffer */
+	err = wldev_iovar_setbuf(dev, "ie", vndr_ie, tot_len, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+
+
+	if (err != BCME_OK) {
+		err = -EINVAL;
+		if (vndr_ie) {
+			kfree(vndr_ie);
+		}
+	}
+	else {
+		/* do NOT free 'vndr_ie' for the next process */
+		wl_cfg80211_ibss_vsie_set_buffer(vndr_ie, tot_len);
+	}
+
+	if (ioctl_buf) {
+		kfree(ioctl_buf);
+	}
+
+	return err;
+}
+
+#if defined(BCMFW_ROAM_ENABLE)
+static int
+wl_android_set_roampref(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	char smbuf[WLC_IOCTL_SMLEN];
+	uint8 buf[MAX_BUF_SIZE];
+	uint8 *pref = buf;
+	char *pcmd;
+	int num_ucipher_suites = 0;
+	int num_akm_suites = 0;
+	wpa_suite_t ucipher_suites[MAX_NUM_SUITES];
+	wpa_suite_t akm_suites[MAX_NUM_SUITES];
+	int num_tuples = 0;
+	int total_bytes = 0;
+	int total_len_left;
+	int i, j;
+	char hex[] = "XX";
+
+	pcmd = command + strlen(CMD_SET_ROAMPREF) + 1;
+	total_len_left = total_len - strlen(CMD_SET_ROAMPREF) + 1;
+
+	num_akm_suites = simple_strtoul(pcmd, NULL, 16);
+	/* Increment for number of AKM suites field + space */
+	pcmd += 3;
+	total_len_left -= 3;
+
+	/* check to make sure pcmd does not overrun */
+	if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE))
+		return -1;
+
+	memset(buf, 0, sizeof(buf));
+	memset(akm_suites, 0, sizeof(akm_suites));
+	memset(ucipher_suites, 0, sizeof(ucipher_suites));
+
+	/* Save the AKM suites passed in the command */
+	for (i = 0; i < num_akm_suites; i++) {
+		/* Store the MSB first, as required by join_pref */
+		for (j = 0; j < 4; j++) {
+			hex[0] = *pcmd++;
+			hex[1] = *pcmd++;
+			buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+		}
+		memcpy((uint8 *)&akm_suites[i], buf, sizeof(uint32));
+	}
+
+	total_len_left -= (num_akm_suites * WIDTH_AKM_SUITE);
+	num_ucipher_suites = simple_strtoul(pcmd, NULL, 16);
+	/* Increment for number of cipher suites field + space */
+	pcmd += 3;
+	total_len_left -= 3;
+
+	if (total_len_left < (num_ucipher_suites * WIDTH_AKM_SUITE))
+		return -1;
+
+	/* Save the cipher suites passed in the command */
+	for (i = 0; i < num_ucipher_suites; i++) {
+		/* Store the MSB first, as required by join_pref */
+		for (j = 0; j < 4; j++) {
+			hex[0] = *pcmd++;
+			hex[1] = *pcmd++;
+			buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+		}
+		memcpy((uint8 *)&ucipher_suites[i], buf, sizeof(uint32));
+	}
+
+	/* Join preference for RSSI
+	 * Type	  : 1 byte (0x01)
+	 * Length : 1 byte (0x02)
+	 * Value  : 2 bytes	(reserved)
+	 */
+	*pref++ = WL_JOIN_PREF_RSSI;
+	*pref++ = JOIN_PREF_RSSI_LEN;
+	*pref++ = 0;
+	*pref++ = 0;
+
+	/* Join preference for WPA
+	 * Type	  : 1 byte (0x02)
+	 * Length : 1 byte (not used)
+	 * Value  : (variable length)
+	 *		reserved: 1 byte
+	 *      count	: 1 byte (no of tuples)
+	 *		Tuple1	: 12 bytes
+	 *			akm[4]
+	 *			ucipher[4]
+	 *			mcipher[4]
+	 *		Tuple2	: 12 bytes
+	 *		Tuplen	: 12 bytes
+	 */
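+	/* Worked example: 1 AKM suite x 1 unicast cipher gives 1 tuple, so
+	 * total_bytes = JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +
+	 * JOIN_PREF_WPA_TUPLE_SIZE = 4 + 4 + 12 = 20 bytes.
+	 */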
+	num_tuples = num_akm_suites * num_ucipher_suites;
+	if (num_tuples != 0) {
+		if (num_tuples <= JOIN_PREF_MAX_WPA_TUPLES) {
+			*pref++ = WL_JOIN_PREF_WPA;
+			*pref++ = 0;
+			*pref++ = 0;
+			*pref++ = (uint8)num_tuples;
+			total_bytes = JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +
+				(JOIN_PREF_WPA_TUPLE_SIZE * num_tuples);
+		} else {
+			DHD_ERROR(("%s: Too many wpa configs for join_pref \n", __FUNCTION__));
+			return -1;
+		}
+	} else {
+		/* No WPA config, configure only RSSI preference */
+		total_bytes = JOIN_PREF_RSSI_SIZE;
+	}
+
+	/* akm-ucipher-mcipher tuples in the format required for join_pref */
+	for (i = 0; i < num_ucipher_suites; i++) {
+		for (j = 0; j < num_akm_suites; j++) {
+			memcpy(pref, (uint8 *)&akm_suites[j], WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+			memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+			/* Set to 0 to match any available multicast cipher */
+			memset(pref, 0, WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+		}
+	}
+
+	prhex("join pref", (uint8 *)buf, total_bytes);
+	error = wldev_iovar_setbuf(dev, "join_pref", buf, total_bytes, smbuf, sizeof(smbuf), NULL);
+	if (error) {
+		DHD_ERROR(("Failed to set join_pref, error = %d\n", error));
+	}
+	return error;
+}
+#endif /* defined(BCMFW_ROAM_ENABLE) */
+
+static int
+wl_android_iolist_add(struct net_device *dev, struct list_head *head, struct io_cfg *config)
+{
+	struct io_cfg *resume_cfg;
+	s32 ret;
+
+	resume_cfg = kzalloc(sizeof(struct io_cfg), GFP_KERNEL);
+	if (!resume_cfg)
+		return -ENOMEM;
+
+	if (config->iovar) {
+		ret = wldev_iovar_getint(dev, config->iovar, &resume_cfg->param);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to get current %s value\n",
+				__FUNCTION__, config->iovar));
+			goto error;
+		}
+
+		ret = wldev_iovar_setint(dev, config->iovar, config->param);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+				config->iovar, config->param));
+			goto error;
+		}
+
+		resume_cfg->iovar = config->iovar;
+	} else {
+		resume_cfg->arg = kzalloc(config->len, GFP_KERNEL);
+		if (!resume_cfg->arg) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		ret = wldev_ioctl(dev, config->ioctl, resume_cfg->arg, config->len, false);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to get ioctl %d\n", __FUNCTION__,
+				config->ioctl));
+			goto error;
+		}
+		ret = wldev_ioctl(dev, config->ioctl + 1, config->arg, config->len, true);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+				config->iovar, config->param));
+			goto error;
+		}
+		if (config->ioctl + 1 == WLC_SET_PM)
+			wl_cfg80211_update_power_mode(dev);
+		resume_cfg->ioctl = config->ioctl;
+		resume_cfg->len = config->len;
+	}
+
+	list_add(&resume_cfg->list, head);
+
+	return 0;
+error:
+	kfree(resume_cfg->arg);
+	kfree(resume_cfg);
+	return ret;
+}
+
+static void
+wl_android_iolist_resume(struct net_device *dev, struct list_head *head)
+{
+	struct io_cfg *config;
+	struct list_head *cur, *q;
+	s32 ret = 0;
+
+	list_for_each_safe(cur, q, head) {
+		config = list_entry(cur, struct io_cfg, list);
+		if (config->iovar) {
+			if (!ret)
+				ret = wldev_iovar_setint(dev, config->iovar,
+					config->param);
+		} else {
+			if (!ret)
+				ret = wldev_ioctl(dev, config->ioctl + 1,
+					config->arg, config->len, true);
+			if (config->ioctl + 1 == WLC_SET_PM)
+				wl_cfg80211_update_power_mode(dev);
+			kfree(config->arg);
+		}
+		list_del(cur);
+		kfree(config);
+	}
+}
+
+static int
+wl_android_set_miracast(struct net_device *dev, char *command, int total_len)
+{
+	int mode, val;
+	int ret = 0;
+	struct io_cfg config;
+
+	if (sscanf(command, "%*s %d", &mode) != 1) {
+		DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+
+	DHD_INFO(("%s: enter miracast mode %d\n", __FUNCTION__, mode));
+
+	if (miracast_cur_mode == mode)
+		return 0;
+
+	wl_android_iolist_resume(dev, &miracast_resume_list);
+	miracast_cur_mode = MIRACAST_MODE_OFF;
+
+	switch (mode) {
+	case MIRACAST_MODE_SOURCE:
+		/* setting mchan_algo to platform specific value */
+		config.iovar = "mchan_algo";
+
+		ret = wldev_ioctl(dev, WLC_GET_BCNPRD, &val, sizeof(int), false);
+		if (!ret && val > 100) {
+			config.param = 0;
+			DHD_ERROR(("%s: Connected station's beacon interval: "
+				"%d and set mchan_algo to %d \n",
+				__FUNCTION__, val, config.param));
+		}
+		else {
+			config.param = MIRACAST_MCHAN_ALGO;
+		}
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+
+		/* setting mchan_bw to platform specific value */
+		config.iovar = "mchan_bw";
+		config.param = MIRACAST_MCHAN_BW;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+
+		/* setting ampdu to platform specific value */
+		config.iovar = "ampdu_mpdu";
+		config.param = MIRACAST_AMPDU_SIZE;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+		/* FALLTHROUGH */
+		/* Source mode shares most configurations with sink mode.
+		 * Fall through here to avoid code duplication
+		 */
+	case MIRACAST_MODE_SINK:
+		/* disable internal roaming */
+		config.iovar = "roam_off";
+		config.param = 1;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+		/* turn off PM */
+		val = 0;
+		config.iovar = NULL;
+		config.ioctl = WLC_GET_PM;
+		config.arg = &val;
+		config.len = sizeof(int);
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+
+		break;
+	case MIRACAST_MODE_OFF:
+	default:
+		break;
+	}
+	miracast_cur_mode = mode;
+
+	return 0;
+
+resume:
+	DHD_ERROR(("%s: turning off miracast mode due to error %d\n", __FUNCTION__, ret));
+	wl_android_iolist_resume(dev, &miracast_resume_list);
+	return ret;
+}
+
+#define NETLINK_OXYGEN     30
+#define AIBSS_BEACON_TIMEOUT	10
+
+static struct sock *nl_sk = NULL;
+
+static void wl_netlink_recv(struct sk_buff *skb)
+{
+	WL_ERR(("netlink_recv called\n"));
+}
+
+static int wl_netlink_init(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+	struct netlink_kernel_cfg cfg = {
+		.input	= wl_netlink_recv,
+	};
+#endif
+
+	if (nl_sk != NULL) {
+		WL_ERR(("nl_sk already exist\n"));
+		return BCME_ERROR;
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+	nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN,
+		0, wl_netlink_recv, NULL, THIS_MODULE);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+	nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, THIS_MODULE, &cfg);
+#else
+	nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, &cfg);
+#endif
+
+	if (nl_sk == NULL) {
+		WL_ERR(("nl_sk is not ready\n"));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+static void wl_netlink_deinit(void)
+{
+	if (nl_sk) {
+		netlink_kernel_release(nl_sk);
+		nl_sk = NULL;
+	}
+}
+
+s32
+wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size)
+{
+	struct sk_buff *skb = NULL;
+	struct nlmsghdr *nlh = NULL;
+	int ret = -1;
+
+	if (nl_sk == NULL) {
+		WL_ERR(("nl_sk was not initialized\n"));
+		goto nlmsg_failure;
+	}
+
+	skb = alloc_skb(NLMSG_SPACE(size), GFP_ATOMIC);
+	if (skb == NULL) {
+		WL_ERR(("failed to allocate memory\n"));
+		goto nlmsg_failure;
+	}
+
+	nlh = nlmsg_put(skb, 0, 0, 0, size, 0);
+	if (nlh == NULL) {
+		WL_ERR(("failed to build nlmsg, skb_tailroom:%d, nlmsg_total_size:%d\n",
+			skb_tailroom(skb), nlmsg_total_size(size)));
+		dev_kfree_skb(skb);
+		goto nlmsg_failure;
+	}
+
+	memcpy(nlmsg_data(nlh), data, size);
+	nlh->nlmsg_seq = seq;
+	nlh->nlmsg_type = type;
+
+	/* netlink_unicast() takes ownership of the skb and frees it itself. */
+	ret = netlink_unicast(nl_sk, skb, pid, 0);
+	WL_DBG(("netlink_unicast() pid=%d, ret=%d\n", pid, ret));
+
+nlmsg_failure:
+	return ret;
+}
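+/*
+ * Illustrative call (a sketch, not a call site taken from this file): unicast
+ * an event structure to the userspace listener registered on NETLINK_OXYGEN:
+ *
+ *	wl_netlink_send_msg(pid, 0, 0, &evt, sizeof(evt));
+ */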
+
+#ifdef WLAIBSS
+static int wl_android_set_ibss_txfail_event(struct net_device *dev, char *command, int total_len)
+{
+	int err = 0;
+	int retry = 0;
+	int pid = 0;
+	aibss_txfail_config_t txfail_config = {0, 0, 0, 0};
+	char smbuf[WLC_IOCTL_SMLEN];
+
+	if (sscanf(command, CMD_SETIBSSTXFAILEVENT " %d %d", &retry, &pid) <= 0) {
+		WL_ERR(("Failed to get Parameter from : %s\n", command));
+		return -1;
+	}
+
+	/* set pid, and if the event was happened, let's send a notification through netlink */
+	wl_cfg80211_set_txfail_pid(pid);
+
+	/* If retry value is 0, it disables the functionality for TX Fail. */
+	if (retry > 0) {
+		txfail_config.max_tx_retry = retry;
+		txfail_config.bcn_timeout = 0;	/* 0 : disable tx fail from beacon */
+	}
+	txfail_config.version = AIBSS_TXFAIL_CONFIG_VER_0;
+	txfail_config.len = sizeof(txfail_config);
+
+	err = wldev_iovar_setbuf(dev, "aibss_txfail_config", (void *) &txfail_config,
+		sizeof(aibss_txfail_config_t), smbuf, WLC_IOCTL_SMLEN, NULL);
+	WL_DBG(("retry=%d, pid=%d, err=%d\n", retry, pid, err));
+
+	return ((err == 0)?total_len:err);
+}
+
+static int wl_android_get_ibss_peer_info(struct net_device *dev, char *command,
+	int total_len, bool bAll)
+{
+	int error;
+	int bytes_written = 0;
+	void *buf = NULL;
+	bss_peer_list_info_t peer_list_info;
+	bss_peer_info_t *peer_info;
+	int i;
+	bool found = false;
+	struct ether_addr mac_ea;
+
+	WL_DBG(("get ibss peer info(%s)\n", bAll?"true":"false"));
+
+	if (!bAll) {
+		if (sscanf (command, "GETIBSSPEERINFO %02x:%02x:%02x:%02x:%02x:%02x",
+			(unsigned int *)&mac_ea.octet[0], (unsigned int *)&mac_ea.octet[1],
+			(unsigned int *)&mac_ea.octet[2], (unsigned int *)&mac_ea.octet[3],
+			(unsigned int *)&mac_ea.octet[4], (unsigned int *)&mac_ea.octet[5]) != 6) {
+			WL_DBG(("invalid MAC address\n"));
+			return -1;
+		}
+	}
+
+	if ((buf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL)) == NULL) {
+		WL_ERR(("kmalloc failed\n"));
+		return -1;
+	}
+
+	error = wldev_iovar_getbuf(dev, "bss_peer_info", NULL, 0, buf, WLC_IOCTL_MAXLEN, NULL);
+	if (unlikely(error)) {
+		WL_ERR(("could not get ibss peer info (%d)\n", error));
+		kfree(buf);
+		return -1;
+	}
+
+	memcpy(&peer_list_info, buf, sizeof(peer_list_info));
+	peer_list_info.version = htod16(peer_list_info.version);
+	peer_list_info.bss_peer_info_len = htod16(peer_list_info.bss_peer_info_len);
+	peer_list_info.count = htod32(peer_list_info.count);
+
+	WL_DBG(("ver:%d, len:%d, count:%d\n", peer_list_info.version,
+		peer_list_info.bss_peer_info_len, peer_list_info.count));
+
+	if (peer_list_info.count > 0) {
+		if (bAll)
+			bytes_written += sprintf(&command[bytes_written], "%u ",
+				peer_list_info.count);
+
+		peer_info = (bss_peer_info_t *) ((void *)buf + BSS_PEER_LIST_INFO_FIXED_LEN);
+
+
+		for (i = 0; i < peer_list_info.count; i++) {
+
+			WL_DBG(("index:%d rssi:%d, tx:%u, rx:%u\n", i, peer_info->rssi,
+				peer_info->tx_rate, peer_info->rx_rate));
+
+			if (!bAll &&
+				memcmp(&mac_ea, &peer_info->ea, sizeof(struct ether_addr)) == 0) {
+				found = true;
+			}
+
+			if (bAll || found) {
+				bytes_written += sprintf(&command[bytes_written], MACF,
+					ETHER_TO_MACF(peer_info->ea));
+				bytes_written += sprintf(&command[bytes_written], " %u %d ",
+					peer_info->tx_rate/1000, peer_info->rssi);
+			}
+
+			if (found)
+				break;
+
+			peer_info = (bss_peer_info_t *)((void *)peer_info+sizeof(bss_peer_info_t));
+		}
+	}
+	else {
+		WL_ERR(("could not get ibss peer info : no item\n"));
+	}
+	bytes_written += sprintf(&command[bytes_written], "%s", "\0");
+
+	WL_DBG(("command(%u):%s\n", total_len, command));
+	WL_DBG(("bytes_written:%d\n", bytes_written));
+
+	kfree(buf);
+	return bytes_written;
+}
+
+int wl_android_set_ibss_routetable(struct net_device *dev, char *command, int total_len)
+{
+
+	char *pcmd = command;
+	char *str = NULL;
+
+	ibss_route_tbl_t *route_tbl = NULL;
+	char *ioctl_buf = NULL;
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	s32 err = BCME_OK;
+	uint32 route_tbl_len;
+	uint32 entries;
+	char *endptr;
+	uint32 i = 0;
+	struct ipv4_addr  dipaddr;
+	struct ether_addr ea;
+
+	route_tbl_len = sizeof(ibss_route_tbl_t) +
+		(MAX_IBSS_ROUTE_TBL_ENTRY - 1) * sizeof(ibss_route_entry_t);
+	route_tbl = (ibss_route_tbl_t *)kzalloc(route_tbl_len, kflags);
+	if (!route_tbl) {
+		WL_ERR(("Route TBL alloc failed\n"));
+		return -ENOMEM;
+	}
+	ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+	if (!ioctl_buf) {
+		WL_ERR(("ioctl memory alloc failed\n"));
+		if (route_tbl) {
+			kfree(route_tbl);
+		}
+		return -ENOMEM;
+	}
+	memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN);
+
+	/* drop command */
+	str = bcmstrtok(&pcmd, " ", NULL);
+
+	/* get count */
+	str = bcmstrtok(&pcmd, " ",  NULL);
+	if (!str) {
+		WL_ERR(("Invalid number parameter %s\n", str));
+		err = -EINVAL;
+		goto exit;
+	}
+	entries = bcm_strtoul(str, &endptr, 0);
+	if (*endptr != '\0') {
+		WL_ERR(("Invalid number parameter %s\n", str));
+		err = -EINVAL;
+		goto exit;
+	}
+	if (entries > MAX_IBSS_ROUTE_TBL_ENTRY) {
+		WL_ERR(("Invalid entry count %u (max %d)\n", entries, MAX_IBSS_ROUTE_TBL_ENTRY));
+		err = -EINVAL;
+		goto exit;
+	}
+	WL_INFORM(("Routing table count:%u\n", entries));
+	route_tbl->num_entry = entries;
+
+	for (i = 0; i < entries; i++) {
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (!str || !bcm_atoipv4(str, &dipaddr)) {
+			WL_ERR(("Invalid ip string %s\n", str));
+			err = -EINVAL;
+			goto exit;
+		}
+
+
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (!str || !bcm_ether_atoe(str, &ea)) {
+			WL_ERR(("Invalid ethernet string %s\n", str));
+			err = -EINVAL;
+			goto exit;
+		}
+		bcopy(&dipaddr, &route_tbl->route_entry[i].ipv4_addr, IPV4_ADDR_LEN);
+		bcopy(&ea, &route_tbl->route_entry[i].nexthop, ETHER_ADDR_LEN);
+	}
+
+	route_tbl_len = sizeof(ibss_route_tbl_t) +
+		((!entries?0:(entries - 1)) * sizeof(ibss_route_entry_t));
+	err = wldev_iovar_setbuf(dev, "ibss_route_tbl",
+		route_tbl, route_tbl_len, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+	if (err != BCME_OK) {
+		WL_ERR(("Fail to set iovar %d\n", err));
+		err = -EINVAL;
+	}
+
+exit:
+	if (route_tbl)
+		kfree(route_tbl);
+	if (ioctl_buf)
+		kfree(ioctl_buf);
+	return err;
+
+}
+
+int
+wl_android_set_ibss_ampdu(struct net_device *dev, char *command, int total_len)
+{
+	char *pcmd = command;
+	char *str = NULL, *endptr = NULL;
+	struct ampdu_aggr aggr;
+	char smbuf[WLC_IOCTL_SMLEN];
+	int idx;
+	int err = 0;
+	int wme_AC2PRIO[AC_COUNT][2] = {
+		{PRIO_8021D_VO, PRIO_8021D_NC},		/* AC_VO - 3 */
+		{PRIO_8021D_CL, PRIO_8021D_VI},		/* AC_VI - 2 */
+		{PRIO_8021D_BK, PRIO_8021D_NONE},	/* AC_BK - 1 */
+		{PRIO_8021D_BE, PRIO_8021D_EE}};	/* AC_BE - 0 */
+
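+	/*
+	 * Illustrative command layout (matching the per-AC loop below):
+	 * "<CMD_SETIBSSAMPDU> <VO> <VI> <BK> <BE>", each value 1/0 enabling or
+	 * disabling TX AMPDU aggregation for that access category, in the same
+	 * order as wme_AC2PRIO[] above.
+	 */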
+	WL_DBG(("set ibss ampdu:%s\n", command));
+
+	memset(&aggr, 0, sizeof(aggr));
+	/* Configure all priorities */
+	aggr.conf_TID_bmap = NBITMASK(NUMPRIO);
+
+	/* acquire parameters */
+	/* drop command */
+	str = bcmstrtok(&pcmd, " ", NULL);
+
+	for (idx = 0; idx < AC_COUNT; idx++) {
+		bool on;
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (!str) {
+			WL_ERR(("Invalid parameter : %s\n", pcmd));
+			return -EINVAL;
+		}
+		on = bcm_strtoul(str, &endptr, 0) ? TRUE : FALSE;
+		if (*endptr != '\0') {
+			WL_ERR(("Invalid number format %s\n", str));
+			return -EINVAL;
+		}
+		if (on) {
+			setbit(&aggr.enab_TID_bmap, wme_AC2PRIO[idx][0]);
+			setbit(&aggr.enab_TID_bmap, wme_AC2PRIO[idx][1]);
+		}
+	}
+
+	err = wldev_iovar_setbuf(dev, "ampdu_txaggr", (void *)&aggr,
+	sizeof(aggr), smbuf, WLC_IOCTL_SMLEN, NULL);
+
+	return ((err == 0) ? total_len : err);
+}
+
+int wl_android_set_ibss_antenna(struct net_device *dev, char *command, int total_len)
+{
+	char *pcmd = command;
+	char *str = NULL;
+	int txchain, rxchain;
+	int err = 0;
+
+	WL_DBG(("set ibss antenna:%s\n", command));
+
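+	/*
+	 * Illustrative command layout (per the parsing below):
+	 * "<CMD_SETIBSSANTENNAMODE> <txchain> <rxchain>"; both values are
+	 * passed straight through to the "txchain"/"rxchain" iovars.
+	 */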
+	/* acquire parameters */
+	/* drop command */
+	str = bcmstrtok(&pcmd, " ", NULL);
+
+	/* TX chain */
+	str = bcmstrtok(&pcmd, " ", NULL);
+	if (!str) {
+		WL_ERR(("Invalid parameter : %s\n", pcmd));
+		return -EINVAL;
+	}
+	txchain = bcm_atoi(str);
+
+	/* RX chain */
+	str = bcmstrtok(&pcmd, " ", NULL);
+	if (!str) {
+		WL_ERR(("Invalid parameter : %s\n", pcmd));
+		return -EINVAL;
+	}
+	rxchain = bcm_atoi(str);
+
+	err = wldev_iovar_setint(dev, "txchain", txchain);
+	if (err != 0)
+		return err;
+	err = wldev_iovar_setint(dev, "rxchain", rxchain);
+	return ((err == 0)?total_len:err);
+}
+#endif /* WLAIBSS */
+
+int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len)
+{
+	char 				buf[256];
+	const char 			*str;
+	wl_mkeep_alive_pkt_t	mkeep_alive_pkt;
+	wl_mkeep_alive_pkt_t	*mkeep_alive_pktp;
+	int					buf_len;
+	int					str_len;
+	int res 				= -1;
+	uint period_msec = 0;
+
+	if (extra == NULL)
+	{
+		 DHD_ERROR(("%s: extra is NULL\n", __FUNCTION__));
+		 return -1;
+	}
+	if (sscanf(extra, "%d", &period_msec) != 1)
+	{
+		 DHD_ERROR(("%s: sscanf error. check period_msec value\n", __FUNCTION__));
+		 return -EINVAL;
+	}
+	DHD_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec));
+
+	memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
+
+	str = "mkeep_alive";
+	str_len = strlen(str);
+	strncpy(buf, str, str_len);
+	buf[ str_len ] = '\0';
+	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
+	mkeep_alive_pkt.period_msec = period_msec;
+	buf_len = str_len + 1;
+	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+
+	/* Setup keep alive zero for null packet generation */
+	mkeep_alive_pkt.keep_alive_id = 0;
+	mkeep_alive_pkt.len_bytes = 0;
+	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+	/* Keep-alive attributes are set in a local variable (mkeep_alive_pkt), and
+	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+	 * guarantee that the buffer is properly aligned.
+	 */
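+	/*
+	 * Resulting iovar buffer layout (illustrative):
+	 *   [ "mkeep_alive\0" | wl_mkeep_alive_pkt_t fixed header ]
+	 * with keep_alive_id = 0 and len_bytes = 0, i.e. a null keep-alive
+	 * packet generated every period_msec milliseconds.
+	 */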
+	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+	if ((res = wldev_ioctl(dev, WLC_SET_VAR, buf, buf_len, TRUE)) < 0)
+	{
+		DHD_ERROR(("%s:keep_alive set failed. res[%d]\n", __FUNCTION__, res));
+	}
+	else
+	{
+		DHD_ERROR(("%s:keep_alive set ok. res[%d]\n", __FUNCTION__, res));
+	}
+
+	return res;
+}
+
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+#define PRIVATE_COMMAND_MAX_LEN	8192
+	int ret = 0;
+	char *command = NULL;
+	int bytes_written = 0;
+	android_wifi_priv_cmd priv_cmd;
+
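+	/*
+	 * ifr->ifr_data points at an android_wifi_priv_cmd describing a
+	 * user-space buffer ({ buf, used_len, total_len }; see the compat path
+	 * below). The command string is copied in, dispatched on its prefix,
+	 * and any reply is copied back into the same buffer.
+	 */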
+	net_os_wake_lock(net);
+
+	if (!ifr->ifr_data) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		compat_android_wifi_priv_cmd compat_priv_cmd;
+		if (copy_from_user(&compat_priv_cmd, ifr->ifr_data,
+			sizeof(compat_android_wifi_priv_cmd))) {
+			ret = -EFAULT;
+			goto exit;
+
+		}
+		priv_cmd.buf = compat_ptr(compat_priv_cmd.buf);
+		priv_cmd.used_len = compat_priv_cmd.used_len;
+		priv_cmd.total_len = compat_priv_cmd.total_len;
+	} else
+#endif /* CONFIG_COMPAT */
+	{
+		if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
+			ret = -EFAULT;
+			goto exit;
+		}
+	}
+	if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) {
+		DHD_ERROR(("%s: private command too long\n", __FUNCTION__));
+		ret = -EINVAL;
+		goto exit;
+	}
+	command = kmalloc((priv_cmd.total_len + 1), GFP_KERNEL);
+	if (!command)
+	{
+		DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+		ret = -EFAULT;
+		goto exit;
+	}
+	command[priv_cmd.total_len] = '\0';
+
+	DHD_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+
+	if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
+		DHD_INFO(("%s, Received regular START command\n", __FUNCTION__));
+		bytes_written = wl_android_wifi_on(net);
+	}
+	else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
+		bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
+	}
+
+	if (!g_wifi_on) {
+		DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n",
+			__FUNCTION__, command, ifr->ifr_name));
+		ret = 0;
+		goto exit;
+	}
+
+	if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
+		bytes_written = wl_android_wifi_off(net);
+	}
+	else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
+		/* TBD: SCAN-ACTIVE */
+	}
+	else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
+		/* TBD: SCAN-PASSIVE */
+	}
+	else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
+		bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
+		bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
+	}
+#ifdef PKT_FILTER_SUPPORT
+	else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
+		bytes_written = net_os_enable_packet_filter(net, 1);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
+		bytes_written = net_os_enable_packet_filter(net, 0);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+	}
+#endif /* PKT_FILTER_SUPPORT */
+	else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
+		/* TBD: BTCOEXSCAN-START */
+	}
+	else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
+		/* TBD: BTCOEXSCAN-STOP */
+	}
+	else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
+#ifdef WL_CFG80211
+		void *dhdp = wl_cfg80211_get_dhdp();
+		bytes_written = wl_cfg80211_set_btcoex_dhcp(net, dhdp, command);
+#else
+#ifdef PKT_FILTER_SUPPORT
+		uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
+
+		if (mode == 1)
+			net_os_enable_packet_filter(net, 0); /* DHCP starts */
+		else
+			net_os_enable_packet_filter(net, 1); /* DHCP ends */
+#endif /* PKT_FILTER_SUPPORT */
+#endif /* WL_CFG80211 */
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
+		bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
+		bytes_written = wl_android_set_suspendmode(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+		uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+#ifdef WL_HOST_BAND_MGMT
+		s32 ret = 0;
+		if ((ret = wl_cfg80211_set_band(net, band)) < 0) {
+			if (ret == BCME_UNSUPPORTED) {
+				/* If roam_var is unsupported, fallback to the original method */
+				WL_ERR(("WL_HOST_BAND_MGMT defined, "
+					"but roam_band iovar unsupported in the firmware\n"));
+			} else {
+				bytes_written = -1;
+				goto exit;
+			}
+		}
+		if ((band == WLC_BAND_AUTO) || (ret == BCME_UNSUPPORTED))
+			bytes_written = wldev_set_band(net, band);
+#else
+		bytes_written = wldev_set_band(net, band);
+#endif /* WL_HOST_BAND_MGMT */
+	}
+	else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
+		bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
+	}
+#ifdef WL_CFG80211
+	/* CUSTOMER_SET_COUNTRY feature is defined only for the GGSM model */
+	else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+		char *country_code = command + strlen(CMD_COUNTRY) + 1;
+		bytes_written = wldev_set_country(net, country_code, true, true);
+	}
+#endif /* WL_CFG80211 */
+
+
+#ifdef PNO_SUPPORT
+	else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
+		bytes_written = dhd_dev_pno_stop_for_ssid(net);
+	}
+#ifndef WL_SCHED_SCAN
+	else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) {
+		bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
+	}
+#endif /* !WL_SCHED_SCAN */
+	else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) {
+		int enable = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0';
+		bytes_written = (enable)? 0 : dhd_dev_pno_stop_for_ssid(net);
+	}
+	else if (strnicmp(command, CMD_WLS_BATCHING, strlen(CMD_WLS_BATCHING)) == 0) {
+		bytes_written = wls_parse_batching_cmd(net, command, priv_cmd.total_len);
+	}
+#endif /* PNO_SUPPORT */
+	else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) {
+		bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) {
+		int skip = strlen(CMD_P2P_SET_NOA) + 1;
+		bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+#ifdef WL_SDO
+	else if (strnicmp(command, CMD_P2P_SD_OFFLOAD, strlen(CMD_P2P_SD_OFFLOAD)) == 0) {
+		u8 *buf = command;
+		u8 *cmd_id = NULL;
+		int len;
+
+		cmd_id = strsep((char **)&buf, " ");
+		/* if buf == NULL, means no arg */
+		if (buf == NULL)
+			len = 0;
+		else
+			len = strlen(buf);
+
+		bytes_written = wl_cfg80211_sd_offload(net, cmd_id, buf, len);
+	}
+#endif /* WL_SDO */
+#ifdef WL_NAN
+	else if (strnicmp(command, CMD_NAN, strlen(CMD_NAN)) == 0) {
+		bytes_written = wl_cfg80211_nan_cmd_handler(net, command,
+			priv_cmd.total_len);
+	}
+#endif /* WL_NAN */
+#if !defined WL_ENABLE_P2P_IF
+	else if (strnicmp(command, CMD_P2P_GET_NOA, strlen(CMD_P2P_GET_NOA)) == 0) {
+		bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len);
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) {
+		int skip = strlen(CMD_P2P_SET_PS) + 1;
+		bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE,
+		strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) {
+		int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3;
+		bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
+			priv_cmd.total_len - skip, *(command + skip - 2) - '0');
+	}
+#ifdef WLFBT
+	else if (strnicmp(command, CMD_GET_FTKEY, strlen(CMD_GET_FTKEY)) == 0) {
+		wl_cfg80211_get_fbt_key(command);
+		bytes_written = FBT_KEYLEN;
+	}
+#endif /* WLFBT */
+#endif /* WL_CFG80211 */
+	else if (strnicmp(command, CMD_OKC_SET_PMK, strlen(CMD_OKC_SET_PMK)) == 0)
+		bytes_written = wl_android_set_pmk(net, command, priv_cmd.total_len);
+	else if (strnicmp(command, CMD_OKC_ENABLE, strlen(CMD_OKC_ENABLE)) == 0)
+		bytes_written = wl_android_okc_enable(net, command, priv_cmd.total_len);
+#ifdef BCMCCX
+	else if (strnicmp(command, CMD_GETCCKM_RN, strlen(CMD_GETCCKM_RN)) == 0) {
+		bytes_written = wl_android_get_cckm_rn(net, command);
+	}
+	else if (strnicmp(command, CMD_SETCCKM_KRK, strlen(CMD_SETCCKM_KRK)) == 0) {
+		bytes_written = wl_android_set_cckm_krk(net, command);
+	}
+	else if (strnicmp(command, CMD_GET_ASSOC_RES_IES, strlen(CMD_GET_ASSOC_RES_IES)) == 0) {
+		bytes_written = wl_android_get_assoc_res_ies(net, command);
+	}
+#endif /* BCMCCX */
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+	else if (strnicmp(command, CMD_GET_BEST_CHANNELS,
+		strlen(CMD_GET_BEST_CHANNELS)) == 0) {
+		bytes_written = wl_cfg80211_get_best_channels(net, command,
+			priv_cmd.total_len);
+	}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+	else if (strnicmp(command, CMD_HAPD_MAC_FILTER, strlen(CMD_HAPD_MAC_FILTER)) == 0) {
+		int skip = strlen(CMD_HAPD_MAC_FILTER) + 1;
+		wl_android_set_mac_address_filter(net, (const char*)command+skip);
+	}
+	else if (strnicmp(command, CMD_SETROAMMODE, strlen(CMD_SETROAMMODE)) == 0)
+		bytes_written = wl_android_set_roam_mode(net, command, priv_cmd.total_len);
+#if defined(BCMFW_ROAM_ENABLE)
+	else if (strnicmp(command, CMD_SET_ROAMPREF, strlen(CMD_SET_ROAMPREF)) == 0) {
+		bytes_written = wl_android_set_roampref(net, command, priv_cmd.total_len);
+	}
+#endif /* BCMFW_ROAM_ENABLE */
+	else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0)
+		bytes_written = wl_android_set_miracast(net, command, priv_cmd.total_len);
+	else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0)
+		bytes_written = wl_android_set_ibss_beacon_ouidata(net,
+		command, priv_cmd.total_len);
+#ifdef WLAIBSS
+	else if (strnicmp(command, CMD_SETIBSSTXFAILEVENT,
+		strlen(CMD_SETIBSSTXFAILEVENT)) == 0)
+		bytes_written = wl_android_set_ibss_txfail_event(net, command, priv_cmd.total_len);
+	else if (strnicmp(command, CMD_GET_IBSS_PEER_INFO_ALL,
+		strlen(CMD_GET_IBSS_PEER_INFO_ALL)) == 0)
+		bytes_written = wl_android_get_ibss_peer_info(net, command, priv_cmd.total_len,
+			TRUE);
+	else if (strnicmp(command, CMD_GET_IBSS_PEER_INFO,
+		strlen(CMD_GET_IBSS_PEER_INFO)) == 0)
+		bytes_written = wl_android_get_ibss_peer_info(net, command, priv_cmd.total_len,
+			FALSE);
+	else if (strnicmp(command, CMD_SETIBSSROUTETABLE,
+		strlen(CMD_SETIBSSROUTETABLE)) == 0)
+		bytes_written = wl_android_set_ibss_routetable(net, command,
+			priv_cmd.total_len);
+	else if (strnicmp(command, CMD_SETIBSSAMPDU, strlen(CMD_SETIBSSAMPDU)) == 0)
+		bytes_written = wl_android_set_ibss_ampdu(net, command, priv_cmd.total_len);
+	else if (strnicmp(command, CMD_SETIBSSANTENNAMODE, strlen(CMD_SETIBSSANTENNAMODE)) == 0)
+		bytes_written = wl_android_set_ibss_antenna(net, command, priv_cmd.total_len);
+#endif /* WLAIBSS */
+	else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
+		int skip = strlen(CMD_KEEP_ALIVE) + 1;
+		bytes_written = wl_keep_alive_set(net, command + skip, priv_cmd.total_len - skip);
+	}
+	else if (strnicmp(command, CMD_ROAM_OFFLOAD, strlen(CMD_ROAM_OFFLOAD)) == 0) {
+		int enable = *(command + strlen(CMD_ROAM_OFFLOAD) + 1) - '0';
+		bytes_written = wl_cfg80211_enable_roam_offload(net, enable);
+	}
+	else {
+		DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
+		snprintf(command, 3, "OK");
+		bytes_written = strlen("OK");
+	}
+
+	if (bytes_written >= 0) {
+		if ((bytes_written == 0) && (priv_cmd.total_len > 0))
+			command[0] = '\0';
+		if (bytes_written >= priv_cmd.total_len) {
+			DHD_ERROR(("%s: bytes_written = %d\n", __FUNCTION__, bytes_written));
+			bytes_written = priv_cmd.total_len;
+		} else {
+			bytes_written++;
+		}
+		priv_cmd.used_len = bytes_written;
+		if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
+			DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+			ret = -EFAULT;
+		}
+	}
+	else {
+		ret = bytes_written;
+	}
+
+exit:
+	net_os_wake_unlock(net);
+	if (command) {
+		kfree(command);
+	}
+
+	return ret;
+}
+
+int wl_android_init(void)
+{
+	int ret = 0;
+
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
+	dhd_download_fw_on_driverload = FALSE;
+#endif /* ENABLE_INSMOD_NO_FW_LOAD */
+#if defined(CUSTOMER_HW2)
+	if (!iface_name[0]) {
+		memset(iface_name, 0, IFNAMSIZ);
+		bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
+	}
+#endif /* CUSTOMER_HW2 */
+
+#ifdef WL_GENL
+	wl_genl_init();
+#endif
+	wl_netlink_init();
+
+	return ret;
+}
+
+int wl_android_exit(void)
+{
+	int ret = 0;
+	struct io_cfg *cur, *q;
+
+#ifdef WL_GENL
+	wl_genl_deinit();
+#endif /* WL_GENL */
+	wl_netlink_deinit();
+
+	list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
+		list_del(&cur->list);
+		kfree(cur);
+	}
+
+	return ret;
+}
+
+void wl_android_post_init(void)
+{
+
+#ifdef ENABLE_4335BT_WAR
+	bcm_bt_unlock(lock_cookie_wifi);
+	printf("%s: btlock released\n", __FUNCTION__);
+#endif /* ENABLE_4335BT_WAR */
+
+	if (!dhd_download_fw_on_driverload)
+		g_wifi_on = FALSE;
+}
+
+#ifdef WL_GENL
+/* Generic Netlink Initialization */
+static int wl_genl_init(void)
+{
+	int ret;
+
+	WL_DBG(("GEN Netlink Init\n\n"));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+	/* register new family */
+	ret = genl_register_family(&wl_genl_family);
+	if (ret != 0)
+		goto failure;
+
+	/* register functions (commands) of the new family */
+	ret = genl_register_ops(&wl_genl_family, &wl_genl_ops);
+	if (ret != 0) {
+		WL_ERR(("register ops failed: %i\n", ret));
+		genl_unregister_family(&wl_genl_family);
+		goto failure;
+	}
+
+	ret = genl_register_mc_group(&wl_genl_family, &wl_genl_mcast);
+#else
+	ret = genl_register_family_with_ops_groups(&wl_genl_family, wl_genl_ops, wl_genl_mcast);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
+	if (ret != 0) {
+		WL_ERR(("register mc_group failed: %i\n", ret));
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+		genl_unregister_ops(&wl_genl_family, &wl_genl_ops);
+#endif
+		genl_unregister_family(&wl_genl_family);
+		goto failure;
+	}
+
+	return 0;
+
+failure:
+	WL_ERR(("Registering Netlink failed!!\n"));
+	return -1;
+}
+
+/* Generic netlink deinit */
+static int wl_genl_deinit(void)
+{
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+	if (genl_unregister_ops(&wl_genl_family, &wl_genl_ops) < 0)
+		WL_ERR(("Unregister wl_genl_ops failed\n"));
+#endif
+	if (genl_unregister_family(&wl_genl_family) < 0)
+		WL_ERR(("Unregister wl_genl_family failed\n"));
+
+	return 0;
+}
+
+s32 wl_event_to_bcm_event(u16 event_type)
+{
+	u16 event = -1;
+
+	switch (event_type) {
+		case WLC_E_SERVICE_FOUND:
+			event = BCM_E_SVC_FOUND;
+			break;
+		case WLC_E_P2PO_ADD_DEVICE:
+			event = BCM_E_DEV_FOUND;
+			break;
+		case WLC_E_P2PO_DEL_DEVICE:
+			event = BCM_E_DEV_LOST;
+			break;
+	/* Above events are supported from BCM Supp ver 47 Onwards */
+#ifdef BT_WIFI_HANDOVER
+		case WLC_E_BT_WIFI_HANDOVER_REQ:
+			event = BCM_E_DEV_BT_WIFI_HO_REQ;
+			break;
+#endif /* BT_WIFI_HANDOVER */
+
+		default:
+			WL_ERR(("Event not supported\n"));
+	}
+
+	return event;
+}
+
+s32
+wl_genl_send_msg(
+	struct net_device *ndev,
+	u32 event_type,
+	u8 *buf,
+	u16 len,
+	u8 *subhdr,
+	u16 subhdr_len)
+{
+	int ret = 0;
+	struct sk_buff *skb = NULL;
+	void *msg;
+	u32 attr_type = 0;
+	bcm_event_hdr_t *hdr = NULL;
+	int mcast = 1; /* By default sent as multicast type */
+	int pid = 0;
+	u8 *ptr = NULL, *p = NULL;
+	u32 tot_len = sizeof(bcm_event_hdr_t) + subhdr_len + len;
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+
+	WL_DBG(("Enter \n"));
+
+	/* Decide between STRING event and Data event */
+	if (event_type == 0)
+		attr_type = BCM_GENL_ATTR_STRING;
+	else
+		attr_type = BCM_GENL_ATTR_MSG;
+
+	skb = genlmsg_new(NLMSG_GOODSIZE, kflags);
+	if (skb == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	msg = genlmsg_put(skb, 0, 0, &wl_genl_family, 0, BCM_GENL_CMD_MSG);
+	if (msg == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+
+	if (attr_type == BCM_GENL_ATTR_STRING) {
+		/* Add a BCM_GENL_ATTR_STRING attribute. Since it is specified as a
+		 * string, make sure it is null terminated.
+		 */
+		if (subhdr || subhdr_len) {
+			WL_ERR(("No sub hdr support for the ATTR STRING type \n"));
+			ret =  -EINVAL;
+			goto out;
+		}
+
+		ret = nla_put_string(skb, BCM_GENL_ATTR_STRING, buf);
+		if (ret != 0) {
+			WL_ERR(("nla_put_string failed\n"));
+			goto out;
+		}
+	} else {
+		/* ATTR_MSG */
+
+		/* Create a single buffer for all */
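+		/*
+		 * Illustrative payload layout for BCM_GENL_ATTR_MSG:
+		 *   [ bcm_event_hdr_t | optional subhdr | event data ]
+		 * hdr->len covers subhdr_len + len; hdr->event_type is the
+		 * BCM_E_* value mapped from the firmware event below.
+		 */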
+		p = ptr = kzalloc(tot_len, kflags);
+		if (!ptr) {
+			ret = -ENOMEM;
+			WL_ERR(("ENOMEM!!\n"));
+			goto out;
+		}
+
+		/* Include the bcm event header */
+		hdr = (bcm_event_hdr_t *)ptr;
+		hdr->event_type = wl_event_to_bcm_event(event_type);
+		hdr->len = len + subhdr_len;
+		ptr += sizeof(bcm_event_hdr_t);
+
+		/* Copy subhdr (if any) */
+		if (subhdr && subhdr_len) {
+			memcpy(ptr, subhdr, subhdr_len);
+			ptr += subhdr_len;
+		}
+
+		/* Copy the data */
+		if (buf && len) {
+			memcpy(ptr, buf, len);
+		}
+
+		ret = nla_put(skb, BCM_GENL_ATTR_MSG, tot_len, p);
+		if (ret != 0) {
+			WL_ERR(("nla_put failed\n"));
+			goto out;
+		}
+	}
+
+	if (mcast) {
+		int err = 0;
+		/* finalize the message */
+		genlmsg_end(skb, msg);
+		/* NETLINK_CB(skb).dst_group = 1; */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+		if ((err = genlmsg_multicast(skb, 0, wl_genl_mcast.id, GFP_ATOMIC)) < 0)
+#else
+		if ((err = genlmsg_multicast(&wl_genl_family, skb, 0, 0, GFP_ATOMIC)) < 0)
+#endif
+			WL_ERR(("genlmsg_multicast for attr(%d) failed. Error:%d \n",
+				attr_type, err));
+		else
+			WL_DBG(("Multicast msg sent successfully. attr_type:%d len:%d \n",
+				attr_type, tot_len));
+	} else {
+		NETLINK_CB(skb).dst_group = 0; /* Not in multicast group */
+
+		/* finalize the message */
+		genlmsg_end(skb, msg);
+
+		/* send the message back */
+		if (genlmsg_unicast(&init_net, skb, pid) < 0)
+			WL_ERR(("genlmsg_unicast failed\n"));
+	}
+
+out:
+	if (p)
+		kfree(p);
+	if (ret)
+		nlmsg_free(skb);
+
+	return ret;
+}
+
+static s32
+wl_genl_handle_msg(
+	struct sk_buff *skb,
+	struct genl_info *info)
+{
+	struct nlattr *na;
+	u8 *data = NULL;
+
+	WL_DBG(("Enter \n"));
+
+	if (info == NULL) {
+		return -EINVAL;
+	}
+
+	na = info->attrs[BCM_GENL_ATTR_MSG];
+	if (!na) {
+		WL_ERR(("nlattribute NULL\n"));
+		return -EINVAL;
+	}
+
+	data = (char *)nla_data(na);
+	if (!data) {
+		WL_ERR(("Invalid data\n"));
+		return -EINVAL;
+	} else {
+		/* Handle the data */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) || defined(WL_COMPAT_WIRELESS)
+		WL_DBG(("%s: Data received from pid (%d) \n", __func__,
+			info->snd_pid));
+#else
+		WL_DBG(("%s: Data received from pid (%d) \n", __func__,
+			info->snd_portid));
+#endif /* (LINUX_VERSION < VERSION(3, 7, 0)) || WL_COMPAT_WIRELESS */
+	}
+
+	return 0;
+}
+#endif /* WL_GENL */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.h b/drivers/net/wireless/bcmdhd/wl_android.h
new file mode 100644
index 0000000000000000000000000000000000000000..87e672fc9c2bf0a89ea27f1dc2c70658b8a16b17
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.h
@@ -0,0 +1,98 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_android.h 487838 2014-06-27 05:51:44Z $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <wldev_common.h>
+
+/* If any feature uses the Generic Netlink Interface, put it here to enable WL_GENL
+ * automatically
+ */
+#if defined(WL_SDO) || defined(BT_WIFI_HANDOVER) || defined(WL_NAN)
+#define WL_GENL
+#endif
+
+
+#ifdef WL_GENL
+#include <net/genetlink.h>
+#endif
+
+/**
+ * Android platform dependent functions, feel free to add Android specific functions here
+ * (save the macros in dhd). Please do NOT declare functions that are NOT exposed to dhd
+ * or cfg; define them as static in wl_android.c instead.
+ */
+
+/**
+ * wl_android_init will be called from the module init function (dhd_module_init now); similarly,
+ * wl_android_exit will be called from the module exit function (dhd_module_cleanup now)
+ */
+int wl_android_init(void);
+int wl_android_exit(void);
+void wl_android_post_init(void);
+int wl_android_wifi_on(struct net_device *dev);
+int wl_android_wifi_off(struct net_device *dev);
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+
+#ifdef WL_GENL
+typedef struct bcm_event_hdr {
+	u16 event_type;
+	u16 len;
+} bcm_event_hdr_t;
+
+/* attributes (variables): the index in this enum is used as a reference for the type;
+ *             the userspace application has to indicate the corresponding type.
+ *             The policy is used for security considerations.
+ */
+enum {
+	BCM_GENL_ATTR_UNSPEC,
+	BCM_GENL_ATTR_STRING,
+	BCM_GENL_ATTR_MSG,
+	__BCM_GENL_ATTR_MAX
+};
+#define BCM_GENL_ATTR_MAX (__BCM_GENL_ATTR_MAX - 1)
+
+/* commands: enumeration of all commands (functions),
+ * used by the userspace application to identify the command to be executed
+ */
+enum {
+	BCM_GENL_CMD_UNSPEC,
+	BCM_GENL_CMD_MSG,
+	__BCM_GENL_CMD_MAX
+};
+#define BCM_GENL_CMD_MAX (__BCM_GENL_CMD_MAX - 1)
+
+/* Enum values used by the BCM supplicant to identify the events */
+enum {
+	BCM_E_UNSPEC,
+	BCM_E_SVC_FOUND,
+	BCM_E_DEV_FOUND,
+	BCM_E_DEV_LOST,
+	BCM_E_DEV_BT_WIFI_HO_REQ,
+	BCM_E_MAX
+};
+
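+/* Sends an event to userspace over generic netlink. Usage note (illustrative,
+ * based on the implementation in wl_android.c): event_type == 0 sends 'string'
+ * as a NUL-terminated BCM_GENL_ATTR_STRING attribute; any other event_type
+ * wraps hdr + string in a bcm_event_hdr_t and sends it as BCM_GENL_ATTR_MSG.
+ */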
+s32 wl_genl_send_msg(struct net_device *ndev, u32 event_type,
+	u8 *string, u16 len, u8 *hdr, u16 hdrlen);
+#endif /* WL_GENL */
+s32 wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size);
+
+/* hostap mac mode */
+#define MACLIST_MODE_DISABLED   0
+#define MACLIST_MODE_DENY       1
+#define MACLIST_MODE_ALLOW      2
+
+/* max number of assoc list */
+#define MAX_NUM_OF_ASSOCLIST    64
+
+/* max number of mac filter list entries;
+ * restricted to 10 as the maximum cmd string size is 255
+ */
+#define MAX_NUM_MAC_FILT        10
+
+int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist);
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
new file mode 100644
index 0000000000000000000000000000000000000000..13f6bbb77515039258dfb4617e31bc84a96bf7ff
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
@@ -0,0 +1,14454 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_cfg80211.c 491569 2014-07-16 21:28:40Z $
+ */
+/* */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+
+#include <proto/ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wl_android.h>
+#include <wl_cfgvendor.h>
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+
+#ifdef WL11U
+#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF)
+#error You should enable 'WL_ENABLE_P2P_IF' or 'WL_CFG80211_P2P_DEV_IF' \
+	according to the kernel version; this is supported only in Android-JB
+#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */
+#endif /* WL11U */
+
+#ifdef BCMWAPI_WPI
+/* these items should eventually go into wireless.h of the Linux system header file dir */
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1  0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4     0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif /* BCMWAPI_WPI */
+
+#ifdef BCMWAPI_WPI
+#define IW_WSEC_ENABLED(wsec)   ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define IW_WSEC_ENABLED(wsec)   ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+
+static struct device *cfg80211_parent_dev = NULL;
+/* g_bcm_cfg should be static. Do not change */
+static struct bcm_cfg80211 *g_bcm_cfg = NULL;
+u32 wl_dbg_level = WL_DBG_ERR;
+
+#define MAX_WAIT_TIME 1500
+#ifdef WLAIBSS_MCHAN
+#define IBSS_IF_NAME "ibss%d"
+#endif /* WLAIBSS_MCHAN */
+
+#ifdef VSDB
+/* sleep time to keep the STA connecting or connected during continuous af tx or while finding a peer */
+#define DEFAULT_SLEEP_TIME_VSDB		120
+#define OFF_CHAN_TIME_THRESHOLD_MS	200
+#define AF_RETRY_DELAY_TIME			40
+
+/* if sta is connected or connecting, sleep for a while before retrying af tx or finding a peer */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)	\
+	do {	\
+		if (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) ||	\
+			wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {	\
+			OSL_SLEEP(DEFAULT_SLEEP_TIME_VSDB);			\
+		}	\
+	} while (0)
+#else /* VSDB */
+/* if not VSDB, do nothing */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)
+#endif /* VSDB */
+
+#ifdef WL_CFG80211_SYNC_GON
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) \
+	(wl_get_drv_status_all(cfg, SENDING_ACT_FRM) || \
+		wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN))
+#else
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM)
+#endif /* WL_CFG80211_SYNC_GON */
+#define WL_IS_P2P_DEV_EVENT(e) ((e->emsg.ifidx == 0) && \
+		(e->emsg.bsscfgidx == P2PAPI_BSSCFG_DEVICE))
+
+#define DNGL_FUNC(func, parameters) func parameters
+#define COEX_DHCP
+
+#define WLAN_EID_SSID	0
+#define CH_MIN_5G_CHANNEL 34
+#define CH_MIN_2G_CHANNEL 1
+
+#ifdef WLAIBSS
+enum abiss_event_type {
+	AIBSS_EVENT_TXFAIL
+};
+#endif
+
+enum rmc_event_type {
+	RMC_EVENT_NONE,
+	RMC_EVENT_LEADER_CHECK_FAIL
+};
+
+/* This is to override regulatory domains defined in the cfg80211 module (reg.c).
+ * By default, the world regulatory domain defined in reg.c puts the flags NL80211_RRF_PASSIVE_SCAN
+ * and NL80211_RRF_NO_IBSS on 5GHz channels (36..48 and 149..165).
+ * With respect to these flags, wpa_supplicant doesn't start p2p operations on 5GHz channels.
+ * All the changes to the world regulatory domain are to be made here.
+ */
+static const struct ieee80211_regdomain brcm_regdom = {
+	.n_reg_rules = 4,
+	.alpha2 =  "99",
+	.reg_rules = {
+		/* IEEE 802.11b/g, channels 1..11 */
+		REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
+		/* If any */
+		/* IEEE 802.11 channel 14 - Only JP enables
+		 * this and for 802.11b only
+		 */
+		REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
+		/* IEEE 802.11a, channel 36..64 */
+		REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+		/* IEEE 802.11a, channel 100..165 */
+		REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+	(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
+/*
+ * Possible interface combinations supported by driver
+ *
+ * ADHOC Mode     - #ADHOC <= 1 on channels = 1
+ * SoftAP Mode    - #AP <= 1 on channels = 1
+ * STA + P2P Mode - #STA <= 2, #{P2P-GO, P2P-client} <= 1, #P2P-device <= 1
+ *                  on channels = 2
+ */
+static const struct ieee80211_iface_limit common_if_limits[] = {
+	{
+	.max = 1,
+	.types = BIT(NL80211_IFTYPE_AP),
+	},
+	{
+	/*
+	 * During P2P-GO removal, the P2P-GO interface is first changed to STA and
+	 * only later removed, so the maximum possible number of STA interfaces is
+	 * set according to the kernel version.
+	 *
+	 * less than linux-3.8 - max:3 (wlan0 + p2p0 + group removal of p2p-p2p0-x)
+	 * linux-3.8 and above - max:2 (wlan0 + group removal of p2p-wlan0-x)
+	 */
+#ifdef WL_ENABLE_P2P_IF
+	.max = 3,
+#else
+	.max = 2,
+#endif /* WL_ENABLE_P2P_IF */
+	.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+	.max = 2,
+	.types = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	{
+	.max = 1,
+	.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	{
+	.max = 1,
+	.types = BIT(NL80211_IFTYPE_ADHOC),
+	},
+};
+#ifdef BCM4330_CHIP
+#define NUM_DIFF_CHANNELS 1
+#else
+#define NUM_DIFF_CHANNELS 2
+#endif
+static const struct ieee80211_iface_combination
+common_iface_combinations[] = {
+	{
+	.num_different_channels = NUM_DIFF_CHANNELS,
+	.max_interfaces = 4,
+	.limits = common_if_limits,
+	.n_limits = ARRAY_SIZE(common_if_limits),
+	},
+};
+#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+
+/* Data Element Definitions */
+#define WPS_ID_CONFIG_METHODS     0x1008
+#define WPS_ID_REQ_TYPE           0x103A
+#define WPS_ID_DEVICE_NAME        0x1011
+#define WPS_ID_VERSION            0x104A
+#define WPS_ID_DEVICE_PWD_ID      0x1012
+#define WPS_ID_REQ_DEV_TYPE       0x106A
+#define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE      0x1054
+
+/* Device Password ID */
+#define DEV_PW_DEFAULT 0x0000
+#define DEV_PW_USER_SPECIFIED 0x0001,
+#define DEV_PW_MACHINE_SPECIFIED 0x0002
+#define DEV_PW_REKEY 0x0003
+#define DEV_PW_PUSHBUTTON 0x0004
+#define DEV_PW_REGISTRAR_SPECIFIED 0x0005
+
+/* Config Methods */
+#define WPS_CONFIG_USBA 0x0001
+#define WPS_CONFIG_ETHERNET 0x0002
+#define WPS_CONFIG_LABEL 0x0004
+#define WPS_CONFIG_DISPLAY 0x0008
+#define WPS_CONFIG_EXT_NFC_TOKEN 0x0010
+#define WPS_CONFIG_INT_NFC_TOKEN 0x0020
+#define WPS_CONFIG_NFC_INTERFACE 0x0040
+#define WPS_CONFIG_PUSHBUTTON 0x0080
+#define WPS_CONFIG_KEYPAD 0x0100
+#define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280
+#define WPS_CONFIG_PHY_PUSHBUTTON 0x0480
+#define WPS_CONFIG_VIRT_DISPLAY 0x2008
+#define WPS_CONFIG_PHY_DISPLAY 0x4008
+
+#define PM_BLOCK 1
+#define PM_ENABLE 0
+
+#ifdef BCMCCX
+#ifndef WLAN_AKM_SUITE_CCKM
+#define WLAN_AKM_SUITE_CCKM 0x00409600
+#endif
+#define DOT11_LEAP_AUTH	0x80 /* LEAP auth frame payload constants */
+#endif /* BCMCCX */
+
+#ifdef MFP
+#define WL_AKM_SUITE_MFP_1X  0x000FAC05
+#define WL_AKM_SUITE_MFP_PSK 0x000FAC06
+#endif /* MFP */
+
+#ifndef IBSS_COALESCE_ALLOWED
+#define IBSS_COALESCE_ALLOWED 0
+#endif
+
+#ifndef IBSS_INITIAL_SCAN_ALLOWED
+#define IBSS_INITIAL_SCAN_ALLOWED 0
+#endif
+
+#define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */
+/*
+ * cfg80211_ops api/callback list
+ */
+static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody);
+static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
+#else
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+#ifdef WLAIBSS_MCHAN
+static bcm_struct_cfgdev* bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name);
+static s32 bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+#endif /* WLAIBSS_MCHAN */
+static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_ibss_params *params);
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
+	struct net_device *dev);
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+	struct net_device *dev, u8 *mac,
+	struct station_info *sinfo);
+static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+	struct net_device *dev, bool enabled,
+	s32 timeout);
+static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+	enum nl80211_tx_power_setting type, s32 mbm);
+#else
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+	enum nl80211_tx_power_setting type, s32 dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+	struct wireless_dev *wdev, s32 *dbm);
+#else
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
+	struct net_device *dev,
+	u8 key_idx, bool unicast, bool multicast);
+static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	struct key_params *params);
+static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr);
+static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	void *cookie, void (*callback) (void *cookie,
+	struct key_params *params));
+static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev,	u8 key_idx);
+static s32 wl_cfg80211_resume(struct wiphy *wiphy);
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie);
+static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
+	struct net_device *ndev, u8* mac_addr);
+static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
+	struct net_device *dev, u8 *mac, struct station_parameters *params);
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
+#endif
+static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
+	struct net_device *dev);
+static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, bool aborted, bool fw_abort);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, enum nl80211_tdls_operation oper);
+#endif /* LINUX_VERSION > KERNEL_VERSION(3,2,0) || WL_COMPAT_WIRELESS */
+#ifdef WL_SCHED_SCAN
+static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev);
+#endif
+#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
+bcm_struct_cfgdev*
+wl_cfg80211_create_iface(struct wiphy *wiphy, enum nl80211_iftype
+		 iface_type, u8 *mac_addr, const char *name);
+s32
+wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+#endif /* defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) */
+
+/*
+ * event & event Q handlers for cfg80211 interfaces
+ */
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg);
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg);
+static s32 wl_event_handler(void *data);
+static void wl_init_eq(struct bcm_cfg80211 *cfg);
+static void wl_flush_eq(struct bcm_cfg80211 *cfg);
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg);
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags);
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg);
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg);
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg);
+static s32 wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 type,
+	const wl_event_msg_t *msg, void *data);
+static void wl_put_event(struct wl_event_q *e);
+static void wl_wakeup_event(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_connect_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed);
+static s32 wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#ifdef BT_WIFI_HANDOVER
+static s32 wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+#endif /* BT_WIFI_HANDOVER */
+#ifdef WL_SCHED_SCAN
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+#endif /* WL_SCHED_SCAN */
+#ifdef PNO_SUPPORT
+static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* PNO_SUPPORT */
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+	enum wl_status state, bool set);
+#ifdef WL_SDO
+static s32 wl_svc_resp_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_device_discovery(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif
+
+#ifdef WLTDLS
+static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* WLTDLS */
+/*
+ * register/deregister parent device
+ */
+static void wl_cfg80211_clear_parent_dev(void);
+
+/*
+ * ioctl utilities
+ */
+
+/*
+ * cfg80211 set_wiphy_params utilities
+ */
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
+
+/*
+ * cfg profile utilities
+ */
+static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, s32 item);
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+
+/*
+ * cfg80211 connect utilities
+ */
+static s32 wl_set_wpa_version(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_auth_type(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_set_cipher(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_key_mgmt(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_set_sharedkey(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+#ifdef BCMWAPI_WPI
+static s32 wl_set_set_wapi_ie(struct net_device *dev,
+        struct cfg80211_connect_params *sme);
+#endif
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static void wl_ch_to_chanspec(int ch,
+	struct wl_join_params *join_params, size_t *join_params_size);
+
+/*
+ * information element utilities
+ */
+static void wl_rst_ie(struct bcm_cfg80211 *cfg);
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v);
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam);
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size);
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size);
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg);
+#ifdef MFP
+static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8* capa);
+#endif
+
+#ifdef WL11U
+bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(u8 *parse, u32 len);
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+            uint8 ie_id, uint8 *data, uint8 data_len);
+#endif /* WL11U */
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, void *data);
+static void wl_free_wdev(struct bcm_cfg80211 *cfg);
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+static int
+wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam);
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam);
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+s32 wl_cfg80211_channel_to_freq(u32 channel);
+
+#if defined(DHCP_SCAN_SUPPRESS)
+static void wl_cfg80211_work_handler(struct work_struct *work);
+static void wl_cfg80211_scan_supp_timerfunc(ulong data);
+#endif /* DHCP_SCAN_SUPPRESS */
+
+static void wl_cfg80211_work_handler(struct work_struct *work);
+static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, const u8 *mac_addr,
+	struct key_params *params);
+/*
+ * key endianness swap utilities
+ */
+static void swap_key_from_BE(struct wl_wsec_key *key);
+static void swap_key_to_BE(struct wl_wsec_key *key);
+
+/*
+ * bcm_cfg80211 memory init/deinit utilities
+ */
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg);
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg);
+
+static void wl_delay(u32 ms);
+
+/*
+ * ibss mode utilities
+ */
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg);
+
+/*
+ * link up/down , default configuration utilities
+ */
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg);
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg);
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e,
+	struct net_device *ndev);
+static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+static void wl_link_up(struct bcm_cfg80211 *cfg);
+static void wl_link_down(struct bcm_cfg80211 *cfg);
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype);
+static void wl_init_conf(struct wl_conf *conf);
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+	struct net_device* ndev);
+
+int wl_cfg80211_get_ioctl_version(void);
+
+/*
+ * find most significant bit set
+ */
+static __used u32 wl_find_msb(u16 bit16);
+
+/*
+ * rfkill support
+ */
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup);
+static int wl_rfkill_set(void *data, bool blocked);
+#ifdef DEBUGFS_CFG80211
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg);
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg);
+#endif
+
+static wl_scan_params_t *wl_cfg80211_scan_alloc_params(int channel,
+	int nprobes, int *out_params_size);
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role);
+
+#ifdef WL_CFG80211_ACL
+/* ACL */
+static int wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+	const struct cfg80211_acl_data *acl);
+#endif /* WL_CFG80211_ACL */
+
+/*
+ * Some external functions, TODO: move them to dhd_linux.h
+ */
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+
+
+static int wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const struct ether_addr *bssid);
+
+#ifdef WL_SDO
+s32 wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg);
+s32 wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg);
+#define MAX_SDO_PROTO 5
+wl_sdo_proto_t wl_sdo_protos [] = {
+	{ "all", SVC_RPOTYPE_ALL },
+	{ "upnp", SVC_RPOTYPE_UPNP },
+	{ "bonjour", SVC_RPOTYPE_BONJOUR },
+	{ "wsd", SVC_RPOTYPE_WSD },
+	{ "vendor", SVC_RPOTYPE_VENDOR },
+};
+#endif
+static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ,
+	WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ };
+
+#define RETURN_EIO_IF_NOT_UP(wlpriv)						\
+do {									\
+	struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv);       	\
+	if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) {	\
+		WL_INFORM(("device is not ready\n"));			\
+		return -EIO;						\
+	}								\
+} while (0)
+
+#ifdef RSSI_OFFSET
+static s32 wl_rssi_offset(s32 rssi)
+{
+	rssi += RSSI_OFFSET;
+	if (rssi > 0)
+		rssi = 0;
+	return rssi;
+}
+#else
+#define wl_rssi_offset(x)	x
+#endif
+
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || 			\
+				 (akm) == RSN_AKM_UNSPECIFIED || 	\
+				 (akm) == RSN_AKM_PSK)
+
+
+extern int dhd_wait_pend8021x(struct net_device *dev);
+#ifdef PROP_TXSTATUS_VSDB
+extern int disable_proptx;
+#endif /* PROP_TXSTATUS_VSDB */
+
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG_ESTR_MAX	50
+static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = {
+	"SET_SSID", "JOIN", "START", "AUTH", "AUTH_IND",
+	"DEAUTH", "DEAUTH_IND", "ASSOC", "ASSOC_IND", "REASSOC",
+	"REASSOC_IND", "DISASSOC", "DISASSOC_IND", "QUIET_START", "QUIET_END",
+	"BEACON_RX", "LINK", "MIC_ERROR", "NDIS_LINK", "ROAM",
+	"TXFAIL", "PMKID_CACHE", "RETROGRADE_TSF", "PRUNE", "AUTOAUTH",
+	"EAPOL_MSG", "SCAN_COMPLETE", "ADDTS_IND", "DELTS_IND", "BCNSENT_IND",
+	"BCNRX_MSG", "BCNLOST_MSG", "ROAM_PREP", "PFN_NET_FOUND",
+	"PFN_NET_LOST",
+	"RESET_COMPLETE", "JOIN_START", "ROAM_START", "ASSOC_START",
+	"IBSS_ASSOC",
+	"RADIO", "PSM_WATCHDOG", "WLC_E_CCX_ASSOC_START", "WLC_E_CCX_ASSOC_ABORT",
+	"PROBREQ_MSG",
+	"SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED",
+	"EXCEEDED_MEDIUM_TIME", "ICV_ERROR",
+	"UNICAST_DECODE_ERROR", "MULTICAST_DECODE_ERROR", "TRACE",
+	"WLC_E_BTA_HCI_EVENT", "IF", "WLC_E_P2P_DISC_LISTEN_COMPLETE",
+	"RSSI", "PFN_SCAN_COMPLETE", "WLC_E_EXTLOG_MSG",
+	"ACTION_FRAME", "ACTION_FRAME_COMPLETE", "WLC_E_PRE_ASSOC_IND",
+	"WLC_E_PRE_REASSOC_IND", "WLC_E_CHANNEL_ADOPTED", "WLC_E_AP_STARTED",
+	"WLC_E_DFS_AP_STOP", "WLC_E_DFS_AP_RESUME", "WLC_E_WAI_STA_EVENT",
+	"WLC_E_WAI_MSG", "WLC_E_ESCAN_RESULT", "WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE",
+	"WLC_E_PROBRESP_MSG", "WLC_E_P2P_PROBREQ_MSG", "WLC_E_DCS_REQUEST", "WLC_E_FIFO_CREDIT_MAP",
+	"WLC_E_ACTION_FRAME_RX", "WLC_E_WAKE_EVENT", "WLC_E_RM_COMPLETE"
+};
+#endif				/* WL_DBG_LEVEL */
+
+#define CHAN2G(_channel, _freq, _flags) {			\
+	.band			= IEEE80211_BAND_2GHZ,		\
+	.center_freq		= (_freq),			\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+#define CHAN5G(_channel, _flags) {				\
+	.band			= IEEE80211_BAND_5GHZ,		\
+	.center_freq		= 5000 + (5 * (_channel)),	\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
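+/* Note: DOT11_RATE_* values are expressed in 500 kbps units, while cfg80211
+ * bitrates are in 100 kbps units, hence the *10/2 scaling below.
+ */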
+#define RATE_TO_BASE100KBPS(rate)   (((rate) * 10) / 2)
+#define RATETAB_ENT(_rateid, _flags) \
+	{								\
+		.bitrate	= RATE_TO_BASE100KBPS(_rateid),     \
+		.hw_value	= (_rateid),			    \
+		.flags	  = (_flags),			     \
+	}
+
+static struct ieee80211_rate __wl_rates[] = {
+	RATETAB_ENT(DOT11_RATE_1M, 0),
+	RATETAB_ENT(DOT11_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_6M, 0),
+	RATETAB_ENT(DOT11_RATE_9M, 0),
+	RATETAB_ENT(DOT11_RATE_12M, 0),
+	RATETAB_ENT(DOT11_RATE_18M, 0),
+	RATETAB_ENT(DOT11_RATE_24M, 0),
+	RATETAB_ENT(DOT11_RATE_36M, 0),
+	RATETAB_ENT(DOT11_RATE_48M, 0),
+	RATETAB_ENT(DOT11_RATE_54M, 0)
+};
+
+#define wl_a_rates		(__wl_rates + 4)
+#define wl_a_rates_size	8
+#define wl_g_rates		(__wl_rates + 0)
+#define wl_g_rates_size	12
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+	CHAN2G(1, 2412, 0),
+	CHAN2G(2, 2417, 0),
+	CHAN2G(3, 2422, 0),
+	CHAN2G(4, 2427, 0),
+	CHAN2G(5, 2432, 0),
+	CHAN2G(6, 2437, 0),
+	CHAN2G(7, 2442, 0),
+	CHAN2G(8, 2447, 0),
+	CHAN2G(9, 2452, 0),
+	CHAN2G(10, 2457, 0),
+	CHAN2G(11, 2462, 0),
+	CHAN2G(12, 2467, 0),
+	CHAN2G(13, 2472, 0),
+	CHAN2G(14, 2484, 0)
+};
+
+static struct ieee80211_channel __wl_5ghz_a_channels[] = {
+	CHAN5G(34, 0), CHAN5G(36, 0),
+	CHAN5G(38, 0), CHAN5G(40, 0),
+	CHAN5G(42, 0), CHAN5G(44, 0),
+	CHAN5G(46, 0), CHAN5G(48, 0),
+	CHAN5G(52, 0), CHAN5G(56, 0),
+	CHAN5G(60, 0), CHAN5G(64, 0),
+	CHAN5G(100, 0), CHAN5G(104, 0),
+	CHAN5G(108, 0), CHAN5G(112, 0),
+	CHAN5G(116, 0), CHAN5G(120, 0),
+	CHAN5G(124, 0), CHAN5G(128, 0),
+	CHAN5G(132, 0), CHAN5G(136, 0),
+	CHAN5G(140, 0), CHAN5G(144, 0),
+	CHAN5G(149, 0),	CHAN5G(153, 0),
+	CHAN5G(157, 0),	CHAN5G(161, 0),
+	CHAN5G(165, 0)
+};
+
+static struct ieee80211_supported_band __wl_band_2ghz = {
+	.band = IEEE80211_BAND_2GHZ,
+	.channels = __wl_2ghz_channels,
+	.n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+	.bitrates = wl_g_rates,
+	.n_bitrates = wl_g_rates_size
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_a = {
+	.band = IEEE80211_BAND_5GHZ,
+	.channels = __wl_5ghz_a_channels,
+	.n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
+	.bitrates = wl_a_rates,
+	.n_bitrates = wl_a_rates_size
+};
+
+static const u32 __wl_cipher_suites[] = {
+	WLAN_CIPHER_SUITE_WEP40,
+	WLAN_CIPHER_SUITE_WEP104,
+	WLAN_CIPHER_SUITE_TKIP,
+	WLAN_CIPHER_SUITE_CCMP,
+	WLAN_CIPHER_SUITE_AES_CMAC,
+#ifdef BCMWAPI_WPI
+	WLAN_CIPHER_SUITE_SMS4,
+#endif
+#if defined(WLFBT) && defined(WLAN_CIPHER_SUITE_PMK)
+	WLAN_CIPHER_SUITE_PMK,
+#endif
+};
+
+#ifdef WL_SUPPORT_ACS
+/*
+ * The firmware code required for this feature to work is currently under
+ * BCMINTERNAL flag. In future if this is to enabled we need to bring the
+ * required firmware code out of the BCMINTERNAL flag.
+ */
+struct wl_dump_survey {
+	u32 obss;
+	u32 ibss;
+	u32 no_ctg;
+	u32 no_pckt;
+	u32 tx;
+	u32 idle;
+};
+#endif /* WL_SUPPORT_ACS */
+
+
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+static int maxrxpktglom = 0;
+#endif
+
+/* IOCtl version read from targeted driver */
+static int ioctl_version;
+#ifdef DEBUGFS_CFG80211
+#define S_SUBLOGLEVEL 20
+static const struct {
+	u32 log_level;
+	char *sublogname;
+} sublogname_map[] = {
+	{WL_DBG_ERR, "ERR"},
+	{WL_DBG_INFO, "INFO"},
+	{WL_DBG_DBG, "DBG"},
+	{WL_DBG_SCAN, "SCAN"},
+	{WL_DBG_TRACE, "TRACE"},
+	{WL_DBG_P2P_ACTION, "P2PACTION"}
+};
+#endif
+
+
+static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg, bool add_remove,
+	enum wl_handler_del_type type)
+{
+	if (cfg == NULL)
+		return;
+
+	if (cfg->pm_enable_work_on) {
+		if (add_remove) {
+			schedule_delayed_work(&cfg->pm_enable_work,
+				msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT));
+		} else {
+			cancel_delayed_work_sync(&cfg->pm_enable_work);
+			switch (type) {
+				case WL_HANDLER_MAINTAIN:
+					schedule_delayed_work(&cfg->pm_enable_work,
+						msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT));
+					break;
+				case WL_HANDLER_PEND:
+					schedule_delayed_work(&cfg->pm_enable_work,
+						msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT*2));
+					break;
+				case WL_HANDLER_DEL:
+				default:
+					cfg->pm_enable_work_on = false;
+					break;
+			}
+		}
+	}
+}
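+
+/*
+ * Illustrative usage of the helper above: when cfg->pm_enable_work_on is set,
+ * add_remove == TRUE simply (re)schedules pm_enable_work after
+ * WL_PM_ENABLE_TIMEOUT ms, while FALSE cancels it and then either reschedules
+ * it (WL_HANDLER_MAINTAIN), reschedules it with twice the timeout
+ * (WL_HANDLER_PEND) or marks it off (WL_HANDLER_DEL), e.g.
+ *   wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+ */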
+
+/* Return a new chanspec given a legacy chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_from_legacy(chanspec_t legacy_chspec)
+{
+	chanspec_t chspec;
+
+	/* get the channel number */
+	chspec = LCHSPEC_CHANNEL(legacy_chspec);
+
+	/* convert the band */
+	if (LCHSPEC_IS2G(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BAND_2G;
+	} else {
+		chspec |= WL_CHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (LCHSPEC_IS20(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BW_20;
+	} else {
+		chspec |= WL_CHANSPEC_BW_40;
+		if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) {
+			chspec |= WL_CHANSPEC_CTL_SB_L;
+		} else {
+			chspec |= WL_CHANSPEC_CTL_SB_U;
+		}
+	}
+
+	if (wf_chspec_malformed(chspec)) {
+		WL_ERR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	return chspec;
+}
+
+/* Return a legacy chanspec given a new chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_to_legacy(chanspec_t chspec)
+{
+	chanspec_t lchspec;
+
+	if (wf_chspec_malformed(chspec)) {
+		WL_ERR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	/* get the channel number */
+	lchspec = CHSPEC_CHANNEL(chspec);
+
+	/* convert the band */
+	if (CHSPEC_IS2G(chspec)) {
+		lchspec |= WL_LCHANSPEC_BAND_2G;
+	} else {
+		lchspec |= WL_LCHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (CHSPEC_IS20(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_20;
+		lchspec |= WL_LCHANSPEC_CTL_SB_NONE;
+	} else if (CHSPEC_IS40(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_40;
+		if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) {
+			lchspec |= WL_LCHANSPEC_CTL_SB_LOWER;
+		} else {
+			lchspec |= WL_LCHANSPEC_CTL_SB_UPPER;
+		}
+	} else {
+		/* cannot express the bandwidth */
+		char chanbuf[CHANSPEC_STR_LEN];
+		WL_ERR((
+		        "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+		        "to pre-11ac format\n",
+		        wf_chspec_ntoa(chspec, chanbuf), chspec));
+		return INVCHANSPEC;
+	}
+
+	return lchspec;
+}
+
+/* given a chanspec value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_chspec_host_to_driver(chanspec_t chanspec)
+{
+	if (ioctl_version == 1) {
+		chanspec = wl_chspec_to_legacy(chanspec);
+		if (chanspec == INVCHANSPEC) {
+			return chanspec;
+		}
+	}
+	chanspec = htodchanspec(chanspec);
+
+	return chanspec;
+}
+
+/* given a channel value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_ch_host_to_driver(u16 channel)
+{
+
+	chanspec_t chanspec;
+
+	chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+
+	if (channel <= CH_MAX_2G_CHANNEL)
+		chanspec |= WL_CHANSPEC_BAND_2G;
+	else
+		chanspec |= WL_CHANSPEC_BAND_5G;
+
+	chanspec |= WL_CHANSPEC_BW_20;
+	chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+	return wl_chspec_host_to_driver(chanspec);
+}
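+
+/*
+ * Illustrative example: wl_ch_host_to_driver(6) builds a 20 MHz chanspec for
+ * 2.4 GHz channel 6 (channels above CH_MAX_2G_CHANNEL get the 5 GHz band bit
+ * instead) and then passes it through wl_chspec_host_to_driver(), which drops
+ * it to the legacy layout when ioctl_version == 1 and applies the driver's
+ * byte order.
+ */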
+
+/* given a chanspec value from the driver, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec)
+{
+	chanspec = dtohchanspec(chanspec);
+	if (ioctl_version == 1) {
+		chanspec = wl_chspec_from_legacy(chanspec);
+	}
+
+	return chanspec;
+}
+
+/*
+ * convert ASCII string to MAC address (colon-delimited format)
+ * eg: 00:11:22:33:44:55
+ */
+int
+wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n)
+{
+	char *c = NULL;
+	int count = 0;
+
+	memset(n, 0, ETHER_ADDR_LEN);
+	for (;;) {
+		n->octet[count++] = (uint8)simple_strtoul(a, &c, 16);
+		if (!*c++ || count == ETHER_ADDR_LEN)
+			break;
+		a = c;
+	}
+	return (count == ETHER_ADDR_LEN);
+}
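+
+/*
+ * Illustrative usage: for a well-formed address string,
+ *   wl_cfg80211_ether_atoe("00:11:22:33:44:55", &ea);
+ * fills ea.octet with {0x00, 0x11, 0x22, 0x33, 0x44, 0x55} and returns 1;
+ * a shorter string yields fewer parsed octets and a return value of 0.
+ */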
+
+/* convert hex string buffer to binary */
+int
+wl_cfg80211_hex_str_to_bin(unsigned char *data, int dlen, char *str)
+{
+	int count, slen;
+	int hvalue;
+	char tmp[3] = {0};
+	char *ptr = str, *endp = NULL;
+
+	if (!data || !str || !dlen) {
+		WL_DBG((" invalid argument: NULL buffer or zero length \n"));
+		return 0;
+	}
+
+	slen = strlen(str);
+	if (dlen * 2 < slen) {
+		WL_DBG((" destination buffer too short \n"));
+		return 0;
+	}
+
+	if (slen % 2) {
+		WL_DBG((" source buffer is of odd length \n"));
+		return 0;
+	}
+
+	for (count = 0; count < slen; count += 2) {
+		memcpy(tmp, ptr, 2);
+		hvalue = simple_strtol(tmp, &endp, 16);
+		if (*endp != '\0') {
+			WL_DBG((" non hexadecimal character encountered \n"));
+			return 0;
+		}
+		*data++ = (unsigned char)hvalue;
+		ptr += 2;
+	}
+
+	return (slen / 2);
+}
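+
+/*
+ * Illustrative usage: with a destination buffer of at least two bytes,
+ *   wl_cfg80211_hex_str_to_bin(buf, 2, "0a1B");
+ * stores {0x0a, 0x1b} in buf and returns 2; an odd-length string is rejected
+ * up front and a non-hex character aborts the conversion, both returning 0.
+ */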
+
+/* There isn't a lot of sense in it, but you can transmit anything you like */
+static const struct ieee80211_txrx_stypes
+wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+	[NL80211_IFTYPE_ADHOC] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_STATION] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_AP] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_AP_VLAN] = {
+		/* copy AP */
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_P2P_CLIENT] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_P2P_GO] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	[NL80211_IFTYPE_P2P_DEVICE] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+#endif /* WL_CFG80211_P2P_DEV_IF */
+};
+
+static void swap_key_from_BE(struct wl_wsec_key *key)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(struct wl_wsec_key *key)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+/* Dump the contents of the encoded wps ie buffer and get pbc value */
+static void
+wl_validate_wps_ie(char *wps_ie, s32 wps_ie_len, bool *pbc)
+{
+	#define WPS_IE_FIXED_LEN 6
+	u16 len;
+	u8 *subel = NULL;
+	u16 subelt_id;
+	u16 subelt_len;
+	u16 val;
+	u8 *valptr = (uint8*) &val;
+	if (wps_ie == NULL || wps_ie_len < WPS_IE_FIXED_LEN) {
+		WL_ERR(("invalid argument : NULL or too short\n"));
+		return;
+	}
+	len = (u16)wps_ie[TLV_LEN_OFF];
+
+	if (len > wps_ie_len) {
+		WL_ERR(("invalid length len %d, wps ie len %d\n", len, wps_ie_len));
+		return;
+	}
+	WL_DBG(("wps_ie len=%d\n", len));
+	len -= 4;	/* for the WPS IE's OUI, oui_type fields */
+	subel = wps_ie + WPS_IE_FIXED_LEN;
+	while (len >= 4) {		/* must have attr id, attr len fields */
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_id = HTON16(val);
+
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_len = HTON16(val);
+
+		len -= 4;			/* for the attr id, attr len fields */
+		len -= subelt_len;	/* for the remaining fields in this attribute */
+		WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n",
+			subel, subelt_id, subelt_len));
+
+		if (subelt_id == WPS_ID_VERSION) {
+			WL_DBG(("  attr WPS_ID_VERSION: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_REQ_TYPE) {
+			WL_DBG(("  attr WPS_ID_REQ_TYPE: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_CONFIG_METHODS) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_DEVICE_NAME) {
+			char devname[100];
+			u16 name_len = subelt_len < sizeof(devname) ?
+				subelt_len : (u16)(sizeof(devname) - 1);
+			/* bounded copy so an oversized attribute cannot overflow devname */
+			memcpy(devname, subel, name_len);
+			devname[name_len] = '\0';
+			WL_DBG(("  attr WPS_ID_DEVICE_NAME: %s (len %u)\n",
+				devname, subelt_len));
+		} else if (subelt_id == WPS_ID_DEVICE_PWD_ID) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val)));
+			*pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? true : false;
+		} else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_REQ_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS"
+				": cat=%u\n", HTON16(val)));
+		} else {
+			WL_DBG(("  unknown attr 0x%x\n", subelt_id));
+		}
+
+		subel += subelt_len;
+	}
+}
+
+s32 wl_set_tx_power(struct net_device *dev,
+	enum nl80211_tx_power_setting type, s32 dbm)
+{
+	s32 err = 0;
+	s32 disable = 0;
+	s32 txpwrqdbm;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	/* Make sure radio is off or on as far as software is concerned */
+	disable = WL_RADIO_SW_DISABLE << 16;
+	disable = htod32(disable);
+	err = wldev_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+		return err;
+	}
+
+	if (dbm > 0xffff)
+		dbm = 0xffff;
+	txpwrqdbm = dbm * 4;
+#ifdef SUPPORT_WL_TXPOWER
+	if (type == NL80211_TX_POWER_AUTOMATIC)
+		txpwrqdbm = 127;
+	else
+		txpwrqdbm |= WL_TXPWR_OVERRIDE;
+#endif /* SUPPORT_WL_TXPOWER */
+	err = wldev_iovar_setbuf_bsscfg(dev, "qtxpower", (void *)&txpwrqdbm,
+		sizeof(txpwrqdbm), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+		&cfg->ioctl_buf_sync);
+	if (unlikely(err))
+		WL_ERR(("qtxpower error (%d)\n", err));
+	else
+		WL_ERR(("dBm=%d, txpwrqdbm=0x%x\n", dbm, txpwrqdbm));
+
+	return err;
+}
+
+s32 wl_get_tx_power(struct net_device *dev, s32 *dbm)
+{
+	s32 err = 0;
+	s32 txpwrdbm;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	err = wldev_iovar_getbuf_bsscfg(dev, "qtxpower",
+		NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	memcpy(&txpwrdbm, cfg->ioctl_buf, sizeof(txpwrdbm));
+	txpwrdbm = dtoh32(txpwrdbm);
+	*dbm = (txpwrdbm & ~WL_TXPWR_OVERRIDE) / 4;
+
+	WL_INFORM(("dBm=%d, txpwrdbm=0x%x\n", *dbm, txpwrdbm));
+
+	return err;
+}
+
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
+{
+	chanspec_t chspec;
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	struct ether_addr bssid;
+	struct wl_bss_info *bss = NULL;
+
+	if ((err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), false))) {
+		/* The STA interface is not associated, so start the new interface on a
+		 * temporary channel. The proper channel will later be applied by the
+		 * upper framework via set_channel (cfg80211 API).
+		 */
+		WL_DBG(("Not associated. Return a temp channel. \n"));
+		return wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+	}
+
+
+	*(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+	if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf,
+		WL_EXTRA_BUF_MAX, false))) {
+			WL_ERR(("Failed to get associated bss info, use temp channel \n"));
+			chspec = wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+	}
+	else {
+			bss = (struct wl_bss_info *) (cfg->extra_buf + 4);
+			chspec =  bss->chanspec;
+
+			WL_DBG(("Valid BSS Found. chanspec:%d \n", chspec));
+	}
+	return chspec;
+}
+
+static bcm_struct_cfgdev *
+wl_cfg80211_add_monitor_if(char *name)
+{
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+	WL_INFORM(("wl_cfg80211_add_monitor_if: monitor interface is no longer supported\n"));
+	return ERR_PTR(-EOPNOTSUPP);
+#else
+	struct net_device* ndev = NULL;
+
+	dhd_add_monitor(name, &ndev);
+	WL_INFORM(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
+	return ndev_to_cfgdev(ndev);
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+}
+
+static bcm_struct_cfgdev *
+wl_cfg80211_add_virtual_iface(struct wiphy *wiphy,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	const char *name,
+#else
+	char *name,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	enum nl80211_iftype type, u32 *flags,
+	struct vif_params *params)
+{
+	s32 err;
+	s32 timeout = -1;
+	s32 wlif_type = -1;
+	s32 mode = 0;
+	s32 val = 0;
+	s32 dhd_mode = 0;
+	chanspec_t chspec;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *primary_ndev;
+	struct net_device *new_ndev;
+	struct ether_addr primary_mac;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+	s32 up = 1;
+	dhd_pub_t *dhd;
+	bool enabled;
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+
+	if (!cfg)
+		return ERR_PTR(-EINVAL);
+
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+	dhd = (dhd_pub_t *)(cfg->pub);
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+
+
+	/* Use primary I/F for sending cmds down to firmware */
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) {
+		WL_ERR(("device is not ready\n"));
+		return ERR_PTR(-ENODEV);
+	}
+
+	WL_DBG(("if name: %s, type: %d\n", name, type));
+	switch (type) {
+	case NL80211_IFTYPE_ADHOC:
+#ifdef WLAIBSS_MCHAN
+		return bcm_cfg80211_add_ibss_if(wiphy, (char *)name);
+#endif /* WLAIBSS_MCHAN */
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MESH_POINT:
+		WL_ERR(("Unsupported interface type\n"));
+		mode = WL_MODE_IBSS;
+		return NULL;
+	case NL80211_IFTYPE_MONITOR:
+		return wl_cfg80211_add_monitor_if((char *)name);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	case NL80211_IFTYPE_P2P_DEVICE:
+		return wl_cfgp2p_add_p2p_disc_if(cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	case NL80211_IFTYPE_STATION:
+#ifdef DUAL_STA
+#ifdef WLAIBSS_MCHAN
+		if (cfg->ibss_cfgdev) {
+			WL_ERR(("AIBSS is already operational. "
+					" AIBSS & DUALSTA can't be used together \n"));
+			return NULL;
+		}
+#endif /* WLAIBSS_MCHAN */
+		if (!name) {
+			WL_ERR(("Interface name not provided \n"));
+			return NULL;
+		}
+		return wl_cfg80211_create_iface(cfg->wdev->wiphy,
+			NL80211_IFTYPE_STATION, NULL, name);
+#endif /* DUAL_STA */
+	case NL80211_IFTYPE_P2P_CLIENT:
+		wlif_type = WL_P2P_IF_CLIENT;
+		mode = WL_MODE_BSS;
+		break;
+	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_AP:
+		wlif_type = WL_P2P_IF_GO;
+		mode = WL_MODE_AP;
+		break;
+	default:
+		WL_ERR(("Unsupported interface type\n"));
+		return NULL;
+		break;
+	}
+
+	if (!name) {
+		WL_ERR(("name is NULL\n"));
+		return NULL;
+	}
+	if (cfg->p2p_supported && (wlif_type != -1)) {
+		ASSERT(cfg->p2p); /* ensure expectation of p2p initialization */
+
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+		if (!dhd)
+			return ERR_PTR(-ENODEV);
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+		if (!cfg->p2p)
+			return ERR_PTR(-ENODEV);
+
+		if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
+			p2p_on(cfg) = true;
+			wl_cfgp2p_set_firm_p2p(cfg);
+			wl_cfgp2p_init_discovery(cfg);
+			get_primary_mac(cfg, &primary_mac);
+			wl_cfgp2p_generate_bss_mac(&primary_mac,
+				&cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+		}
+
+		memset(cfg->p2p->vir_ifname, 0, IFNAMSIZ);
+		strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1);
+
+		wl_cfg80211_scan_abort(cfg);
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+		if (!cfg->wlfc_on && !disable_proptx) {
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (!enabled && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+				dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+				dhd_wlfc_init(dhd);
+				err = wldev_ioctl(primary_ndev, WLC_UP, &up, sizeof(s32), true);
+				if (err < 0)
+					WL_ERR(("WLC_UP return err:%d\n", err));
+			}
+			cfg->wlfc_on = true;
+		}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+
+		/* In the concurrency case the STA may already be associated on a particular
+		 * channel, so retrieve the current channel of the primary interface and then
+		 * start the virtual interface on it.
+		 */
+		chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+		/* For P2P mode, use P2P-specific driver features to create the
+		 * bss: "cfg p2p_ifadd"
+		 */
+		wl_set_p2p_status(cfg, IF_ADDING);
+		memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+		if (wlif_type == WL_P2P_IF_GO)
+			wldev_iovar_setint(primary_ndev, "mpc", 0);
+		err = wl_cfgp2p_ifadd(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
+		if (unlikely(err)) {
+			wl_clr_p2p_status(cfg, IF_ADDING);
+			WL_ERR((" virtual iface add failed (%d) \n", err));
+			return ERR_PTR(-ENOMEM);
+		}
+
+		timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+			(wl_get_p2p_status(cfg, IF_ADDING) == false),
+			msecs_to_jiffies(MAX_WAIT_TIME));
+
+		if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
+			struct wireless_dev *vwdev;
+			int pm_mode = PM_ENABLE;
+			wl_if_event_info *event = &cfg->if_event_info;
+
+			/* The IF_ADD event has come back, so we can proceed to register
+			 * the new interface now; use the interface name provided by the
+			 * caller (thus ignore the one from wlc).
+			 */
+			strncpy(cfg->if_event_info.name, name, IFNAMSIZ - 1);
+			new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname,
+				event->mac, event->bssidx);
+			if (new_ndev == NULL)
+				goto fail;
+
+			wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = new_ndev;
+			wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = event->bssidx;
+			vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+			if (unlikely(!vwdev)) {
+				WL_ERR(("Could not allocate wireless device\n"));
+				goto fail;
+			}
+			vwdev->wiphy = cfg->wdev->wiphy;
+			WL_INFORM(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname));
+			vwdev->iftype = type;
+			vwdev->netdev = new_ndev;
+			new_ndev->ieee80211_ptr = vwdev;
+			SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy));
+			wl_set_drv_status(cfg, READY, new_ndev);
+			cfg->p2p->vif_created = true;
+			wl_set_mode_by_netdev(cfg, new_ndev, mode);
+
+			if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) {
+				wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+				goto fail;
+			}
+			wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode);
+			val = 1;
+			/* Disable firmware roaming for P2P interface  */
+			wldev_iovar_setint(new_ndev, "roam_off", val);
+
+			if (mode != WL_MODE_AP)
+				wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1);
+
+			WL_ERR((" virtual interface(%s) is "
+				"created, net attach done\n", cfg->p2p->vir_ifname));
+			if (mode == WL_MODE_AP)
+				wl_set_drv_status(cfg, CONNECTED, new_ndev);
+			if (type == NL80211_IFTYPE_P2P_CLIENT)
+				dhd_mode = DHD_FLAG_P2P_GC_MODE;
+			else if (type == NL80211_IFTYPE_P2P_GO)
+				dhd_mode = DHD_FLAG_P2P_GO_MODE;
+			DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode));
+			/* reinitialize completion to clear previous count */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+			INIT_COMPLETION(cfg->iface_disable);
+#else
+			init_completion(&cfg->iface_disable);
+#endif
+			return ndev_to_cfgdev(new_ndev);
+		} else {
+			wl_clr_p2p_status(cfg, IF_ADDING);
+			WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname));
+			memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+			cfg->p2p->vif_created = false;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+				dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+				dhd_wlfc_deinit(dhd);
+				cfg->wlfc_on = false;
+			}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+		}
+	}
+
+fail:
+	if (wlif_type == WL_P2P_IF_GO)
+		wldev_iovar_setint(primary_ndev, "mpc", 1);
+	return ERR_PTR(-ENODEV);
+}
+
+static s32
+wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+	struct net_device *dev = NULL;
+	struct ether_addr p2p_mac;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 timeout = -1;
+	s32 ret = 0;
+	s32 index = -1;
+#ifdef CUSTOM_SET_CPUCORE
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
+	WL_DBG(("Enter\n"));
+
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->chan_isvht80 &= ~DHD_FLAG_P2P_MODE;
+	if (!(dhd->chan_isvht80))
+		dhd_set_cpucore(dhd, FALSE);
+#endif /* CUSTOM_SET_CPUCORE */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+		return wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg);
+	}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+#ifdef WLAIBSS_MCHAN
+	if (cfgdev == cfg->ibss_cfgdev)
+		return bcm_cfg80211_del_ibss_if(wiphy, cfgdev);
+#endif /* WLAIBSS_MCHAN */
+
+#ifdef DUAL_STA
+	if (cfgdev == cfg->bss_cfgdev)
+		return wl_cfg80211_del_iface(wiphy, cfgdev);
+#endif /* DUAL_STA */
+
+	if (wl_cfgp2p_find_idx(cfg, dev, &index) != BCME_OK) {
+		WL_ERR(("Find p2p index from ndev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (cfg->p2p_supported) {
+		memcpy(p2p_mac.octet, cfg->p2p->int_addr.octet, ETHER_ADDR_LEN);
+
+		/* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases
+		 */
+		WL_DBG(("P2P: GO_NEG_PHASE status cleared "));
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+		if (cfg->p2p->vif_created) {
+			if (wl_get_drv_status(cfg, SCANNING, dev)) {
+				wl_notify_escan_complete(cfg, dev, true, true);
+			}
+			wldev_iovar_setint(dev, "mpc", 1);
+			/* Delete pm_enable_work */
+			wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+
+			/* for GC */
+			if (wl_get_drv_status(cfg, DISCONNECTING, dev) &&
+				(wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)) {
+				WL_ERR(("Wait for Link Down event for GC !\n"));
+				wait_for_completion_timeout
+					(&cfg->iface_disable, msecs_to_jiffies(500));
+			}
+
+			memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+			wl_set_p2p_status(cfg, IF_DELETING);
+			DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
+
+			/* for GO */
+			if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+				/* disable interface before bsscfg free */
+				ret = wl_cfgp2p_ifdisable(cfg, &p2p_mac);
+				/* if the firmware doesn't support "ifdis",
+				   do not wait for link down of the AP mode interface
+				 */
+				if (ret == 0) {
+					WL_ERR(("Wait for Link Down event for GO !!!\n"));
+					wait_for_completion_timeout(&cfg->iface_disable,
+						msecs_to_jiffies(500));
+				} else if (ret != BCME_UNSUPPORTED) {
+					msleep(300);
+				}
+			}
+			wl_cfgp2p_clear_management_ie(cfg, index);
+
+			if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)
+				wldev_iovar_setint(dev, "buf_key_b4_m4", 0);
+
+			/* delete interface after link down */
+			ret = wl_cfgp2p_ifdel(cfg, &p2p_mac);
+
+			if (ret != BCME_OK) {
+				struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+				WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n",
+					ret, ndev->name));
+				#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+				net_os_send_hang_message(ndev);
+				#endif
+			} else {
+				/* Wait for IF_DEL operation to be finished */
+				timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+					(wl_get_p2p_status(cfg, IF_DELETING) == false),
+					msecs_to_jiffies(MAX_WAIT_TIME));
+				if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
+					cfg->if_event_info.valid) {
+
+					WL_DBG(("IFDEL operation done\n"));
+					wl_cfg80211_handle_ifdel(cfg, &cfg->if_event_info, dev);
+				} else {
+					WL_ERR(("IFDEL didn't complete properly\n"));
+				}
+			}
+
+			ret = dhd_del_monitor(dev);
+			if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+				DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL((dhd_pub_t *)(cfg->pub));
+			}
+		}
+	}
+	return ret;
+}
+
+static s32
+wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
+	enum nl80211_iftype type, u32 *flags,
+	struct vif_params *params)
+{
+	s32 ap = 0;
+	s32 infra = 0;
+	s32 ibss = 0;
+	s32 wlif_type;
+	s32 mode = 0;
+	s32 err = BCME_OK;
+	chanspec_t chspec;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+	WL_DBG(("Enter type %d\n", type));
+	switch (type) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MESH_POINT:
+		ap = 1;
+		WL_ERR(("type (%d) : currently we do not support this type\n",
+			type));
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		mode = WL_MODE_IBSS;
+		ibss = 1;
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		mode = WL_MODE_BSS;
+		infra = 1;
+		break;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_P2P_GO:
+		mode = WL_MODE_AP;
+		ap = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (!dhd)
+		return -EINVAL;
+	if (ap) {
+		wl_set_mode_by_netdev(cfg, ndev, mode);
+		if (cfg->p2p_supported && cfg->p2p->vif_created) {
+			WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", cfg->p2p->vif_created,
+			p2p_on(cfg)));
+			wldev_iovar_setint(ndev, "mpc", 0);
+			wl_notify_escan_complete(cfg, ndev, true, true);
+
+			/* In the concurrency case the STA may already be associated on a
+			 * particular channel, so retrieve the current channel of the primary
+			 * interface and then start the virtual interface on it.
+			 */
+			chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+			wlif_type = WL_P2P_IF_GO;
+			WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d)\n",
+				ndev->name, ap, infra, type));
+			wl_set_p2p_status(cfg, IF_CHANGING);
+			wl_clr_p2p_status(cfg, IF_CHANGED);
+			wl_cfgp2p_ifchange(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
+			wait_event_interruptible_timeout(cfg->netif_change_event,
+				(wl_get_p2p_status(cfg, IF_CHANGED) == true),
+				msecs_to_jiffies(MAX_WAIT_TIME));
+			wl_set_mode_by_netdev(cfg, ndev, mode);
+			dhd->op_mode &= ~DHD_FLAG_P2P_GC_MODE;
+			dhd->op_mode |= DHD_FLAG_P2P_GO_MODE;
+			wl_clr_p2p_status(cfg, IF_CHANGING);
+			wl_clr_p2p_status(cfg, IF_CHANGED);
+			if (mode == WL_MODE_AP)
+				wl_set_drv_status(cfg, CONNECTED, ndev);
+		} else if (ndev == bcmcfg_to_prmry_ndev(cfg) &&
+			!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+			wl_set_drv_status(cfg, AP_CREATING, ndev);
+			if (!cfg->ap_info &&
+				!(cfg->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) {
+				WL_ERR(("struct ap_saved_ie allocation failed\n"));
+				return -ENOMEM;
+			}
+		} else {
+			WL_ERR(("Cannot change the interface for GO or SOFTAP\n"));
+			return -EINVAL;
+		}
+	} else {
+		WL_DBG(("Change_virtual_iface for transition from GO/AP to client/STA"));
+	}
+
+	if (ibss) {
+		infra = 0;
+		wl_set_mode_by_netdev(cfg, ndev, mode);
+		err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("SET Adhoc error %d\n", err));
+			return -EINVAL;
+		}
+	}
+
+	ndev->ieee80211_ptr->iftype = type;
+	return 0;
+}
+
+s32
+wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+	bool ifadd_expected = FALSE;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	/* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating ("p2p_ifupd");
+	 * redirect the IF_ADD event to ifchange, as it is not a real "new" interface.
+	 */
+	if (wl_get_p2p_status(cfg, IF_CHANGING))
+		return wl_cfg80211_notify_ifchange(ifidx, name, mac, bssidx);
+
+	/* Okay, we are expecting IF_ADD (as IF_ADDING is true) */
+	if (wl_get_p2p_status(cfg, IF_ADDING)) {
+		ifadd_expected = TRUE;
+		wl_clr_p2p_status(cfg, IF_ADDING);
+	} else if (cfg->bss_pending_op) {
+		ifadd_expected = TRUE;
+		cfg->bss_pending_op = FALSE;
+	}
+
+	if (ifadd_expected) {
+		wl_if_event_info *if_event_info = &cfg->if_event_info;
+
+		if_event_info->valid = TRUE;
+		if_event_info->ifidx = ifidx;
+		if_event_info->bssidx = bssidx;
+		strncpy(if_event_info->name, name, IFNAMSIZ);
+		if_event_info->name[IFNAMSIZ] = '\0';
+		if (mac)
+			memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN);
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+
+s32
+wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+	bool ifdel_expected = FALSE;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	wl_if_event_info *if_event_info = &cfg->if_event_info;
+
+	if (wl_get_p2p_status(cfg, IF_DELETING)) {
+		ifdel_expected = TRUE;
+		wl_clr_p2p_status(cfg, IF_DELETING);
+	} else if (cfg->bss_pending_op) {
+		ifdel_expected = TRUE;
+		cfg->bss_pending_op = FALSE;
+	}
+
+	if (ifdel_expected) {
+		if_event_info->valid = TRUE;
+		if_event_info->ifidx = ifidx;
+		if_event_info->bssidx = bssidx;
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+
+s32
+wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (wl_get_p2p_status(cfg, IF_CHANGING)) {
+		wl_set_p2p_status(cfg, IF_CHANGED);
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+	struct net_device* ndev)
+{
+	s32 type = -1;
+	s32 bssidx = -1;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	bool enabled;
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+
+	bssidx = if_event_info->bssidx;
+	if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION)) {
+		WL_ERR(("got IF_DEL for if %d, not owned by cfg driver\n", bssidx));
+		return BCME_ERROR;
+	}
+
+	if (p2p_is_on(cfg) && cfg->p2p->vif_created) {
+
+		if (cfg->scan_request && (cfg->escan_info.ndev == ndev)) {
+			/* Abort any pending scan requests */
+			cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+			WL_DBG(("ESCAN COMPLETED\n"));
+			wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, false);
+		}
+
+		memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+		if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK) {
+			WL_ERR(("Find p2p type from bssidx(%d) failed\n", bssidx));
+			return BCME_ERROR;
+		}
+		wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type));
+		wl_to_p2p_bss_ndev(cfg, type) = NULL;
+		wl_to_p2p_bss_bssidx(cfg, type) = WL_INVALID;
+		cfg->p2p->vif_created = false;
+
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+		dhd_wlfc_get_enable(dhd, &enabled);
+		if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+			dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+			dhd_wlfc_deinit(dhd);
+			cfg->wlfc_on = false;
+		}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+	}
+
+	wl_cfg80211_remove_if(cfg, if_event_info->ifidx, ndev);
+	return BCME_OK;
+}
+
+/* Find listen channel */
+static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
+	const u8 *ie, u32 ie_len)
+{
+	wifi_p2p_ie_t *p2p_ie;
+	u8 *end, *pos;
+	s32 listen_channel;
+
+	pos = (u8 *)ie;
+	p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
+
+	if (p2p_ie == NULL)
+		return 0;
+
+	pos = p2p_ie->subelts;
+	end = p2p_ie->subelts + (p2p_ie->len - 4);
+
+	CFGP2P_DBG((" found p2p ie ! length %d \n",
+		p2p_ie->len));
+
+	while (pos < end) {
+		uint16 attr_len;
+		if (pos + 2 >= end) {
+			CFGP2P_DBG((" -- Invalid P2P attribute"));
+			return 0;
+		}
+		attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
+
+		if (pos + 3 + attr_len > end) {
+			CFGP2P_DBG(("P2P: Attribute underflow "
+				   "(len=%u left=%d)",
+				   attr_len, (int) (end - pos - 3)));
+			return 0;
+		}
+
+		/* if the Listen Channel attribute id is 6 and the value is valid,
+		 * return the listen channel
+		 */
+		if (pos[0] == 6) {
+			/* listen channel subel length format
+			 * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
+			 */
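+			/* Illustrative attribute (hypothetical bytes):
+			 * { 0x06, 0x05, 0x00, 'X', 'X', 'X', 0x51, 0x0b } is a Listen
+			 * Channel attribute (id 6, little-endian length 5) whose last
+			 * byte, pos[7], is channel 11, which the check below accepts
+			 * as one of the social channels.
+			 */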
+			listen_channel = pos[1 + 2 + 3 + 1];
+
+			if (listen_channel == SOCIAL_CHAN_1 ||
+				listen_channel == SOCIAL_CHAN_2 ||
+				listen_channel == SOCIAL_CHAN_3) {
+				CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
+				return listen_channel;
+			}
+		}
+		pos += 3 + attr_len;
+	}
+	return 0;
+}
+
+static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_request *request)
+{
+	u32 n_ssids;
+	u32 n_channels;
+	u16 channel;
+	chanspec_t chanspec;
+	s32 i = 0, j = 0, offset;
+	char *ptr;
+	wlc_ssid_t ssid;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+	memset(&params->ssid, 0, sizeof(wlc_ssid_t));
+
+	WL_SCAN(("Preparing Scan request\n"));
+	WL_SCAN(("nprobes=%d\n", params->nprobes));
+	WL_SCAN(("active_time=%d\n", params->active_time));
+	WL_SCAN(("passive_time=%d\n", params->passive_time));
+	WL_SCAN(("home_time=%d\n", params->home_time));
+	WL_SCAN(("scan_type=%d\n", params->scan_type));
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+
+	/* if the request is NULL, just exit; this results in an all-channel broadcast scan */
+	if (!request)
+		return;
+
+	n_ssids = request->n_ssids;
+	n_channels = request->n_channels;
+
+	/* Copy channel array if applicable */
+	WL_SCAN(("### List of channelspecs to scan ###\n"));
+	if (n_channels > 0) {
+		for (i = 0; i < n_channels; i++) {
+			chanspec = 0;
+			channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+			/* SKIP DFS channels for Secondary interface */
+			if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
+				(request->channels[i]->flags &
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+				(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN)))
+#else
+				(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)))
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
+				continue;
+
+			if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
+#ifdef WL_HOST_BAND_MGMT
+				if (cfg->curr_band == WLC_BAND_5G) {
+					WL_DBG(("In 5G only mode, omit 2G channel:%d\n", channel));
+					continue;
+				}
+#endif /* WL_HOST_BAND_MGMT */
+				chanspec |= WL_CHANSPEC_BAND_2G;
+			} else {
+#ifdef WL_HOST_BAND_MGMT
+				if (cfg->curr_band == WLC_BAND_2G) {
+					WL_DBG(("In 2G only mode, omit 5G channel:%d\n", channel));
+					continue;
+				}
+#endif /* WL_HOST_BAND_MGMT */
+				chanspec |= WL_CHANSPEC_BAND_5G;
+			}
+
+			chanspec |= WL_CHANSPEC_BW_20;
+			chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+			params->channel_list[j] = channel;
+			params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
+			params->channel_list[j] |= chanspec;
+			WL_SCAN(("Chan : %d, Channel spec: %x \n",
+				channel, params->channel_list[j]));
+			params->channel_list[j] = wl_chspec_host_to_driver(params->channel_list[j]);
+			j++;
+		}
+	} else {
+		WL_SCAN(("Scanning all channels\n"));
+	}
+	n_channels = j;
+	/* Copy ssid array if applicable */
+	WL_SCAN(("### List of SSIDs to scan ###\n"));
+	if (n_ssids > 0) {
+		offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
+		offset = roundup(offset, sizeof(u32));
+		ptr = (char*)params + offset;
+		for (i = 0; i < n_ssids; i++) {
+			memset(&ssid, 0, sizeof(wlc_ssid_t));
+			ssid.SSID_len = request->ssids[i].ssid_len;
+			memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len);
+			if (!ssid.SSID_len)
+				WL_SCAN(("%d: Broadcast scan\n", i));
+			else
+				WL_SCAN(("%d: scan  for  %s size =%d\n", i,
+				ssid.SSID, ssid.SSID_len));
+			memcpy(ptr, &ssid, sizeof(wlc_ssid_t));
+			ptr += sizeof(wlc_ssid_t);
+		}
+	} else {
+		WL_SCAN(("Broadcast scan\n"));
+	}
+	/* Pack the SSID count and (masked) channel count into channel_num */
+	params->channel_num =
+	        htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	               (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
+
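+	/* Worked example for the channel_num packing above (illustrative):
+	 * 2 SSIDs and 3 channels give htod32((2 << WL_SCAN_PARAMS_NSSID_SHIFT) | 3),
+	 * so the firmware can recover both counts from the single word.
+	 */
+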
+	if (n_channels == 1) {
+		params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+		params->nprobes = htod32(params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+	}
+}
+
+static s32
+wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
+{
+	wl_uint32_list_t *list;
+	s32 err = BCME_OK;
+	if (valid_chan_list == NULL || size <= 0)
+		return -ENOMEM;
+
+	memset(valid_chan_list, 0, size);
+	list = (wl_uint32_list_t *)(void *) valid_chan_list;
+	list->count = htod32(WL_NUMCHANNELS);
+	err = wldev_ioctl(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size, false);
+	if (err != 0) {
+		WL_ERR(("get channels failed with %d\n", err));
+	}
+
+	return err;
+}
+
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
+bool g_first_broadcast_scan = TRUE;
+#endif
+
+static s32
+wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct cfg80211_scan_request *request, uint16 action)
+{
+	s32 err = BCME_OK;
+	u32 n_channels;
+	u32 n_ssids;
+	s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+	wl_escan_params_t *params = NULL;
+	u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+	u32 num_chans = 0;
+	s32 channel;
+	s32 n_valid_chan;
+	s32 search_state = WL_P2P_DISC_ST_SCAN;
+	u32 i, j, n_nodfs = 0;
+	u16 *default_chan_list = NULL;
+	wl_uint32_list_t *list;
+	struct net_device *dev = NULL;
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+	bool is_first_init_2g_scan = false;
+#endif
+	p2p_scan_purpose_t	p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
+
+	WL_DBG(("Enter \n"));
+
+	/* the scan request may be empty : in that case perform a default all-channel scan */
+	if (!cfg) {
+		err = -EINVAL;
+		goto exit;
+	}
+	if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+		/* LEGACY SCAN TRIGGER */
+		WL_SCAN((" LEGACY E-SCAN START\n"));
+
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+		if (!request) {
+			err = -EINVAL;
+			goto exit;
+		}
+		if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
+			is_first_init_2g_scan = true;
+			g_first_broadcast_scan = false;
+		}
+#endif
+
+		/* if the scan request is not empty, parse the scan request parameters */
+		if (request != NULL) {
+			n_channels = request->n_channels;
+			n_ssids = request->n_ssids;
+			if (n_channels % 2)
+				/* If n_channels is odd, add a u16 pad */
+				params_size += sizeof(u16) * (n_channels + 1);
+			else
+				params_size += sizeof(u16) * n_channels;
+
+			/* Allocate space for populating ssids in wl_escan_params_t struct */
+			params_size += sizeof(struct wlc_ssid) * n_ssids;
+		}
+		params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
+		if (params == NULL) {
+			err = -ENOMEM;
+			goto exit;
+		}
+		wl_scan_prep(&params->params, request);
+
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+		/* Override active_time to reduce scan time if it's the first broadcast scan. */
+		if (is_first_init_2g_scan)
+			params->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+#endif
+
+		params->version = htod32(ESCAN_REQ_VERSION);
+		params->action =  htod16(action);
+		wl_escan_set_sync_id(params->sync_id, cfg);
+		wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
+		if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+			WL_ERR(("ioctl buffer length not sufficient\n"));
+			kfree(params);
+			err = -ENOMEM;
+			goto exit;
+		}
+		err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+			cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		if (unlikely(err)) {
+			if (err == BCME_EPERM)
+				/* Scan not permitted at this point in time */
+				WL_DBG((" Escan not permitted at this time (%d)\n", err));
+			else
+				WL_ERR((" Escan set error (%d)\n", err));
+		}
+		kfree(params);
+	}
+	else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
+		/* P2P SCAN TRIGGER */
+		s32 _freq = 0;
+		n_nodfs = 0;
+		if (request && request->n_channels) {
+			num_chans = request->n_channels;
+			WL_SCAN((" chann number : %d\n", num_chans));
+			default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list),
+				GFP_KERNEL);
+			if (default_chan_list == NULL) {
+				WL_ERR(("channel list allocation failed \n"));
+				err = -ENOMEM;
+				goto exit;
+			}
+			if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
+				list = (wl_uint32_list_t *) chan_buf;
+				n_valid_chan = dtoh32(list->count);
+				for (i = 0; i < num_chans; i++)
+				{
+#ifdef WL_HOST_BAND_MGMT
+					int channel_band = 0;
+#endif /* WL_HOST_BAND_MGMT */
+					_freq = request->channels[i]->center_freq;
+					channel = ieee80211_frequency_to_channel(_freq);
+#ifdef WL_HOST_BAND_MGMT
+					channel_band = (channel > CH_MAX_2G_CHANNEL) ?
+						WLC_BAND_5G : WLC_BAND_2G;
+					if ((cfg->curr_band != WLC_BAND_AUTO) &&
+						(cfg->curr_band != channel_band) &&
+						!IS_P2P_SOCIAL_CHANNEL(channel))
+							continue;
+#endif /* WL_HOST_BAND_MGMT */
+
+					/* ignore DFS channels */
+					if (request->channels[i]->flags &
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+						(IEEE80211_CHAN_NO_IR
+						| IEEE80211_CHAN_RADAR))
+#else
+						(IEEE80211_CHAN_RADAR
+						| IEEE80211_CHAN_PASSIVE_SCAN))
+#endif
+						continue;
+
+					for (j = 0; j < n_valid_chan; j++) {
+						/* allow only channels supported by the
+						*  current regulatory domain
+						*/
+						if (channel == (dtoh32(list->element[j])))
+							default_chan_list[n_nodfs++] =
+								channel;
+					}
+
+				}
+			}
+			if (num_chans == SOCIAL_CHAN_CNT && (
+						(default_chan_list[0] == SOCIAL_CHAN_1) &&
+						(default_chan_list[1] == SOCIAL_CHAN_2) &&
+						(default_chan_list[2] == SOCIAL_CHAN_3))) {
+				/* SOCIAL CHANNELS 1, 6, 11 */
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+				WL_INFORM(("P2P SEARCH PHASE START \n"));
+			} else if ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) &&
+				(wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) {
+				/* If you are already a GO, then do SEARCH only */
+				WL_INFORM(("Already a GO. Do SEARCH Only"));
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				num_chans = n_nodfs;
+				p2p_scan_purpose = P2P_SCAN_NORMAL;
+
+			} else if (num_chans == 1) {
+				p2p_scan_purpose = P2P_SCAN_CONNECT_TRY;
+			} else if (num_chans == SOCIAL_CHAN_CNT + 1) {
+				/* SOCIAL_CHAN_CNT + 1 takes care of the progressive scan supported by
+				 * the supplicant
+				 */
+				p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+			} else {
+				WL_INFORM(("P2P SCAN STATE START \n"));
+				num_chans = n_nodfs;
+				p2p_scan_purpose = P2P_SCAN_NORMAL;
+			}
+		} else {
+			err = -EINVAL;
+			goto exit;
+		}
+		err = wl_cfgp2p_escan(cfg, ndev, cfg->active_scan, num_chans, default_chan_list,
+			search_state, action,
+			wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
+			p2p_scan_purpose);
+
+		if (!err)
+			cfg->p2p->search_state = search_state;
+
+		kfree(default_chan_list);
+	}
+exit:
+	if (unlikely(err)) {
+		/* Don't print an error in case of scan suppress */
+		if ((err == BCME_EPERM) && cfg->scan_suppressed)
+			WL_DBG(("Escan failed: Scan Suppressed \n"));
+		else
+			WL_ERR(("error (%d)\n", err));
+	}
+	return err;
+}
+
+
+static s32
+wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+{
+	s32 err = BCME_OK;
+	s32 passive_scan;
+	wl_scan_results_t *results;
+	WL_SCAN(("Enter \n"));
+	mutex_lock(&cfg->usr_sync);
+
+	results = wl_escan_get_buf(cfg, FALSE);
+	results->version = 0;
+	results->count = 0;
+	results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+	cfg->escan_info.ndev = ndev;
+	cfg->escan_info.wiphy = wiphy;
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+	passive_scan = cfg->active_scan ? 0 : 1;
+	err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+		&passive_scan, sizeof(passive_scan), true);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		goto exit;
+	}
+
+	err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
+exit:
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+static s32
+__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_ssid *ssids;
+	struct ether_addr primary_mac;
+	bool p2p_ssid;
+#ifdef WL11U
+	bcm_tlv_t *interworking_ie;
+#endif
+	s32 err = 0;
+	s32 bssidx = -1;
+	s32 i;
+
+	unsigned long flags;
+	static s32 busy_count = 0;
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	struct net_device *remain_on_channel_ndev = NULL;
+#endif
+
+	dhd_pub_t *dhd;
+
+	dhd = (dhd_pub_t *)(cfg->pub);
+	/*
+	 * Hostapd triggers a scan before starting automatic channel selection;
+	 * the dump stats IOVAR also scans each channel, hence we return from here.
+	 */
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+#ifdef WL_SUPPORT_ACS
+		WL_INFORM(("Scan Command at SoftAP mode\n"));
+		return 0;
+#else
+		WL_ERR(("Invalid Scan Command at SoftAP mode\n"));
+		return -EINVAL;
+#endif /* WL_SUPPORT_ACS */
+	}
+
+	ndev = ndev_to_wlc_ndev(ndev, cfg);
+
+	if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+		WL_ERR(("Sending Action Frames. Try it again.\n"));
+		return -EAGAIN;
+	}
+
+	WL_DBG(("Enter wiphy (%p)\n", wiphy));
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		if (cfg->scan_request == NULL) {
+			wl_clr_drv_status_all(cfg, SCANNING);
+			WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
+		} else {
+			WL_ERR(("Scanning already\n"));
+			return -EAGAIN;
+		}
+	}
+	if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
+		WL_ERR(("Scanning being aborted\n"));
+		return -EAGAIN;
+	}
+	if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
+		WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
+		return -EOPNOTSUPP;
+	}
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
+	if (remain_on_channel_ndev) {
+		WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
+		wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true);
+	}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef WL_SDO
+	if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+		wl_cfg80211_pause_sdo(ndev, cfg);
+	}
+#endif
+
+	/* Arm scan timeout timer */
+	mod_timer(&cfg->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS));
+	if (request) {		/* scan bss */
+		ssids = request->ssids;
+		p2p_ssid = false;
+		for (i = 0; i < request->n_ssids; i++) {
+			if (ssids[i].ssid_len &&
+				IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
+				p2p_ssid = true;
+				break;
+			}
+		}
+		if (p2p_ssid) {
+			if (cfg->p2p_supported) {
+				/* p2p scan trigger */
+				if (p2p_on(cfg) == false) {
+					/* p2p on at the first time */
+					p2p_on(cfg) = true;
+					wl_cfgp2p_set_firm_p2p(cfg);
+					get_primary_mac(cfg, &primary_mac);
+					wl_cfgp2p_generate_bss_mac(&primary_mac,
+						&cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+				}
+				wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+				WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+				p2p_scan(cfg) = true;
+			}
+		} else {
+			/* legacy scan trigger:
+			 * we have to disable p2p discovery if p2p discovery is on
+			 */
+			if (cfg->p2p_supported) {
+				p2p_scan(cfg) = false;
+				/* If the netdevice is not the primary one and p2p is on,
+				*  we will do a p2p scan using P2PAPI_BSSCFG_DEVICE.
+				*/
+
+				if (p2p_scan(cfg) == false) {
+					if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+						err = wl_cfgp2p_discover_enable_search(cfg,
+						false);
+						if (unlikely(err)) {
+							goto scan_out;
+						}
+
+					}
+				}
+			}
+			if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+
+				if (wl_cfgp2p_find_idx(cfg, ndev, &bssidx) != BCME_OK) {
+					WL_ERR(("Find p2p index from ndev(%p) failed\n",
+						ndev));
+					err = BCME_ERROR;
+					goto scan_out;
+				}
+#ifdef WL11U
+				if ((interworking_ie = wl_cfg80211_find_interworking_ie(
+					(u8 *)request->ie, request->ie_len)) != NULL) {
+					err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+					       VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
+					       interworking_ie->data, interworking_ie->len);
+
+					if (unlikely(err)) {
+						goto scan_out;
+					}
+				} else if (cfg->iw_ie_len != 0) {
+				/* we have to clear the IW IE and disable gratuitous ARP */
+					wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+						VNDR_IE_CUSTOM_FLAG,
+						DOT11_MNG_INTERWORKING_ID,
+						0, 0);
+
+					wldev_iovar_setint_bsscfg(ndev, "grat_arp", 0,
+						bssidx);
+					cfg->wl11u = FALSE;
+					/* we don't care about error */
+				}
+#endif /* WL11U */
+				err = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx,
+					VNDR_IE_PRBREQ_FLAG, (u8 *)request->ie,
+					request->ie_len);
+
+				if (unlikely(err)) {
+					goto scan_out;
+				}
+
+			}
+		}
+	} else {		/* scan in ibss */
+		ssids = this_ssid;
+	}
+
+	cfg->scan_request = request;
+	wl_set_drv_status(cfg, SCANNING, ndev);
+
+	if (cfg->p2p_supported) {
+		if (p2p_on(cfg) && p2p_scan(cfg)) {
+
+#ifdef WL_SDO
+			if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+				/* We shouldn't be getting p2p_find while discovery
+				 * offload is in progress
+				 */
+				WL_SD(("P2P_FIND: Discovery offload is in progress."
+					" Do nothing\n"));
+				err = -EINVAL;
+				goto scan_out;
+			}
+#endif
+			/* find my listen channel */
+			cfg->afx_hdl->my_listen_chan =
+				wl_find_listen_channel(cfg, request->ie,
+				request->ie_len);
+			err = wl_cfgp2p_enable_discovery(cfg, ndev,
+			request->ie, request->ie_len);
+
+			if (unlikely(err)) {
+				goto scan_out;
+			}
+		}
+	}
+	err = wl_do_escan(cfg, wiphy, ndev, request);
+	if (likely(!err))
+		goto scan_success;
+	else
+		goto scan_out;
+
+scan_success:
+	busy_count = 0;
+
+	return 0;
+
+scan_out:
+	if (err == BCME_BUSY || err == BCME_NOTREADY) {
+		WL_ERR(("Scan err = (%d), busy?%d", err, -EBUSY));
+		err = -EBUSY;
+	}
+
+#define SCAN_EBUSY_RETRY_LIMIT 10
+	if (err == -EBUSY) {
+		if (busy_count++ > SCAN_EBUSY_RETRY_LIMIT) {
+			struct ether_addr bssid;
+			s32 ret = 0;
+			busy_count = 0;
+			WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
+				wl_get_drv_status(cfg, SCANNING, ndev),
+				wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
+				wl_get_drv_status(cfg, CONNECTING, ndev),
+				wl_get_drv_status(cfg, CONNECTED, ndev),
+				wl_get_drv_status(cfg, DISCONNECTING, ndev),
+				wl_get_drv_status(cfg, AP_CREATING, ndev),
+				wl_get_drv_status(cfg, AP_CREATED, ndev),
+				wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
+				wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
+
+			bzero(&bssid, sizeof(bssid));
+			if ((ret = wldev_ioctl(ndev, WLC_GET_BSSID,
+				&bssid, ETHER_ADDR_LEN, false)) == 0)
+				WL_ERR(("FW is connected with " MACDBG "/n",
+					MAC2STRDBG(bssid.octet)));
+			else
+				WL_ERR(("GET BSSID failed with %d\n", ret));
+
+			wl_cfg80211_scan_abort(cfg);
+
+		}
+	} else {
+		busy_count = 0;
+	}
+
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	cfg->scan_request = NULL;
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+#ifdef WL_SDO
+	if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+		wl_cfg80211_resume_sdo(ndev, cfg);
+	}
+#endif
+	return err;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+#else
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	WL_DBG(("Enter \n"));
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+	if (unlikely(err)) {
+		if ((err == BCME_EPERM) && cfg->scan_suppressed)
+			WL_DBG(("scan not permitted at this time (%d)\n", err));
+		else
+			WL_ERR(("scan error (%d)\n", err));
+		return err;
+	}
+
+	return err;
+}
+
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+{
+	s32 err = 0;
+
+	err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+{
+	s32 err = 0;
+
+	err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+	s32 err = 0;
+	u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+
+	retry = htod32(retry);
+	err = wldev_ioctl(dev, cmd, &retry, sizeof(retry), true);
+	if (unlikely(err)) {
+		WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_DBG(("Enter\n"));
+	if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+		(cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+		cfg->conf->rts_threshold = wiphy->rts_threshold;
+		err = wl_set_rts(ndev, cfg->conf->rts_threshold);
+		if (!err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+		(cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+		cfg->conf->frag_threshold = wiphy->frag_threshold;
+		err = wl_set_frag(ndev, cfg->conf->frag_threshold);
+		if (!err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_LONG &&
+		(cfg->conf->retry_long != wiphy->retry_long)) {
+		cfg->conf->retry_long = wiphy->retry_long;
+		err = wl_set_retry(ndev, cfg->conf->retry_long, true);
+		if (!err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_SHORT &&
+		(cfg->conf->retry_short != wiphy->retry_short)) {
+		cfg->conf->retry_short = wiphy->retry_short;
+		err = wl_set_retry(ndev, cfg->conf->retry_short, false);
+		if (!err) {
+			return err;
+		}
+	}
+
+	return err;
+}
+static chanspec_t
+channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	u8 *buf = NULL;
+	wl_uint32_list_t *list;
+	int err = BCME_OK;
+	chanspec_t c = 0, ret_c = 0;
+	int bw = 0, tmp_bw = 0;
+	int i;
+	u32 tmp_c;
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#define LOCAL_BUF_SIZE	1024
+	buf = (u8 *) kzalloc(LOCAL_BUF_SIZE, kflags);
+	if (!buf) {
+		WL_ERR(("buf memory alloc failed\n"));
+		goto exit;
+	}
+	list = (wl_uint32_list_t *)(void *)buf;
+	list->count = htod32(WL_NUMCHANSPECS);
+	err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+		0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
+	if (err != BCME_OK) {
+		WL_ERR(("get chanspecs failed with %d\n", err));
+		goto exit;
+	}
+	for (i = 0; i < dtoh32(list->count); i++) {
+		c = dtoh32(list->element[i]);
+		if (channel <= CH_MAX_2G_CHANNEL) {
+			if (!CHSPEC_IS20(c))
+				continue;
+			if (channel == CHSPEC_CHANNEL(c)) {
+				ret_c = c;
+				bw = 20;
+				goto exit;
+			}
+		}
+		tmp_c = wf_chspec_ctlchan(c);
+		tmp_bw = bw2cap[CHSPEC_BW(c) >> WL_CHANSPEC_BW_SHIFT];
+		if (tmp_c != channel)
+			continue;
+
+		if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) {
+			bw = tmp_bw;
+			ret_c = c;
+			if (bw == bw_cap)
+				goto exit;
+		}
+	}
+exit:
+	if (buf)
+		kfree(buf);
+#undef LOCAL_BUF_SIZE
+	WL_INFORM(("return chanspec %x %d\n", ret_c, bw));
+	return ret_c;
+}
+
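+/* Cache an IBSS vendor-specific IE buffer for later use with the "ie" iovar.
+ * Any previously cached buffer is freed and ownership of ibss_vsie moves to cfg.
+ */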
+void
+wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (cfg != NULL && ibss_vsie != NULL) {
+		if (cfg->ibss_vsie != NULL) {
+			kfree(cfg->ibss_vsie);
+		}
+		cfg->ibss_vsie = ibss_vsie;
+		cfg->ibss_vsie_len = ibss_vsie_len;
+	}
+}
+
+static void
+wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg)
+{
+	/* free & initialize VSIE (Vendor Specific IE) */
+	if (cfg->ibss_vsie != NULL) {
+		kfree(cfg->ibss_vsie);
+		cfg->ibss_vsie = NULL;
+		cfg->ibss_vsie_len = 0;
+	}
+}
+
+s32
+wl_cfg80211_ibss_vsie_delete(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	char *ioctl_buf = NULL;
+	s32 ret = BCME_OK;
+
+	if (cfg != NULL && cfg->ibss_vsie != NULL) {
+		ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+		if (!ioctl_buf) {
+			WL_ERR(("ioctl memory alloc failed\n"));
+			return -ENOMEM;
+		}
+
+		/* change the command from "add" to "del" */
+		strncpy(cfg->ibss_vsie->cmd, "del", VNDR_IE_CMD_LEN - 1);
+		cfg->ibss_vsie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+		ret = wldev_iovar_setbuf(dev, "ie",
+			cfg->ibss_vsie, cfg->ibss_vsie_len,
+			ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		WL_ERR(("ret=%d\n", ret));
+
+		if (ret == BCME_OK) {
+			/* free & initialize VSIE */
+			kfree(cfg->ibss_vsie);
+			cfg->ibss_vsie = NULL;
+			cfg->ibss_vsie_len = 0;
+		}
+
+		if (ioctl_buf) {
+			kfree(ioctl_buf);
+		}
+	}
+
+	return ret;
+}
+
+#ifdef WLAIBSS_MCHAN
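+/* Create a dedicated IBSS interface: derive a MAC from the primary interface,
+ * issue the "aibss_ifadd" iovar, wait for the firmware IF_ADD event and
+ * register the resulting net_device/wireless_dev pair with cfg80211.
+ */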
+static bcm_struct_cfgdev*
+bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wireless_dev* wdev = NULL;
+	struct net_device *new_ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 timeout;
+	wl_aibss_if_t aibss_if;
+	wl_if_event_info *event = NULL;
+
+	if (cfg->ibss_cfgdev != NULL) {
+		WL_ERR(("IBSS interface %s already exists\n", name));
+		return NULL;
+	}
+
+	WL_ERR(("Try to create IBSS interface %s\n", name));
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+	/* generate a new MAC address for the IBSS interface */
+	get_primary_mac(cfg, &cfg->ibss_if_addr);
+	cfg->ibss_if_addr.octet[4] ^= 0x40;
+	memset(&aibss_if, 0, sizeof(aibss_if));
+	memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr));
+	aibss_if.chspec = 0;
+	aibss_if.len = sizeof(aibss_if);
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+	err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if,
+		sizeof(aibss_if), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (err) {
+		WL_ERR(("IOVAR aibss_ifadd failed with error %d\n", err));
+		goto fail;
+	}
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op)
+		goto fail;
+
+	event = &cfg->if_event_info;
+	strncpy(event->name, name, IFNAMSIZ - 1);
+	/* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control
+	 * over this net_device interface to dhd_linux, hence the interface is managed by dhd_linux
+	 * and will be freed by dhd_detach unless it gets unregistered before that. The
+	 * wireless_dev instance new_ndev->ieee80211_ptr associated with this net_device will
+	 * be freed by wl_dealloc_netinfo
+	 */
+	new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name,
+		event->mac, event->bssidx);
+	if (new_ndev == NULL)
+		goto fail;
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (wdev == NULL)
+		goto fail;
+	wdev->wiphy = wiphy;
+	wdev->iftype = NL80211_IFTYPE_ADHOC;
+	wdev->netdev = new_ndev;
+	new_ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+	/* The rtnl lock must have been acquired; if this is not the case,
+	 * wl_cfg80211_register_if needs to be modified to take one parameter
+	 * (bool need_rtnl_lock).
+	 */
+	ASSERT_RTNL();
+	if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK)
+		goto fail;
+
+	wl_alloc_netinfo(cfg, new_ndev, wdev, WL_MODE_IBSS, PM_ENABLE);
+	cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev);
+	WL_ERR(("IBSS interface %s created\n", new_ndev->name));
+	return cfg->ibss_cfgdev;
+
+fail:
+	WL_ERR(("failed to create IBSS interface %s \n", name));
+	cfg->bss_pending_op = FALSE;
+	if (new_ndev)
+		wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+	if (wdev)
+		kfree(wdev);
+	return NULL;
+}
+
+static s32
+bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 timeout;
+
+	if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet))
+		return -EINVAL;
+	ndev = (struct net_device *)cfgdev_to_ndev(cfg->ibss_cfgdev);
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+	err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr,
+		sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (err) {
+		WL_ERR(("IOVAR aibss_ifdel failed with error %d\n", err));
+		goto fail;
+	}
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("timeout in waiting IF_DEL event\n"));
+		goto fail;
+	}
+
+	wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev);
+	cfg->ibss_cfgdev = NULL;
+	return 0;
+
+fail:
+	cfg->bss_pending_op = FALSE;
+	return -1;
+}
+#endif /* WLAIBSS_MCHAN */
+
+#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
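+/* Bring a firmware bsscfg up as an AP (val 3) or STA (val 2), or tear it
+ * down (val -1), through the "bss" iovar on the given bsscfg index.
+ */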
+s32
+wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, s32 bsscfg_idx,
+	enum nl80211_iftype iface_type, s32 del, u8 *addr)
+{
+	s32 ret = BCME_OK;
+	s32 val = 0;
+
+	struct {
+		s32 cfg;
+		s32 val;
+		struct ether_addr ea;
+	} bss_setbuf;
+
+	WL_INFORM(("iface_type:%d del:%d \n", iface_type, del));
+
+	bzero(&bss_setbuf, sizeof(bss_setbuf));
+
+	/* AP=3, STA=2, up=1, down=0, val=-1 */
+	if (del) {
+		val = -1;
+	} else if (iface_type == NL80211_IFTYPE_AP) {
+		/* AP Interface */
+		WL_DBG(("Adding AP Interface \n"));
+		val = 3;
+	} else if (iface_type == NL80211_IFTYPE_STATION) {
+		WL_DBG(("Adding STA Interface \n"));
+		val = 2;
+	} else {
+		WL_ERR((" add_del_bss NOT supported for IFACE type:0x%x", iface_type));
+		return -EINVAL;
+	}
+
+	bss_setbuf.cfg = htod32(bsscfg_idx);
+	bss_setbuf.val = htod32(val);
+
+	if (addr) {
+		memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN);
+	}
+
+	ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (ret != 0)
+		WL_ERR(("'bss %d' failed with %d\n", val, ret));
+
+	return ret;
+}
+
+/* Create a Generic Network Interface and initialize it depending up on
+ * the interface type
+ */
+bcm_struct_cfgdev*
+wl_cfg80211_create_iface(struct wiphy *wiphy,
+	enum nl80211_iftype iface_type,
+	u8 *mac_addr, const char *name)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *new_ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 ret = BCME_OK;
+	s32 bsscfg_idx = 1;
+	u32 timeout;
+	wl_if_event_info *event = NULL;
+	struct wireless_dev *wdev = NULL;
+	u8 addr[ETH_ALEN];
+
+	WL_DBG(("Enter\n"));
+
+	if (!name) {
+		WL_ERR(("Interface name not provided\n"));
+		return NULL;
+	}
+
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	if (likely(!mac_addr)) {
+		/* Use primary MAC with the locally administered bit for the Secondary STA I/F */
+		memcpy(addr, primary_ndev->dev_addr, ETH_ALEN);
+		addr[0] |= 0x02;
+	} else {
+		/* Use the application provided mac address (if any) */
+		memcpy(addr, mac_addr, ETH_ALEN);
+	}
+
+	if ((iface_type != NL80211_IFTYPE_STATION) && (iface_type != NL80211_IFTYPE_AP)) {
+		WL_ERR(("IFACE type:%d not supported. STA "
+					"or AP IFACE is only supported\n", iface_type));
+		return NULL;
+	}
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+
+	/* De-initialize the p2p discovery interface, if operational */
+	if (p2p_is_on(cfg)) {
+		WL_DBG(("Disabling P2P Discovery Interface \n"));
+#ifdef WL_CFG80211_P2P_DEV_IF
+		ret = wl_cfg80211_scan_stop(bcmcfg_to_p2p_wdev(cfg));
+#else
+		ret = wl_cfg80211_scan_stop(cfg->p2p_net);
+#endif
+		if (unlikely(ret < 0)) {
+			CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
+		}
+
+		wl_cfgp2p_disable_discovery(cfg);
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+		p2p_on(cfg) = false;
+	}
+
+	/*
+	 * Initialize the firmware I/F.
+	 */
+	if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev,
+		bsscfg_idx, iface_type, 0, addr)) < 0) {
+		return NULL;
+	}
+
+	/*
+	 * Wait till the firmware send a confirmation event back.
+	 */
+	WL_DBG(("Wait for the FW I/F Event\n"));
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("ADD_IF event, didn't come. Return \n"));
+		goto fail;
+	}
+
+	/*
+	 * Since the FW operation is successful, we can go ahead with
+	 * the host interface creation.
+	 */
+	event = &cfg->if_event_info;
+	strncpy(event->name, name, IFNAMSIZ - 1);
+	new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx,
+		event->name, addr, event->bssidx);
+	if (!new_ndev) {
+		WL_ERR(("I/F allocation failed! \n"));
+		goto fail;
+	} else
+		WL_DBG(("I/F allocation succeeded! ifidx:0x%x bssidx:0x%x \n",
+		 event->ifidx, event->bssidx));
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (!wdev) {
+		WL_ERR(("wireless_dev alloc failed! \n"));
+		goto fail;
+	}
+
+	wdev->wiphy = wiphy;
+	wdev->iftype = iface_type;
+	new_ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+	/* RTNL lock must have been acquired. */
+	ASSERT_RTNL();
+
+	/* Set the locally administered mac addr, if not applied already */
+	if (memcmp(addr, event->mac, ETH_ALEN) != 0) {
+		ret = wldev_iovar_setbuf_bsscfg(primary_ndev, "cur_etheraddr", addr, ETH_ALEN,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, event->bssidx, &cfg->ioctl_buf_sync);
+		if (unlikely(ret)) {
+				WL_ERR(("set cur_etheraddr Error (%d)\n", ret));
+				goto fail;
+		}
+		memcpy(new_ndev->dev_addr, addr, ETH_ALEN);
+	}
+
+	if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) {
+		WL_ERR(("IFACE register failed \n"));
+		goto fail;
+	}
+
+	/* Initialize with the station mode params */
+	wl_alloc_netinfo(cfg, new_ndev, wdev,
+		(iface_type == NL80211_IFTYPE_STATION) ?
+		WL_MODE_BSS : WL_MODE_AP, PM_ENABLE);
+	cfg->bss_cfgdev = ndev_to_cfgdev(new_ndev);
+	cfg->cfgdev_bssidx = event->bssidx;
+
+	WL_DBG(("Host Network Interface for Secondary I/F created"));
+
+	return cfg->bss_cfgdev;
+
+fail:
+	cfg->bss_pending_op = FALSE;
+	if (new_ndev)
+		wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+	if (wdev)
+		kfree(wdev);
+
+	return NULL;
+}
+
+s32
+wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 ret = BCME_OK;
+	s32 bsscfg_idx = 1;
+	u32 timeout;
+	enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION;
+
+	WL_DBG(("Enter\n"));
+
+	if (!cfg->bss_cfgdev)
+		return 0;
+
+	/* If any scan is going on, abort it */
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		WL_DBG(("Scan in progress. Aborting the scan!\n"));
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+
+	ndev = (struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev);
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+
+	/* Delete the firmware interface */
+	if ((ret = wl_cfg80211_add_del_bss(cfg, ndev,
+		bsscfg_idx, iface_type, true, NULL)) < 0) {
+		WL_ERR(("DEL bss failed ret:%d \n", ret));
+		return ret;
+	}
+
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("timeout in waiting IF_DEL event\n"));
+	}
+
+	wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev);
+	cfg->bss_cfgdev = NULL;
+	cfg->cfgdev_bssidx = -1;
+	cfg->bss_pending_op = FALSE;
+
+	WL_DBG(("IF_DEL Done.\n"));
+
+	return ret;
+}
+#endif /* defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) */
+
+static s32
+wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_ibss_params *params)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_bss *bss;
+	struct ieee80211_channel *chan;
+	struct wl_join_params join_params;
+	int scan_suppress;
+	struct cfg80211_ssid ssid;
+	s32 scan_retry = 0;
+	s32 err = 0;
+	size_t join_params_size;
+	chanspec_t chanspec = 0;
+	u32 param[2] = {0, 0};
+	u32 bw_cap = 0;
+#if defined(WLAIBSS) && defined(WLAIBSS_PS)
+	s32 atim = 10;
+#endif /* WLAIBSS & WLAIBSS_PS */
+
+	WL_TRACE(("In\n"));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_INFORM(("JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid)));
+	if (!params->ssid || params->ssid_len <= 0) {
+		WL_ERR(("Invalid parameter\n"));
+		return -EINVAL;
+	}
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	chan = params->chandef.chan;
+#else
+	chan = params->channel;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	if (chan)
+		cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+	if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+		struct wlc_ssid *ssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID);
+		u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID);
+		u32 *channel = (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN);
+		if (!params->bssid || ((memcmp(params->bssid, bssid, ETHER_ADDR_LEN) == 0) &&
+			(memcmp(params->ssid, ssid->SSID, ssid->SSID_len) == 0) &&
+			(*channel == cfg->channel))) {
+			WL_ERR(("Connection already existed to " MACDBG "\n",
+				MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID))));
+			return -EISCONN;
+		}
+		WL_ERR(("Ignore Previous connecton to %s (" MACDBG ")\n",
+			ssid->SSID, MAC2STRDBG(bssid)));
+	}
+
+	/* remove the VSIE */
+	wl_cfg80211_ibss_vsie_delete(dev);
+
+	bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len);
+	if (!bss) {
+		if (IBSS_INITIAL_SCAN_ALLOWED == TRUE) {
+			memcpy(ssid.ssid, params->ssid, params->ssid_len);
+			ssid.ssid_len = params->ssid_len;
+			do {
+				if (unlikely
+					(__wl_cfg80211_scan(wiphy, dev, NULL, &ssid) ==
+					 -EBUSY)) {
+					wl_delay(150);
+				} else {
+					break;
+				}
+			} while (++scan_retry < WL_SCAN_RETRY_MAX);
+
+			/* The rtnl lock release code was removed here; there is no
+			 * apparent reason why the rtnl lock would need to be released.
+			 */
+
+			/* wait 4 seconds for the scan to complete */
+			schedule_timeout_interruptible(msecs_to_jiffies(4000));
+
+			bss = cfg80211_get_ibss(wiphy, NULL,
+				params->ssid, params->ssid_len);
+		}
+	}
+	if (bss && ((IBSS_COALESCE_ALLOWED == TRUE) ||
+		((IBSS_COALESCE_ALLOWED == FALSE) && params->bssid &&
+		!memcmp(bss->bssid, params->bssid, ETHER_ADDR_LEN)))) {
+		cfg->ibss_starter = false;
+		WL_DBG(("Found IBSS\n"));
+	} else {
+		cfg->ibss_starter = true;
+	}
+	if (chan) {
+		if (chan->band == IEEE80211_BAND_5GHZ)
+			param[0] = WLC_BAND_5G;
+		else if (chan->band == IEEE80211_BAND_2GHZ)
+			param[0] = WLC_BAND_2G;
+		err = wldev_iovar_getint(dev, "bw_cap", param);
+		if (unlikely(err)) {
+			WL_ERR(("Get bw_cap Failed (%d)\n", err));
+			return err;
+		}
+		bw_cap = param[0];
+		chanspec = channel_to_chanspec(wiphy, dev, cfg->channel, bw_cap);
+	}
+	/*
+	 * Join with specific BSSID and cached SSID
+	 * If SSID is zero join based on BSSID only
+	 */
+	memset(&join_params, 0, sizeof(join_params));
+	memcpy((void *)join_params.ssid.SSID, (void *)params->ssid,
+		params->ssid_len);
+	join_params.ssid.SSID_len = htod32(params->ssid_len);
+	if (params->bssid) {
+		memcpy(&join_params.params.bssid, params->bssid, ETHER_ADDR_LEN);
+		err = wldev_ioctl(dev, WLC_SET_DESIRED_BSSID, &join_params.params.bssid,
+			ETHER_ADDR_LEN, true);
+		if (unlikely(err)) {
+			WL_ERR(("Error (%d)\n", err));
+			return err;
+		}
+	} else
+		memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
+	wldev_iovar_setint(dev, "ibss_coalesce_allowed", IBSS_COALESCE_ALLOWED);
+
+	if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
+		scan_suppress = TRUE;
+		/* Set the SCAN SUPPRESS Flag in the firmware to skip join scan */
+		err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS,
+			&scan_suppress, sizeof(int), true);
+		if (unlikely(err)) {
+			WL_ERR(("Scan Suppress Setting Failed (%d)\n", err));
+			return err;
+		}
+	}
+
+	join_params.params.chanspec_list[0] = chanspec;
+	join_params.params.chanspec_num = 1;
+	wldev_iovar_setint(dev, "chanspec", chanspec);
+	join_params_size = sizeof(join_params);
+
+	/* Disable Authentication, IBSS will add key if it required */
+	wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
+	wldev_iovar_setint(dev, "wsec", 0);
+
+#ifdef WLAIBSS
+	/* Enable custom ibss features */
+	err = wldev_iovar_setint(dev, "aibss", TRUE);
+
+	if (unlikely(err)) {
+		WL_ERR(("Enable custom IBSS mode failed (%d)\n", err));
+		return err;
+	}
+#ifdef WLAIBSS_PS
+	err = wldev_ioctl(dev, WLC_SET_ATIM, &atim, sizeof(int), true);
+	if (unlikely(err)) {
+		WL_ERR(("Enable custom IBSS ATIM mode failed (%d)\n", err));
+		return err;
+	}
+#endif /* WLAIBSS_PS */
+#endif /* WLAIBSS */
+
+	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+		join_params_size, true);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+
+	if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
+		scan_suppress = FALSE;
+		/* Reset the SCAN SUPPRESS Flag */
+		err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS,
+			&scan_suppress, sizeof(int), true);
+		if (unlikely(err)) {
+			WL_ERR(("Reset Scan Suppress Flag Failed (%d)\n", err));
+			return err;
+		}
+	}
+	wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+	wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN);
+#ifdef WLAIBSS
+	cfg->aibss_txfail_seq = 0;	/* initialize the sequence */
+#endif /* WLAIBSS */
+	cfg->rmc_event_seq = 0; /* initialize rmcfail sequence */
+	return err;
+}
+
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	scb_val_t scbval;
+	u8 *curbssid;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	wl_link_down(cfg);
+
+	WL_ERR(("Leave IBSS\n"));
+	curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+	wl_set_drv_status(cfg, DISCONNECTING, dev);
+	scbval.val = 0;
+	memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+	err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+		sizeof(scb_val_t), true);
+	if (unlikely(err)) {
+		wl_clr_drv_status(cfg, DISCONNECTING, dev);
+		WL_ERR(("error(%d)\n", err));
+		return err;
+	}
+
+	/* remove the VSIE */
+	wl_cfg80211_ibss_vsie_delete(dev);
+
+	return err;
+}
+
+#ifdef MFP
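+/* Walk the RSN IE past the group cipher, pairwise cipher list and AKM suite
+ * list, then copy out the two RSN Capabilities octets (MFPC/MFPR bits).
+ */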
+static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8* capa)
+{
+	u16 suite_count;
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	u16 len;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+
+	if (!wpa2ie)
+		return -1;
+
+	len = wpa2ie->len;
+	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+	if ((len -= WPA_SUITE_LEN) <= 0)
+		return BCME_BADLEN;
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	suite_count = ltoh16_ua(&ucast->count);
+	if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
+		(len -= (WPA_IE_SUITE_COUNT_LEN +
+		(WPA_SUITE_LEN * suite_count))) <= 0)
+		return BCME_BADLEN;
+
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+	suite_count = ltoh16_ua(&mgmt->count);
+
+	if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
+		(len -= (WPA_IE_SUITE_COUNT_LEN +
+		(WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+		capa[0] = *(u8 *)&mgmt->list[suite_count];
+		capa[1] = *((u8 *)&mgmt->list[suite_count] + 1);
+	} else
+		return BCME_BADLEN;
+
+	return 0;
+}
+#endif /* MFP */
+
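+/* Translate the cfg80211 WPA version bitmap from the connect request into
+ * the firmware "wpa_auth" bitmap and record it in the connection profile.
+ */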
+static s32
+wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+		val = WPA_AUTH_PSK |
+#ifdef BCMCCX
+			WPA_AUTH_CCKM |
+#endif
+			WPA_AUTH_UNSPECIFIED;
+	else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+		val = WPA2_AUTH_PSK|
+#ifdef BCMCCX
+			WPA2_AUTH_CCKM |
+#endif
+			WPA2_AUTH_UNSPECIFIED;
+	else
+		val = WPA_AUTH_DISABLED;
+
+	if (is_wps_conn(sme))
+		val = WPA_AUTH_DISABLED;
+
+#ifdef BCMWAPI_WPI
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+		WL_DBG((" * wl_set_wpa_version, set wpa_auth"
+			" to WPA_AUTH_WAPI 0x400"));
+		val = WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED;
+	}
+#endif
+	WL_DBG(("setting wpa_auth to 0x%0x\n", val));
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set wpa_auth failed (%d)\n", err));
+		return err;
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->wpa_versions = sme->crypto.wpa_versions;
+	return err;
+}
+
+#ifdef BCMWAPI_WPI
+static s32
+wl_set_set_wapi_ie(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	WL_DBG((" %s \n", __FUNCTION__));
+
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+		err = wldev_iovar_setbuf_bsscfg(dev, "wapiie", sme->ie, sme->ie_len,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+		if (unlikely(err)) {
+			WL_ERR(("===> set_wapi_ie Error (%d)\n", err));
+			return err;
+		}
+	} else
+		WL_DBG((" * skip \n"));
+	return err;
+}
+#endif /* BCMWAPI_WPI */
+
+static s32
+wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	switch (sme->auth_type) {
+	case NL80211_AUTHTYPE_OPEN_SYSTEM:
+		val = WL_AUTH_OPEN_SYSTEM;
+		WL_DBG(("open system\n"));
+		break;
+	case NL80211_AUTHTYPE_SHARED_KEY:
+		val = WL_AUTH_SHARED_KEY;
+		WL_DBG(("shared key\n"));
+		break;
+	case NL80211_AUTHTYPE_AUTOMATIC:
+		val = WL_AUTH_OPEN_SHARED;
+		WL_DBG(("automatic\n"));
+		break;
+#ifdef BCMCCX
+	case NL80211_AUTHTYPE_NETWORK_EAP:
+		WL_DBG(("network eap\n"));
+		val = DOT11_LEAP_AUTH;
+		break;
+#endif
+	default:
+		val = 2;
+		WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
+		break;
+	}
+
+	err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set auth failed (%d)\n", err));
+		return err;
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->auth_type = sme->auth_type;
+	return err;
+}
+
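+/* Map the requested pairwise/group cipher suites onto the firmware "wsec"
+ * bitmap (WEP/TKIP/AES); when MFP support is compiled in, MFP flags are
+ * added according to the RSN Capabilities advertised in the supplicant's IE.
+ */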
+static s32
+wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 pval = 0;
+	s32 gval = 0;
+	s32 err = 0;
+	s32 wsec_val = 0;
+#ifdef MFP
+	s32 mfp = 0;
+	bcm_tlv_t *wpa2_ie;
+	u8 rsn_cap[2];
+#endif /* MFP */
+
+#ifdef BCMWAPI_WPI
+	s32 val = 0;
+#endif
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.n_ciphers_pairwise) {
+		switch (sme->crypto.ciphers_pairwise[0]) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			pval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			pval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			pval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WLAN_CIPHER_SUITE_SMS4:
+			val = SMS4_ENABLED;
+			pval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("invalid cipher pairwise (%d)\n",
+				sme->crypto.ciphers_pairwise[0]));
+			return -EINVAL;
+		}
+	}
+#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
+	/* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
+	 * handshake.
+	 * Note that the FW feature flag only exists on kernels that support the
+	 * FT-EAP AKM suite.
+	 */
+	if (cfg->wdev->wiphy->features & NL80211_FEATURE_FW_4WAY_HANDSHAKE) {
+		if (pval == AES_ENABLED)
+			err = wldev_iovar_setint_bsscfg(dev, "sup_wpa", 1, bssidx);
+		else
+			err = wldev_iovar_setint_bsscfg(dev, "sup_wpa", 0, bssidx);
+
+		if (err) {
+			WL_ERR(("FBT: Error setting sup_wpa (%d)\n", err));
+			return err;
+		}
+	}
+#endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
+	if (sme->crypto.cipher_group) {
+		switch (sme->crypto.cipher_group) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			gval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			gval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			gval = AES_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			gval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WLAN_CIPHER_SUITE_SMS4:
+			val = SMS4_ENABLED;
+			gval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("invalid cipher group (%d)\n",
+				sme->crypto.cipher_group));
+			return -EINVAL;
+		}
+	}
+
+	WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+
+	if (is_wps_conn(sme)) {
+		if (sme->privacy)
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx);
+		else
+			/* WPS-2.0 allows no security */
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
+	} else {
+#ifdef BCMWAPI_WPI
+		if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_SMS4) {
+			WL_DBG((" NO, is_wps_conn, WAPI set to SMS4_ENABLED"));
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", val, bssidx);
+		} else {
+#endif
+			WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC"));
+			wsec_val = pval | gval;
+
+#ifdef MFP
+			if (pval == AES_ENABLED) {
+				if (((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+					DOT11_MNG_RSN_ID)) != NULL) &&
+					(wl_cfg80211_get_rsn_capa(wpa2_ie, rsn_cap) == 0)) {
+
+					if (rsn_cap[0] & RSN_CAP_MFPC) {
+						/* MFP Capability advertised by supplicant. Check
+						 * whether MFP is supported in the firmware
+						 */
+						if ((err = wldev_iovar_getint_bsscfg(dev,
+								"mfp", &mfp, bssidx)) < 0) {
+							WL_ERR(("Get MFP failed! "
+								"Check MFP support in FW \n"));
+							return -1;
+						}
+
+						if ((sme->crypto.n_akm_suites == 1) &&
+							((sme->crypto.akm_suites[0] ==
+							WL_AKM_SUITE_MFP_PSK) ||
+							(sme->crypto.akm_suites[0] ==
+							WL_AKM_SUITE_MFP_1X))) {
+							wsec_val |= MFP_SHA256;
+						} else if (sme->crypto.n_akm_suites > 1) {
+							WL_ERR(("Multiple AKM Specified \n"));
+							return -EINVAL;
+						}
+
+						wsec_val |= MFP_CAPABLE;
+						if (rsn_cap[0] & RSN_CAP_MFPR)
+							wsec_val |= MFP_REQUIRED;
+					}
+				}
+			}
+#endif /* MFP */
+			WL_DBG((" Set WSEC to fW 0x%x \n", wsec_val));
+			err = wldev_iovar_setint_bsscfg(dev, "wsec",
+				wsec_val, bssidx);
+#ifdef BCMWAPI_WPI
+		}
+#endif
+	}
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
+	sec->cipher_group = sme->crypto.cipher_group;
+
+	return err;
+}
+
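+/* Refine the "wpa_auth" value according to the AKM suite in the connect
+ * request (802.1X vs PSK, plus FT/CCKM/WAPI/MFP variants where enabled).
+ */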
+static s32
+wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.n_akm_suites) {
+		err = wldev_iovar_getint(dev, "wpa_auth", &val);
+		if (unlikely(err)) {
+			WL_ERR(("could not get wpa_auth (%d)\n", err));
+			return err;
+		}
+		if (val & (WPA_AUTH_PSK |
+#ifdef BCMCCX
+			WPA_AUTH_CCKM |
+#endif
+			WPA_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA_AUTH_UNSPECIFIED;
+				break;
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA_AUTH_PSK;
+				break;
+#ifdef BCMCCX
+			case WLAN_AKM_SUITE_CCKM:
+				val = WPA_AUTH_CCKM;
+				break;
+#endif
+			default:
+				WL_ERR(("invalid cipher group (%d)\n",
+					sme->crypto.cipher_group));
+				return -EINVAL;
+			}
+		} else if (val & (WPA2_AUTH_PSK |
+#ifdef BCMCCX
+			WPA2_AUTH_CCKM |
+#endif
+			WPA2_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA2_AUTH_UNSPECIFIED;
+				break;
+#ifdef MFP
+			case WL_AKM_SUITE_MFP_1X:
+				val = WPA2_AUTH_UNSPECIFIED;
+				break;
+			case WL_AKM_SUITE_MFP_PSK:
+				val = WPA2_AUTH_PSK;
+				break;
+#endif
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA2_AUTH_PSK;
+				break;
+#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_8021X)
+			case WLAN_AKM_SUITE_FT_8021X:
+				val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT;
+				break;
+#endif
+#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_PSK)
+			case WLAN_AKM_SUITE_FT_PSK:
+				val = WPA2_AUTH_PSK | WPA2_AUTH_FT;
+				break;
+#endif
+#ifdef BCMCCX
+			case WLAN_AKM_SUITE_CCKM:
+				val = WPA2_AUTH_CCKM;
+				break;
+#endif
+			default:
+				WL_ERR(("invalid cipher group (%d)\n",
+					sme->crypto.cipher_group));
+				return -EINVAL;
+			}
+		}
+#ifdef BCMWAPI_WPI
+		else if (val & (WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_WAPI_CERT:
+				val = WAPI_AUTH_UNSPECIFIED;
+				break;
+			case WLAN_AKM_SUITE_WAPI_PSK:
+				val = WAPI_AUTH_PSK;
+				break;
+			default:
+				WL_ERR(("invalid cipher group (%d)\n",
+					sme->crypto.cipher_group));
+				return -EINVAL;
+			}
+		}
+#endif
+		WL_DBG(("setting wpa_auth to %d\n", val));
+
+		err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+		if (unlikely(err)) {
+			WL_ERR(("could not set wpa_auth (%d)\n", err));
+			return err;
+		}
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->wpa_auth = sme->crypto.akm_suites[0];
+
+	return err;
+}
+
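+/* Install a WEP (or WAPI) key supplied in the connect request as the primary
+ * key, and switch the firmware to shared-key auth if that was requested.
+ */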
+static s32
+wl_set_set_sharedkey(struct net_device *dev,
+	struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	struct wl_wsec_key key;
+	s32 val;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	WL_DBG(("key len (%d)\n", sme->key_len));
+	if (sme->key_len) {
+		sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+		WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+			sec->wpa_versions, sec->cipher_pairwise));
+		if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
+#ifdef BCMWAPI_WPI
+			NL80211_WPA_VERSION_2 | NL80211_WAPI_VERSION_1)) &&
+#else
+			NL80211_WPA_VERSION_2)) &&
+#endif
+			(sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
+#ifdef BCMWAPI_WPI
+		WLAN_CIPHER_SUITE_WEP104 | WLAN_CIPHER_SUITE_SMS4)))
+#else
+		WLAN_CIPHER_SUITE_WEP104)))
+#endif
+		{
+			memset(&key, 0, sizeof(key));
+			key.len = (u32) sme->key_len;
+			key.index = (u32) sme->key_idx;
+			if (unlikely(key.len > sizeof(key.data))) {
+				WL_ERR(("Too long key length (%u)\n", key.len));
+				return -EINVAL;
+			}
+			memcpy(key.data, sme->key, key.len);
+			key.flags = WL_PRIMARY_KEY;
+			switch (sec->cipher_pairwise) {
+			case WLAN_CIPHER_SUITE_WEP40:
+				key.algo = CRYPTO_ALGO_WEP1;
+				break;
+			case WLAN_CIPHER_SUITE_WEP104:
+				key.algo = CRYPTO_ALGO_WEP128;
+				break;
+#ifdef BCMWAPI_WPI
+			case WLAN_CIPHER_SUITE_SMS4:
+				key.algo = CRYPTO_ALGO_SMS4;
+				break;
+#endif
+			default:
+				WL_ERR(("Invalid algorithm (%d)\n",
+					sme->crypto.ciphers_pairwise[0]));
+				return -EINVAL;
+			}
+			/* Set the new key/index */
+			WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
+				key.len, key.index, key.algo));
+			WL_DBG(("key \"%s\"\n", key.data));
+			swap_key_from_BE(&key);
+			err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+			if (unlikely(err)) {
+				WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+				return err;
+			}
+			if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
+				WL_DBG(("set auth_type to shared key\n"));
+				val = WL_AUTH_SHARED_KEY;	/* shared key */
+				err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+				if (unlikely(err)) {
+					WL_ERR(("set auth failed (%d)\n", err));
+					return err;
+				}
+			}
+		}
+	}
+	return err;
+}
+
+#if defined(ESCAN_RESULT_PATCH)
+static u8 connect_req_bssid[6];
+static u8 broad_bssid[6];
+#endif /* ESCAN_RESULT_PATCH */
+
+
+
+#if defined(CUSTOM_SET_CPUCORE) || defined(CONFIG_TCPACK_FASTTX)
+static bool wl_get_chan_isvht80(struct net_device *net, dhd_pub_t *dhd)
+{
+	u32 chanspec = 0;
+	bool isvht80 = 0;
+
+	if (wldev_iovar_getint(net, "chanspec", (s32 *)&chanspec) == BCME_OK)
+		chanspec = wl_chspec_driver_to_host(chanspec);
+
+	isvht80 = chanspec & WL_CHANSPEC_BW_80;
+	WL_INFO(("%s: chanspec(%x:%d)\n", __FUNCTION__, chanspec, isvht80));
+
+	return isvht80;
+}
+#endif /* CUSTOM_SET_CPUCORE || CONFIG_TCPACK_FASTTX */
+
+static s32
+wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct ieee80211_channel *chan = sme->channel;
+	wl_extjoin_params_t *ext_join_params;
+	struct wl_join_params join_params;
+	size_t join_params_size;
+	s32 err = 0;
+	wpa_ie_fixed_t *wpa_ie;
+	bcm_tlv_t *wpa2_ie;
+	u8* wpaie  = 0;
+	u32 wpaie_len = 0;
+	u32 chan_cnt = 0;
+	struct ether_addr bssid;
+	s32 bssidx;
+	int ret;
+	int wait_cnt;
+
+	WL_DBG(("In\n"));
+
+	if (unlikely(!sme->ssid)) {
+		WL_ERR(("Invalid ssid\n"));
+		return -EOPNOTSUPP;
+	}
+
+	if (unlikely(sme->ssid_len > DOT11_MAX_SSID_LEN)) {
+		WL_ERR(("Invalid SSID info: SSID=%s, length=%zd\n",
+			sme->ssid, sme->ssid_len));
+		return -EINVAL;
+	}
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	/*
+	 * Cancel ongoing scan to sync up with sme state machine of cfg80211.
+	 */
+#if (!defined(ESCAN_RESULT_PATCH) || defined(CUSTOMER_HW10))
+	if (cfg->scan_request) {
+		wl_notify_escan_complete(cfg, dev, true, true);
+	}
+#endif
+#ifdef WL_SCHED_SCAN
+	if (cfg->sched_scan_req) {
+		wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg));
+	}
+#endif
+#if defined(ESCAN_RESULT_PATCH)
+	if (sme->bssid)
+		memcpy(connect_req_bssid, sme->bssid, ETHER_ADDR_LEN);
+	else
+		bzero(connect_req_bssid, ETHER_ADDR_LEN);
+	bzero(broad_bssid, ETHER_ADDR_LEN);
+#endif
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+	maxrxpktglom = 0;
+#endif
+	bzero(&bssid, sizeof(bssid));
+	if (!wl_get_drv_status(cfg, CONNECTED, dev)&&
+		(ret = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false)) == 0) {
+		if (!ETHER_ISNULLADDR(&bssid)) {
+			scb_val_t scbval;
+			wl_set_drv_status(cfg, DISCONNECTING, dev);
+			scbval.val = DOT11_RC_DISASSOC_LEAVING;
+			memcpy(&scbval.ea, &bssid, ETHER_ADDR_LEN);
+			scbval.val = htod32(scbval.val);
+
+			WL_DBG(("drv status CONNECTED is not set, but connected in FW!" MACDBG "/n",
+				MAC2STRDBG(bssid.octet)));
+			err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+				sizeof(scb_val_t), true);
+			if (unlikely(err)) {
+				wl_clr_drv_status(cfg, DISCONNECTING, dev);
+				WL_ERR(("error (%d)\n", err));
+				return err;
+			}
+			wait_cnt = 500/10;
+			while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
+				WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n",
+					wait_cnt));
+				wait_cnt--;
+				OSL_SLEEP(10);
+			}
+		} else
+			WL_DBG(("Currently not associated!\n"));
+	} else {
+		/* if status is DISCONNECTING, wait for disconnection terminated max 500 ms */
+		wait_cnt = 500/10;
+		while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
+			WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n", wait_cnt));
+			wait_cnt--;
+			OSL_SLEEP(10);
+		}
+	}
+
+	/* Clean BSSID */
+	bzero(&bssid, sizeof(bssid));
+	if (!wl_get_drv_status(cfg, DISCONNECTING, dev))
+		wl_update_prof(cfg, dev, NULL, (void *)&bssid, WL_PROF_BSSID);
+
+	if (p2p_is_on(cfg) && (dev != bcmcfg_to_prmry_ndev(cfg))) {
+		/* we only allow connecting via a virtual interface in the case of P2P */
+		if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			return BCME_ERROR;
+		}
+		wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+			VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+	} else if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		/* find the RSN_IE */
+		if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+			DOT11_MNG_RSN_ID)) != NULL) {
+			WL_DBG((" WPA2 IE is found\n"));
+		}
+		/* find the WPA_IE */
+		if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie,
+			sme->ie_len)) != NULL) {
+			WL_DBG((" WPA IE is found\n"));
+		}
+		if (wpa_ie != NULL || wpa2_ie != NULL) {
+			wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie;
+			wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
+			wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
+			wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		} else {
+			wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		}
+
+		if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			return BCME_ERROR;
+		}
+		err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+			VNDR_IE_ASSOCREQ_FLAG, (u8 *)sme->ie, sme->ie_len);
+		if (unlikely(err)) {
+			return err;
+		}
+	}
+	if (chan) {
+		cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+		chan_cnt = 1;
+		WL_DBG(("channel (%d), center_req (%d), %d channels\n", cfg->channel,
+			chan->center_freq, chan_cnt));
+	} else
+		cfg->channel = 0;
+#ifdef BCMWAPI_WPI
+	WL_DBG(("1. enable wapi auth\n"));
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+		WL_DBG(("2. set wapi ie  \n"));
+		err = wl_set_set_wapi_ie(dev, sme);
+		if (unlikely(err))
+			return err;
+	} else
+		WL_DBG(("2. Not wapi ie  \n"));
+#endif
+	WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
+	WL_DBG(("3. set wapi version \n"));
+	err = wl_set_wpa_version(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid wpa_version\n"));
+		return err;
+	}
+#ifdef BCMWAPI_WPI
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1)
+		WL_DBG(("4. WAPI Dont Set wl_set_auth_type\n"));
+	else {
+		WL_DBG(("4. wl_set_auth_type\n"));
+#endif
+		err = wl_set_auth_type(dev, sme);
+		if (unlikely(err)) {
+			WL_ERR(("Invalid auth type\n"));
+			return err;
+		}
+#ifdef BCMWAPI_WPI
+	}
+#endif
+
+	err = wl_set_set_cipher(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid ciper\n"));
+		return err;
+	}
+
+	err = wl_set_key_mgmt(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid key mgmt\n"));
+		return err;
+	}
+
+	err = wl_set_set_sharedkey(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid shared key\n"));
+		return err;
+	}
+
+	/*
+	 *  Join with specific BSSID and cached SSID
+	 *  If SSID is zero join based on BSSID only
+	 */
+	join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+		chan_cnt * sizeof(chanspec_t);
+	ext_join_params =  (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
+	if (ext_join_params == NULL) {
+		err = -ENOMEM;
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+		goto exit;
+	}
+	ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
+	memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
+	wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
+	ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
+	/* increase dwell time to receive probe responses or detect beacons
+	 * from the target AP in noisy air, only during the connect command
+	 */
+	ext_join_params->scan.active_time = chan_cnt ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS : -1;
+	ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
+	/* Set up join scan parameters */
+	ext_join_params->scan.scan_type = -1;
+	ext_join_params->scan.nprobes = chan_cnt ?
+		(ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1;
+	ext_join_params->scan.home_time = -1;
+
+	if (sme->bssid)
+		memcpy(&ext_join_params->assoc.bssid, sme->bssid, ETH_ALEN);
+	else
+		memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
+	ext_join_params->assoc.chanspec_num = chan_cnt;
+	if (chan_cnt) {
+		u16 channel, band, bw, ctl_sb;
+		chanspec_t chspec;
+		channel = cfg->channel;
+		band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+			: WL_CHANSPEC_BAND_5G;
+		bw = WL_CHANSPEC_BW_20;
+		ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+		chspec = (channel | band | bw | ctl_sb);
+		ext_join_params->assoc.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+		ext_join_params->assoc.chanspec_list[0] |= chspec;
+		ext_join_params->assoc.chanspec_list[0] =
+			wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]);
+	}
+	ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+	if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		WL_INFORM(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+			ext_join_params->ssid.SSID_len));
+	}
+	wl_set_drv_status(cfg, CONNECTING, dev);
+
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	WL_ERR(("Connectting with" MACDBG " channel (%d) ssid \"%s\", len (%d)\n\n",
+		MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), cfg->channel,
+		ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len));
+
+	kfree(ext_join_params);
+	if (err) {
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+		if (err == BCME_UNSUPPORTED) {
+			WL_DBG(("join iovar is not supported\n"));
+			goto set_ssid;
+		} else {
+			WL_ERR(("error (%d)\n", err));
+			goto exit;
+		}
+	} else
+		goto exit;
+
+set_ssid:
+	memset(&join_params, 0, sizeof(join_params));
+	join_params_size = sizeof(join_params.ssid);
+
+	join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+	memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
+	join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+	wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+	if (sme->bssid)
+		memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN);
+	else
+		memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
+
+	wl_ch_to_chanspec(cfg->channel, &join_params, &join_params_size);
+	WL_DBG(("join_param_size %zu\n", join_params_size));
+
+	if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		WL_INFORM(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+			join_params.ssid.SSID_len));
+	}
+	wl_set_drv_status(cfg, CONNECTING, dev);
+	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true);
+	if (err) {
+		WL_ERR(("error (%d)\n", err));
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+	}
+exit:
+	return err;
+}
+
+static s32
+wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scbval;
+	bool act = false;
+	s32 err = 0;
+	u8 *curbssid;
+#ifdef CUSTOM_SET_CPUCORE
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
+	WL_ERR(("Reason %d\n", reason_code));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	act = *(bool *) wl_read_prof(cfg, dev, WL_PROF_ACT);
+	curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+	if (act) {
+		/*
+		* Cancel ongoing scan to sync up with sme state machine of cfg80211.
+		*/
+#if (!defined(ESCAN_RESULT_PATCH) || defined(CUSTOMER_HW10))
+		/* Let scan aborted by F/W */
+		if (cfg->scan_request) {
+			wl_notify_escan_complete(cfg, dev, true, true);
+		}
+#endif /* ESCAN_RESULT_PATCH */
+		wl_set_drv_status(cfg, DISCONNECTING, dev);
+		scbval.val = reason_code;
+		memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+		scbval.val = htod32(scbval.val);
+		err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+			sizeof(scb_val_t), true);
+		if (unlikely(err)) {
+			wl_clr_drv_status(cfg, DISCONNECTING, dev);
+			WL_ERR(("error (%d)\n", err));
+			return err;
+		}
+	}
+#ifdef CUSTOM_SET_CPUCORE
+	/* set default cpucore */
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dhd->chan_isvht80 &= ~DHD_FLAG_STA_MODE;
+		if (!(dhd->chan_isvht80))
+			dhd_set_cpucore(dhd, FALSE);
+	}
+#endif /* CUSTOM_SET_CPUCORE */
+
+	return err;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+	enum nl80211_tx_power_setting type, s32 mbm)
+#else
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+	enum nl80211_tx_power_setting type, s32 dbm)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	s32 dbm = MBM_TO_DBM(mbm);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || \
+	defined(WL_COMPAT_WIRELESS) || defined(WL_SUPPORT_BACKPORTED_KPATCHES)
+	dbm = MBM_TO_DBM(dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	switch (type) {
+	case NL80211_TX_POWER_AUTOMATIC:
+		break;
+	case NL80211_TX_POWER_LIMITED:
+		if (dbm < 0) {
+			WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n"));
+			return -EINVAL;
+		}
+		break;
+	case NL80211_TX_POWER_FIXED:
+		if (dbm < 0) {
+			WL_ERR(("TX_POWER_FIXED - dbm is negative..\n"));
+			return -EINVAL;
+		}
+		break;
+	}
+
+	err = wl_set_tx_power(ndev, type, dbm);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	cfg->conf->tx_power = dbm;
+
+	return err;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+	struct wireless_dev *wdev, s32 *dbm)
+#else
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	err = wl_get_tx_power(ndev, dbm);
+	if (unlikely(err))
+		WL_ERR(("error (%d)\n", err));
+
+	return err;
+}
+
+static s32
+wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool unicast, bool multicast)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	u32 index;
+	s32 wsec;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		return err;
+	}
+	if (wsec == WEP_ENABLED) {
+		/* Just select a new current key */
+		index = (u32) key_idx;
+		index = htod32(index);
+		err = wldev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index,
+			sizeof(index), true);
+		if (unlikely(err)) {
+			WL_ERR(("error (%d)\n", err));
+		}
+	}
+	return err;
+}
+
+static s32
+wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, const u8 *mac_addr, struct key_params *params)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_wsec_key key;
+	s32 err = 0;
+	s32 bssidx;
+	s32 mode = wl_get_mode_by_netdev(cfg, dev);
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	memset(&key, 0, sizeof(key));
+	key.index = (u32) key_idx;
+
+	if (!ETHER_ISMULTI(mac_addr))
+		memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN);
+	key.len = (u32) params->key_len;
+
+	/* check for key index change */
+	if (key.len == 0) {
+		/* key delete */
+		swap_key_from_BE(&key);
+		err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+		if (unlikely(err)) {
+			WL_ERR(("key delete error (%d)\n", err));
+			return err;
+		}
+	} else {
+		if (key.len > sizeof(key.data)) {
+			WL_ERR(("Invalid key length (%d)\n", key.len));
+			return -EINVAL;
+		}
+		WL_DBG(("Setting the key index %d\n", key.index));
+		memcpy(key.data, params->key, key.len);
+
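+		/* wpa_supplicant switches the third and fourth quarters of the
+		 * TKIP key (TX/RX MIC keys); swap them back into firmware order.
+		 */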
+		if ((mode == WL_MODE_BSS) &&
+			(params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+			u8 keybuf[8];
+			memcpy(keybuf, &key.data[24], sizeof(keybuf));
+			memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+			memcpy(&key.data[16], keybuf, sizeof(keybuf));
+		}
+
+		/* if IW_ENCODE_EXT_RX_SEQ_VALID set */
+		if (params->seq && params->seq_len == 6) {
+			/* rx iv */
+			u8 *ivptr;
+			ivptr = (u8 *) params->seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+				(ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = true;
+		}
+
+		switch (params->cipher) {
+		case WLAN_CIPHER_SUITE_WEP40:
+			key.algo = CRYPTO_ALGO_WEP1;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			break;
+		case WLAN_CIPHER_SUITE_WEP104:
+			key.algo = CRYPTO_ALGO_WEP128;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			key.algo = CRYPTO_ALGO_TKIP;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+			break;
+#ifdef BCMWAPI_WPI
+		case WLAN_CIPHER_SUITE_SMS4:
+			key.algo = CRYPTO_ALGO_SMS4;
+			WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+			break;
+#endif
+		default:
+			WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+			return -EINVAL;
+		}
+		swap_key_from_BE(&key);
+		/* need to guarantee EAPOL 4/4 send out before set key */
+		dhd_wait_pend8021x(dev);
+		err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+			return err;
+		}
+	}
+	return err;
+}
+
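+/* Enable or disable firmware roam offload on the primary interface; the
+ * association/roam event mask is updated so the host only receives those
+ * events while offload is disabled.
+ */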
+int
+wl_cfg80211_enable_roam_offload(struct net_device *dev, bool enable)
+{
+	int err;
+	wl_eventmsg_buf_t ev_buf;
+
+	if (dev != bcmcfg_to_prmry_ndev(g_bcm_cfg)) {
+		/* roam offload is only for the primary device */
+		return -1;
+	}
+	err = wldev_iovar_setint(dev, "roam_offload", (int)enable);
+	if (err)
+		return err;
+
+	bzero(&ev_buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable);
+	err = wl_cfg80211_apply_eventbuffer(dev, g_bcm_cfg, &ev_buf);
+	if (!err) {
+		g_bcm_cfg->roam_offload = enable;
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	struct key_params *params)
+{
+	struct wl_wsec_key key;
+	s32 val = 0;
+	s32 wsec = 0;
+	s32 err = 0;
+	u8 keybuf[8];
+	s32 bssidx = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 mode = wl_get_mode_by_netdev(cfg, dev);
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (mac_addr &&
+		((params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+		(params->cipher != WLAN_CIPHER_SUITE_WEP104))) {
+			wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
+			goto exit;
+	}
+	memset(&key, 0, sizeof(key));
+
+	key.len = (u32) params->key_len;
+	key.index = (u32) key_idx;
+
+	if (unlikely(key.len > sizeof(key.data))) {
+		WL_ERR(("Too long key length (%u)\n", key.len));
+		return -EINVAL;
+	}
+	memcpy(key.data, params->key, key.len);
+
+	key.flags = WL_PRIMARY_KEY;
+	switch (params->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		key.algo = CRYPTO_ALGO_WEP1;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		key.algo = CRYPTO_ALGO_WEP128;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		key.algo = CRYPTO_ALGO_TKIP;
+		val = TKIP_ENABLED;
+		/* wpa_supplicant switches the third and fourth quarters of the TKIP key */
+		if (mode == WL_MODE_BSS) {
+			bcopy(&key.data[24], keybuf, sizeof(keybuf));
+			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+			bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		}
+		WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+		break;
+#ifdef BCMWAPI_WPI
+	case WLAN_CIPHER_SUITE_SMS4:
+		key.algo = CRYPTO_ALGO_SMS4;
+		WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+		val = SMS4_ENABLED;
+		break;
+#endif /* BCMWAPI_WPI */
+#if defined(WLFBT) && defined(WLAN_CIPHER_SUITE_PMK)
+	case WLAN_CIPHER_SUITE_PMK: {
+		int j;
+		wsec_pmk_t pmk;
+		char keystring[WSEC_MAX_PSK_LEN + 1];
+		char* charptr = keystring;
+		uint len;
+		struct wl_security *sec;
+
+		sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+		if (sec->wpa_auth == WLAN_AKM_SUITE_8021X) {
+			err = wldev_iovar_setbuf(dev, "okc_info_pmk", params->key,
+				WSEC_MAX_PSK_LEN / 2, keystring, sizeof(keystring), NULL);
+			if (err) {
+				/* could fail in case that 'okc' is not supported */
+				WL_INFORM(("Setting 'okc_info_pmk' failed, err=%d\n", err));
+			}
+		}
+		/* copy the raw hex key to the appropriate format */
+		for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
+			sprintf(charptr, "%02x", params->key[j]);
+			charptr += 2;
+		}
+		len = strlen(keystring);
+		pmk.key_len = htod16(len);
+		bcopy(keystring, pmk.key, len);
+		pmk.flags = htod16(WSEC_PASSPHRASE);
+
+		err = wldev_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk), true);
+		if (err)
+			return err;
+	} break;
+#endif /* WLFBT && WLAN_CIPHER_SUITE_PMK */
+	default:
+		WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+		return -EINVAL;
+	}
+
+	/* Set the new key/index */
+	if ((mode == WL_MODE_IBSS) && (val & (TKIP_ENABLED | AES_ENABLED))) {
+		WL_ERR(("IBSS KEY setted\n"));
+		wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_NONE);
+	}
+	swap_key_from_BE(&key);
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+		WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		return err;
+	}
+
+exit:
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("get wsec error (%d)\n", err));
+		return err;
+	}
+
+	wsec |= val;
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set wsec error (%d)\n", err));
+		return err;
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+	struct wl_wsec_key key;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	WL_DBG(("Enter\n"));
+
+#ifndef IEEE80211W
+	if ((key_idx >= DOT11_MAX_DEFAULT_KEYS) && (key_idx < DOT11_MAX_DEFAULT_KEYS+2))
+		return -EINVAL;
+#endif
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(&key, 0, sizeof(key));
+
+	key.flags = WL_PRIMARY_KEY;
+	key.algo = CRYPTO_ALGO_OFF;
+	key.index = (u32) key_idx;
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	/* Set the new key/index */
+	swap_key_from_BE(&key);
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+		WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		if (err == -EINVAL) {
+			if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
+				/* we ignore this key index in this case */
+				WL_DBG(("invalid key index (%d)\n", key_idx));
+			}
+		} else {
+			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		}
+		return err;
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+	void (*callback) (void *cookie, struct key_params * params))
+{
+	struct key_params params;
+	struct wl_wsec_key key;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_security *sec;
+	s32 wsec;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
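+	/* Note: the key material is never retrieved from the firmware here; the
+	 * local key struct stays zeroed apart from the index, so only the cipher
+	 * in use is reported back to cfg80211 below.
+	 */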
+	memset(&key, 0, sizeof(key));
+	key.index = key_idx;
+	swap_key_to_BE(&key);
+	memset(&params, 0, sizeof(params));
+	params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
+	memcpy(params.key, key.data, params.key_len);
+
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		return err;
+	}
+	switch (WSEC_ENABLED(wsec)) {
+		case WEP_ENABLED:
+			sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+			if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+				params.cipher = WLAN_CIPHER_SUITE_WEP40;
+				WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			} else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+				params.cipher = WLAN_CIPHER_SUITE_WEP104;
+				WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			}
+			break;
+		case TKIP_ENABLED:
+			params.cipher = WLAN_CIPHER_SUITE_TKIP;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+		case AES_ENABLED:
+			params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+			WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+			break;
+#ifdef BCMWAPI_WPI
+		case WLAN_CIPHER_SUITE_SMS4:
+			key.algo = CRYPTO_ALGO_SMS4;
+			WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+			break;
+#endif
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+		/* to connect to mixed mode AP */
+		case (AES_ENABLED | TKIP_ENABLED): /* TKIP CCMP */
+			params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+#endif
+		default:
+			WL_ERR(("Invalid algo (0x%x)\n", wsec));
+			return -EINVAL;
+	}
+
+	callback(cookie, &params);
+	return err;
+}
+
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev, u8 key_idx)
+{
+	WL_INFORM(("Not supported\n"));
+	return -EOPNOTSUPP;
+}
+
+static s32
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+	u8 *mac, struct station_info *sinfo)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scb_val;
+	s32 rssi;
+	s32 rate;
+	s32 err = 0;
+	sta_info_t *sta;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS)
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+#endif
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	RETURN_EIO_IF_NOT_UP(cfg);
+	if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+		err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac,
+			ETHER_ADDR_LEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+		if (err < 0) {
+			WL_ERR(("GET STA INFO failed, %d\n", err));
+			return err;
+		}
+		sinfo->filled = STATION_INFO_INACTIVE_TIME;
+		sta = (sta_info_t *)cfg->ioctl_buf;
+		sta->len = dtoh16(sta->len);
+		sta->cap = dtoh16(sta->cap);
+		sta->flags = dtoh32(sta->flags);
+		sta->idle = dtoh32(sta->idle);
+		sta->in = dtoh32(sta->in);
+		sinfo->inactive_time = sta->idle * 1000;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS)
+		if (sta->flags & WL_STA_ASSOC) {
+			sinfo->filled |= STATION_INFO_CONNECTED_TIME;
+			sinfo->connected_time = sta->in;
+		}
+		WL_INFORM(("STA %s : idle time : %d sec, connected time :%d ms\n",
+			bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time,
+			sta->idle * 1000));
+#endif
+	} else if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS ||
+		wl_get_mode_by_netdev(cfg, dev) == WL_MODE_IBSS) {
+		get_pktcnt_t pktcnt;
+		u8 *curmacp;
+
+		if (cfg->roam_offload) {
+			struct ether_addr bssid;
+			err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+			if (err) {
+				WL_ERR(("Failed to get current BSSID\n"));
+			} else {
+				if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
+					/* roaming is detected */
+					err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
+					if (err)
+						WL_ERR(("Failed to handle the delayed roam, "
+							"err=%d", err));
+					mac = (u8 *)bssid.octet;
+				}
+			}
+		}
+		if (!wl_get_drv_status(cfg, CONNECTED, dev) ||
+			(dhd_is_associated(dhd, NULL, &err) == FALSE)) {
+			WL_ERR(("NOT assoc\n"));
+			if (err == -ERESTARTSYS)
+				return err;
+			err = -ENODEV;
+			return err;
+		}
+		curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+		if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
+			WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
+				MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
+		}
+
+		/* Report the current tx rate */
+		err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false);
+		if (err) {
+			WL_ERR(("Could not get rate (%d)\n", err));
+		} else {
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+			int rxpktglom;
+#endif
+			rate = dtoh32(rate);
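+			/* WLC_GET_RATE reports the rate in 500 kbps units; cfg80211's
+			 * legacy bitrate field is in 100 kbps units, hence the * 5 below.
+			 */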
+			sinfo->filled |= STATION_INFO_TX_BITRATE;
+			sinfo->txrate.legacy = rate * 5;
+			WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+			rxpktglom = ((rate/2) > 150) ? 20 : 10;
+
+			if (maxrxpktglom != rxpktglom) {
+				maxrxpktglom = rxpktglom;
+				WL_DBG(("Rate %d Mbps, update bus:maxtxpktglom=%d\n", (rate/2),
+					maxrxpktglom));
+				err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
+					(char*)&maxrxpktglom, 4, cfg->ioctl_buf,
+					WLC_IOCTL_MAXLEN, NULL);
+				if (err < 0) {
+					WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
+				}
+			}
+#endif
+		}
+
+		memset(&scb_val, 0, sizeof(scb_val));
+		scb_val.val = 0;
+		err = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val,
+			sizeof(scb_val_t), false);
+		if (err) {
+			WL_ERR(("Could not get rssi (%d)\n", err));
+			goto get_station_err;
+		}
+		rssi = wl_rssi_offset(dtoh32(scb_val.val));
+		sinfo->filled |= STATION_INFO_SIGNAL;
+		sinfo->signal = rssi;
+		WL_DBG(("RSSI %d dBm\n", rssi));
+		err = wldev_ioctl(dev, WLC_GET_PKTCNTS, &pktcnt,
+			sizeof(pktcnt), false);
+		if (!err) {
+			sinfo->filled |= (STATION_INFO_RX_PACKETS |
+				STATION_INFO_RX_DROP_MISC |
+				STATION_INFO_TX_PACKETS |
+				STATION_INFO_TX_FAILED);
+			sinfo->rx_packets = pktcnt.rx_good_pkt;
+			sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
+			sinfo->tx_packets = pktcnt.tx_good_pkt;
+			sinfo->tx_failed  = pktcnt.tx_bad_pkt;
+		}
+get_station_err:
+		if (err && (err != -ERESTARTSYS)) {
+			/* Disconnect due to zero BSSID or failure to get RSSI */
+			WL_ERR(("force cfg80211_disconnected: %d\n", err));
+			wl_clr_drv_status(cfg, CONNECTED, dev);
+			cfg80211_disconnected(dev, 0, NULL, 0, GFP_KERNEL);
+			wl_link_down(cfg);
+		}
+	}
+	else {
+		WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+	bool enabled, s32 timeout)
+{
+	s32 pm;
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_info *_net_info = wl_get_netinfo_by_netdev(cfg, dev);
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_DBG(("Enter\n"));
+	if (cfg->p2p_net == dev || _net_info == NULL || cfg->vsdb_mode ||
+		!wl_get_drv_status(cfg, CONNECTED, dev)) {
+		return err;
+	}
+
+	/* Delete pm_enable_work */
+	wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_PEND);
+
+	pm = enabled ? PM_FAST : PM_OFF;
+	if (_net_info->pm_block) {
+		WL_ERR(("%s:Do not enable the power save for pm_block %d\n",
+			dev->name, _net_info->pm_block));
+		pm = PM_OFF;
+	}
+	pm = htod32(pm);
+	WL_DBG(("%s:power save %s\n", dev->name, (pm ? "enabled" : "disabled")));
+	err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true);
+	if (unlikely(err)) {
+		if (err == -ENODEV)
+			WL_DBG(("net_device is not ready yet\n"));
+		else
+			WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+	wl_cfg80211_update_power_mode(dev);
+	return err;
+}
+
+void wl_cfg80211_update_power_mode(struct net_device *dev)
+{
+	int err, pm = -1;
+
+	err = wldev_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), true);
+	if (err)
+		WL_ERR(("%s:error (%d)\n", __FUNCTION__, err));
+	else if (pm != -1 && dev->ieee80211_ptr)
+		dev->ieee80211_ptr->ps = (pm == PM_OFF) ? false : true;
+}
+
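+/* Return the 1-based position of the most significant set bit in a 16-bit
+ * value (0 when no bit is set).
+ */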
+static __used u32 wl_find_msb(u16 bit16)
+{
+	u32 ret = 0;
+
+	if (bit16 & 0xff00) {
+		ret += 8;
+		bit16 >>= 8;
+	}
+
+	if (bit16 & 0xf0) {
+		ret += 4;
+		bit16 >>= 4;
+	}
+
+	if (bit16 & 0xc) {
+		ret += 2;
+		bit16 >>= 2;
+	}
+
+	if (bit16 & 2)
+		ret += bit16 & 2;
+	else if (bit16)
+		ret += bit16;
+
+	return ret;
+}
+
+static s32 wl_cfg80211_resume(struct wiphy *wiphy)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+		WL_INFORM(("device is not ready\n"));
+		return 0;
+	}
+
+	return err;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
+#endif
+{
+#ifdef DHD_CLEAR_ON_SUSPEND
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_info *iter, *next;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	unsigned long flags;
+	if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+		WL_INFORM(("device is not ready : status (%d)\n",
+			(int)cfg->status));
+		return 0;
+	}
+	for_each_ndev(cfg, iter, next)
+		wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+		cfg80211_scan_done(cfg->scan_request, true);
+		cfg->scan_request = NULL;
+	}
+	for_each_ndev(cfg, iter, next) {
+		wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+		wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	for_each_ndev(cfg, iter, next) {
+		if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
+			wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false);
+		}
+	}
+#endif /* DHD_CLEAR_ON_SUSPEND */
+	return 0;
+}
+
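+/* Push the driver's cached PMKID list to the firmware via the "pmkid_info"
+ * iovar. Only the primary (STA) interface is handled; requests on virtual
+ * interfaces are ignored.
+ */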
+static s32
+wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
+	s32 err)
+{
+	int i, j;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+
+	if (!pmk_list) {
+		printf("pmk_list is NULL\n");
+		return -EINVAL;
+	}
+	/* pmk list is supported only for STA interface i.e. primary interface
+	 * Refer code wlc_bsscfg.c->wlc_bsscfg_sta_init
+	 */
+	if (primary_dev != dev) {
+		WL_INFORM(("Not supporting Flushing pmklist on virtual"
+			" interfaces than primary interface\n"));
+		return err;
+	}
+
+	WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
+	for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+		WL_DBG(("PMKID[%d]: %pM =\n", i,
+			&pmk_list->pmkids.pmkid[i].BSSID));
+		for (j = 0; j < WPA2_PMKID_LEN; j++) {
+			WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+		}
+	}
+	if (likely(!err)) {
+		err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
+			sizeof(*pmk_list), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	int i;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
+		if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+			ETHER_ADDR_LEN))
+			break;
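+	/* 'i' now points at the matching cache entry, or equals npmkid (the first
+	 * free slot) when no entry matched.
+	 */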
+	if (i < WL_NUM_PMKIDS_MAX) {
+		memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
+			ETHER_ADDR_LEN);
+		memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
+			WPA2_PMKID_LEN);
+		if (i == cfg->pmk_list->pmkids.npmkid)
+			cfg->pmk_list->pmkids.npmkid++;
+	} else {
+		err = -EINVAL;
+	}
+	WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+		&cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].BSSID));
+	for (i = 0; i < WPA2_PMKID_LEN; i++) {
+		WL_DBG(("%02x\n",
+			cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].
+			PMKID[i]));
+	}
+
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+
+	return err;
+}
+
+static s32
+wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct _pmkid_list pmkid = {0};
+	s32 err = 0;
+	int i;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
+	memcpy(pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
+
+	WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+		&pmkid.pmkid[0].BSSID));
+	for (i = 0; i < WPA2_PMKID_LEN; i++) {
+		WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
+	}
+
+	for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
+		if (!memcmp
+		    (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+		     ETHER_ADDR_LEN))
+			break;
+
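+	/* If a matching entry was found, clear it and compact the list by shifting
+	 * the remaining entries down by one.
+	 */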
+	if ((cfg->pmk_list->pmkids.npmkid > 0) &&
+		(i < cfg->pmk_list->pmkids.npmkid)) {
+		memset(&cfg->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
+		for (; i < (cfg->pmk_list->pmkids.npmkid - 1); i++) {
+			memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
+				&cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
+				ETHER_ADDR_LEN);
+			memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
+				&cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
+				WPA2_PMKID_LEN);
+		}
+		cfg->pmk_list->pmkids.npmkid--;
+	} else {
+		err = -EINVAL;
+	}
+
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+
+	return err;
+
+}
+
+static s32
+wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+	return err;
+
+}
+
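+/* Allocate and initialize wl_scan_params_t for an active scan of at most one
+ * channel and no SSIDs. The caller owns the returned buffer and must kfree()
+ * it; *out_params_size receives the allocated size.
+ */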
+static wl_scan_params_t *
+wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size)
+{
+	wl_scan_params_t *params;
+	int params_size;
+	int num_chans;
+
+	*out_params_size = 0;
+
+	/* Our scan params only need space for 1 channel and 0 ssids */
+	params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+	params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
+		return params;
+	}
+	memset(params, 0, params_size);
+	params->nprobes = nprobes;
+
+	num_chans = (channel == 0) ? 0 : 1;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = DOT11_SCANTYPE_ACTIVE;
+	params->nprobes = htod32(1);
+	params->active_time = htod32(-1);
+	params->passive_time = htod32(-1);
+	params->home_time = htod32(10);
+	if (channel == -1)
+		params->channel_list[0] = htodchanspec(channel);
+	else
+		params->channel_list[0] = wl_ch_host_to_driver(channel);
+
+	/* Our scan params have 1 channel and 0 ssids */
+	params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+		(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	*out_params_size = params_size;	/* rtn size to the caller */
+	return params;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel *channel, unsigned int duration, u64 *cookie)
+#else
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel * channel,
+	enum nl80211_channel_type channel_type,
+	unsigned int duration, u64 *cookie)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	s32 target_channel;
+	u32 id;
+	s32 err = BCME_OK;
+	struct ether_addr primary_mac;
+	struct net_device *ndev = NULL;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	WL_DBG(("Enter, channel: %d, duration ms (%d) SCANNING ?? %s \n",
+		ieee80211_frequency_to_channel(channel->center_freq),
+		duration, (wl_get_drv_status(cfg, SCANNING, ndev)) ? "YES":"NO"));
+
+	if (!cfg->p2p) {
+		WL_ERR(("cfg->p2p is not initialized\n"));
+		err = BCME_ERROR;
+		goto exit;
+	}
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+	target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+	memcpy(&cfg->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+#if defined(WL_ENABLE_P2P_IF)
+	cfg->remain_on_chan_type = channel_type;
+#endif /* WL_ENABLE_P2P_IF */
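+	/* Allocate a non-zero cookie for this remain-on-channel request; zero is
+	 * skipped on wrap-around.
+	 */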
+	id = ++cfg->last_roc_id;
+	if (id == 0)
+		id = ++cfg->last_roc_id;
+	*cookie = id;
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	if (wl_get_drv_status(cfg, SCANNING, ndev)) {
+		struct timer_list *_timer;
+		WL_DBG(("scan is running. go to fake listen state\n"));
+
+		wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			WL_DBG(("cancel current listen timer \n"));
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+
+		_timer = &cfg->p2p->listen_timer;
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+
+		INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration, 0);
+
+		err = BCME_OK;
+		goto exit;
+	}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef WL_CFG80211_SYNC_GON
+	if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+		/* Do not enter listen mode again if we are already in listen mode for the next af;
+		 * the remain-on-channel completion will be reported once the next af completes.
+		 */
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+#else
+		wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		goto exit;
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+	if (cfg->p2p && !cfg->p2p->on) {
+		/* In case of the p2p_listen command, the supplicant sends remain_on_channel
+		 * without turning on P2P
+		 */
+		get_primary_mac(cfg, &primary_mac);
+		wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+		p2p_on(cfg) = true;
+	}
+
+	if (p2p_is_on(cfg)) {
+		err = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
+		if (unlikely(err)) {
+			goto exit;
+		}
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		err = wl_cfgp2p_discover_listen(cfg, target_channel, duration);
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		if (err == BCME_OK) {
+			wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+		} else {
+			/* If it failed, the firmware may be in an internal scanning state,
+			 * so other scan requests must not abort it
+			 */
+			wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+		}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		/* WAR: set err = ok to prevent a cookie mismatch in wpa_supplicant;
+		 * the expiry timer will send a completion to the upper layer
+		 */
+		err = BCME_OK;
+	}
+
+exit:
+	if (err == BCME_OK) {
+		WL_INFORM(("Success\n"));
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		cfg80211_ready_on_channel(cfgdev, *cookie, channel,
+			duration, GFP_KERNEL);
+#else
+		cfg80211_ready_on_channel(cfgdev, *cookie, channel,
+			channel_type, duration, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	} else {
+		WL_ERR(("Fail to Set (err=%d cookie:%llu)\n", err, *cookie));
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+	s32 err = 0;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+		WL_DBG((" enter ) on P2P dedicated discover interface\n"));
+	}
+#else
+	WL_DBG((" enter ) netdev_ifidx: %d \n", cfgdev->ifindex));
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	return err;
+}
+
+static void
+wl_cfg80211_afx_handler(struct work_struct *work)
+{
+	struct afx_hdl *afx_instance;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 ret = BCME_OK;
+
+	afx_instance = container_of(work, struct afx_hdl, work);
+	if (afx_instance != NULL && cfg->afx_hdl->is_active) {
+		if (cfg->afx_hdl->is_listen && cfg->afx_hdl->my_listen_chan) {
+			ret = wl_cfgp2p_discover_listen(cfg, cfg->afx_hdl->my_listen_chan,
+				(100 * (1 + (RANDOM32() % 3)))); /* 100ms ~ 300ms */
+		} else {
+			ret = wl_cfgp2p_act_frm_search(cfg, cfg->afx_hdl->dev,
+				cfg->afx_hdl->bssidx, cfg->afx_hdl->peer_listen_chan,
+				NULL);
+		}
+		if (unlikely(ret != BCME_OK)) {
+			WL_ERR(("ERROR occurred! returned value is (%d)\n", ret));
+			if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL))
+				complete(&cfg->act_frm_scan);
+		}
+	}
+}
+
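+/* Search for the peer's channel before an action-frame tx: alternately probe
+ * on the peer's listen channel and listen on our own listen channel until the
+ * peer channel is discovered or the retry limit is reached. Returns the peer
+ * channel, or WL_INVALID on failure.
+ */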
+static s32
+wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+	u32 max_retry = WL_CHANNEL_SYNC_RETRY;
+
+	if (dev == NULL)
+		return -1;
+
+	WL_DBG((" enter ) \n"));
+
+	wl_set_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
+	cfg->afx_hdl->is_active = TRUE;
+
+	/* Loop to wait until we find a peer's channel or the
+	 * pending action frame tx is cancelled.
+	 */
+	while ((cfg->afx_hdl->retry < max_retry) &&
+		(cfg->afx_hdl->peer_chan == WL_INVALID)) {
+		cfg->afx_hdl->is_listen = FALSE;
+		wl_set_drv_status(cfg, SCANNING, dev);
+		WL_DBG(("Scheduling the action frame for sending.. retry %d\n",
+			cfg->afx_hdl->retry));
+		/* search peer on peer's listen channel */
+		schedule_work(&cfg->afx_hdl->work);
+		wait_for_completion_timeout(&cfg->act_frm_scan,
+			msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
+
+		if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+			!(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
+			break;
+
+		if (cfg->afx_hdl->my_listen_chan) {
+			WL_DBG(("Scheduling Listen peer in my listen channel = %d\n",
+				cfg->afx_hdl->my_listen_chan));
+			/* listen on my listen channel */
+			cfg->afx_hdl->is_listen = TRUE;
+			schedule_work(&cfg->afx_hdl->work);
+			wait_for_completion_timeout(&cfg->act_frm_scan,
+				msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
+		}
+		if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+			!(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
+			break;
+
+		cfg->afx_hdl->retry++;
+
+		WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
+	}
+
+	cfg->afx_hdl->is_active = FALSE;
+
+	wl_clr_drv_status(cfg, SCANNING, dev);
+	wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
+
+	return (cfg->afx_hdl->peer_chan);
+}
+
+struct p2p_config_af_params {
+	s32 max_tx_retry;	/* max tx retry count when no ack is received */
+	/* To make sure the action frame is sent successfully, we have to turn off mpc
+	 * 0: off, 1: on,  (-1): do nothing
+	 */
+	s32 mpc_onoff;
+#ifdef WL_CFG80211_SYNC_GON
+	bool extra_listen;
+#endif
+	bool search_channel;	/* 1: search peer's channel to send af */
+};
+
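+/* Per-subtype tx configuration for P2P public action frames: choose the dwell
+ * time, whether to search for the peer's channel, MPC handling, and record the
+ * next expected subtype so the remaining dwell time can be cancelled once the
+ * response is received.
+ */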
+static s32
+wl_cfg80211_config_p2p_pub_af_tx(struct wiphy *wiphy,
+	wl_action_frame_t *action_frame, wl_af_params_t *af_params,
+	struct p2p_config_af_params *config_af_params)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	wifi_p2p_pub_act_frame_t *act_frm =
+		(wifi_p2p_pub_act_frame_t *) (action_frame->data);
+
+	/* initialize default value */
+#ifdef WL_CFG80211_SYNC_GON
+	config_af_params->extra_listen = true;
+#endif
+	config_af_params->search_channel = false;
+	config_af_params->max_tx_retry = WL_AF_TX_MAX_RETRY;
+	config_af_params->mpc_onoff = -1;
+	cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+
+	switch (act_frm->subtype) {
+	case P2P_PAF_GON_REQ: {
+		WL_DBG(("P2P: GO_NEG_PHASE status set \n"));
+		wl_set_p2p_status(cfg, GO_NEG_PHASE);
+
+		config_af_params->mpc_onoff = 0;
+		config_af_params->search_channel = true;
+		cfg->next_af_subtype = act_frm->subtype + 1;
+
+		/* increase dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+
+		break;
+	}
+	case P2P_PAF_GON_RSP: {
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* increase dwell time to wait for CONF frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME + 100;
+		break;
+	}
+	case P2P_PAF_GON_CONF: {
+		/* If we have reached GO negotiation confirmation, reset the filter */
+		WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+
+		/* turn on mpc again if go nego is done */
+		config_af_params->mpc_onoff = 1;
+
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	}
+	case P2P_PAF_INVITE_REQ: {
+		config_af_params->search_channel = true;
+		cfg->next_af_subtype = act_frm->subtype + 1;
+
+		/* increase dwell time */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_INVITE_RSP:
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	case P2P_PAF_DEVDIS_REQ: {
+		if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0],
+			action_frame->len)) {
+			config_af_params->search_channel = true;
+		}
+
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* maximize dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_LONG_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_DEVDIS_RSP:
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	case P2P_PAF_PROVDIS_REQ: {
+		if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0],
+			action_frame->len)) {
+			config_af_params->search_channel = true;
+		}
+
+		config_af_params->mpc_onoff = 0;
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* increase dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_PROVDIS_RSP: {
+		cfg->next_af_subtype = P2P_PAF_GON_REQ;
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	}
+	default:
+		WL_DBG(("Unknown p2p pub act frame subtype: %d\n",
+			act_frm->subtype));
+		err = BCME_BADARG;
+	}
+	return err;
+}
+
+#ifdef WL11U
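+/* For channels 52~148 (possible DFS channels), P2P action frames are blocked,
+ * and other action frames are allowed only when a 5 GHz BSS on that channel
+ * appears in the scan results; frames on all other channels are always
+ * allowed. Returns true when the frame may be sent.
+ */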
+static bool
+wl_cfg80211_check_DFS_channel(struct bcm_cfg80211 *cfg, wl_af_params_t *af_params,
+	void *frame, u16 frame_len)
+{
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;
+	bool result = false;
+	s32 i;
+	chanspec_t chanspec;
+
+	/* If the channel is a DFS channel (52~148), check whether to block the frame */
+	if (af_params &&
+		(af_params->channel >= 52 && af_params->channel <= 148)) {
+		if (!wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+			bss_list = cfg->bss_list;
+			bi = next_bss(bss_list, bi);
+			for_each_bss(bss_list, bi, i) {
+				chanspec = wl_chspec_driver_to_host(bi->chanspec);
+				if (CHSPEC_IS5G(chanspec) &&
+					((bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(chanspec))
+					== af_params->channel)) {
+					result = true;	/* do not block the action frame */
+					break;
+				}
+			}
+		}
+	}
+	else {
+		result = true;
+	}
+
+	WL_DBG(("result=%s", result?"true":"false"));
+	return result;
+}
+#endif /* WL11U */
+
+
+static bool
+wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev,
+	bcm_struct_cfgdev *cfgdev, wl_af_params_t *af_params,
+	wl_action_frame_t *action_frame, u16 action_frame_len, s32 bssidx)
+{
+#ifdef WL11U
+	struct net_device *ndev = NULL;
+#endif /* WL11U */
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	bool ack = false;
+	u8 category, action;
+	s32 tx_retry;
+	struct p2p_config_af_params config_af_params;
+#ifdef VSDB
+	ulong off_chan_started_jiffies = 0;
+#endif
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+
+	/* Add the default dwell time
+	 * Dwell time to stay off-channel to wait for a response action frame
+	 * after transmitting a GO Negotiation action frame
+	 */
+	af_params->dwell_time = WL_DWELL_TIME;
+
+#ifdef WL11U
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	ndev = dev;
+#else
+	ndev = ndev_to_cfgdev(cfgdev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* WL11U */
+
+	category = action_frame->data[DOT11_ACTION_CAT_OFF];
+	action = action_frame->data[DOT11_ACTION_ACT_OFF];
+
+	/* initialize variables */
+	tx_retry = 0;
+	cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+	config_af_params.max_tx_retry = WL_AF_TX_MAX_RETRY;
+	config_af_params.mpc_onoff = -1;
+	config_af_params.search_channel = false;
+#ifdef WL_CFG80211_SYNC_GON
+	config_af_params.extra_listen = false;
+#endif
+
+	/* config parameters */
+	/* Public Action Frame Process - DOT11_ACTION_CAT_PUBLIC */
+	if (category == DOT11_ACTION_CAT_PUBLIC) {
+		if ((action == P2P_PUB_AF_ACTION) &&
+			(action_frame_len >= sizeof(wifi_p2p_pub_act_frame_t))) {
+			/* p2p public action frame process */
+			if (BCME_OK != wl_cfg80211_config_p2p_pub_af_tx(wiphy,
+				action_frame, af_params, &config_af_params)) {
+				WL_DBG(("Unknown subtype.\n"));
+			}
+
+		} else if (action_frame_len >= sizeof(wifi_p2psd_gas_pub_act_frame_t)) {
+			/* service discovery process */
+			if (action == P2PSD_ACTION_ID_GAS_IREQ ||
+				action == P2PSD_ACTION_ID_GAS_CREQ) {
+				/* configure service discovery query frame */
+
+				config_af_params.search_channel = true;
+
+				/* save the next af subtype to cancel the remaining dwell time */
+				cfg->next_af_subtype = action + 1;
+
+				af_params->dwell_time = WL_MED_DWELL_TIME;
+			} else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
+				action == P2PSD_ACTION_ID_GAS_CRESP) {
+				/* configure service discovery response frame */
+				af_params->dwell_time = WL_MIN_DWELL_TIME;
+			} else {
+				WL_DBG(("Unknown action type: %d\n", action));
+			}
+		} else {
+			WL_DBG(("Unknown Frame: category 0x%x, action 0x%x, length %d\n",
+				category, action, action_frame_len));
+		}
+	} else if (category == P2P_AF_CATEGORY) {
+		/* do not configure anything. it will be sent with a default configuration */
+	} else {
+		WL_DBG(("Unknown Frame: category 0x%x, action 0x%x\n",
+			category, action));
+		if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+			wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
+			return false;
+		}
+	}
+
+	/* To make sure the action frame is sent successfully, we have to turn off mpc */
+	if (config_af_params.mpc_onoff == 0) {
+		wldev_iovar_setint(dev, "mpc", 0);
+	}
+
+	/* validate channel and p2p ies */
+	if (config_af_params.search_channel && IS_P2P_SOCIAL(af_params->channel) &&
+		wl_to_p2p_bss_saved_ie(cfg, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) {
+		config_af_params.search_channel = true;
+	} else {
+		config_af_params.search_channel = false;
+	}
+#ifdef WL11U
+	if (ndev == bcmcfg_to_prmry_ndev(cfg))
+		config_af_params.search_channel = false;
+#endif /* WL11U */
+
+#ifdef VSDB
+	/* if connecting on primary iface, sleep for a while before sending af tx for VSDB */
+	if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {
+		OSL_SLEEP(50);
+	}
+#endif
+
+	/* if scan is ongoing, abort current scan. */
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+
+#ifdef WL11U
+	/* handling DFS channel exceptions */
+	if (!wl_cfg80211_check_DFS_channel(cfg, af_params, action_frame->data, action_frame->len)) {
+		return false;	/* the action frame was blocked */
+	}
+#endif /* WL11U */
+
+	/* set status and destination address before sending af */
+	if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+		/* set this status to cancel the remaining dwell time in the rx process */
+		wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
+	}
+	wl_set_drv_status(cfg, SENDING_ACT_FRM, dev);
+	memcpy(cfg->afx_hdl->tx_dst_addr.octet,
+		af_params->action_frame.da.octet,
+		sizeof(cfg->afx_hdl->tx_dst_addr.octet));
+
+	/* save af_params for rx process */
+	cfg->afx_hdl->pending_tx_act_frm = af_params;
+
+	/* search peer's channel */
+	if (config_af_params.search_channel) {
+		/* initialize afx_hdl */
+		if (wl_cfgp2p_find_idx(cfg, dev, &cfg->afx_hdl->bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			goto exit;
+		}
+		cfg->afx_hdl->dev = dev;
+		cfg->afx_hdl->retry = 0;
+		cfg->afx_hdl->peer_chan = WL_INVALID;
+
+		if (wl_cfg80211_af_searching_channel(cfg, dev) == WL_INVALID) {
+			WL_ERR(("couldn't find peer's channel.\n"));
+			wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len,
+				af_params->channel);
+			goto exit;
+		}
+
+		wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+		/*
+		 * Abort the scan even for VSDB scenarios. The scan gets aborted in firmware,
+		 * but only after the piggyback algorithm check.
+		 * To take care of the current piggyback algorithm, let's abort the scan here itself.
+		 */
+		wl_notify_escan_complete(cfg, dev, true, true);
+		/* Suspend P2P discovery's search-listen to prevent it from
+		 * starting a scan or changing the channel.
+		 */
+		wl_cfgp2p_discover_enable_search(cfg, false);
+
+		/* update channel */
+		af_params->channel = cfg->afx_hdl->peer_chan;
+	}
+
+#ifdef VSDB
+	off_chan_started_jiffies = jiffies;
+#endif /* VSDB */
+
+	wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, af_params->channel);
+
+	/* Now send a tx action frame */
+	ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? false : true;
+
+	/* if it failed, retry. The max_tx_retry value is configured by .... */
+	while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry)) {
+#ifdef VSDB
+		if (af_params->channel) {
+			if (jiffies_to_msecs(jiffies - off_chan_started_jiffies) >
+				OFF_CHAN_TIME_THRESHOLD_MS) {
+				WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
+				off_chan_started_jiffies = jiffies;
+			} else
+				OSL_SLEEP(AF_RETRY_DELAY_TIME);
+		}
+#endif /* VSDB */
+		ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ?
+			false : true;
+	}
+
+	if (ack == false) {
+		WL_ERR(("Failed to send Action Frame(retry %d)\n", tx_retry));
+	}
+	WL_DBG(("Complete to send action frame\n"));
+exit:
+	/* Clear SENDING_ACT_FRM after all sending af is done */
+	wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
+
+#ifdef WL_CFG80211_SYNC_GON
+	/* WAR: sometimes the dongle does not keep the dwell time of 'actframe'.
+	 * If we couldn't get the next action response frame and the dongle does not keep
+	 * the dwell time, go to the listen state again to get the next action response frame.
+	 */
+	if (ack && config_af_params.extra_listen &&
+		wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM) &&
+		cfg->af_sent_channel == cfg->afx_hdl->my_listen_chan) {
+		s32 extra_listen_time;
+
+		extra_listen_time = af_params->dwell_time -
+			jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies);
+
+		if (extra_listen_time > 50) {
+			wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+			WL_DBG(("Wait more time! actual af time:%d,"
+				" calculated extra listen:%d\n",
+				af_params->dwell_time, extra_listen_time));
+			if (wl_cfgp2p_discover_listen(cfg, cfg->af_sent_channel,
+				extra_listen_time + 100) == BCME_OK) {
+				wait_for_completion_timeout(&cfg->wait_next_af,
+					msecs_to_jiffies(extra_listen_time + 100 + 300));
+			}
+			wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+		}
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+	wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
+
+	if (cfg->afx_hdl->pending_tx_act_frm)
+		cfg->afx_hdl->pending_tx_act_frm = NULL;
+
+	WL_INFORM(("-- sending Action Frame is %s, listen chan: %d\n",
+		(ack) ? "Succeeded!!":"Failed!!", cfg->afx_hdl->my_listen_chan));
+
+
+	/* if all done, turn mpc on again */
+	if (config_af_params.mpc_onoff == 1) {
+		wldev_iovar_setint(dev, "mpc", 1);
+	}
+
+	return ack;
+}
+
+#define MAX_NUM_OF_ASSOCIATED_DEV       64
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+static s32
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct cfg80211_mgmt_tx_params *params, u64 *cookie)
+#else
+static s32
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel *channel, bool offchan,
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0))
+	enum nl80211_channel_type channel_type,
+	bool channel_type_valid,
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0) */
+	unsigned int wait, const u8* buf, size_t len,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+	bool no_cck,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || defined(WL_COMPAT_WIRELESS)
+	bool dont_wait_for_ack,
+#endif
+	u64 *cookie)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+{
+	wl_action_frame_t *action_frame;
+	wl_af_params_t *af_params;
+	scb_val_t scb_val;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	struct ieee80211_channel *channel = params->chan;
+	const u8 *buf = params->buf;
+	size_t len = params->len;
+#endif
+	const struct ieee80211_mgmt *mgmt;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *dev = NULL;
+	s32 err = BCME_OK;
+	s32 bssidx = 0;
+	u32 id;
+	bool ack = false;
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+
+	WL_DBG(("Enter \n"));
+
+	dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	/* set bsscfg idx for iovar (wlan0: P2PAPI_BSSCFG_PRIMARY, p2p: P2PAPI_BSSCFG_DEVICE)	*/
+	if (discover_cfgdev(cfgdev, cfg)) {
+		bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	}
+	else {
+		if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			return BCME_ERROR;
+		}
+	}
+
+	WL_DBG(("TX target bssidx=%d\n", bssidx));
+
+	if (p2p_is_on(cfg)) {
+		/* Suspend P2P discovery search-listen to prevent it from changing the
+		 * channel.
+		 */
+		if ((err = wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+			WL_ERR(("Can not disable discovery mode\n"));
+			return -EFAULT;
+		}
+	}
+	*cookie = 0;
+	id = cfg->send_action_id++;
+	if (id == 0)
+		id = cfg->send_action_id++;
+	*cookie = id;
+	mgmt = (const struct ieee80211_mgmt *)buf;
+	if (ieee80211_is_mgmt(mgmt->frame_control)) {
+		if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+			s32 ie_offset =  DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+			s32 ie_len = len - ie_offset;
+			if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p)
+				bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+			wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+				VNDR_IE_PRBRSP_FLAG, (u8 *)(buf + ie_offset), ie_len);
+			cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
+			goto exit;
+		} else if (ieee80211_is_disassoc(mgmt->frame_control) ||
+			ieee80211_is_deauth(mgmt->frame_control)) {
+			char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+				sizeof(struct ether_addr) + sizeof(uint)] = {0};
+			int num_associated = 0;
+			struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+			if (!bcmp((const uint8 *)BSSID_BROADCAST,
+				(const struct ether_addr *)mgmt->da, ETHER_ADDR_LEN)) {
+				assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+				err = wldev_ioctl(dev, WLC_GET_ASSOCLIST,
+					assoc_maclist, sizeof(mac_buf), false);
+				if (err < 0)
+					WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+				else
+					num_associated = assoc_maclist->count;
+			}
+			memcpy(scb_val.ea.octet, mgmt->da, ETH_ALEN);
+			scb_val.val = mgmt->u.disassoc.reason_code;
+			err = wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+				sizeof(scb_val_t), true);
+			if (err < 0)
+				WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON error %d\n", err));
+			WL_ERR(("Disconnect STA : %s scb_val.val %d\n",
+				bcm_ether_ntoa((const struct ether_addr *)mgmt->da, eabuf),
+				scb_val.val));
+
+			if (num_associated > 0 && ETHER_ISBCAST(mgmt->da))
+				wl_delay(400);
+
+			cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
+			goto exit;
+
+		} else if (ieee80211_is_action(mgmt->frame_control)) {
+			/* Abort the dwell time of any previous off-channel
+			* action frame that may be still in effect.  Sending
+			* off-channel action frames relies on the driver's
+			* scan engine.  If a previous off-channel action frame
+			* tx is still in progress (including the dwell time),
+			* then this new action frame will not be sent out.
+			*/
+/* Do not abort scan for VSDB. Scan will be aborted in firmware if necessary.
+ * And previous off-channel action frame must be ended before new af tx.
+ */
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+			wl_notify_escan_complete(cfg, dev, true, true);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		}
+
+	} else {
+		WL_ERR(("Driver only allows MGMT packet type\n"));
+		goto exit;
+	}
+
+	af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL);
+
+	if (af_params == NULL)
+	{
+		WL_ERR(("unable to allocate frame\n"));
+		return -ENOMEM;
+	}
+
+	action_frame = &af_params->action_frame;
+
+	/* Add the packet Id */
+	action_frame->packetId = *cookie;
+	WL_DBG(("action frame %d\n", action_frame->packetId));
+	/* Add BSSID */
+	memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN);
+	memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN);
+
+	/* Add the length, excluding the 802.11 header */
+	action_frame->len = len - DOT11_MGMT_HDR_LEN;
+	WL_DBG(("action_frame->len: %d\n", action_frame->len));
+
+	/* Add the channel */
+	af_params->channel =
+		ieee80211_frequency_to_channel(channel->center_freq);
+	/* Save listen_chan for searching common channel */
+	cfg->afx_hdl->peer_listen_chan = af_params->channel;
+	WL_DBG(("channel from upper layer %d\n", cfg->afx_hdl->peer_listen_chan));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	af_params->dwell_time = params->wait;
+#else
+	af_params->dwell_time = wait;
+#endif
+
+	memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
+
+	ack = wl_cfg80211_send_action_frame(wiphy, dev, cfgdev, af_params,
+		action_frame, action_frame->len, bssidx);
+	cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, ack, GFP_KERNEL);
+
+	kfree(af_params);
+exit:
+	return err;
+}
+
+
+static void
+wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	u16 frame_type, bool reg)
+{
+
+	WL_DBG(("frame_type: %x, reg: %d\n", frame_type, reg));
+
+	if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
+		return;
+
+	return;
+}
+
+
+static s32
+wl_cfg80211_change_bss(struct wiphy *wiphy,
+	struct net_device *dev,
+	struct bss_parameters *params)
+{
+	s32 err = 0;
+	s32 ap_isolate = 0;
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+	dhd_pub_t *dhd;
+	s32 gmode = -1, nmode = -1;
+	s32 gmode_prev = -1, nmode_prev = -1;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(WL_ENABLE_P2P_IF)
+	if (cfg->p2p_net == dev)
+		dev = bcmcfg_to_prmry_ndev(cfg);
+#endif
+	dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
+
+	if (params->use_cts_prot >= 0) {
+	}
+
+	if (params->use_short_preamble >= 0) {
+	}
+
+	if (params->use_short_slot_time >= 0) {
+	}
+
+	if (params->basic_rates) {
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+		switch ((int)(params->basic_rates[params->basic_rates_len -1])) {
+			case 22: /* B only , rate 11 */
+				gmode = 0;
+				nmode = 0;
+				break;
+			case 108: /* G only , rate 54 */
+				gmode = 2;
+				nmode = 0;
+				break;
+			default:
+				gmode = -1;
+				nmode = -1;
+				break;
+		}
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
+	}
+
+	if (params->ap_isolate >= 0) {
+		ap_isolate = params->ap_isolate;
+		err = wldev_iovar_setint(dev, "ap_isolate", ap_isolate);
+		if (unlikely(err))
+		{
+			WL_ERR(("set ap_isolate Error (%d)\n", err));
+		}
+	}
+
+	if (params->ht_opmode >= 0) {
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+		nmode = 1;
+		gmode = 1;
+	} else {
+		nmode = 0;
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
+	}
+
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+	err = wldev_iovar_getint(dev, "nmode", &nmode_prev);
+	if (unlikely(err)) {
+		WL_ERR(("error reading nmode (%d)\n", err));
+	}
+	if (nmode == nmode_prev) {
+		nmode = -1;
+	}
+	err = wldev_ioctl(dev, WLC_GET_GMODE, &gmode_prev, sizeof(gmode_prev), 0);
+	if (unlikely(err)) {
+		WL_ERR(("error reading gmode (%d)\n", err));
+	}
+	if (gmode == gmode_prev) {
+		gmode = -1;
+	}
+
+	if (((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) == DHD_FLAG_HOSTAP_MODE) &&
+		((gmode > -1) || (nmode > -1))) {
+		s32 val = 0;
+
+		err = wldev_ioctl(dev, WLC_DOWN, &val, sizeof(s32), true);
+		if (unlikely(err))
+			WL_ERR(("WLC_DOWN command failed:[%d]\n", err));
+
+		if (nmode > -1) {
+			err = wldev_iovar_setint(dev, "nmode", nmode);
+			if (unlikely(err))
+				WL_ERR(("nmode command failed:mode[%d]:err[%d]\n", nmode, err));
+		}
+
+		if (gmode > -1) {
+			err = wldev_ioctl(dev, WLC_SET_GMODE, &gmode, sizeof(s32), true);
+			if (unlikely(err))
+				WL_ERR(("WLC_SET_GMODE command failed:mode[%d]:err[%d]\n",
+					gmode, err));
+		}
+
+		val = 0;
+		err = wldev_ioctl(dev, WLC_UP, &val, sizeof(s32), true);
+		if (unlikely(err))
+			WL_ERR(("WLC_UP command failed:err[%d]\n", err));
+
+	}
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
+
+	return 0;
+}
+
+static s32
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+	struct ieee80211_channel *chan,
+	enum nl80211_channel_type channel_type)
+{
+	s32 _chan;
+	chanspec_t chspec = 0;
+	chanspec_t fw_chspec = 0;
+	u32 bw = WL_CHANSPEC_BW_20;
+
+	s32 err = BCME_OK;
+	s32 bw_cap = 0;
+	struct {
+		u32 band;
+		u32 bw_cap;
+	} param = {0, 0};
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#ifdef CUSTOM_SET_CPUCORE
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
+
+	dev = ndev_to_wlc_ndev(dev, cfg);
+	_chan = ieee80211_frequency_to_channel(chan->center_freq);
+	WL_ERR(("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
+		dev->ifindex, channel_type, _chan));
+
+
+	if (chan->band == IEEE80211_BAND_5GHZ) {
+		param.band = WLC_BAND_5G;
+		err = wldev_iovar_getbuf(dev, "bw_cap", &param, sizeof(param),
+			cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+		if (err) {
+			if (err != BCME_UNSUPPORTED) {
+				WL_ERR(("bw_cap failed, %d\n", err));
+				return err;
+			} else {
+				err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+				if (err) {
+					WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+				}
+				if (bw_cap != WLC_N_BW_20ALL)
+					bw = WL_CHANSPEC_BW_40;
+			}
+		} else {
+			if (WL_BW_CAP_80MHZ(cfg->ioctl_buf[0]))
+				bw = WL_CHANSPEC_BW_80;
+			else if (WL_BW_CAP_40MHZ(cfg->ioctl_buf[0]))
+				bw = WL_CHANSPEC_BW_40;
+			else
+				bw = WL_CHANSPEC_BW_20;
+
+		}
+
+	} else if (chan->band == IEEE80211_BAND_2GHZ)
+		bw = WL_CHANSPEC_BW_20;
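+	/* Try the widest bandwidth first; if the resulting chanspec is invalid or
+	 * rejected, fall back 80 MHz -> 40 MHz -> 20 MHz via change_bw below.
+	 */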
+set_channel:
+	chspec = wf_channel2chspec(_chan, bw);
+	if (wf_chspec_valid(chspec)) {
+		fw_chspec = wl_chspec_host_to_driver(chspec);
+		if (fw_chspec != INVCHANSPEC) {
+			if ((err = wldev_iovar_setint(dev, "chanspec",
+				fw_chspec)) == BCME_BADCHAN) {
+				if (bw == WL_CHANSPEC_BW_80)
+					goto change_bw;
+				err = wldev_ioctl(dev, WLC_SET_CHANNEL,
+					&_chan, sizeof(_chan), true);
+				if (err < 0) {
+					WL_ERR(("WLC_SET_CHANNEL error %d"
+					"chip may not be supporting this channel\n", err));
+				}
+			} else if (err) {
+				WL_ERR(("failed to set chanspec error %d\n", err));
+			}
+		} else {
+			WL_ERR(("failed to convert host chanspec to fw chanspec\n"));
+			err = BCME_ERROR;
+		}
+	} else {
+change_bw:
+		if (bw == WL_CHANSPEC_BW_80)
+			bw = WL_CHANSPEC_BW_40;
+		else if (bw == WL_CHANSPEC_BW_40)
+			bw = WL_CHANSPEC_BW_20;
+		else
+			bw = 0;
+		if (bw)
+			goto set_channel;
+		WL_ERR(("Invalid chanspec 0x%x\n", chspec));
+		err = BCME_ERROR;
+	}
+#ifdef CUSTOM_SET_CPUCORE
+	if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) {
+		WL_DBG(("SoftAP mode do not need to set cpucore\n"));
+	} else if ((dev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) &&
+		(chspec & WL_CHANSPEC_BW_80)) {
+		/* If GO is vht80 */
+		dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE;
+		dhd_set_cpucore(dhd, TRUE);
+	}
+#endif /* CUSTOM_SET_CPUCORE */
+	return err;
+}
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+struct net_device *
+wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *_net_info, *next;
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (_net_info->ndev &&
+			test_bit(WL_STATUS_REMAINING_ON_CHANNEL, &_net_info->sme_state))
+			return _net_info->ndev;
+	}
+	return NULL;
+}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
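+/* Configure an open (no security) BSS on the given bsscfg: open system
+ * authentication, wsec off (unless WEP support is retained), and
+ * WPA_AUTH_NONE.
+ */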
+static s32
+wl_validate_opensecurity(struct net_device *dev, s32 bssidx)
+{
+	s32 err = BCME_OK;
+
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+#ifndef CUSTOMER_HW10 /* for WEP Support */
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+#endif /* CUSTOMER_HW10 */
+
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", WPA_AUTH_NONE, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+
+	return 0;
+}
+
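+/* Validate the RSN (WPA2) IE supplied for AP mode and program the matching
+ * auth, wsec and wpa_auth settings into the given bsscfg, including
+ * wme_bss_disable derived from the RSN capabilities field.
+ */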
+static s32
+wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+	s32 len = 0;
+	s32 err = BCME_OK;
+	u16 auth = 0; /* d11 open authentication */
+	u32 wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u32 wpa_auth = 0;
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+
+	u16 suite_count;
+	u8 rsn_cap[2];
+	u32 wme_bss_disable;
+
+	if (wpa2ie == NULL)
+		goto exit;
+
+	WL_DBG(("Enter \n"));
+	len =  wpa2ie->len;
+	/* check the mcast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+	switch (mcast->type) {
+		case WPA_CIPHER_NONE:
+			gval = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			gval = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			gval = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			gval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WAPI_CIPHER_SMS4:
+			gval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("No Security Info\n"));
+			break;
+	}
+	if ((len -= WPA_SUITE_LEN) <= 0)
+		return BCME_BADLEN;
+
+	/* check the unicast cipher */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	suite_count = ltoh16_ua(&ucast->count);
+	switch (ucast->list[0].type) {
+		case WPA_CIPHER_NONE:
+			pval = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			pval = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			pval = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			pval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WAPI_CIPHER_SMS4:
+			pval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("No Security Info\n"));
+	}
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0)
+		return BCME_BADLEN;
+
+	/* For WPS, set SES_OW_ENABLED */
+	wsec = (pval | gval | SES_OW_ENABLED);
+	/* check the AKM */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+	suite_count = ltoh16_ua(&mgmt->count);
+	switch (mgmt->list[0].type) {
+		case RSN_AKM_NONE:
+			wpa_auth = WPA_AUTH_NONE;
+			break;
+		case RSN_AKM_UNSPECIFIED:
+			wpa_auth = WPA2_AUTH_UNSPECIFIED;
+			break;
+		case RSN_AKM_PSK:
+			wpa_auth = WPA2_AUTH_PSK;
+			break;
+		default:
+			WL_ERR(("No Key Mgmt Info\n"));
+	}
+
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+		rsn_cap[0] = *(u8 *)&mgmt->list[suite_count];
+		rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1);
+
+		if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
+			wme_bss_disable = 0;
+		} else {
+			wme_bss_disable = 1;
+		}
+
+		/* set wme_bss_disable to sync RSN Capabilities */
+		err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
+		if (err < 0) {
+			WL_ERR(("wme_bss_disable error %d\n", err));
+			return BCME_ERROR;
+		}
+	} else {
+		WL_DBG(("There is no RSN Capabilities. remained len %d\n", len));
+	}
+
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+exit:
+	return 0;
+}
+
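+/* Validate the (vendor-specific) WPA IE supplied for AP mode and program the
+ * matching auth, wsec and wpa_auth settings into the given bsscfg.
+ */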
+static s32
+wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx)
+{
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+	u16 auth = 0; /* d11 open authentication */
+	u16 count;
+	s32 err = BCME_OK;
+	s32 len = 0;
+	u32 i;
+	u32 wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u32 wpa_auth = 0;
+	u32 tmp = 0;
+
+	if (wpaie == NULL)
+		goto exit;
+	WL_DBG(("Enter \n"));
+	len = wpaie->length;    /* value length */
+	len -= WPA_IE_TAG_FIXED_LEN;
+	/* check for multicast cipher suite */
+	if (len < WPA_SUITE_LEN) {
+		WL_INFORM(("no multicast cipher suite\n"));
+		goto exit;
+	}
+
+	/* pick up multicast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpaie[1];
+	len -= WPA_SUITE_LEN;
+	if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+		if (IS_WPA_CIPHER(mcast->type)) {
+			tmp = 0;
+			switch (mcast->type) {
+				case WPA_CIPHER_NONE:
+					tmp = 0;
+					break;
+				case WPA_CIPHER_WEP_40:
+				case WPA_CIPHER_WEP_104:
+					tmp = WEP_ENABLED;
+					break;
+				case WPA_CIPHER_TKIP:
+					tmp = TKIP_ENABLED;
+					break;
+				case WPA_CIPHER_AES_CCM:
+					tmp = AES_ENABLED;
+					break;
+				default:
+					WL_ERR(("No Security Info\n"));
+			}
+			gval |= tmp;
+		}
+	}
+	/* Check for unicast suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFORM(("no unicast suite\n"));
+		goto exit;
+	}
+	/* walk thru unicast cipher list and pick up what we recognize */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	count = ltoh16_ua(&ucast->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_CIPHER(ucast->list[i].type)) {
+				tmp = 0;
+				switch (ucast->list[i].type) {
+					case WPA_CIPHER_NONE:
+						tmp = 0;
+						break;
+					case WPA_CIPHER_WEP_40:
+					case WPA_CIPHER_WEP_104:
+						tmp = WEP_ENABLED;
+						break;
+					case WPA_CIPHER_TKIP:
+						tmp = TKIP_ENABLED;
+						break;
+					case WPA_CIPHER_AES_CCM:
+						tmp = AES_ENABLED;
+						break;
+					default:
+						WL_ERR(("No Security Info\n"));
+				}
+				pval |= tmp;
+			}
+		}
+	}
+	len -= (count - i) * WPA_SUITE_LEN;
+	/* Check for auth key management suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFORM((" no auth key mgmt suite\n"));
+		goto exit;
+	}
+	/* walk thru auth management suite list and pick up what we recognize */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+	count = ltoh16_ua(&mgmt->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_AKM(mgmt->list[i].type)) {
+				tmp = 0;
+				switch (mgmt->list[i].type) {
+					case RSN_AKM_NONE:
+						tmp = WPA_AUTH_NONE;
+						break;
+					case RSN_AKM_UNSPECIFIED:
+						tmp = WPA_AUTH_UNSPECIFIED;
+						break;
+					case RSN_AKM_PSK:
+						tmp = WPA_AUTH_PSK;
+						break;
+					default:
+						WL_ERR(("No Key Mgmt Info\n"));
+				}
+				wpa_auth |= tmp;
+			}
+		}
+
+	}
+	/* For WPS, set SES_OW_ENABLED */
+	wsec = (pval | gval | SES_OW_ENABLED);
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+exit:
+	return 0;
+}
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+static u32 wl_get_cipher_type(uint8 type)
+{
+	u32 ret = 0;
+	switch (type) {
+		case WPA_CIPHER_NONE:
+			ret = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			ret = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			ret = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			ret = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WAPI_CIPHER_SMS4:
+			ret = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("No Security Info\n"));
+	}
+	return ret;
+}
+
+static u32 wl_get_suite_auth_key_mgmt_type(uint8 type)
+{
+	u32 ret = 0;
+	switch (type) {
+		case RSN_AKM_NONE:
+			ret = WPA_AUTH_NONE;
+			break;
+		case RSN_AKM_UNSPECIFIED:
+			ret = WPA_AUTH_UNSPECIFIED;
+			break;
+		case RSN_AKM_PSK:
+			ret = WPA_AUTH_PSK;
+			break;
+		default:
+			WL_ERR(("No Key Mgmt Info\n"));
+	}
+	return ret;
+}
+
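+/*
+ * Validate a WPA IE and an RSN IE supplied together (mixed-mode AP). The
+ * wsec and wpa_auth values derived from each IE are ORed together, and the
+ * combined "auth", "wsec" and "wpa_auth" iovars are pushed to the bsscfg.
+ * wme_bss_disable is synced from the RSN capabilities field when present.
+ */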
+static s32
+wl_validate_wpaie_wpa2ie(struct net_device *dev, wpa_ie_fixed_t *wpaie,
+	bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+	u16 auth = 0; /* d11 open authentication */
+	u16 count;
+	s32 err = BCME_OK;
+	u32 wme_bss_disable;
+	u16 suite_count;
+	u8 rsn_cap[2];
+	s32 len = 0;
+	u32 i;
+	u32 wsec1, wsec2, wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u32 wpa_auth = 0;
+	u32 wpa_auth1 = 0;
+	u32 wpa_auth2 = 0;
+	u8* ptmp;
+
+	if (wpaie == NULL || wpa2ie == NULL)
+		goto exit;
+
+	WL_DBG(("Enter \n"));
+	len = wpaie->length;    /* value length */
+	len -= WPA_IE_TAG_FIXED_LEN;
+	/* check for multicast cipher suite */
+	if (len < WPA_SUITE_LEN) {
+		WL_INFORM(("no multicast cipher suite\n"));
+		goto exit;
+	}
+
+	/* pick up multicast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpaie[1];
+	len -= WPA_SUITE_LEN;
+	if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+		if (IS_WPA_CIPHER(mcast->type)) {
+			gval |= wl_get_cipher_type(mcast->type);
+		}
+	}
+	WL_ERR(("\nwpa ie validate\n"));
+	WL_ERR(("wpa ie mcast cipher = 0x%X\n", gval));
+
+	/* Check for unicast suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFORM(("no unicast suite\n"));
+		goto exit;
+	}
+
+	/* walk thru unicast cipher list and pick up what we recognize */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	count = ltoh16_ua(&ucast->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_CIPHER(ucast->list[i].type)) {
+				pval |= wl_get_cipher_type(ucast->list[i].type);
+			}
+		}
+	}
+	WL_ERR(("wpa ie ucast count =%d, cipher = 0x%X\n", count, pval));
+
+	/* For WPS, set SES_OW_ENABLED */
+	wsec1 = (pval | gval | SES_OW_ENABLED);
+	WL_ERR(("wpa ie wsec = 0x%X\n", wsec1));
+
+	len -= (count - i) * WPA_SUITE_LEN;
+	/* Check for auth key management suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFORM((" no auth key mgmt suite\n"));
+		goto exit;
+	}
+	/* walk thru auth management suite list and pick up what we recognize */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+	count = ltoh16_ua(&mgmt->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_AKM(mgmt->list[i].type)) {
+
+				wpa_auth1 |= wl_get_suite_auth_key_mgmt_type(mgmt->list[i].type);
+			}
+		}
+
+	}
+	WL_ERR(("wpa ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", count, wpa_auth1));
+	WL_ERR(("\nwpa2 ie validate\n"));
+
+	pval = 0;
+	gval = 0;
+	len =  wpa2ie->len;
+	/* check the mcast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+	ptmp = mcast->oui;
+	gval = wl_get_cipher_type(ptmp[DOT11_OUI_LEN]);
+
+	WL_ERR(("wpa2 ie mcast cipher = 0x%X\n", gval));
+	if ((len -= WPA_SUITE_LEN) <= 0)
+	{
+		WL_ERR(("P:wpa2 ie len[%d]", len));
+		return BCME_BADLEN;
+	}
+
+	/* check the unicast cipher */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	suite_count = ltoh16_ua(&ucast->count);
+	WL_ERR((" WPA2 ucast cipher count=%d\n", suite_count));
+	pval |= wl_get_cipher_type(ucast->list[0].type);
+
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0)
+		return BCME_BADLEN;
+
+	WL_ERR(("wpa2 ie ucast cipher = 0x%X\n", pval));
+
+	/* For WPS, set SES_OW_ENABLED */
+	wsec2 = (pval | gval | SES_OW_ENABLED);
+	WL_ERR(("wpa2 ie wsec = 0x%X\n", wsec2));
+
+	/* check the AKM */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+	suite_count = ltoh16_ua(&mgmt->count);
+	ptmp = (u8 *)&mgmt->list[0];
+	wpa_auth2 = wl_get_suite_auth_key_mgmt_type(ptmp[DOT11_OUI_LEN]);
+	WL_ERR(("wpa ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", count, wpa_auth2));
+
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+		rsn_cap[0] = *(u8 *)&mgmt->list[suite_count];
+		rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1);
+		if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
+			wme_bss_disable = 0;
+		} else {
+			wme_bss_disable = 1;
+		}
+		WL_DBG(("P:rsn_cap[0]=[0x%X]:wme_bss_disabled[%d]\n", rsn_cap[0], wme_bss_disable));
+
+		/* set wme_bss_disable to sync RSN Capabilities */
+		err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
+		if (err < 0) {
+			WL_ERR(("wme_bss_disable error %d\n", err));
+			return BCME_ERROR;
+		}
+	} else {
+		WL_DBG(("There is no RSN Capabilities. remained len %d\n", len));
+	}
+
+	wsec = (wsec1 | wsec2);
+	wpa_auth = (wpa_auth1 | wpa_auth2);
+	WL_ERR(("wpa_wpa2 wsec=0x%X wpa_auth=0x%X\n", wsec, wpa_auth));
+
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+exit:
+	return 0;
+}
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
+
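+/*
+ * Validate the security IEs parsed from the beacon. For a P2P GO only the
+ * RSN IE is checked; for a SoftAP the WPA and/or RSN IE is validated and a
+ * copy is cached in cfg->ap_info so later beacon changes can be compared.
+ * With no WPA/RSN IE the bsscfg is configured as open.
+ */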
+static s32
+wl_cfg80211_bcn_validate_sec(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	u32 dev_role,
+	s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (dev_role == NL80211_IFTYPE_P2P_GO && (ies->wpa2_ie)) {
+		/* For P2P GO, the sec type is WPA2-PSK */
+		WL_DBG(("P2P GO: validating wpa2_ie"));
+		if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0)
+			return BCME_ERROR;
+
+	} else if (dev_role == NL80211_IFTYPE_AP) {
+
+		WL_DBG(("SoftAP: validating security"));
+		/* If wpa2_ie or wpa_ie is present validate it */
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+		if ((ies->wpa_ie != NULL && ies->wpa2_ie != NULL)) {
+			if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie, ies->wpa2_ie, bssidx)  < 0) {
+				cfg->ap_info->security_mode = false;
+				return BCME_ERROR;
+			}
+		}
+		else {
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
+		if ((ies->wpa2_ie || ies->wpa_ie) &&
+			((wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0 ||
+			wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0))) {
+			cfg->ap_info->security_mode = false;
+			return BCME_ERROR;
+		}
+
+		cfg->ap_info->security_mode = true;
+		if (cfg->ap_info->rsn_ie) {
+			kfree(cfg->ap_info->rsn_ie);
+			cfg->ap_info->rsn_ie = NULL;
+		}
+		if (cfg->ap_info->wpa_ie) {
+			kfree(cfg->ap_info->wpa_ie);
+			cfg->ap_info->wpa_ie = NULL;
+		}
+		if (cfg->ap_info->wps_ie) {
+			kfree(cfg->ap_info->wps_ie);
+			cfg->ap_info->wps_ie = NULL;
+		}
+		if (ies->wpa_ie != NULL) {
+			/* WPAIE */
+			cfg->ap_info->rsn_ie = NULL;
+			cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+				ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+				GFP_KERNEL);
+		} else if (ies->wpa2_ie != NULL) {
+			/* RSNIE */
+			cfg->ap_info->wpa_ie = NULL;
+			cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+				ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+				GFP_KERNEL);
+		}
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+		}
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
+		if (!ies->wpa2_ie && !ies->wpa_ie) {
+			wl_validate_opensecurity(dev, bssidx);
+			cfg->ap_info->security_mode = false;
+		}
+
+		if (ies->wps_ie) {
+			cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		}
+	}
+
+	return 0;
+
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
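+/*
+ * Program the basic beacon parameters from cfg80211_ap_settings: beacon
+ * interval, DTIM period, the SSID (stored for SoftAP or P2P GO use) and,
+ * when requested, the hidden-SSID ("closednet") setting.
+ */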
+static s32 wl_cfg80211_bcn_set_params(
+	struct cfg80211_ap_settings *info,
+	struct net_device *dev,
+	u32 dev_role, s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 err = BCME_OK;
+
+	WL_DBG(("interval (%d) \ndtim_period (%d) \n",
+		info->beacon_interval, info->dtim_period));
+
+	if (info->beacon_interval) {
+		if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD,
+			&info->beacon_interval, sizeof(s32), true)) < 0) {
+			WL_ERR(("Beacon Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if (info->dtim_period) {
+		if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD,
+			&info->dtim_period, sizeof(s32), true)) < 0) {
+			WL_ERR(("DTIM Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if ((info->ssid) && (info->ssid_len > 0) &&
+		(info->ssid_len <= 32)) {
+		WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len));
+		if (dev_role == NL80211_IFTYPE_AP) {
+			/* Store the hostapd SSID */
+			memset(cfg->hostapd_ssid.SSID, 0x00, 32);
+			memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len);
+			cfg->hostapd_ssid.SSID_len = info->ssid_len;
+		} else {
+				/* P2P GO */
+			memset(cfg->p2p->ssid.SSID, 0x00, 32);
+			memcpy(cfg->p2p->ssid.SSID, info->ssid, info->ssid_len);
+			cfg->p2p->ssid.SSID_len = info->ssid_len;
+		}
+	}
+
+	if (info->hidden_ssid) {
+		if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
+			WL_ERR(("failed to set hidden : %d\n", err));
+		WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
+	}
+
+	return err;
+}
+#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+
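+/*
+ * Scan an IE buffer (typically the beacon tail) and record pointers to the
+ * WPS, RSN (WPA2) and WPA vendor IEs in the parsed_ies structure.
+ */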
+static s32
+wl_cfg80211_parse_ies(u8 *ptr, u32 len, struct parsed_ies *ies)
+{
+	s32 err = BCME_OK;
+
+	memset(ies, 0, sizeof(struct parsed_ies));
+
+	/* find the WPSIE */
+	if ((ies->wps_ie = wl_cfgp2p_find_wpsie(ptr, len)) != NULL) {
+		WL_DBG(("WPSIE in beacon \n"));
+		ies->wps_ie_len = ies->wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+	} else {
+		WL_ERR(("No WPSIE in beacon \n"));
+	}
+
+	/* find the RSN_IE */
+	if ((ies->wpa2_ie = bcm_parse_tlvs(ptr, len,
+		DOT11_MNG_RSN_ID)) != NULL) {
+		WL_DBG((" WPA2 IE found\n"));
+		ies->wpa2_ie_len = ies->wpa2_ie->len;
+	}
+
+	/* find the WPA_IE */
+	if ((ies->wpa_ie = wl_cfgp2p_find_wpaie(ptr, len)) != NULL) {
+		WL_DBG((" WPA found\n"));
+		ies->wpa_ie_len = ies->wpa_ie->length;
+	}
+
+	return err;
+
+}
+
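+/*
+ * Bring the BSS up. MPC is disabled first for both roles. For a P2P GO the
+ * SSID is set on the bsscfg and the bss is enabled; for a SoftAP the device
+ * is brought down, switched to AP/infra mode, brought back up and the SSID
+ * join is issued, after which AP_CREATED is flagged.
+ */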
+static s32
+wl_cfg80211_bcn_bringup_ap(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	u32 dev_role, s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_join_params join_params;
+	bool is_bssup = false;
+	s32 infra = 1;
+	s32 join_params_size = 0;
+	s32 ap = 1;
+#ifdef DISABLE_11H_SOFTAP
+	s32 spect = 0;
+#endif /* DISABLE_11H_SOFTAP */
+	s32 err = BCME_OK;
+
+	WL_DBG(("Enter dev_role: %d\n", dev_role));
+
+	/* Common code for SoftAP and P2P GO */
+	wldev_iovar_setint(dev, "mpc", 0);
+
+	if (dev_role == NL80211_IFTYPE_P2P_GO) {
+		is_bssup = wl_cfgp2p_bss_isup(dev, bssidx);
+		if (!is_bssup && (ies->wpa2_ie != NULL)) {
+
+			err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+			if (err < 0) {
+				WL_ERR(("SET INFRA error %d\n", err));
+				goto exit;
+			}
+
+			err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &cfg->p2p->ssid,
+				sizeof(cfg->p2p->ssid), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+				bssidx, &cfg->ioctl_buf_sync);
+			if (err < 0) {
+				WL_ERR(("GO SSID setting error %d\n", err));
+				goto exit;
+			}
+
+			/* Do abort scan before creating GO */
+			wl_cfg80211_scan_abort(cfg);
+
+			if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 1)) < 0) {
+				WL_ERR(("GO Bring up error %d\n", err));
+				goto exit;
+			}
+		} else
+			WL_DBG(("Bss is already up\n"));
+	} else if ((dev_role == NL80211_IFTYPE_AP) &&
+		(wl_get_drv_status(cfg, AP_CREATING, dev))) {
+		/* Device role SoftAP */
+		err = wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("WLC_DOWN error %d\n", err));
+			goto exit;
+		}
+		err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("SET INFRA error %d\n", err));
+			goto exit;
+		}
+		if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
+			WL_ERR(("setting AP mode failed %d \n", err));
+			goto exit;
+		}
+#ifdef DISABLE_11H_SOFTAP
+		err = wldev_ioctl(dev, WLC_SET_SPECT_MANAGMENT,
+			&spect, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("SET SPECT_MANAGMENT error %d\n", err));
+			goto exit;
+		}
+#endif /* DISABLE_11H_SOFTAP */
+
+		err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_UP error (%d)\n", err));
+			goto exit;
+		}
+
+		memset(&join_params, 0, sizeof(join_params));
+		/* join parameters starts with ssid */
+		join_params_size = sizeof(join_params.ssid);
+		memcpy(join_params.ssid.SSID, cfg->hostapd_ssid.SSID,
+			cfg->hostapd_ssid.SSID_len);
+		join_params.ssid.SSID_len = htod32(cfg->hostapd_ssid.SSID_len);
+
+		/* create softap */
+		if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+			join_params_size, true)) == 0) {
+			WL_DBG(("SoftAP set SSID (%s) success\n", join_params.ssid.SSID));
+			wl_clr_drv_status(cfg, AP_CREATING, dev);
+			wl_set_drv_status(cfg, AP_CREATED, dev);
+		}
+	}
+
+
+exit:
+	return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+s32
+wl_cfg80211_parse_ap_ies(
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info,
+	struct parsed_ies *ies)
+{
+	struct parsed_ies prb_ies;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	u8 *vndr = NULL;
+	u32 vndr_ie_len = 0;
+	s32 err = BCME_OK;
+
+	/* Parse Beacon IEs */
+	if (wl_cfg80211_parse_ies((u8 *)info->tail,
+		info->tail_len, ies) < 0) {
+		WL_ERR(("Beacon get IEs failed \n"));
+		err = -EINVAL;
+		goto fail;
+	}
+
+	vndr = (u8 *)info->proberesp_ies;
+	vndr_ie_len = info->proberesp_ies_len;
+
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		/* SoftAP mode */
+		struct ieee80211_mgmt *mgmt;
+		mgmt = (struct ieee80211_mgmt *)info->probe_resp;
+		if (mgmt != NULL) {
+			vndr = (u8 *)&mgmt->u.probe_resp.variable;
+			vndr_ie_len = info->probe_resp_len -
+				offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+		}
+	}
+
+	/* Parse Probe Response IEs */
+	if (wl_cfg80211_parse_ies(vndr, vndr_ie_len, &prb_ies) < 0) {
+		WL_ERR(("PROBE RESP get IEs failed \n"));
+		err = -EINVAL;
+	}
+
+fail:
+
+	return err;
+}
+
+s32
+wl_cfg80211_set_ies(
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info,
+	s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	u8 *vndr = NULL;
+	u32 vndr_ie_len = 0;
+	s32 err = BCME_OK;
+
+	/* Set Beacon IEs to FW */
+	if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+		VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
+		info->tail_len)) < 0) {
+		WL_ERR(("Set Beacon IE Failed \n"));
+	} else {
+		WL_DBG(("Applied Vndr IEs for Beacon \n"));
+	}
+
+	vndr = (u8 *)info->proberesp_ies;
+	vndr_ie_len = info->proberesp_ies_len;
+
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		/* SoftAP mode */
+		struct ieee80211_mgmt *mgmt;
+		mgmt = (struct ieee80211_mgmt *)info->probe_resp;
+		if (mgmt != NULL) {
+			vndr = (u8 *)&mgmt->u.probe_resp.variable;
+			vndr_ie_len = info->probe_resp_len -
+				offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+		}
+	}
+
+	/* Set Probe Response IEs to FW */
+	if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+		VNDR_IE_PRBRSP_FLAG, vndr, vndr_ie_len)) < 0) {
+		WL_ERR(("Set Probe Resp IE Failed \n"));
+	} else {
+		WL_DBG(("Applied Vndr IEs for Probe Resp \n"));
+	}
+
+	return err;
+}
+#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+
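+/*
+ * Handle security updates from hostapd on a running SoftAP. The cached
+ * WPS/WPA/RSN IEs in cfg->ap_info are compared with the new ones; if the
+ * security configuration changed, the bss is torn down, re-validated with
+ * the new IEs and brought back up.
+ */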
+static s32 wl_cfg80211_hostapd_sec(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	s32 bssidx)
+{
+	bool update_bss = 0;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+
+	if (ies->wps_ie) {
+		if (cfg->ap_info->wps_ie &&
+			memcmp(cfg->ap_info->wps_ie, ies->wps_ie, ies->wps_ie_len)) {
+			WL_DBG((" WPS IE is changed\n"));
+			kfree(cfg->ap_info->wps_ie);
+			cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		} else if (cfg->ap_info->wps_ie == NULL) {
+			WL_DBG((" WPS IE is added\n"));
+			cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		}
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+		if (ies->wpa_ie != NULL && ies->wpa2_ie != NULL) {
+			WL_ERR(("update bss - wpa_ie and  wpa2_ie is not null\n"));
+			if (!cfg->ap_info->security_mode) {
+				/* change from open mode to security mode */
+				update_bss = true;
+				cfg->ap_info->wpa_ie =
+					kmemdup(ies->wpa_ie,
+					ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				cfg->ap_info->rsn_ie =
+					kmemdup(ies->wpa2_ie,
+					ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+			} else {
+				/* change from (WPA or WPA2 or WPA/WPA2) to WPA/WPA2 mixed mode */
+				if (cfg->ap_info->wpa_ie) {
+					if (memcmp(cfg->ap_info->wpa_ie,
+					ies->wpa_ie, ies->wpa_ie->length +
+					WPA_RSN_IE_TAG_FIXED_LEN)) {
+						kfree(cfg->ap_info->wpa_ie);
+						update_bss = true;
+						cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+						ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+						GFP_KERNEL);
+					}
+				}
+				else {
+					update_bss = true;
+					cfg->ap_info->wpa_ie =
+						kmemdup(ies->wpa_ie,
+						ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+						GFP_KERNEL);
+				}
+				if (cfg->ap_info->rsn_ie) {
+					if (memcmp(cfg->ap_info->rsn_ie,
+					ies->wpa2_ie,
+					ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) {
+						update_bss = true;
+						kfree(cfg->ap_info->rsn_ie);
+						cfg->ap_info->rsn_ie =
+							kmemdup(ies->wpa2_ie,
+							ies->wpa2_ie->len +
+							WPA_RSN_IE_TAG_FIXED_LEN,
+							GFP_KERNEL);
+					}
+				}
+				else {
+					update_bss = true;
+					cfg->ap_info->rsn_ie =
+						kmemdup(ies->wpa2_ie,
+						ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+						GFP_KERNEL);
+				}
+			}
+			WL_ERR(("update_bss=%d\n", update_bss));
+			if (update_bss) {
+				cfg->ap_info->security_mode = true;
+				wl_cfgp2p_bss(cfg, dev, bssidx, 0);
+				if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie,
+					ies->wpa2_ie, bssidx)  < 0) {
+					return BCME_ERROR;
+				}
+				wl_cfgp2p_bss(cfg, dev, bssidx, 1);
+			}
+
+		}
+		else
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
+		if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) {
+			if (!cfg->ap_info->security_mode) {
+				/* change from open mode to security mode */
+				update_bss = true;
+				if (ies->wpa_ie != NULL) {
+					cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+					ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				} else {
+					cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+					ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				}
+			} else if (cfg->ap_info->wpa_ie) {
+				/* change from WPA2 mode to WPA mode */
+				if (ies->wpa_ie != NULL) {
+					update_bss = true;
+					kfree(cfg->ap_info->rsn_ie);
+					cfg->ap_info->rsn_ie = NULL;
+					cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+					ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				} else if (memcmp(cfg->ap_info->rsn_ie,
+					ies->wpa2_ie, ies->wpa2_ie->len
+					+ WPA_RSN_IE_TAG_FIXED_LEN)) {
+					update_bss = true;
+					kfree(cfg->ap_info->rsn_ie);
+					cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+					ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+					cfg->ap_info->wpa_ie = NULL;
+				}
+			}
+			if (update_bss) {
+				cfg->ap_info->security_mode = true;
+				wl_cfgp2p_bss(cfg, dev, bssidx, 0);
+				if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0 ||
+					wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0) {
+					return BCME_ERROR;
+				}
+				wl_cfgp2p_bss(cfg, dev, bssidx, 1);
+			}
+		}
+	} else {
+		WL_ERR(("No WPSIE in beacon \n"));
+	}
+	return 0;
+}
+
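+/*
+ * Station management ops (kernels >= 3.2 or backported cfg80211): deleting a
+ * station deauthenticates it with reason DEAUTH_LEAVING, and changing a
+ * station only toggles the AUTHORIZED flag via WLC_SCB_(DE)AUTHORIZE.
+ */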
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32
+wl_cfg80211_del_station(
+	struct wiphy *wiphy,
+	struct net_device *ndev,
+	u8* mac_addr)
+{
+	struct net_device *dev;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scb_val;
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+	int err;
+	char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+		sizeof(struct ether_addr) + sizeof(uint)] = {0};
+	struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+	int num_associated = 0;
+
+	WL_DBG(("Entry\n"));
+	if (mac_addr == NULL) {
+		WL_DBG(("mac_addr is NULL ignore it\n"));
+		return 0;
+	}
+
+	dev = ndev_to_wlc_ndev(ndev, cfg);
+
+	if (p2p_is_on(cfg)) {
+		/* Suspend P2P discovery search-listen to prevent it from changing the
+		 * channel.
+		 */
+		if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+			WL_ERR(("Can not disable discovery mode\n"));
+			return -EFAULT;
+		}
+	}
+
+	assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+	err = wldev_ioctl(ndev, WLC_GET_ASSOCLIST,
+		assoc_maclist, sizeof(mac_buf), false);
+	if (err < 0)
+		WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+	else
+		num_associated = assoc_maclist->count;
+
+	memcpy(scb_val.ea.octet, mac_addr, ETHER_ADDR_LEN);
+	scb_val.val = DOT11_RC_DEAUTH_LEAVING;
+	err = wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+		sizeof(scb_val_t), true);
+	if (err < 0)
+		WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
+	WL_ERR(("Disconnect STA : %s scb_val.val %d\n",
+		bcm_ether_ntoa((const struct ether_addr *)mac_addr, eabuf),
+		scb_val.val));
+
+	if (num_associated > 0 && ETHER_ISBCAST(mac_addr))
+		wl_delay(400);
+
+	return 0;
+}
+
+static s32
+wl_cfg80211_change_station(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	u8 *mac,
+	struct station_parameters *params)
+{
+	int err;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	/* Processing only authorize/de-authorize flag for now */
+	if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+		return -ENOTSUPP;
+
+	if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+		err = wldev_ioctl(primary_ndev, WLC_SCB_DEAUTHORIZE, mac, ETH_ALEN, true);
+		if (err)
+			WL_ERR(("WLC_SCB_DEAUTHORIZE error (%d)\n", err));
+		return err;
+	}
+
+	err = wldev_ioctl(primary_ndev, WLC_SCB_AUTHORIZE, mac, ETH_ALEN, true);
+	if (err)
+		WL_ERR(("WLC_SCB_AUTHORIZE error (%d)\n", err));
+	return err;
+}
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
+
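+/*
+ * start_ap/stop_ap/change_beacon implementations for kernels >= 3.4 (or
+ * compat-wireless). start_ap resolves the device role (SoftAP vs P2P GO),
+ * sets the channel and beacon parameters, validates the security IEs,
+ * brings the BSS up and finally pushes the vendor IEs to the firmware.
+ */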
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+static s32
+wl_cfg80211_start_ap(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	struct cfg80211_ap_settings *info)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = BCME_OK;
+	struct parsed_ies ies;
+	s32 bssidx = 0;
+	u32 dev_role = 0;
+
+	WL_DBG(("Enter \n"));
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		WL_DBG(("Start AP req on primary iface: Softap\n"));
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		WL_DBG(("Start AP req on P2P iface: GO\n"));
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+		WL_DBG(("Start AP req on P2P connection iface\n"));
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto fail;
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS))
+	if ((err = wl_cfg80211_set_channel(wiphy, dev,
+		dev->ieee80211_ptr->preset_chandef.chan,
+		NL80211_CHAN_HT20)) < 0) {
+		WL_ERR(("Set channel failed \n"));
+		goto fail;
+	}
+#endif /* ((LINUX_VERSION >= VERSION(3, 6, 0) && !WL_COMPAT_WIRELESS) */
+
+	if ((err = wl_cfg80211_bcn_set_params(info, dev,
+		dev_role, bssidx)) < 0) {
+		WL_ERR(("Beacon params set failed \n"));
+		goto fail;
+	}
+
+	/* Parse IEs */
+	if ((err = wl_cfg80211_parse_ap_ies(dev, &info->beacon, &ies)) < 0) {
+		WL_ERR(("Set IEs failed \n"));
+		goto fail;
+	}
+
+	if ((wl_cfg80211_bcn_validate_sec(dev, &ies,
+		dev_role, bssidx)) < 0)
+	{
+		WL_ERR(("Beacon set security failed \n"));
+		goto fail;
+	}
+
+	if ((err = wl_cfg80211_bcn_bringup_ap(dev, &ies,
+		dev_role, bssidx)) < 0) {
+		WL_ERR(("Beacon bring up AP/GO failed \n"));
+		goto fail;
+	}
+
+	WL_DBG(("** AP/GO Created **\n"));
+
+#ifdef WL_CFG80211_ACL
+	/* Enforce Admission Control. */
+	if ((err = wl_cfg80211_set_mac_acl(wiphy, dev, info->acl)) < 0) {
+		WL_ERR(("Set ACL failed\n"));
+	}
+#endif /* WL_CFG80211_ACL */
+
+	/* Set IEs to FW */
+	if ((err = wl_cfg80211_set_ies(dev, &info->beacon, bssidx)) < 0)
+		WL_ERR(("Set IEs failed \n"));
+
+	/* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+	if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+		bool pbc = 0;
+		wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+		if (pbc) {
+			WL_DBG(("set WLC_E_PROBREQ_MSG\n"));
+			wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+		}
+	}
+
+fail:
+	if (err) {
+		WL_ERR(("ADD/SET beacon failed\n"));
+		wldev_iovar_setint(dev, "mpc", 1);
+	}
+
+	return err;
+}
+
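+/*
+ * Stop the AP/GO. For a SoftAP on the primary interface the AP and infra
+ * modes are cleared, the interface is brought back up, MPC is re-enabled
+ * and the cached ap_info IEs are freed. For a P2P GO only a wake-lock
+ * timeout is armed; the actual teardown happens via the group removal path.
+ */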
+static s32
+wl_cfg80211_stop_ap(
+	struct wiphy *wiphy,
+	struct net_device *dev)
+{
+	int err = 0;
+	u32 dev_role = 0;
+	int infra = 0;
+	int ap = 0;
+	s32 bssidx = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	WL_DBG(("Enter \n"));
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto exit;
+
+	if (dev_role == NL80211_IFTYPE_AP) {
+		/* SoftAp on primary Interface.
+		 * Shut down AP and turn on MPC
+		 */
+		if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
+			WL_ERR(("setting AP mode failed %d \n", err));
+			err = -ENOTSUPP;
+			goto exit;
+		}
+		err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("SET INFRA error %d\n", err));
+			err = -ENOTSUPP;
+			goto exit;
+		}
+
+		err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_UP error (%d)\n", err));
+			err = -EINVAL;
+			goto exit;
+		}
+
+		wl_clr_drv_status(cfg, AP_CREATED, dev);
+		/* Turn on the MPC */
+		wldev_iovar_setint(dev, "mpc", 1);
+		if (cfg->ap_info) {
+			kfree(cfg->ap_info->wpa_ie);
+			kfree(cfg->ap_info->rsn_ie);
+			kfree(cfg->ap_info->wps_ie);
+			kfree(cfg->ap_info);
+			cfg->ap_info = NULL;
+		}
+	} else {
+		WL_DBG(("Stopping P2P GO \n"));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE((dhd_pub_t *)(cfg->pub),
+			DHD_EVENT_TIMEOUT_MS*3);
+		DHD_OS_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub));
+	}
+
+exit:
+	return err;
+}
+
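+/*
+ * Beacon update path: re-parse the IEs from the new beacon data, push them
+ * to the firmware and, for a SoftAP, let wl_cfg80211_hostapd_sec() apply
+ * any security changes and toggle the probe-request filter for WPS PBC.
+ */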
+static s32
+wl_cfg80211_change_beacon(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct parsed_ies ies;
+	u32 dev_role = 0;
+	s32 bssidx = 0;
+	bool pbc = 0;
+
+	WL_DBG(("Enter \n"));
+
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto fail;
+
+	if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
+		WL_ERR(("P2P already down status!\n"));
+		err = BCME_ERROR;
+		goto fail;
+	}
+
+	/* Parse IEs */
+	if ((err = wl_cfg80211_parse_ap_ies(dev, info, &ies)) < 0) {
+		WL_ERR(("Parse IEs failed \n"));
+		goto fail;
+	}
+
+	/* Set IEs to FW */
+	if ((err = wl_cfg80211_set_ies(dev, info, bssidx)) < 0) {
+		WL_ERR(("Set IEs failed \n"));
+		goto fail;
+	}
+
+	if (dev_role == NL80211_IFTYPE_AP) {
+		if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
+			WL_ERR(("Hostapd update sec failed \n"));
+			err = -EINVAL;
+			goto fail;
+		}
+		/* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+		if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+			wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+			WL_DBG((" WPS AP, wps_ie is exists pbc=%d\n", pbc));
+			if (pbc)
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+			else
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+		}
+	}
+
+fail:
+	return err;
+}
+#else
+static s32
+wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+	struct beacon_parameters *info)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 ie_offset = 0;
+	s32 bssidx = 0;
+	u32 dev_role = NL80211_IFTYPE_AP;
+	struct parsed_ies ies;
+	bcm_tlv_t *ssid_ie;
+	bool pbc = 0;
+	WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
+		info->interval, info->dtim_period, info->head_len, info->tail_len));
+
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto fail;
+
+	if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
+		WL_ERR(("P2P already down status!\n"));
+		err = BCME_ERROR;
+		goto fail;
+	}
+
+	ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+	/* find the SSID */
+	if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+		info->head_len - ie_offset,
+		DOT11_MNG_SSID_ID)) != NULL) {
+		if (dev_role == NL80211_IFTYPE_AP) {
+			/* Store the hostapd SSID */
+			memset(&cfg->hostapd_ssid.SSID[0], 0x00, 32);
+			memcpy(&cfg->hostapd_ssid.SSID[0], ssid_ie->data, ssid_ie->len);
+			cfg->hostapd_ssid.SSID_len = ssid_ie->len;
+		} else {
+				/* P2P GO */
+			memset(&cfg->p2p->ssid.SSID[0], 0x00, 32);
+			memcpy(cfg->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len);
+			cfg->p2p->ssid.SSID_len = ssid_ie->len;
+		}
+	}
+
+	if (wl_cfg80211_parse_ies((u8 *)info->tail,
+		info->tail_len, &ies) < 0) {
+		WL_ERR(("Beacon get IEs failed \n"));
+		err = -EINVAL;
+		goto fail;
+	}
+
+	if (wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+		VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
+		info->tail_len) < 0) {
+		WL_ERR(("Beacon set IEs failed \n"));
+		goto fail;
+	} else {
+		WL_DBG(("Applied Vndr IEs for Beacon \n"));
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	if (wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+		VNDR_IE_PRBRSP_FLAG, (u8 *)info->proberesp_ies,
+		info->proberesp_ies_len) < 0) {
+		WL_ERR(("ProbeRsp set IEs failed \n"));
+		goto fail;
+	} else {
+		WL_DBG(("Applied Vndr IEs for ProbeRsp \n"));
+	}
+#endif
+
+	if (!wl_cfgp2p_bss_isup(dev, bssidx) &&
+		(wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx) < 0))
+	{
+		WL_ERR(("Beacon set security failed \n"));
+		goto fail;
+	}
+
+	/* Set BI and DTIM period */
+	if (info->interval) {
+		if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD,
+			&info->interval, sizeof(s32), true)) < 0) {
+			WL_ERR(("Beacon Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+	if (info->dtim_period) {
+		if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD,
+			&info->dtim_period, sizeof(s32), true)) < 0) {
+			WL_ERR(("DTIM Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if (wl_cfg80211_bcn_bringup_ap(dev, &ies, dev_role, bssidx) < 0) {
+		WL_ERR(("Beacon bring up AP/GO failed \n"));
+		goto fail;
+	}
+
+	if (wl_get_drv_status(cfg, AP_CREATED, dev)) {
+		/* Soft AP already running. Update changed params */
+		if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
+			WL_ERR(("Hostapd update sec failed \n"));
+			err = -EINVAL;
+			goto fail;
+		}
+	}
+
+	/* Enable Probe Req filter */
+	if (((dev_role == NL80211_IFTYPE_P2P_GO) ||
+		(dev_role == NL80211_IFTYPE_AP)) && (ies.wps_ie != NULL)) {
+		wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+		if (pbc)
+			wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+	}
+
+	WL_DBG(("** ADD/SET beacon done **\n"));
+
+fail:
+	if (err) {
+		WL_ERR(("ADD/SET beacon failed\n"));
+		wldev_iovar_setint(dev, "mpc", 1);
+	}
+	return err;
+
+}
+#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+
+#ifdef WL_SCHED_SCAN
+#define PNO_TIME		30
+#define PNO_REPEAT		4
+#define PNO_FREQ_EXPO_MAX	2
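+/*
+ * Scheduled (PNO) scan: the cfg80211 match sets are converted to a PFN SSID
+ * list and handed to the DHD PNO engine with the scan period, repeat count
+ * and frequency-expansion exponent defined above.
+ */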
+static int
+wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
+                             struct net_device *dev,
+                             struct cfg80211_sched_scan_request *request)
+{
+	ushort pno_time = PNO_TIME;
+	int pno_repeat = PNO_REPEAT;
+	int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
+	wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_ssid *ssid = NULL;
+	int ssid_count = 0;
+	int i;
+	int ret = 0;
+
+	WL_DBG(("Enter \n"));
+	WL_PNO((">>> SCHED SCAN START\n"));
+	WL_PNO(("Enter n_match_sets:%d   n_ssids:%d \n",
+		request->n_match_sets, request->n_ssids));
+	WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
+		request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
+
+
+	if (!request || !request->n_ssids || !request->n_match_sets) {
+		WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
+		return -EINVAL;
+	}
+
+	memset(&ssids_local, 0, sizeof(ssids_local));
+
+	if (request->n_match_sets > 0) {
+		for (i = 0; i < request->n_match_sets; i++) {
+			ssid = &request->match_sets[i].ssid;
+			memcpy(ssids_local[i].SSID, ssid->ssid, ssid->ssid_len);
+			ssids_local[i].SSID_len = ssid->ssid_len;
+			WL_PNO((">>> PNO filter set for ssid (%s) \n", ssid->ssid));
+			ssid_count++;
+		}
+	}
+
+	if (request->n_ssids > 0) {
+		for (i = 0; i < request->n_ssids; i++) {
+			/* Active scan req for ssids */
+			WL_PNO((">>> Active scan req for ssid (%s) \n", request->ssids[i].ssid));
+
+			/* match_set ssids are a superset of the n_ssids list, so we
+			 * need not add them separately
+			 */
+		}
+	}
+
+	if (ssid_count) {
+		if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, request->n_match_sets,
+			pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) {
+			WL_ERR(("PNO setup failed!! ret=%d \n", ret));
+			return -EINVAL;
+		}
+		cfg->sched_scan_req = request;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	WL_DBG(("Enter \n"));
+	WL_PNO((">>> SCHED SCAN STOP\n"));
+
+	if (dhd_dev_pno_stop_for_ssid(dev) < 0)
+		WL_ERR(("PNO Stop for SSID failed"));
+
+	if (cfg->scan_request && cfg->sched_scan_running) {
+		WL_PNO((">>> Sched scan running. Aborting it..\n"));
+		wl_notify_escan_complete(cfg, dev, true, true);
+	}
+
+	cfg->sched_scan_req = NULL;
+	cfg->sched_scan_running = FALSE;
+
+	return 0;
+}
+#endif /* WL_SCHED_SCAN */
+
+#ifdef WL_SUPPORT_ACS
+/*
+ * Currently the dump_obss IOVAR is returning string as output so we need to
+ * parse the output buffer in an unoptimized way. Going forward if we get the
+ * IOVAR output in binary format this method can be optimized
+ */
+static int wl_parse_dump_obss(char *buf, struct wl_dump_survey *survey)
+{
+	int i;
+	char *token;
+	char delim[] = " \n";
+
+	token = strsep(&buf, delim);
+	while (token != NULL) {
+		if (!strcmp(token, "OBSS")) {
+			for (i = 0; i < OBSS_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->obss = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "IBSS")) {
+			for (i = 0; i < IBSS_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->ibss = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "TXDur")) {
+			for (i = 0; i < TX_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->tx = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "Category")) {
+			for (i = 0; i < CTG_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->no_ctg = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "Packet")) {
+			for (i = 0; i < PKT_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->no_pckt = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "Opp(time):")) {
+			for (i = 0; i < IDLE_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->idle = simple_strtoul(token, NULL, 10);
+		}
+
+		token = strsep(&buf, delim);
+	}
+
+	return 0;
+}
+
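+/*
+ * Issue the "dump_obss" iovar (retrying up to IOCTL_RETRY_COUNT times) and
+ * parse its text output into the wl_dump_survey structure.
+ */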
+static int wl_dump_obss(struct net_device *ndev, cca_msrmnt_query req,
+	struct wl_dump_survey *survey)
+{
+	cca_stats_n_flags *results;
+	char *buf;
+	int retry, err;
+
+	buf = kzalloc(sizeof(char) * WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!buf)) {
+		WL_ERR(("%s: buf alloc failed\n", __func__));
+		return -ENOMEM;
+	}
+
+	retry = IOCTL_RETRY_COUNT;
+	while (retry--) {
+		err = wldev_iovar_getbuf(ndev, "dump_obss", &req, sizeof(req),
+			buf, WLC_IOCTL_MAXLEN, NULL);
+		if (err >=  0) {
+			break;
+		}
+		WL_DBG(("attempt = %d, err = %d, \n",
+			(IOCTL_RETRY_COUNT - retry), err));
+	}
+
+	if (retry <= 0)	{
+		WL_ERR(("failure, dump_obss IOVAR failed\n"));
+		err = -BCME_ERROR;
+		goto exit;
+	}
+
+	results = (cca_stats_n_flags *)(buf);
+	wl_parse_dump_obss(results->buf, survey);
+	kfree(buf);
+
+	return 0;
+exit:
+	kfree(buf);
+	return err;
+}
+
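+/*
+ * ACS survey dump: switch to the requested channel, read the PHY noise,
+ * then run a dump_obss measurement for ACS_MSRMNT_DELAY ms and report the
+ * channel busy/rx/tx times to cfg80211.
+ */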
+static int wl_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
+	int idx, struct survey_info *info)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_dump_survey *survey;
+	struct ieee80211_supported_band *band;
+	struct ieee80211_channel *chan;
+	cca_msrmnt_query req;
+	int val, err, noise, retry;
+
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		return -ENOENT;
+	}
+	band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	if (band && idx >= band->n_channels) {
+		idx -= band->n_channels;
+		band = NULL;
+	}
+
+	if (!band || idx >= band->n_channels) {
+		/* Move to 5G band */
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+		if (idx >= band->n_channels) {
+			return -ENOENT;
+		}
+	}
+
+	chan = &band->channels[idx];
+	/* Setting current channel to the requested channel */
+	if ((err = wl_cfg80211_set_channel(wiphy, ndev, chan,
+		NL80211_CHAN_HT20)) < 0) {
+		WL_ERR(("Set channel failed \n"));
+	}
+
+	if (!idx) {
+		/* Disable mpc */
+		val = 0;
+		err = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
+			sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+			&cfg->ioctl_buf_sync);
+		if (err < 0) {
+			WL_ERR(("set 'mpc' failed, error = %d\n", err));
+		}
+
+		/* Set interface up, explicitly. */
+		val = 1;
+		err = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true);
+		if (err < 0) {
+			WL_ERR(("set interface up failed, error = %d\n", err));
+		}
+	}
+
+	/* Get noise value */
+	retry = IOCTL_RETRY_COUNT;
+	while (retry--) {
+		err = wldev_ioctl(ndev, WLC_GET_PHY_NOISE, &noise,
+			sizeof(noise), false);
+		if (err >=  0) {
+			break;
+		}
+		WL_DBG(("attempt = %d, err = %d, \n",
+			(IOCTL_RETRY_COUNT - retry), err));
+	}
+
+	if (retry <= 0)	{
+		WL_ERR(("Get Phy Noise failed, error = %d\n", err));
+		noise = CHAN_NOISE_DUMMY;
+	}
+
+	survey = (struct wl_dump_survey *) kzalloc(sizeof(struct wl_dump_survey),
+		GFP_KERNEL);
+	if (unlikely(!survey)) {
+		WL_ERR(("%s: alloc failed\n", __func__));
+		return -ENOMEM;
+	}
+
+	/* Start Measurement for obss stats on current channel */
+	req.msrmnt_query = 0;
+	req.time_req = ACS_MSRMNT_DELAY;
+	if ((err = wl_dump_obss(ndev, req, survey)) < 0) {
+		goto exit;
+	}
+
+	/*
+	 * Wait for the measurement to complete, adding a buffer value of 10 to take
+	 * into consideration any delay in IOVAR completion
+	 */
+	msleep(ACS_MSRMNT_DELAY + 10);
+
+	/* Issue IOVAR to collect measurement results */
+	req.msrmnt_query = 1;
+	if ((err = wl_dump_obss(ndev, req, survey)) < 0) {
+		goto exit;
+	}
+
+	info->channel = chan;
+	info->noise = noise;
+	info->channel_time = ACS_MSRMNT_DELAY;
+	info->channel_time_busy = ACS_MSRMNT_DELAY - survey->idle;
+	info->channel_time_rx = survey->obss + survey->ibss + survey->no_ctg +
+		survey->no_pckt;
+	info->channel_time_tx = survey->tx;
+	info->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_CHANNEL_TIME |
+		SURVEY_INFO_CHANNEL_TIME_BUSY | SURVEY_INFO_CHANNEL_TIME_RX |
+		SURVEY_INFO_CHANNEL_TIME_TX;
+	kfree(survey);
+
+	return 0;
+exit:
+	kfree(survey);
+	return err;
+}
+#endif /* WL_SUPPORT_ACS */
+
+static struct cfg80211_ops wl_cfg80211_ops = {
+	.add_virtual_intf = wl_cfg80211_add_virtual_iface,
+	.del_virtual_intf = wl_cfg80211_del_virtual_iface,
+	.change_virtual_intf = wl_cfg80211_change_virtual_iface,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	.start_p2p_device = wl_cfgp2p_start_p2p_device,
+	.stop_p2p_device = wl_cfgp2p_stop_p2p_device,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	.scan = wl_cfg80211_scan,
+	.set_wiphy_params = wl_cfg80211_set_wiphy_params,
+	.join_ibss = wl_cfg80211_join_ibss,
+	.leave_ibss = wl_cfg80211_leave_ibss,
+	.get_station = wl_cfg80211_get_station,
+	.set_tx_power = wl_cfg80211_set_tx_power,
+	.get_tx_power = wl_cfg80211_get_tx_power,
+	.add_key = wl_cfg80211_add_key,
+	.del_key = wl_cfg80211_del_key,
+	.get_key = wl_cfg80211_get_key,
+	.set_default_key = wl_cfg80211_config_default_key,
+	.set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
+	.set_power_mgmt = wl_cfg80211_set_power_mgmt,
+	.connect = wl_cfg80211_connect,
+	.disconnect = wl_cfg80211_disconnect,
+	.suspend = wl_cfg80211_suspend,
+	.resume = wl_cfg80211_resume,
+	.set_pmksa = wl_cfg80211_set_pmksa,
+	.del_pmksa = wl_cfg80211_del_pmksa,
+	.flush_pmksa = wl_cfg80211_flush_pmksa,
+	.remain_on_channel = wl_cfg80211_remain_on_channel,
+	.cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel,
+	.mgmt_tx = wl_cfg80211_mgmt_tx,
+	.mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
+	.change_bss = wl_cfg80211_change_bss,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) || defined(WL_COMPAT_WIRELESS)
+	.set_channel = wl_cfg80211_set_channel,
+#endif /* ((LINUX_VERSION < VERSION(3, 6, 0)) || WL_COMPAT_WIRELESS */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) && !defined(WL_COMPAT_WIRELESS)
+	.set_beacon = wl_cfg80211_add_set_beacon,
+	.add_beacon = wl_cfg80211_add_set_beacon,
+#else
+	.change_beacon = wl_cfg80211_change_beacon,
+	.start_ap = wl_cfg80211_start_ap,
+	.stop_ap = wl_cfg80211_stop_ap,
+#endif /* LINUX_VERSION < KERNEL_VERSION(3,4,0) && !WL_COMPAT_WIRELESS */
+#ifdef WL_SCHED_SCAN
+	.sched_scan_start = wl_cfg80211_sched_scan_start,
+	.sched_scan_stop = wl_cfg80211_sched_scan_stop,
+#endif /* WL_SCHED_SCAN */
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+	.del_station = wl_cfg80211_del_station,
+	.change_station = wl_cfg80211_change_station,
+	.mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait,
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+	.tdls_oper = wl_cfg80211_tdls_oper,
+#endif /* LINUX_VERSION > VERSION(3, 2, 0) || WL_COMPAT_WIRELESS */
+#ifdef WL_SUPPORT_ACS
+	.dump_survey = wl_cfg80211_dump_survey,
+#endif /* WL_SUPPORT_ACS */
+#ifdef WL_CFG80211_ACL
+	.set_mac_acl = wl_cfg80211_set_mac_acl,
+#endif /* WL_CFG80211_ACL */
+};
+
+s32 wl_mode_to_nl80211_iftype(s32 mode)
+{
+	s32 err = 0;
+
+	switch (mode) {
+	case WL_MODE_BSS:
+		return NL80211_IFTYPE_STATION;
+	case WL_MODE_IBSS:
+		return NL80211_IFTYPE_ADHOC;
+	case WL_MODE_AP:
+		return NL80211_IFTYPE_AP;
+	default:
+		return NL80211_IFTYPE_UNSPECIFIED;
+	}
+
+	return err;
+}
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+static int
+wl_cfg80211_reg_notifier(
+	struct wiphy *wiphy,
+	struct regulatory_request *request)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+	int ret = 0;
+
+	if (!request || !cfg) {
+		WL_ERR(("Invalid arg\n"));
+		return -EINVAL;
+	}
+
+	WL_DBG(("ccode: %c%c Initiator: %d\n",
+		request->alpha2[0], request->alpha2[1], request->initiator));
+
+	/* We support only REGDOM_SET_BY_USER as of now */
+	if ((request->initiator != NL80211_REGDOM_SET_BY_USER) &&
+		(request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+		WL_ERR(("reg_notifier for intiator:%d not supported : set default\n",
+			request->initiator));
+		/* in case of no supported country by regdb
+		     lets driver setup platform default Locale
+		*/
+	}
+
+	WL_ERR(("Set country code %c%c from %s\n",
+		request->alpha2[0], request->alpha2[1],
+		((request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) ? " 11d AP" : "User")));
+
+	if ((ret = wldev_set_country(bcmcfg_to_prmry_ndev(cfg), request->alpha2,
+		false, (request->initiator == NL80211_REGDOM_SET_BY_USER ? true : false))) < 0) {
+		WL_ERR(("set country Failed :%d\n", ret));
+	}
+
+	return ret;
+}
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+static const struct wiphy_wowlan_support brcm_wowlan_support = {
+	.flags = WIPHY_WOWLAN_ANY,
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM */
+
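+/*
+ * Allocate and register the wiphy: advertise scan/PNO limits, the supported
+ * interface modes and combinations, cipher suites and feature flags, apply
+ * the custom regulatory domain and attach the vendor command handlers.
+ */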
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, void *context)
+{
+	s32 err = 0;
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || defined(WL_COMPAT_WIRELESS))
+	dhd_pub_t *dhd = (dhd_pub_t *)context;
+	BCM_REFERENCE(dhd);
+
+	if (!dhd) {
+		WL_ERR(("DHD is NULL!!"));
+		err = -ENODEV;
+		return err;
+	}
+#endif
+
+	wdev->wiphy =
+	    wiphy_new(&wl_cfg80211_ops, sizeof(struct bcm_cfg80211));
+	if (unlikely(!wdev->wiphy)) {
+		WL_ERR(("Couldn not allocate wiphy device\n"));
+		err = -ENOMEM;
+		return err;
+	}
+	set_wiphy_dev(wdev->wiphy, sdiofunc_dev);
+	wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+	/* Report how many SSIDs the driver can support per scan request */
+	wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX;
+	wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+#ifdef WL_SCHED_SCAN
+	wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT;
+	wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT;
+	wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+#endif /* WL_SCHED_SCAN */
+	wdev->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION)
+		| BIT(NL80211_IFTYPE_ADHOC)
+#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF)
+		| BIT(NL80211_IFTYPE_MONITOR)
+#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF)
+		| BIT(NL80211_IFTYPE_P2P_CLIENT)
+		| BIT(NL80211_IFTYPE_P2P_GO)
+#endif /* WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		| BIT(NL80211_IFTYPE_P2P_DEVICE)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		| BIT(NL80211_IFTYPE_AP);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+	(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
+	WL_DBG(("Setting interface combinations for common mode\n"));
+	wdev->wiphy->iface_combinations = common_iface_combinations;
+	wdev->wiphy->n_iface_combinations =
+		ARRAY_SIZE(common_iface_combinations);
+#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+
+	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+
+	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wdev->wiphy->cipher_suites = __wl_cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+	wdev->wiphy->max_remain_on_channel_duration = 5000;
+	wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes;
+#ifndef WL_POWERSAVE_DISABLED
+	wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#else
+	wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#endif				/* !WL_POWERSAVE_DISABLED */
+	wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+		WIPHY_FLAG_4ADDR_AP |
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && !defined(WL_COMPAT_WIRELESS)
+		WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
+#endif
+		WIPHY_FLAG_4ADDR_STATION;
+#if (defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && ((LINUX_VERSION_CODE >= \
+	KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)) && !0
+	/* Please use supplicant ver >= 76 if FW_ROAM is enabled.
+	 * If the driver advertises FW_ROAM, an older supplicant won't
+	 * send the BSSID & freq in the connect request command. This
+	 * delays the ASSOC as the FW needs to do a full scan
+	 * before attempting to connect. Supplicant >= 76 has a patch
+	 * to allow bssid & freq to be sent down to the driver even if
+	 * FW ROAM is advertised.
+	 */
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || defined(WL_COMPAT_WIRELESS)
+	wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+		WIPHY_FLAG_OFFCHAN_TX;
+#endif
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	4, 0))
+	/* From the 3.4 kernel onwards the AP_SME flag can be advertised
+	 * to remove the patch from the supplicant
+	 */
+	wdev->wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
+
+#ifdef WL_CFG80211_ACL
+	/* Configure ACL capabilities. */
+	wdev->wiphy->max_acl_mac_addrs = MAX_NUM_MAC_FILT;
+#endif
+
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || defined(WL_COMPAT_WIRELESS))
+	/* The supplicant distinguishes between SoftAP mode and other
+	 * modes (e.g. P2P, WPS, HS2.0) when it builds the probe
+	 * response frame, from Supplicant MR1 and kernel 3.4.0 or
+	 * later. To add the vendor-specific IE into the probe
+	 * response frame in SoftAP mode, the AP_PROBE_RESP_OFFLOAD
+	 * flag is set in wiphy->flags.
+	 */
+	if (dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) {
+		wdev->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+		wdev->wiphy->probe_resp_offload = 0;
+	}
+#endif
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) */
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+	wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier;
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+#endif
+
+#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF)
+	/*
+	 * From the linux-3.10 kernel, a wowlan packet filter is mandated to avoid
+	 * disconnection of the connected network before suspend. So a dummy wowlan
+	 * filter is configured for kernels linux-3.8 and above.
+	 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	wdev->wiphy->wowlan = &brcm_wowlan_support;
+#else
+	wdev->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */
+
+	WL_DBG(("Registering custom regulatory)\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	wdev->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+#else
+	wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+#endif
+	wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
+
+	WL_DBG(("Registering Vendor80211)\n"));
+	err = cfgvendor_attach(wdev->wiphy);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Couldn not attach vendor commands (%d)\n", err));
+	}
+
+	/* Now we can register wiphy with cfg80211 module */
+	err = wiphy_register(wdev->wiphy);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Couldn not register wiphy device (%d)\n", err));
+		wiphy_free(wdev->wiphy);
+	}
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(3, 3, 0))) && defined(WL_IFACE_COMB_NUM_CHANNELS)
+	wdev->wiphy->flags &= ~WIPHY_FLAG_ENFORCE_COMBINATIONS;
+#endif
+
+	return err;
+}
+
+static void wl_free_wdev(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev = cfg->wdev;
+	struct wiphy *wiphy;
+	if (!wdev) {
+		WL_ERR(("wdev is invalid\n"));
+		return;
+	}
+	wiphy = wdev->wiphy;
+
+	cfgvendor_detach(wdev->wiphy);
+
+	wiphy_unregister(wdev->wiphy);
+	wdev->wiphy->dev.parent = NULL;
+
+	wl_delete_all_netinfo(cfg);
+	wiphy_free(wiphy);
+	/* PLEASE do NOT call any function after wiphy_free, the driver's private structure "cfg",
+	 * which is the private part of wiphy, has been freed in wiphy_free !!!!!!!!!!!
+	 */
+}
+
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg)
+{
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;	/* must be initialized */
+	s32 err = 0;
+	s32 i;
+
+	bss_list = cfg->bss_list;
+	WL_DBG(("scanned AP count (%d)\n", bss_list->count));
+	bi = next_bss(bss_list, bi);
+	for_each_bss(bss_list, bi, i) {
+		err = wl_inform_single_bss(cfg, bi, false);
+		if (unlikely(err))
+			break;
+	}
+	return err;
+}
+
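+/*
+ * Convert one firmware wl_bss_info record into a synthesized beacon/probe
+ * response management frame and hand it to cfg80211_inform_bss_frame() so
+ * the BSS shows up in the cfg80211 scan results.
+ */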
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam)
+{
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_channel *channel;
+	struct ieee80211_supported_band *band;
+	struct wl_cfg80211_bss_info *notif_bss_info;
+	struct wl_scan_req *sr = wl_to_sr(cfg);
+	struct beacon_proberesp *beacon_proberesp;
+	struct cfg80211_bss *cbss = NULL;
+	s32 mgmt_type;
+	s32 signal;
+	u32 freq;
+	s32 err = 0;
+	gfp_t aflags;
+
+	if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
+		WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+		return err;
+	}
+	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt)
+		- sizeof(u8) + WL_BSS_INFO_MAX, aflags);
+	if (unlikely(!notif_bss_info)) {
+		WL_ERR(("notif_bss_info alloc failed\n"));
+		return -ENOMEM;
+	}
+	mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
+	notif_bss_info->channel =
+		wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+
+	if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	notif_bss_info->rssi = wl_rssi_offset(dtoh16(bi->RSSI));
+	memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+	mgmt_type = cfg->active_scan ?
+		IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
+	if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
+	    mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type);
+	}
+	beacon_proberesp = cfg->active_scan ?
+		(struct beacon_proberesp *)&mgmt->u.probe_resp :
+		(struct beacon_proberesp *)&mgmt->u.beacon;
+	beacon_proberesp->timestamp = 0;
+	beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
+	beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
+	wl_rst_ie(cfg);
+	wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, roam);
+	wl_mrg_ie(cfg, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
+	wl_cp_ie(cfg, beacon_proberesp->variable, WL_BSS_INFO_MAX -
+		offsetof(struct wl_cfg80211_bss_info, frame_buf));
+	notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
+		u.beacon.variable) + wl_get_ielen(cfg);
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band);
+#endif
+	if (freq == 0) {
+		WL_ERR(("Invalid channel, fail to chcnage channel to freq\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	channel = ieee80211_get_channel(wiphy, freq);
+	if (unlikely(!channel)) {
+		WL_ERR(("ieee80211_get_channel error\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM"
+			"mgmt_type %d frame_len %d\n", bi->SSID,
+			notif_bss_info->rssi, notif_bss_info->channel,
+			mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type,
+			notif_bss_info->frame_len));
+
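+	/* cfg80211 expects the signal in mBm (dBm * 100) when the wiphy reports
+	 * CFG80211_SIGNAL_TYPE_MBM, hence the scaling below.
+	 */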
+	signal = notif_bss_info->rssi * 100;
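+	/* The frame assembled above has its timestamp zeroed, so synthesize a
+	 * monotonically increasing one before handing it to cfg80211.
+	 */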
+	if (!mgmt->u.probe_resp.timestamp) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+		struct timespec ts;
+		get_monotonic_boottime(&ts);
+		mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000)
+				+ ts.tv_nsec / 1000;
+#else
+		struct timeval tv;
+		do_gettimeofday(&tv);
+		mgmt->u.probe_resp.timestamp = ((u64)tv.tv_sec*1000000)
+				+ tv.tv_usec;
+#endif
+	}
+
+
+	cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+		le16_to_cpu(notif_bss_info->frame_len), signal, aflags);
+	if (unlikely(!cbss)) {
+		WL_ERR(("cfg80211_inform_bss_frame error\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+	cfg80211_put_bss(wiphy, cbss);
+#else
+	cfg80211_put_bss(cbss);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+	kfree(notif_bss_info);
+	return err;
+}
+
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, struct net_device *ndev)
+{
+	u32 event = ntoh32(e->event_type);
+	u32 status =  ntoh32(e->status);
+	u16 flags = ntoh16(e->flags);
+
+	WL_DBG(("event %d, status %d flags %x\n", event, status, flags));
+	if (event == WLC_E_SET_SSID) {
+		if (status == WLC_E_STATUS_SUCCESS) {
+			if (!wl_is_ibssmode(cfg, ndev))
+				return true;
+		}
+	} else if (event == WLC_E_LINK) {
+		if (flags & WLC_EVENT_MSG_LINK)
+			return true;
+	}
+
+	WL_DBG(("wl_is_linkup false\n"));
+	return false;
+}
+
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u16 flags = ntoh16(e->flags);
+
+	if (event == WLC_E_DEAUTH_IND ||
+		event == WLC_E_DISASSOC_IND ||
+		event == WLC_E_DISASSOC ||
+		event == WLC_E_DEAUTH) {
+#if (WL_DBG_LEVEL > 0)
+		WL_ERR(("Link down Reason : WLC_E_%s\n", wl_dbg_estr[event]));
+#endif /* (WL_DBG_LEVEL > 0) */
+		return true;
+	} else if (event == WLC_E_LINK) {
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+#if (WL_DBG_LEVEL > 0)
+			WL_ERR(("Link down Reason : WLC_E_%s\n", wl_dbg_estr[event]));
+#endif /* (WL_DBG_LEVEL > 0) */
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+
+	if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS)
+		return true;
+	if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS)
+		return true;
+
+	return false;
+}
+
+/* The mainline kernel >= 3.2.0 has support for indicating new/del station
+ * to AP/P2P GO via events. If this change is backported to kernel for which
+ * this driver is being built, then define WL_CFG80211_STA_EVENT. You
+ * should use this new/del sta event mechanism for BRCM supplicant >= 22.
+ */
+static s32
+wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+	u32 reason = ntoh32(e->reason);
+	u32 len = ntoh32(e->datalen);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) \
+	&& !defined(WL_COMPAT_WIRELESS)
+	bool isfree = false;
+	u8 *mgmt_frame;
+	u8 bsscfgidx = e->bsscfgidx;
+	s32 freq;
+	s32 channel;
+	u8 *body = NULL;
+	u16 fc = 0;
+
+	struct ieee80211_supported_band *band;
+	struct ether_addr da;
+	struct ether_addr bssid;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	channel_info_t ci;
+#else
+	struct station_info sinfo;
+#endif /* (LINUX_VERSION < VERSION(3,2,0)) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+
+	WL_DBG(("event %d status %d reason %d\n", event, ntoh32(e->status), reason));
+	/* if link down, bsscfg is disabled. */
+	if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS &&
+		wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) {
+		wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
+		WL_INFORM(("AP mode link down !! \n"));
+		complete(&cfg->iface_disable);
+		return 0;
+	}
+
+	if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
+		WL_ERR(("event %s(%d) status %d reason %d\n",
+		bcmevent_get_name(event), event, ntoh32(e->status), reason));
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) \
+	&& !defined(WL_COMPAT_WIRELESS)
+	WL_DBG(("Enter \n"));
+	if (!len && (event == WLC_E_DEAUTH)) {
+		len = 2; /* reason code field */
+		data = &reason;
+	}
+	if (len) {
+		body = kzalloc(len, GFP_KERNEL);
+
+		if (body == NULL) {
+			WL_ERR(("wl_notify_connect_status: Failed to allocate body\n"));
+			return WL_INVALID;
+		}
+	}
+	memset(&bssid, 0, ETHER_ADDR_LEN);
+	WL_DBG(("Enter event %d ndev %p\n", event, ndev));
+	if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
+		kfree(body);
+		return WL_INVALID;
+	}
+	if (len)
+		memcpy(body, data, len);
+
+	wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+		NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+	memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+	err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
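+	/* Map the firmware event to the management frame subtype that will be
+	 * synthesized and passed up via cfg80211_rx_mgmt().
+	 */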
+	switch (event) {
+		case WLC_E_ASSOC_IND:
+			fc = FC_ASSOC_REQ;
+			break;
+		case WLC_E_REASSOC_IND:
+			fc = FC_REASSOC_REQ;
+			break;
+		case WLC_E_DISASSOC_IND:
+			fc = FC_DISASSOC;
+			break;
+		case WLC_E_DEAUTH_IND:
+			fc = FC_DISASSOC;
+			break;
+		case WLC_E_DEAUTH:
+			fc = FC_DISASSOC;
+			break;
+		default:
+			fc = 0;
+			goto exit;
+	}
+	if ((err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false))) {
+		kfree(body);
+		return err;
+	}
+
+	channel = dtoh32(ci.hw_channel);
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band"));
+		if (body)
+			kfree(body);
+		return -EINVAL;
+	}
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+	freq = ieee80211_channel_to_frequency(channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+
+	err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid,
+		&mgmt_frame, &len, body);
+	if (err < 0)
+		goto exit;
+	isfree = true;
+
+	if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+	} else if (event == WLC_E_DISASSOC_IND) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+	} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+	}
+
+exit:
+	if (isfree)
+		kfree(mgmt_frame);
+	if (body)
+		kfree(body);
+#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+	sinfo.filled = 0;
+	if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
+		reason == DOT11_SC_SUCCESS) {
+		sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+		if (!data) {
+			WL_ERR(("No IEs present in ASSOC/REASSOC_IND"));
+			return -EINVAL;
+		}
+		sinfo.assoc_req_ies = data;
+		sinfo.assoc_req_ies_len = len;
+		cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC);
+	} else if (event == WLC_E_DISASSOC_IND) {
+		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+	} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+	}
+#endif /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+	return err;
+}
+
+static s32
+wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e)
+{
+	u32 reason = ntoh32(e->reason);
+	u32 event = ntoh32(e->event_type);
+	struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+	WL_DBG(("event type : %d, reason : %d\n", event, reason));
+	if (sec) {
+		switch (event) {
+		case WLC_E_ASSOC:
+		case WLC_E_AUTH:
+			sec->auth_assoc_res_status = reason;
+			break;
+		default:
+			break;
+		}
+	} else
+		WL_ERR(("sec is NULL\n"));
+	return 0;
+}
+
+static s32
+wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+	u16 flags = ntoh16(e->flags);
+	u32 status =  ntoh32(e->status);
+	bool active;
+
+	if (event == WLC_E_JOIN) {
+		WL_DBG(("joined in IBSS network\n"));
+	}
+	if (event == WLC_E_START) {
+		WL_DBG(("started IBSS network\n"));
+	}
+	if (event == WLC_E_JOIN || event == WLC_E_START ||
+		(event == WLC_E_LINK && (flags == WLC_EVENT_MSG_LINK))) {
+		if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+			/* ROAM or Redundant */
+			u8 *cur_bssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+			if (memcmp(cur_bssid, &e->addr, ETHER_ADDR_LEN) == 0) {
+				WL_DBG(("IBSS connected event from same BSSID("
+					MACDBG "), ignore it\n", MAC2STRDBG(cur_bssid)));
+				return err;
+			}
+			WL_INFORM(("IBSS BSSID is changed from " MACDBG " to " MACDBG "\n",
+				MAC2STRDBG(cur_bssid), MAC2STRDBG((u8 *)&e->addr)));
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+			cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
+		}
+		else {
+			/* New connection */
+			WL_INFORM(("IBSS connected to " MACDBG "\n", MAC2STRDBG((u8 *)&e->addr)));
+			wl_link_up(cfg);
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+			cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
+			wl_set_drv_status(cfg, CONNECTED, ndev);
+			active = true;
+			wl_update_prof(cfg, ndev, NULL, (void *)&active, WL_PROF_ACT);
+		}
+	} else if ((event == WLC_E_LINK && !(flags & WLC_EVENT_MSG_LINK)) ||
+		event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) {
+		wl_clr_drv_status(cfg, CONNECTED, ndev);
+		wl_link_down(cfg);
+		wl_init_prof(cfg, ndev);
+	}
+	else if (event == WLC_E_SET_SSID && status == WLC_E_STATUS_NO_NETWORKS) {
+		WL_DBG(("no action - join fail (IBSS mode)\n"));
+	}
+	else {
+		WL_DBG(("no action (IBSS mode)\n"));
+	}
+	return err;
+}
+
+static s32
+wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	bool act;
+	struct net_device *ndev = NULL;
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+		err = wl_notify_connect_status_ap(cfg, ndev, e, data);
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS) {
+		err = wl_notify_connect_status_ibss(cfg, ndev, e, data);
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
+		WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n",
+			ntoh32(e->event_type), ntoh32(e->status), ndev));
+		if (event == WLC_E_ASSOC || event == WLC_E_AUTH) {
+			wl_get_auth_assoc_status(cfg, ndev, e);
+			return 0;
+		}
+		if (wl_is_linkup(cfg, e, ndev)) {
+			wl_link_up(cfg);
+			act = true;
+			if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+				printf("wl_bss_connect_done succeeded with " MACDBG "\n",
+					MAC2STRDBG((u8*)(&e->addr)));
+				wl_bss_connect_done(cfg, ndev, e, data, true);
+				WL_DBG(("joined in BSS network \"%s\"\n",
+					((struct wlc_ssid *)
+					wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID));
+			}
+			wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+			wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+
+		} else if (wl_is_linkdown(cfg, e)) {
+			if (cfg->scan_request)
+				wl_notify_escan_complete(cfg, ndev, true, true);
+			if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+				scb_val_t scbval;
+				u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+				s32 reason = 0;
+				if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND)
+					reason = ntoh32(e->reason);
+				/* WLAN_REASON_UNSPECIFIED is used for hang up event in Android */
+				reason = (reason == WLAN_REASON_UNSPECIFIED)? 0 : reason;
+
+				printf("link down if %s may call cfg80211_disconnected. "
+					"event : %d, reason=%d from " MACDBG "\n",
+					ndev->name, event, ntoh32(e->reason),
+					MAC2STRDBG((u8*)(&e->addr)));
+				if (!cfg->roam_offload &&
+					memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
+					WL_ERR(("BSSID of event is not the connected BSSID "
+						"(ignore it) cur: " MACDBG " event: " MACDBG "\n",
+						MAC2STRDBG(curbssid), MAC2STRDBG((u8*)(&e->addr))));
+					return 0;
+				}
+				wl_clr_drv_status(cfg, CONNECTED, ndev);
+				if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+					/* To make sure we disconnect, explicitly send a disassoc
+					 * for the BSSID 00:00:00:00:00:00 issue
+					 */
+					scbval.val = WLAN_REASON_DEAUTH_LEAVING;
+
+					memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+					scbval.val = htod32(scbval.val);
+					err = wldev_ioctl(ndev, WLC_DISASSOC, &scbval,
+						sizeof(scb_val_t), true);
+					if (err < 0) {
+						WL_ERR(("WLC_DISASSOC error %d\n", err));
+						err = 0;
+					}
+					cfg80211_disconnected(ndev, reason, NULL, 0, GFP_KERNEL);
+					wl_link_down(cfg);
+					wl_init_prof(cfg, ndev);
+				}
+			}
+			else if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+				printf("link down, during connecting\n");
+#ifdef ESCAN_RESULT_PATCH
+				if ((memcmp(connect_req_bssid, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+					(memcmp(&e->addr, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+					(memcmp(&e->addr, connect_req_bssid, ETHER_ADDR_LEN) == 0))
+					/* In case this event comes while associating another AP */
+#endif /* ESCAN_RESULT_PATCH */
+					wl_bss_connect_done(cfg, ndev, e, data, false);
+			}
+			wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+
+			/* if link down, bsscfg is disabled */
+			if (ndev != bcmcfg_to_prmry_ndev(cfg))
+				complete(&cfg->iface_disable);
+
+		} else if (wl_is_nonetwork(cfg, e)) {
+			printf("connect failed event=%d e->status %d e->reason %d \n",
+				event, (int)ntoh32(e->status), (int)ntoh32(e->reason));
+			/* Clean up any pending scan request */
+			if (cfg->scan_request)
+				wl_notify_escan_complete(cfg, ndev, true, true);
+			if (wl_get_drv_status(cfg, CONNECTING, ndev))
+				wl_bss_connect_done(cfg, ndev, e, data, false);
+		} else {
+			WL_DBG(("%s nothing\n", __FUNCTION__));
+		}
+	}
+	else {
+		WL_ERR(("Invalid ndev status %d\n", wl_get_mode_by_netdev(cfg, ndev)));
+	}
+	return err;
+}
+
+void wl_cfg80211_set_rmc_pid(int pid)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	if (pid > 0)
+		cfg->rmc_event_pid = pid;
+	WL_DBG(("set pid for rmc event : pid=%d\n", pid));
+}
+
+#ifdef WLAIBSS
+void wl_cfg80211_set_txfail_pid(int pid)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	if (pid > 0)
+		cfg->aibss_txfail_pid = pid;
+	WL_DBG(("set pid for aibss fail event : pid=%d\n", pid));
+}
+
+static s32
+wl_notify_aibss_txfail(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	u32 evt = ntoh32(e->event_type);
+	int ret = -1;
+
+	if (cfg->aibss_txfail_pid != 0) {
+		ret = wl_netlink_send_msg(cfg->aibss_txfail_pid, AIBSS_EVENT_TXFAIL,
+			cfg->aibss_txfail_seq++, (void *)&e->addr, ETHER_ADDR_LEN);
+	}
+
+	WL_DBG(("txfail : evt=%d, pid=%d, ret=%d, mac=" MACF "\n",
+		evt, cfg->aibss_txfail_pid, ret, ETHERP_TO_MACF(&e->addr)));
+	return ret;
+}
+#endif /* WLAIBSS */
+
+static s32
+wl_notify_rmc_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	u32 evt = ntoh32(e->event_type);
+	u32 reason = ntoh32(e->reason);
+	int ret = -1;
+
+	switch (reason) {
+		case WLC_E_REASON_RMC_AR_LOST:
+		case WLC_E_REASON_RMC_AR_NO_ACK:
+			if (cfg->rmc_event_pid != 0) {
+				ret = wl_netlink_send_msg(cfg->rmc_event_pid,
+					RMC_EVENT_LEADER_CHECK_FAIL,
+					cfg->rmc_event_seq++, NULL, 0);
+			}
+			break;
+		default:
+			break;
+	}
+	WL_DBG(("rmcevent : evt=%d, pid=%d, ret=%d\n", evt, cfg->rmc_event_pid, ret));
+	return ret;
+}
+
+static s32
+wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	bool act;
+	struct net_device *ndev = NULL;
+	s32 err = 0;
+	u32 event = be32_to_cpu(e->event_type);
+	u32 status = be32_to_cpu(e->status);
+	WL_DBG(("Enter \n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
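+	/* Once the firmware starts reporting WLC_E_BSSID, drop WLC_E_ROAM from the
+	 * event mask so the same roam is not notified twice.
+	 */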
+	if ((!cfg->disable_roam_event) && (event == WLC_E_BSSID)) {
+		wl_add_remove_eventmsg(ndev, WLC_E_ROAM, false);
+		cfg->disable_roam_event = TRUE;
+	}
+
+	if ((cfg->disable_roam_event) && (event == WLC_E_ROAM))
+		return err;
+
+	if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status == WLC_E_STATUS_SUCCESS) {
+		if (wl_get_drv_status(cfg, CONNECTED, ndev))
+			wl_bss_roaming_done(cfg, ndev, e, data);
+		else
+			wl_bss_connect_done(cfg, ndev, e, data, true);
+		act = true;
+		wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+		wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+	}
+	return err;
+}
+
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	wl_assoc_info_t assoc_info;
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, cfg->extra_buf,
+		WL_ASSOC_INFO_MAX, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("could not get assoc info (%d)\n", err));
+		return err;
+	}
+	memcpy(&assoc_info, cfg->extra_buf, sizeof(wl_assoc_info_t));
+	assoc_info.req_len = htod32(assoc_info.req_len);
+	assoc_info.resp_len = htod32(assoc_info.resp_len);
+	assoc_info.flags = htod32(assoc_info.flags);
+	if (conn_info->req_ie_len) {
+		conn_info->req_ie_len = 0;
+		bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
+	}
+	if (conn_info->resp_ie_len) {
+		conn_info->resp_ie_len = 0;
+		bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+	}
+	if (assoc_info.req_len) {
+		err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, cfg->extra_buf,
+			WL_ASSOC_INFO_MAX, NULL);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc req (%d)\n", err));
+			return err;
+		}
+		conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
+		if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
+			conn_info->req_ie_len -= ETHER_ADDR_LEN;
+		}
+		if (conn_info->req_ie_len <= MAX_REQ_LINE)
+			memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len);
+		else {
+			WL_ERR(("IE size %d exceeds max %d\n",
+				conn_info->req_ie_len, MAX_REQ_LINE));
+			return err;
+		}
+	} else {
+		conn_info->req_ie_len = 0;
+	}
+	if (assoc_info.resp_len) {
+		err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, cfg->extra_buf,
+			WL_ASSOC_INFO_MAX, NULL);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc resp (%d)\n", err));
+			return err;
+		}
+		conn_info->resp_ie_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
+		if (conn_info->resp_ie_len <= MAX_REQ_LINE)
+			memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
+		else {
+			WL_ERR(("IE size %d exceeds max %d\n",
+				conn_info->resp_ie_len, MAX_REQ_LINE));
+			return err;
+		}
+	} else {
+		conn_info->resp_ie_len = 0;
+	}
+	WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+		conn_info->resp_ie_len));
+
+	return err;
+}
+
+static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
+	size_t *join_params_size)
+{
+	chanspec_t chanspec = 0;
+	if (ch != 0) {
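+		/* Build a single 20 MHz chanspec for the target channel, picking the
+		 * band from the channel number before converting to the firmware's
+		 * chanspec encoding.
+		 */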
+		join_params->params.chanspec_num = 1;
+		join_params->params.chanspec_list[0] = ch;
+
+		if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+			chanspec |= WL_CHANSPEC_BAND_2G;
+		else
+			chanspec |= WL_CHANSPEC_BAND_5G;
+
+		chanspec |= WL_CHANSPEC_BW_20;
+		chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+		*join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+			join_params->params.chanspec_num * sizeof(chanspec_t);
+
+		join_params->params.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+		join_params->params.chanspec_list[0] |= chanspec;
+		join_params->params.chanspec_list[0] =
+			wl_chspec_host_to_driver(join_params->params.chanspec_list[0]);
+
+		join_params->params.chanspec_num =
+			htod32(join_params->params.chanspec_num);
+		WL_DBG(("join_params->params.chanspec_list[0]= %X, %d channels\n",
+			join_params->params.chanspec_list[0],
+			join_params->params.chanspec_num));
+	}
+}
+
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam)
+{
+	struct cfg80211_bss *bss;
+	struct wl_bss_info *bi;
+	struct wlc_ssid *ssid;
+	struct bcm_tlv *tim;
+	s32 beacon_interval;
+	s32 dtim_period;
+	size_t ie_len;
+	u8 *ie;
+	u8 *curbssid;
+	s32 err = 0;
+	struct wiphy *wiphy;
+	u32 channel;
+
+	wiphy = bcmcfg_to_wiphy(cfg);
+
+	ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+	curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	bss = cfg80211_get_bss(wiphy, NULL, curbssid,
+		ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
+		WLAN_CAPABILITY_ESS);
+
+	mutex_lock(&cfg->usr_sync);
+
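+	/* WLC_GET_BSS_INFO expects the buffer length in the first 32 bits of the
+	 * ioctl buffer.
+	 */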
+	*(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+	err = wldev_ioctl(ndev, WLC_GET_BSS_INFO,
+		cfg->extra_buf, WL_EXTRA_BUF_MAX, false);
+	if (unlikely(err)) {
+		WL_ERR(("Could not get bss info %d\n", err));
+		goto update_bss_info_out;
+	}
+	bi = (struct wl_bss_info *)(cfg->extra_buf + 4);
+	channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+	wl_update_prof(cfg, ndev, NULL, &channel, WL_PROF_CHAN);
+
+	if (!bss) {
+		WL_DBG(("Could not find the AP\n"));
+		if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
+			WL_ERR(("Bssid doesn't match\n"));
+			err = -EIO;
+			goto update_bss_info_out;
+		}
+		err = wl_inform_single_bss(cfg, bi, roam);
+		if (unlikely(err))
+			goto update_bss_info_out;
+
+		ie = ((u8 *)bi) + bi->ie_offset;
+		ie_len = bi->ie_length;
+		beacon_interval = cpu_to_le16(bi->beacon_period);
+	} else {
+		WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		ie = (u8 *)bss->ies->data;
+		ie_len = bss->ies->len;
+#else
+		ie = bss->information_elements;
+		ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		beacon_interval = bss->beacon_interval;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+		cfg80211_put_bss(wiphy, bss);
+#else
+		cfg80211_put_bss(bss);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+	}
+
+	tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+	if (tim) {
+		dtim_period = tim->data[1];
+	} else {
+		/*
+		 * active scan was done so we could not get dtim
+		 * information out of probe response,
+		 * so we specifically query dtim information.
+		 */
+		err = wldev_ioctl(ndev, WLC_GET_DTIMPRD,
+			&dtim_period, sizeof(dtim_period), false);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+			goto update_bss_info_out;
+		}
+	}
+
+	wl_update_prof(cfg, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
+	wl_update_prof(cfg, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+
+update_bss_info_out:
+	if (unlikely(err)) {
+		WL_ERR(("Failed with error %d\n", err));
+	}
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+static s32
+wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	s32 err = 0;
+	u8 *curbssid;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ieee80211_supported_band *band;
+	struct ieee80211_channel *notify_channel = NULL;
+	u32 *channel;
+	u32 freq;
+#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
+
+#ifdef WLFBT
+	uint32 data_len = 0;
+	if (data)
+		data_len = ntoh32(e->datalen);
+#endif /* WLFBT */
+
+	wl_get_assoc_ies(cfg, ndev);
+	wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+	curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	wl_update_bss_info(cfg, ndev, true);
+	wl_update_pmklist(ndev, cfg->pmk_list, err);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+	/* channel info for cfg80211_roamed introduced in 2.6.39-rc1 */
+	channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
+	if (*channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	freq = ieee80211_channel_to_frequency(*channel, band->band);
+	notify_channel = ieee80211_get_channel(wiphy, freq);
+#endif /* LINUX_VERSION > 2.6.39  || WL_COMPAT_WIRELESS */
+#ifdef WLFBT
+	/* back up the given FBT key for the further supplicant request,
+	 * currently not checking the FBT is enabled for current BSS in DHD,
+	 * because the supplicant decides to take it or not.
+	 */
+	if (data && (data_len == FBT_KEYLEN)) {
+		memcpy(cfg->fbt_key, data, FBT_KEYLEN);
+	}
+#endif /* WLFBT */
+	printf("wl_bss_roaming_done succeeded to " MACDBG "\n",
+		MAC2STRDBG((u8*)(&e->addr)));
+
+	cfg80211_roamed(ndev,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+		notify_channel,
+#endif
+		curbssid,
+		conn_info->req_ie, conn_info->req_ie_len,
+		conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+	WL_DBG(("Report roaming result\n"));
+
+	wl_set_drv_status(cfg, CONNECTED, ndev);
+
+	return err;
+}
+
+static s32
+wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+#if defined(CUSTOM_SET_CPUCORE)
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif
+	s32 err = 0;
+	u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	if (!sec) {
+		WL_ERR(("sec is NULL\n"));
+		return -ENODEV;
+	}
+	WL_DBG((" enter\n"));
+#ifdef ESCAN_RESULT_PATCH
+	if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+		if (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0) {
+			WL_DBG((" Connected event of connected device e=%d s=%d, ignore it\n",
+				ntoh32(e->event_type), ntoh32(e->status)));
+			return err;
+		}
+	}
+	if (memcmp(curbssid, broad_bssid, ETHER_ADDR_LEN) == 0 &&
+		memcmp(broad_bssid, connect_req_bssid, ETHER_ADDR_LEN) != 0) {
+		WL_DBG(("copy bssid\n"));
+		memcpy(curbssid, connect_req_bssid, ETHER_ADDR_LEN);
+	}
+
+#else
+	if (cfg->scan_request) {
+		wl_notify_escan_complete(cfg, ndev, true, true);
+	}
+#endif /* ESCAN_RESULT_PATCH */
+	if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+		wl_cfg80211_scan_abort(cfg);
+		wl_clr_drv_status(cfg, CONNECTING, ndev);
+		if (completed) {
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+			curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+			wl_update_pmklist(ndev, cfg->pmk_list, err);
+			wl_set_drv_status(cfg, CONNECTED, ndev);
+			if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+				init_completion(&cfg->iface_disable);
+#else
+				/* reinitialize completion to clear previous count */
+				INIT_COMPLETION(cfg->iface_disable);
+#endif
+			}
+#ifdef CUSTOM_SET_CPUCORE
+			if (wl_get_chan_isvht80(ndev, dhd)) {
+				if (ndev == bcmcfg_to_prmry_ndev(cfg))
+					dhd->chan_isvht80 |= DHD_FLAG_STA_MODE; /* STA mode */
+				else if (ndev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION))
+					dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; /* p2p mode */
+				dhd_set_cpucore(dhd, TRUE);
+			}
+#endif /* CUSTOM_SET_CPUCORE */
+
+		}
+		cfg80211_connect_result(ndev,
+			curbssid,
+			conn_info->req_ie,
+			conn_info->req_ie_len,
+			conn_info->resp_ie,
+			conn_info->resp_ie_len,
+			completed ? WLAN_STATUS_SUCCESS :
+			(sec->auth_assoc_res_status) ?
+			sec->auth_assoc_res_status :
+			WLAN_STATUS_UNSPECIFIED_FAILURE,
+			GFP_KERNEL);
+		if (completed)
+			WL_INFORM(("Report connect result - connection succeeded\n"));
+		else
+			WL_ERR(("Report connect result - connection failed\n"));
+	}
+#ifdef CONFIG_TCPACK_FASTTX
+	if (wl_get_chan_isvht80(ndev, dhd))
+		wldev_iovar_setint(ndev, "tcpack_fast_tx", 0);
+	else
+		wldev_iovar_setint(ndev, "tcpack_fast_tx", 1);
+#endif /* CONFIG_TCPACK_FASTTX */
+
+	return err;
+}
+
+static s32
+wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+	u16 flags = ntoh16(e->flags);
+	enum nl80211_key_type key_type;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	if (flags & WLC_EVENT_MSG_GROUP)
+		key_type = NL80211_KEYTYPE_GROUP;
+	else
+		key_type = NL80211_KEYTYPE_PAIRWISE;
+
+	cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
+		NULL, GFP_KERNEL);
+	mutex_unlock(&cfg->usr_sync);
+
+	return 0;
+}
+
+#ifdef BT_WIFI_HANDOVER
+static s32
+wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+	u32 event = ntoh32(e->event_type);
+	u32 datalen = ntoh32(e->datalen);
+	s32 err;
+
+	WL_ERR(("wl_notify_bt_wifi_handover_req: event_type : %d, datalen : %d\n", event, datalen));
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+	err = wl_genl_send_msg(ndev, event, data, (u16)datalen, 0, 0);
+
+	return err;
+}
+#endif /* BT_WIFI_HANDOVER */
+
+#ifdef PNO_SUPPORT
+static s32
+wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+
+	WL_ERR((">>> PNO Event\n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+#ifndef WL_SCHED_SCAN
+	mutex_lock(&cfg->usr_sync);
+	/* TODO: Use cfg80211_sched_scan_results(wiphy); */
+	cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL);
+	mutex_unlock(&cfg->usr_sync);
+#else
+	/* If cfg80211 scheduled scan is supported, report the pno results via sched
+	 * scan results
+	 */
+	wl_notify_sched_scan_results(cfg, ndev, e, data);
+#endif /* WL_SCHED_SCAN */
+	return 0;
+}
+#endif /* PNO_SUPPORT */
+
+static s32
+wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct channel_info channel_inform;
+	struct wl_scan_results *bss_list;
+	struct net_device *ndev = NULL;
+	u32 len = WL_SCAN_BUF_MAX;
+	s32 err = 0;
+	unsigned long flags;
+
+	WL_DBG(("Enter \n"));
+	if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
+		WL_ERR(("scan is not ready \n"));
+		return err;
+	}
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+	err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
+		sizeof(channel_inform), false);
+	if (unlikely(err)) {
+		WL_ERR(("scan busy (%d)\n", err));
+		goto scan_done_out;
+	}
+	channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+	if (unlikely(channel_inform.scan_channel)) {
+
+		WL_DBG(("channel_inform.scan_channel (%d)\n",
+			channel_inform.scan_channel));
+	}
+	cfg->bss_list = cfg->scan_results;
+	bss_list = cfg->bss_list;
+	memset(bss_list, 0, len);
+	bss_list->buflen = htod32(len);
+	err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, false);
+	if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
+		WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+		err = -EINVAL;
+		goto scan_done_out;
+	}
+	bss_list->buflen = dtoh32(bss_list->buflen);
+	bss_list->version = dtoh32(bss_list->version);
+	bss_list->count = dtoh32(bss_list->count);
+
+	err = wl_inform_bss(cfg);
+
+scan_done_out:
+	del_timer_sync(&cfg->scan_timeout);
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+		cfg80211_scan_done(cfg->scan_request, false);
+		cfg->scan_request = NULL;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	WL_DBG(("cfg80211_scan_done\n"));
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+static s32
+wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody)
+{
+	struct dot11_management_header *hdr;
+	u32 totlen = 0;
+	s32 err = 0;
+	u8 *offset;
+	u32 prebody_len = *body_len;
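+	/* Account for the fixed fields that precede the IEs in (re)assoc request
+	 * frames; other subtypes carry only the copied body.
+	 */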
+	switch (fc) {
+		case FC_ASSOC_REQ:
+			/* capability , listen interval */
+			totlen = DOT11_ASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_ASSOC_REQ_FIXED_LEN;
+			break;
+
+		case FC_REASSOC_REQ:
+			/* capability, listen interval, ap address */
+			totlen = DOT11_REASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_REASSOC_REQ_FIXED_LEN;
+			break;
+	}
+	totlen += DOT11_MGMT_HDR_LEN + prebody_len;
+	*pheader = kzalloc(totlen, GFP_KERNEL);
+	if (*pheader == NULL) {
+		WL_ERR(("memory alloc failed \n"));
+		return -ENOMEM;
+	}
+	hdr = (struct dot11_management_header *) (*pheader);
+	hdr->fc = htol16(fc);
+	hdr->durid = 0;
+	hdr->seq = 0;
+	offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
+	bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
+	bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
+	bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
+	if ((pbody != NULL) && prebody_len)
+		bcopy((const char*)pbody, offset, prebody_len);
+	*body_len = totlen;
+	return err;
+}
+
+
+void
+wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+		if (cfg->afx_hdl != NULL) {
+			if (cfg->afx_hdl->dev != NULL) {
+				wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+				wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, cfg->afx_hdl->dev);
+			}
+			cfg->afx_hdl->peer_chan = WL_INVALID;
+		}
+		complete(&cfg->act_frm_scan);
+		WL_DBG(("*** Wake UP ** Working afx searching is cleared\n"));
+	} else if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+		if (!(wl_get_p2p_status(cfg, ACTION_TX_COMPLETED) ||
+			wl_get_p2p_status(cfg, ACTION_TX_NOACK)))
+			wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
+
+		WL_DBG(("*** Wake UP ** abort actframe iovar\n"));
+		/* if channel is not zero, "actframe" uses off channel scan.
+		 * So abort scan for off channel completion.
+		 */
+		if (cfg->af_sent_channel)
+			wl_cfg80211_scan_abort(cfg);
+	}
+#ifdef WL_CFG80211_SYNC_GON
+	else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+		WL_DBG(("*** Wake UP ** abort listen for next af frame\n"));
+		/* So abort scan to cancel listen */
+		wl_cfg80211_scan_abort(cfg);
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+}
+
+
+int wl_cfg80211_get_ioctl_version(void)
+{
+	return ioctl_version;
+}
+
+static s32
+wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct ieee80211_supported_band *band;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ether_addr da;
+	struct ether_addr bssid;
+	bool isfree = false;
+	s32 err = 0;
+	s32 freq;
+	struct net_device *ndev = NULL;
+	wifi_p2p_pub_act_frame_t *act_frm = NULL;
+	wifi_p2p_action_frame_t *p2p_act_frm = NULL;
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
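+	/* The event payload starts with a wl_event_rx_frame_data_t header,
+	 * followed by the raw 802.11 frame body.
+	 */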
+	wl_event_rx_frame_data_t *rxframe =
+		(wl_event_rx_frame_data_t*)data;
+	u32 event = ntoh32(e->event_type);
+	u8 *mgmt_frame;
+	u8 bsscfgidx = e->bsscfgidx;
+	u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+	u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK));
+
+	memset(&bssid, 0, ETHER_ADDR_LEN);
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band"));
+		return -EINVAL;
+	}
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+	freq = ieee80211_channel_to_frequency(channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+	if (event == WLC_E_ACTION_FRAME_RX) {
+		wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+			NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+
+		err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+		if (err < 0)
+			 WL_ERR(("WLC_GET_BSSID error %d\n", err));
+		memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+		err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid,
+			&mgmt_frame, &mgmt_frame_len,
+			(u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
+		if (err < 0) {
+			WL_ERR(("Error in receiving action frame len %d channel %d freq %d\n",
+				mgmt_frame_len, channel, freq));
+			goto exit;
+		}
+		isfree = true;
+		if (wl_cfgp2p_is_pub_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+			act_frm = (wifi_p2p_pub_act_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+		} else if (wl_cfgp2p_is_p2p_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+			p2p_act_frm = (wifi_p2p_action_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+			(void) p2p_act_frm;
+		} else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+#ifdef WL_SDO
+			if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+				WL_ERR(("SD offload is in progress. Don't report the "
+					"frame via rx_mgmt path\n"));
+				goto exit;
+			}
+#endif
+
+			sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+			if (sd_act_frm && wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+				if (cfg->next_af_subtype == sd_act_frm->action) {
+					WL_DBG(("We got a right next frame of SD!(%d)\n",
+						sd_act_frm->action));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev);
+				}
+			}
+			(void) sd_act_frm;
+		} else {
+
+			if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+				u8 action = 0;
+				if (wl_get_public_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+					mgmt_frame_len - DOT11_MGMT_HDR_LEN, &action) != BCME_OK) {
+					WL_DBG(("Received action is not a public action frame\n"));
+				} else if (cfg->next_af_subtype == action) {
+					WL_DBG(("Received action is the awaited action(%d)\n",
+						action));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev);
+				}
+			}
+		}
+
+		if (act_frm) {
+
+			if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+				if (cfg->next_af_subtype == act_frm->subtype) {
+					WL_DBG(("We got a right next frame!(%d)\n",
+						act_frm->subtype));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					if (cfg->next_af_subtype == P2P_PAF_GON_CONF) {
+						OSL_SLEEP(20);
+					}
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev);
+				}
+			}
+		}
+
+		wl_cfgp2p_print_actframe(false, &mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN, channel);
+		/*
+		 * After complete GO Negotiation, roll back to mpc mode
+		 */
+		if (act_frm && ((act_frm->subtype == P2P_PAF_GON_CONF) ||
+			(act_frm->subtype == P2P_PAF_PROVDIS_RSP))) {
+			wldev_iovar_setint(ndev, "mpc", 1);
+		}
+		if (act_frm && (act_frm->subtype == P2P_PAF_GON_CONF)) {
+			WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+			wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+		}
+	} else if (event == WLC_E_PROBREQ_MSG) {
+
+		/* Handle probe req frames
+		 * WPS-AP certification 4.2.13
+		 */
+		struct parsed_ies prbreq_ies;
+		u32 prbreq_ie_len = 0;
+		bool pbc = 0;
+
+		WL_DBG((" Event WLC_E_PROBREQ_MSG received\n"));
+		mgmt_frame = (u8 *)(data);
+		mgmt_frame_len = ntoh32(e->datalen);
+
+		prbreq_ie_len = mgmt_frame_len - DOT11_MGMT_HDR_LEN;
+
+		/* Parse prob_req IEs */
+		if (wl_cfg80211_parse_ies(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			prbreq_ie_len, &prbreq_ies) < 0) {
+			WL_ERR(("Prob req get IEs failed\n"));
+			return 0;
+		}
+		if (prbreq_ies.wps_ie != NULL) {
+			wl_validate_wps_ie((char *)prbreq_ies.wps_ie, prbreq_ies.wps_ie_len, &pbc);
+			WL_DBG((" wps_ie exists, pbc = %d\n", pbc));
+			/* if pbc method, send prob_req mgmt frame to upper layer */
+			if (!pbc)
+				return 0;
+		} else
+			return 0;
+	} else {
+		mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
+
+		/* wpa_supplicant uses the probe request event to restart another GON Req,
+		 * but that causes GON Req repetition. So if the src addr of the probe req
+		 * is the same as our target device, do not report the probe request event
+		 * while an action frame is being sent.
+		 */
+		if (event == WLC_E_P2P_PROBREQ_MSG) {
+			WL_DBG((" Event %s\n", (event == WLC_E_P2P_PROBREQ_MSG) ?
+				"WLC_E_P2P_PROBREQ_MSG":"WLC_E_PROBREQ_MSG"));
+
+
+			/* Filter any P2P probe reqs arriving during the
+			 * GO-NEG Phase
+			 */
+			if (cfg->p2p &&
+				wl_get_p2p_status(cfg, GO_NEG_PHASE)) {
+				WL_DBG(("Filtering P2P probe_req while "
+					"being in GO-Neg state\n"));
+				return 0;
+			}
+		}
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	cfg80211_rx_mgmt(cfgdev, freq, 0,  mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+	defined(WL_COMPAT_WIRELESS)
+	cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#else
+	cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3, 14, 0) */
+
+	WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n",
+		mgmt_frame_len, ntoh32(e->datalen), channel, freq));
+exit:
+	if (isfree)
+		kfree(mgmt_frame);
+	return 0;
+}
+
+#ifdef WL_SCHED_SCAN
+/* If target scan is not reliable, set the below define to "1" to do a
+ * full escan
+ */
+#define FULL_ESCAN_ON_PFN_NET_FOUND		0
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	wl_pfn_net_info_t *netinfo, *pnetinfo;
+	struct wiphy *wiphy	= bcmcfg_to_wiphy(cfg);
+	int err = 0;
+	struct cfg80211_scan_request *request = NULL;
+	struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
+	struct ieee80211_channel *channel = NULL;
+	int channel_req = 0;
+	int band = 0;
+	struct wl_pfn_scanresults *pfn_result = (struct wl_pfn_scanresults *)data;
+	int n_pfn_results = pfn_result->count;
+
+	WL_DBG(("Enter\n"));
+
+	if (e->event_type == WLC_E_PFN_NET_LOST) {
+		WL_PNO(("PFN NET LOST event. Do Nothing \n"));
+		return 0;
+	}
+	WL_PNO((">>> PFN NET FOUND event. count:%d \n", n_pfn_results));
+	if (n_pfn_results > 0) {
+		int i;
+
+		if (n_pfn_results > MAX_PFN_LIST_COUNT)
+			n_pfn_results = MAX_PFN_LIST_COUNT;
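+		/* Point at the first netinfo entry: the scanresults header is assumed
+		 * to embed one netinfo entry at its tail, hence the sizeof adjustment.
+		 */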
+		pnetinfo = (wl_pfn_net_info_t *)(data + sizeof(wl_pfn_scanresults_t)
+				- sizeof(wl_pfn_net_info_t));
+
+		memset(&ssid, 0x00, sizeof(ssid));
+
+		request = kzalloc(sizeof(*request)
+			+ sizeof(*request->channels) * n_pfn_results,
+			GFP_KERNEL);
+		channel = (struct ieee80211_channel *)kzalloc(
+			(sizeof(struct ieee80211_channel) * n_pfn_results),
+			GFP_KERNEL);
+		if (!request || !channel) {
+			WL_ERR(("No memory"));
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		request->wiphy = wiphy;
+
+		for (i = 0; i < n_pfn_results; i++) {
+			netinfo = &pnetinfo[i];
+			if (!netinfo) {
+				WL_ERR(("Invalid netinfo ptr. index:%d", i));
+				err = -EINVAL;
+				goto out_err;
+			}
+			WL_PNO((">>> SSID:%s Channel:%d \n",
+				netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.channel));
+			/* PFN results don't carry all the info the supplicant requires
+			 * (e.g. IEs). Do a targeted escan so that sched scan results are reported
+			 * via wl_inform_single_bss in the required format. Escan requires the
+			 * scan request in the form of a cfg80211_scan_request, so for the time
+			 * being build one from the received PNO event.
+			 */
+			memcpy(ssid[i].ssid, netinfo->pfnsubnet.SSID,
+				netinfo->pfnsubnet.SSID_len);
+			ssid[i].ssid_len = netinfo->pfnsubnet.SSID_len;
+			request->n_ssids++;
+
+			channel_req = netinfo->pfnsubnet.channel;
+			band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
+				: NL80211_BAND_5GHZ;
+			channel[i].center_freq = ieee80211_channel_to_frequency(channel_req, band);
+			channel[i].band = band;
+			channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+			request->channels[i] = &channel[i];
+			request->n_channels++;
+		}
+
+		/* assign parsed ssid array */
+		if (request->n_ssids)
+			request->ssids = &ssid[0];
+
+		if (wl_get_drv_status_all(cfg, SCANNING)) {
+			/* Abort any on-going scan */
+			wl_notify_escan_complete(cfg, ndev, true, true);
+		}
+
+		if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+			WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+			err = wl_cfgp2p_discover_enable_search(cfg, false);
+			if (unlikely(err)) {
+				wl_clr_drv_status(cfg, SCANNING, ndev);
+				goto out_err;
+			}
+			p2p_scan(cfg) = false;
+		}
+
+		wl_set_drv_status(cfg, SCANNING, ndev);
+#if FULL_ESCAN_ON_PFN_NET_FOUND
+		WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
+		err = wl_do_escan(cfg, wiphy, ndev, NULL);
+#else
+		WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
+		err = wl_do_escan(cfg, wiphy, ndev, request);
+#endif
+		if (err) {
+			wl_clr_drv_status(cfg, SCANNING, ndev);
+			goto out_err;
+		}
+		cfg->sched_scan_running = TRUE;
+	}
+	else {
+		WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+	}
+out_err:
+	if (request)
+		kfree(request);
+	if (channel)
+		kfree(channel);
+	return err;
+}
+#endif /* WL_SCHED_SCAN */
+
+static void wl_init_conf(struct wl_conf *conf)
+{
+	WL_DBG(("Enter \n"));
+	conf->frag_threshold = (u32)-1;
+	conf->rts_threshold = (u32)-1;
+	conf->retry_short = (u32)-1;
+	conf->retry_long = (u32)-1;
+	conf->tx_power = -1;
+}
+
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	unsigned long flags;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	memset(profile, 0, sizeof(struct wl_profile));
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+}
+
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg)
+{
+	memset(cfg->evt_handler, 0, sizeof(cfg->evt_handler));
+
+	cfg->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
+	cfg->evt_handler[WLC_E_AUTH] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ASSOC] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
+	cfg->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
+	cfg->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
+	cfg->evt_handler[WLC_E_JOIN] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_START] = wl_notify_connect_status;
+#ifdef PNO_SUPPORT
+	cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status;
+#endif /* PNO_SUPPORT */
+#ifdef WL_SDO
+	cfg->evt_handler[WLC_E_SERVICE_FOUND] = wl_svc_resp_handler;
+	cfg->evt_handler[WLC_E_P2PO_ADD_DEVICE] = wl_notify_device_discovery;
+	cfg->evt_handler[WLC_E_P2PO_DEL_DEVICE] = wl_notify_device_discovery;
+#endif
+#ifdef WLTDLS
+	cfg->evt_handler[WLC_E_TDLS_PEER_EVENT] = wl_tdls_event_handler;
+#endif /* WLTDLS */
+	cfg->evt_handler[WLC_E_BSSID] = wl_notify_roaming_status;
+#ifdef WLAIBSS
+	cfg->evt_handler[WLC_E_AIBSS_TXFAIL] = wl_notify_aibss_txfail;
+#endif /* WLAIBSS */
+#ifdef BT_WIFI_HANDOVER
+	cfg->evt_handler[WLC_E_BT_WIFI_HANDOVER_REQ] = wl_notify_bt_wifi_handover_req;
+#endif
+#ifdef WL_NAN
+	cfg->evt_handler[WLC_E_NAN] = wl_cfgnan_notify_nan_status;
+	cfg->evt_handler[WLC_E_PROXD] = wl_cfgnan_notify_proxd_status;
+#endif /* WL_NAN */
+	cfg->evt_handler[WLC_E_RMC_EVENT] = wl_notify_rmc_status;
+}
+
+#if defined(STATIC_WL_PRIV_STRUCT)
+static void
+wl_init_escan_result_buf(struct bcm_cfg80211 *cfg)
+{
+	cfg->escan_info.escan_buf = DHD_OS_PREALLOC(cfg->pub,
+		DHD_PREALLOC_WIPHY_ESCAN0, ESCAN_BUF_SIZE);
+	bzero(cfg->escan_info.escan_buf, ESCAN_BUF_SIZE);
+}
+
+static void
+wl_deinit_escan_result_buf(struct bcm_cfg80211 *cfg)
+{
+	cfg->escan_info.escan_buf = NULL;
+
+}
+#endif /* STATIC_WL_PRIV_STRUCT */
+
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg)
+{
+	WL_DBG(("Enter \n"));
+	cfg->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!cfg->scan_results)) {
+		WL_ERR(("Scan results alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->conf = (void *)kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
+	if (unlikely(!cfg->conf)) {
+		WL_ERR(("wl_conf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->scan_req_int =
+	    (void *)kzalloc(sizeof(*cfg->scan_req_int), GFP_KERNEL);
+	if (unlikely(!cfg->scan_req_int)) {
+		WL_ERR(("Scan req alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!cfg->ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!cfg->escan_ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!cfg->extra_buf)) {
+		WL_ERR(("Extra buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->pmk_list = (void *)kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
+	if (unlikely(!cfg->pmk_list)) {
+		WL_ERR(("pmk list alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->sta_info = (void *)kzalloc(sizeof(*cfg->sta_info), GFP_KERNEL);
+	if (unlikely(!cfg->sta_info)) {
+		WL_ERR(("sta info  alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+
+#if defined(STATIC_WL_PRIV_STRUCT)
+	cfg->conn_info = (void *)kzalloc(sizeof(*cfg->conn_info), GFP_KERNEL);
+	if (unlikely(!cfg->conn_info)) {
+		WL_ERR(("cfg->conn_info  alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->ie = (void *)kzalloc(sizeof(*cfg->ie), GFP_KERNEL);
+	if (unlikely(!cfg->ie)) {
+		WL_ERR(("cfg->ie  alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl_init_escan_result_buf(cfg);
+#endif /* STATIC_WL_PRIV_STRUCT */
+	cfg->afx_hdl = (void *)kzalloc(sizeof(*cfg->afx_hdl), GFP_KERNEL);
+	if (unlikely(!cfg->afx_hdl)) {
+		WL_ERR(("afx hdl  alloc failed\n"));
+		goto init_priv_mem_out;
+	} else {
+		init_completion(&cfg->act_frm_scan);
+		init_completion(&cfg->wait_next_af);
+
+		INIT_WORK(&cfg->afx_hdl->work, wl_cfg80211_afx_handler);
+	}
+	return 0;
+
+init_priv_mem_out:
+	wl_deinit_priv_mem(cfg);
+
+	return -ENOMEM;
+}
+
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg)
+{
+	kfree(cfg->scan_results);
+	cfg->scan_results = NULL;
+	kfree(cfg->conf);
+	cfg->conf = NULL;
+	kfree(cfg->scan_req_int);
+	cfg->scan_req_int = NULL;
+	kfree(cfg->ioctl_buf);
+	cfg->ioctl_buf = NULL;
+	kfree(cfg->escan_ioctl_buf);
+	cfg->escan_ioctl_buf = NULL;
+	kfree(cfg->extra_buf);
+	cfg->extra_buf = NULL;
+	kfree(cfg->pmk_list);
+	cfg->pmk_list = NULL;
+	kfree(cfg->sta_info);
+	cfg->sta_info = NULL;
+#if defined(STATIC_WL_PRIV_STRUCT)
+	kfree(cfg->conn_info);
+	cfg->conn_info = NULL;
+	kfree(cfg->ie);
+	cfg->ie = NULL;
+	wl_deinit_escan_result_buf(cfg);
+#endif /* STATIC_WL_PRIV_STRUCT */
+	if (cfg->afx_hdl) {
+		cancel_work_sync(&cfg->afx_hdl->work);
+		kfree(cfg->afx_hdl);
+		cfg->afx_hdl = NULL;
+	}
+
+	if (cfg->ap_info) {
+		kfree(cfg->ap_info->wpa_ie);
+		kfree(cfg->ap_info->rsn_ie);
+		kfree(cfg->ap_info->wps_ie);
+		kfree(cfg->ap_info);
+		cfg->ap_info = NULL;
+	}
+}
+
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg)
+{
+	int ret = 0;
+	WL_DBG(("Enter \n"));
+
+	/* Do not use DHD in cfg driver */
+	cfg->event_tsk.thr_pid = -1;
+
+	PROC_START(wl_event_handler, cfg, &cfg->event_tsk, 0, "wl_event_handler");
+	if (cfg->event_tsk.thr_pid < 0)
+		ret = -ENOMEM;
+	return ret;
+}
+
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg)
+{
+	if (cfg->event_tsk.thr_pid >= 0)
+		PROC_STOP(&cfg->event_tsk);
+}
+
+static void wl_scan_timeout(unsigned long data)
+{
+	wl_event_msg_t msg;
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+	if (!(cfg->scan_request)) {
+		WL_ERR(("timer expired but no scan request\n"));
+		return;
+	}
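+	/* Forge an escan-result event with a timeout status so the normal escan
+	 * completion path tears the scan down.
+	 */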
+	bzero(&msg, sizeof(wl_event_msg_t));
+	WL_ERR(("timer expired\n"));
+	msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+	msg.status = hton32(WLC_E_STATUS_TIMEOUT);
+	msg.reason = 0xFFFFFFFF;
+	wl_cfg80211_event(bcmcfg_to_prmry_ndev(cfg), &msg, NULL);
+}
+
+static s32
+wl_cfg80211_netdev_notifier_call(struct notifier_block * nb,
+	unsigned long state,
+	void *ndev)
+{
+	struct net_device *dev = ndev;
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	WL_DBG(("Enter \n"));
+
+	if (!wdev || !cfg || dev == bcmcfg_to_prmry_ndev(cfg))
+		return NOTIFY_DONE;
+
+	switch (state) {
+		case NETDEV_DOWN:
+		{
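+			/* On kernels before 3.11, wait (bounded to ~2 s / 100 loops) for
+			 * cfg80211's wdev cleanup_work to finish before the device goes down.
+			 */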
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
+			int max_wait_timeout = 2;
+			int max_wait_count = 100;
+			int refcnt = 0;
+			unsigned long limit = jiffies + max_wait_timeout * HZ;
+			while (work_pending(&wdev->cleanup_work)) {
+				if (refcnt%5 == 0) {
+					WL_ERR(("[NETDEV_DOWN] wait for "
+						"complete of cleanup_work"
+						" (%d th)\n", refcnt));
+				}
+				if (!time_before(jiffies, limit)) {
+					WL_ERR(("[NETDEV_DOWN] cleanup_work"
+						" of CFG80211 is not"
+						" completed in %d sec\n",
+						max_wait_timeout));
+					break;
+				}
+				if (refcnt >= max_wait_count) {
+					WL_ERR(("[NETDEV_DOWN] cleanup_work"
+						" of CFG80211 is not"
+						" completed in %d loop\n",
+						max_wait_count));
+					break;
+				}
+				set_current_state(TASK_INTERRUPTIBLE);
+				(void)schedule_timeout(100);
+				set_current_state(TASK_RUNNING);
+				refcnt++;
+			}
+#endif /* LINUX_VERSION < VERSION(3, 11, 0) */
+			break;
+		}
+
+		case NETDEV_UNREGISTER:
+			/* after calling list_del_rcu(&wdev->list) */
+			wl_dealloc_netinfo(cfg, ndev);
+			break;
+		case NETDEV_GOING_DOWN:
+			/* At NETDEV_DOWN, cfg80211's wdev_cleanup_work will run. Before it
+			 * does, check whether a scan is still in progress; if so,
+			 * wdev_cleanup_work would hit a WARN_ON and force the scan to
+			 * finish, so complete it here first.
+			 */
+			if (wl_get_drv_status(cfg, SCANNING, dev))
+				wl_notify_escan_complete(cfg, dev, true, true);
+			break;
+	}
+	return NOTIFY_DONE;
+}
+static struct notifier_block wl_cfg80211_netdev_notifier = {
+	.notifier_call = wl_cfg80211_netdev_notifier_call,
+};
+/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in kernel notifier link list (with 'next' pointing to itself)
+ */
+static bool wl_cfg80211_netdev_notifier_registered = FALSE;
+
+static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
+{
+	wl_scan_params_t *params = NULL;
+	s32 params_size = 0;
+	s32 err = BCME_OK;
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	if (!in_atomic()) {
+		/* Our scan params only need space for 1 channel and 0 ssids */
+		params = wl_cfg80211_scan_alloc_params(-1, 0, &params_size);
+		if (params == NULL) {
+			WL_ERR(("scan params allocation failed \n"));
+			err = -ENOMEM;
+		} else {
+			/* Do a scan abort to stop the driver's scan engine */
+			err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true);
+			if (err < 0) {
+				WL_ERR(("scan abort  failed \n"));
+			}
+			kfree(params);
+		}
+	}
+}
+
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev,
+	bool aborted, bool fw_abort)
+{
+	s32 err = BCME_OK;
+	unsigned long flags;
+	struct net_device *dev;
+
+	WL_DBG(("Enter \n"));
+	if (!ndev) {
+		WL_ERR(("ndev is null\n"));
+		err = BCME_ERROR;
+		return err;
+	}
+
+	if (cfg->escan_info.ndev != ndev) {
+		WL_ERR(("ndev is different %p %p\n", cfg->escan_info.ndev, ndev));
+		err = BCME_ERROR;
+		return err;
+	}
+
+	if (cfg->scan_request) {
+		dev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_ENABLE_P2P_IF)
+		if (cfg->scan_request->dev != cfg->p2p_net)
+			dev = cfg->scan_request->dev;
+#endif /* WL_ENABLE_P2P_IF */
+	}
+	else {
+		WL_DBG(("cfg->scan_request is NULL; may be an internal scan. "
+			"Doing scan_abort for ndev %p primary %p",
+			ndev, bcmcfg_to_prmry_ndev(cfg)));
+		dev = ndev;
+	}
+	if (fw_abort && !in_atomic())
+		wl_cfg80211_scan_abort(cfg);
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+#if defined(ESCAN_RESULT_PATCH)
+	if (likely(cfg->scan_request)) {
+		cfg->bss_list = wl_escan_get_buf(cfg, aborted);
+		wl_inform_bss(cfg);
+	}
+#endif /* ESCAN_RESULT_PATCH */
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_SCHED_SCAN
+	if (cfg->sched_scan_req && !cfg->scan_request) {
+		WL_PNO((">>> REPORTING SCHED SCAN RESULTS \n"));
+		if (!aborted)
+			cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy);
+		cfg->sched_scan_running = FALSE;
+		cfg->sched_scan_req = NULL;
+	}
+#endif /* WL_SCHED_SCAN */
+	if (likely(cfg->scan_request)) {
+		cfg80211_scan_done(cfg->scan_request, aborted);
+		cfg->scan_request = NULL;
+	}
+	if (p2p_is_on(cfg))
+		wl_clr_p2p_status(cfg, SCANNING);
+	wl_clr_drv_status(cfg, SCANNING, dev);
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+#ifdef WL_SDO
+	if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS) && !in_atomic()) {
+		wl_cfg80211_resume_sdo(ndev, cfg);
+	}
+#endif
+
+	return err;
+}
+
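+/* Handler for WLC_E_ESCAN_RESULT events. WLC_E_STATUS_PARTIAL carries a single
+ * BSS that is validated and merged into the escan buffer (duplicates matched
+ * by BSSID/band/SSID keep the better RSSI); SUCCESS, ABORT and TIMEOUT
+ * terminate the scan and report it via wl_notify_escan_complete().
+ */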
+static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = BCME_OK;
+	s32 status = ntoh32(e->status);
+	wl_bss_info_t *bi;
+	wl_escan_result_t *escan_result;
+	wl_bss_info_t *bss = NULL;
+	wl_scan_results_t *list;
+	wifi_p2p_ie_t * p2p_ie;
+	struct net_device *ndev = NULL;
+	u32 bi_length;
+	u32 i;
+	u8 *p2p_dev_addr = NULL;
+
+	WL_DBG((" enter event type : %d, status : %d \n",
+		ntoh32(e->event_type), ntoh32(e->status)));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	/* P2P SCAN is coming from primary interface */
+	if (wl_get_p2p_status(cfg, SCANNING)) {
+		if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+			ndev = cfg->afx_hdl->dev;
+		else
+			ndev = cfg->escan_info.ndev;
+
+	}
+	if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
+		WL_ERR(("escan is not ready ndev %p drv_status 0x%x e_type %d e_status %d\n",
+			ndev, wl_get_drv_status(cfg, SCANNING, ndev),
+			ntoh32(e->event_type), ntoh32(e->status)));
+		goto exit;
+	}
+	escan_result = (wl_escan_result_t *)data;
+
+	if (status == WLC_E_STATUS_PARTIAL) {
+		WL_INFORM(("WLC_E_STATUS_PARTIAL \n"));
+		if (!escan_result) {
+			WL_ERR(("Invalid escan result (NULL pointer)\n"));
+			goto exit;
+		}
+		if (dtoh16(escan_result->bss_count) != 1) {
+			WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+			goto exit;
+		}
+		bi = escan_result->bss_info;
+		if (!bi) {
+			WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+			goto exit;
+		}
+		bi_length = dtoh32(bi->length);
+		if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+			WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+			goto exit;
+		}
+		if (wl_escan_check_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id) < 0)
+			goto exit;
+
+		if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+			if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+				WL_DBG(("Ignoring IBSS result\n"));
+				goto exit;
+			}
+		}
+
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+			if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+				cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+				s32 channel = wf_chspec_ctlchan(
+					wl_chspec_driver_to_host(bi->chanspec));
+
+				if ((channel > MAXCHANNEL) || (channel <= 0))
+					channel = WL_INVALID;
+				else
+					WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+						" channel : %d\n",
+						MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+						channel));
+
+				wl_clr_p2p_status(cfg, SCANNING);
+				cfg->afx_hdl->peer_chan = channel;
+				complete(&cfg->act_frm_scan);
+				goto exit;
+			}
+
+		} else {
+			int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+			list = wl_escan_get_buf(cfg, FALSE);
+			if (scan_req_match(cfg)) {
+#ifdef WL_HOST_BAND_MGMT
+				s32 channel = 0;
+				s32 channel_band = 0;
+				chanspec_t chspec;
+#endif /* WL_HOST_BAND_MGMT */
+				/* p2p scan && allow only probe response */
+				if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+					(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+					goto exit;
+				if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
+					bi->ie_length)) == NULL) {
+						WL_ERR(("Couldn't find P2PIE in probe"
+							" response/beacon\n"));
+						goto exit;
+				}
+#ifdef WL_HOST_BAND_MGMT
+				chspec = wl_chspec_driver_to_host(bi->chanspec);
+				channel = wf_chspec_ctlchan(chspec);
+				channel_band = CHSPEC2WLC_BAND(chspec);
+
+				if ((cfg->curr_band == WLC_BAND_5G) &&
+					(channel_band == WLC_BAND_2G)) {
+					/* Avoid sending the GO results in band conflict */
+					if (wl_cfgp2p_retreive_p2pattrib(p2p_ie,
+						P2P_SEID_GROUP_ID) != NULL)
+						goto exit;
+				}
+#endif /* WL_HOST_BAND_MGMT */
+			}
+			for (i = 0; i < list->count; i++) {
+				bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+					: list->bss_info;
+
+				if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+					(CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
+					== CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
+					bi->SSID_len == bss->SSID_len &&
+					!bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+
+					/* do not allow beacon data to update
+					 * the data received from a probe response
+					 */
+					if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
+						(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+						goto exit;
+
+					WL_DBG(("%s("MACDBG"), i=%d prev: RSSI %d"
+						" flags 0x%x, new: RSSI %d flags 0x%x\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
+						bss->RSSI, bss->flags, bi->RSSI, bi->flags));
+
+					if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
+						/* preserve max RSSI if the measurements are
+						* both on-channel or both off-channel
+						*/
+						WL_SCAN(("%s("MACDBG"), same onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = MAX(bss->RSSI, bi->RSSI);
+					} else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
+						/* preserve the on-channel rssi measurement
+						* if the new measurement is off channel
+						*/
+						WL_SCAN(("%s("MACDBG"), prev onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = bss->RSSI;
+						bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
+					}
+					if (dtoh32(bss->length) != bi_length) {
+						u32 prev_len = dtoh32(bss->length);
+
+						WL_SCAN(("bss info replacement"
+							" occurred (bcast:%d -> probresp:%d)\n",
+							bss->ie_length, bi->ie_length));
+						WL_DBG(("%s("MACDBG"), replacement!(%d -> %d)\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						prev_len, bi_length));
+
+						if (list->buflen - prev_len + bi_length
+							> ESCAN_BUF_SIZE) {
+							WL_ERR(("Buffer is too small: keep the"
+								" previous result of this AP\n"));
+							/* Only update RSSI */
+							bss->RSSI = bi->RSSI;
+							bss->flags |= (bi->flags
+								& WL_BSS_FLAGS_RSSI_ONCHANNEL);
+							goto exit;
+						}
+
+						if (i < list->count - 1) {
+							/* memory copy required by this case only */
+							memmove((u8 *)bss + bi_length,
+								(u8 *)bss + prev_len,
+								list->buflen - cur_len - prev_len);
+						}
+						list->buflen -= prev_len;
+						list->buflen += bi_length;
+					}
+					list->version = dtoh32(bi->version);
+					memcpy((u8 *)bss, (u8 *)bi, bi_length);
+					goto exit;
+				}
+				cur_len += dtoh32(bss->length);
+			}
+			if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+				WL_ERR(("Buffer is too small: ignoring\n"));
+				goto exit;
+			}
+
+			memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
+			list->version = dtoh32(bi->version);
+			list->buflen += bi_length;
+			list->count++;
+
+		}
+
+	}
+	else if (status == WLC_E_STATUS_SUCCESS) {
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
+			escan_result->sync_id);
+
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(cfg, SCANNING);
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			WL_INFORM(("ESCAN COMPLETED\n"));
+			cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN COMPLETED: scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, false, false);
+		}
+		wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
+	}
+	else if (status == WLC_E_STATUS_ABORT) {
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id);
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			wl_clr_p2p_status(cfg, SCANNING);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			WL_INFORM(("ESCAN ABORTED\n"));
+			cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, true, false);
+		}
+		wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
+	} else if (status == WLC_E_STATUS_NEWSCAN) {
+		WL_ERR(("WLC_E_STATUS_NEWSCAN : scan_request[%p]\n", cfg->scan_request));
+		WL_ERR(("sync_id[%d], bss_count[%d]\n", escan_result->sync_id,
+			escan_result->bss_count));
+	} else if (status == WLC_E_STATUS_TIMEOUT) {
+		WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+		WL_ERR(("reason[0x%x]\n", e->reason));
+		if (e->reason == 0xFFFFFFFF) {
+			wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+		}
+	} else {
+		WL_ERR(("unexpected Escan Event %d : abort\n", status));
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id);
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(cfg, SCANNING);
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
+					"scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, true, false);
+		}
+		wl_escan_increment_sync_id(cfg, 2);
+	}
+exit:
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
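+/* When roamoff_on_concurrent is set and more than one interface is associated,
+ * save each interface's "roam_off" setting and disable firmware roaming; when
+ * called with enable == 0, restore the saved settings.
+ */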
+static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
+{
+	u32 connected_cnt  = wl_get_drv_status_all(cfg, CONNECTED);
+	struct net_info *iter, *next;
+	int err;
+
+	if (!cfg->roamoff_on_concurrent)
+		return;
+	if (enable && connected_cnt > 1) {
+		for_each_ndev(cfg, iter, next) {
+			/* Save the current roam setting */
+			if ((err = wldev_iovar_getint(iter->ndev, "roam_off",
+				(s32 *)&iter->roam_off)) != BCME_OK) {
+				WL_ERR(("%s:Failed to get current roam setting err %d\n",
+					iter->ndev->name, err));
+				continue;
+			}
+			if ((err = wldev_iovar_setint(iter->ndev, "roam_off", 1)) != BCME_OK) {
+				WL_ERR((" %s:failed to set roam_off : %d\n",
+					iter->ndev->name, err));
+			}
+		}
+	}
+	else if (!enable) {
+		for_each_ndev(cfg, iter, next) {
+			if (iter->roam_off != WL_INVALID) {
+				if ((err = wldev_iovar_setint(iter->ndev, "roam_off",
+					iter->roam_off)) == BCME_OK)
+					iter->roam_off = WL_INVALID;
+				else {
+					WL_ERR((" %s:failed to set roam_off : %d\n",
+						iter->ndev->name, err));
+				}
+			}
+		}
+	}
+	return;
+}
+
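+/* Set cfg->vsdb_mode when two or more connected interfaces operate on
+ * different control channels (multi-channel concurrency).
+ */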
+static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *iter, *next;
+	u32 ctl_chan = 0;
+	u32 chanspec = 0;
+	u32 pre_ctl_chan = 0;
+	u32 connected_cnt  = wl_get_drv_status_all(cfg, CONNECTED);
+	cfg->vsdb_mode = false;
+
+	if (connected_cnt <= 1)  {
+		return;
+	}
+	for_each_ndev(cfg, iter, next) {
+		chanspec = 0;
+		ctl_chan = 0;
+		if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+			if (wldev_iovar_getint(iter->ndev, "chanspec",
+				(s32 *)&chanspec) == BCME_OK) {
+				chanspec = wl_chspec_driver_to_host(chanspec);
+				ctl_chan = wf_chspec_ctlchan(chanspec);
+				wl_update_prof(cfg, iter->ndev, NULL,
+					&ctl_chan, WL_PROF_CHAN);
+			}
+			if (!cfg->vsdb_mode) {
+				if (!pre_ctl_chan && ctl_chan)
+					pre_ctl_chan = ctl_chan;
+				else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) {
+					cfg->vsdb_mode = true;
+				}
+			}
+		}
+	}
+	WL_ERR(("%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel"));
+	return;
+}
+
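+/* Called through cfg->state_notifier when an interface's CONNECTED status
+ * changes: on connect it disables roaming for concurrency, re-evaluates VSDB
+ * mode and either forces PM off (VSDB or pm_block) or schedules the PM enable
+ * work, disabling TDLS in VSDB; on disconnect it restores the saved PM, roam
+ * and TDLS settings.
+ */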
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+	enum wl_status state, bool set)
+{
+	s32 pm = PM_FAST;
+	s32 err = BCME_OK;
+	u32 mode;
+	u32 chan = 0;
+	struct net_info *iter, *next;
+	struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+	WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
+		state, set, _net_info->pm_restore, _net_info->ndev->name));
+
+	if (state != WL_STATUS_CONNECTED)
+		return 0;
+	mode = wl_get_mode_by_netdev(cfg, _net_info->ndev);
+	if (set) {
+		wl_cfg80211_concurrent_roam(cfg, 1);
+
+		if (mode == WL_MODE_AP) {
+
+			if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false))
+				WL_ERR((" failed to unset WLC_E_P2P_PROBREQ_MSG\n"));
+		}
+		wl_cfg80211_determine_vsdb_mode(cfg);
+		if (cfg->vsdb_mode || _net_info->pm_block) {
+			/* Delete pm_enable_work */
+			wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_MAINTAIN);
+			/* save PM_FAST in _net_info to restore this
+			 * if _net_info->pm_block is false
+			 */
+			if (!_net_info->pm_block && (mode == WL_MODE_BSS)) {
+				_net_info->pm = PM_FAST;
+				_net_info->pm_restore = true;
+			}
+			pm = PM_OFF;
+			for_each_ndev(cfg, iter, next) {
+				if (iter->pm_restore)
+					continue;
+				/* Save the current power mode */
+				err = wldev_ioctl(iter->ndev, WLC_GET_PM, &iter->pm,
+					sizeof(iter->pm), false);
+				WL_DBG(("%s:power save %s\n", iter->ndev->name,
+					iter->pm ? "enabled" : "disabled"));
+				if (!err && iter->pm) {
+					iter->pm_restore = true;
+				}
+
+			}
+			for_each_ndev(cfg, iter, next) {
+				if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev))
+					continue;
+				if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm,
+					sizeof(pm), true)) != 0) {
+					if (err == -ENODEV)
+						WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+					else
+						WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
+					wl_cfg80211_update_power_mode(iter->ndev);
+				}
+			}
+		} else {
+			/* Add the PM enable timer to go back to power-save mode.
+			 * If the supplicant controls the PM mode, this will be cleared or
+			 * updated by wl_cfg80211_set_power_mgmt(); if not (static IP & HW4 P2P),
+			 * PM will be configured when the timer expires.
+			 */
+
+			/*
+			 * Before arming the PM enable work, force PM off for all ndevs
+			 * (the saved settings are restored on disconnect).
+			 */
+			pm = PM_OFF;
+			if (!_net_info->pm_block) {
+				for_each_ndev(cfg, iter, next) {
+					if (iter->pm_restore)
+						continue;
+					/* Save the current power mode */
+					err = wldev_ioctl(iter->ndev, WLC_GET_PM, &iter->pm,
+						sizeof(iter->pm), false);
+					WL_DBG(("%s:power save %s\n", iter->ndev->name,
+						iter->pm ? "enabled" : "disabled"));
+					if (!err && iter->pm) {
+						iter->pm_restore = true;
+					}
+				}
+			}
+			for_each_ndev(cfg, iter, next) {
+				if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev))
+					continue;
+				if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm,
+					sizeof(pm), true)) != 0) {
+					if (err == -ENODEV)
+						WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+					else
+						WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
+				}
+			}
+
+			if (cfg->pm_enable_work_on) {
+				wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+			}
+
+			cfg->pm_enable_work_on = true;
+			wl_add_remove_pm_enable_work(cfg, TRUE, WL_HANDLER_NOTUSE);
+		}
+#if defined(WLTDLS)
+#if defined(DISABLE_TDLS_IN_P2P)
+		if (cfg->vsdb_mode || p2p_is_on(cfg))
+#else
+		if (cfg->vsdb_mode)
+#endif /* defined(DISABLE_TDLS_IN_P2P) */
+		{
+
+			err = wldev_iovar_setint(primary_dev, "tdls_enable", 0);
+		}
+#endif /* defined(WLTDLS) */
+	}
+	 else { /* clear */
+		chan = 0;
+		/* clear chan information when the net device is disconnected */
+		wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN);
+		wl_cfg80211_determine_vsdb_mode(cfg);
+		for_each_ndev(cfg, iter, next) {
+			if (iter->pm_restore && iter->pm) {
+				WL_DBG(("%s:restoring power save %s\n",
+					iter->ndev->name, (iter->pm ? "enabled" : "disabled")));
+				err = wldev_ioctl(iter->ndev,
+					WLC_SET_PM, &iter->pm, sizeof(iter->pm), true);
+				if (unlikely(err)) {
+					if (err == -ENODEV)
+						WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+					else
+						WL_ERR(("%s:error(%d)\n", iter->ndev->name, err));
+					break;
+				}
+				iter->pm_restore = 0;
+				wl_cfg80211_update_power_mode(iter->ndev);
+			}
+		}
+		wl_cfg80211_concurrent_roam(cfg, 0);
+#if defined(WLTDLS)
+		if (!cfg->vsdb_mode) {
+			err = wldev_iovar_setint(primary_dev, "tdls_enable", 1);
+		}
+#endif /* defined(WLTDLS) */
+	}
+	return err;
+}
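+
+/* Register the escan result handler and set up the scan timeout timer. */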
+static s32 wl_init_scan(struct bcm_cfg80211 *cfg)
+{
+	int err = 0;
+
+	cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+	wl_escan_init_sync_id(cfg);
+
+	/* Init scan_timeout timer */
+	init_timer(&cfg->scan_timeout);
+	cfg->scan_timeout.data = (unsigned long) cfg;
+	cfg->scan_timeout.function = wl_scan_timeout;
+
+	return err;
+}
+
+static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
+{
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	cfg->scan_request = NULL;
+	cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+	cfg->roam_on = false;
+	cfg->active_scan = true;
+	cfg->rf_blocked = false;
+	cfg->vsdb_mode = false;
+#if defined(BCMSDIO)
+	cfg->wlfc_on = false;
+#endif
+	cfg->roamoff_on_concurrent = true;
+	cfg->disable_roam_event = false;
+	/* register interested state */
+	set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state);
+	spin_lock_init(&cfg->cfgdrv_lock);
+	mutex_init(&cfg->ioctl_buf_sync);
+	init_waitqueue_head(&cfg->netif_change_event);
+	init_completion(&cfg->send_af_done);
+	init_completion(&cfg->iface_disable);
+	wl_init_eq(cfg);
+	err = wl_init_priv_mem(cfg);
+	if (err)
+		return err;
+	if (wl_create_event_handler(cfg))
+		return -ENOMEM;
+	wl_init_event_handler(cfg);
+	mutex_init(&cfg->usr_sync);
+	mutex_init(&cfg->event_sync);
+	err = wl_init_scan(cfg);
+	if (err)
+		return err;
+	wl_init_conf(cfg->conf);
+	wl_init_prof(cfg, ndev);
+	wl_link_down(cfg);
+	DNGL_FUNC(dhd_cfg80211_init, (cfg));
+
+	return err;
+}
+
+static void wl_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+	DNGL_FUNC(dhd_cfg80211_deinit, (cfg));
+	wl_destroy_event_handler(cfg);
+	wl_flush_eq(cfg);
+	wl_link_down(cfg);
+	del_timer_sync(&cfg->scan_timeout);
+	wl_deinit_priv_mem(cfg);
+	if (wl_cfg80211_netdev_notifier_registered) {
+		wl_cfg80211_netdev_notifier_registered = FALSE;
+		unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+	}
+}
+
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+static s32 wl_cfg80211_attach_p2p(void)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	WL_TRACE(("Enter \n"));
+
+	if (wl_cfgp2p_register_ndev(cfg) < 0) {
+		WL_ERR(("P2P attach failed. \n"));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static s32  wl_cfg80211_detach_p2p(void)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wireless_dev *wdev;
+
+	WL_DBG(("Enter \n"));
+	if (!cfg) {
+		WL_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	} else
+		wdev = cfg->p2p_wdev;
+
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+	if (!wdev) {
+		WL_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	}
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+	wl_cfgp2p_unregister_ndev(cfg);
+
+	cfg->p2p_wdev = NULL;
+	cfg->p2p_net = NULL;
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+	WL_DBG(("Freeing 0x%08x \n", (unsigned int)wdev));
+	kfree(wdev);
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+	return 0;
+}
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+
+s32 wl_cfg80211_attach_post(struct net_device *ndev)
+{
+	struct bcm_cfg80211 * cfg = NULL;
+	s32 err = 0;
+	s32 ret = 0;
+	WL_TRACE(("In\n"));
+	if (unlikely(!ndev)) {
+		WL_ERR(("ndev is invalid\n"));
+		return -ENODEV;
+	}
+	cfg = g_bcm_cfg;
+	if (unlikely(!cfg)) {
+		WL_ERR(("cfg is invalid\n"));
+		return -EINVAL;
+	}
+	if (!wl_get_drv_status(cfg, READY, ndev)) {
+		if (cfg->wdev) {
+			ret = wl_cfgp2p_supported(cfg, ndev);
+			if (ret > 0) {
+#if !defined(WL_ENABLE_P2P_IF)
+				cfg->wdev->wiphy->interface_modes |=
+					(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+					BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_ENABLE_P2P_IF */
+				if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+					goto fail;
+
+#if defined(WL_ENABLE_P2P_IF)
+				if (cfg->p2p_net) {
+					/* Update MAC addr for p2p0 interface here. */
+					memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
+					cfg->p2p_net->dev_addr[0] |= 0x02;
+					WL_ERR(("%s: p2p_dev_addr="MACDBG "\n",
+						cfg->p2p_net->name,
+						MAC2STRDBG(cfg->p2p_net->dev_addr)));
+				} else {
+					WL_ERR(("p2p_net not yet populated."
+					" Couldn't update the MAC Address for p2p0 \n"));
+					return -ENODEV;
+				}
+#endif /* WL_ENABLE_P2P_IF */
+				cfg->p2p_supported = true;
+			} else if (ret == 0) {
+				if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+					goto fail;
+			} else {
+				/* SDIO bus timeout */
+				err = -ENODEV;
+				goto fail;
+			}
+		}
+	}
+	wl_set_drv_status(cfg, READY, ndev);
+fail:
+	return err;
+}
+
+s32 wl_cfg80211_attach(struct net_device *ndev, void *context)
+{
+	struct wireless_dev *wdev;
+	struct bcm_cfg80211 *cfg;
+	s32 err = 0;
+	struct device *dev;
+
+	WL_TRACE(("In\n"));
+	if (!ndev) {
+		WL_ERR(("ndev is invalid\n"));
+		return -ENODEV;
+	}
+	WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev()));
+	dev = wl_cfg80211_get_parent_dev();
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		return -ENOMEM;
+	}
+	err = wl_setup_wiphy(wdev, dev, context);
+	if (unlikely(err)) {
+		kfree(wdev);
+		return -ENOMEM;
+	}
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+	cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
+	cfg->wdev = wdev;
+	cfg->pub = context;
+	INIT_LIST_HEAD(&cfg->net_list);
+	ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+	wdev->netdev = ndev;
+	cfg->state_notifier = wl_notifier_change_state;
+	err = wl_alloc_netinfo(cfg, ndev, wdev, WL_MODE_BSS, PM_ENABLE);
+	if (err) {
+		WL_ERR(("Failed to alloc net_info (%d)\n", err));
+		goto cfg80211_attach_out;
+	}
+	err = wl_init_priv(cfg);
+	if (err) {
+		WL_ERR(("Failed to init cfg80211 priv (%d)\n", err));
+		goto cfg80211_attach_out;
+	}
+
+	err = wl_setup_rfkill(cfg, TRUE);
+	if (err) {
+		WL_ERR(("Failed to setup rfkill %d\n", err));
+		goto cfg80211_attach_out;
+	}
+#ifdef DEBUGFS_CFG80211
+	err = wl_setup_debugfs(cfg);
+	if (err) {
+		WL_ERR(("Failed to setup debugfs %d\n", err));
+		goto cfg80211_attach_out;
+	}
+#endif
+	if (!wl_cfg80211_netdev_notifier_registered) {
+		wl_cfg80211_netdev_notifier_registered = TRUE;
+		err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+		if (err) {
+			wl_cfg80211_netdev_notifier_registered = FALSE;
+			WL_ERR(("Failed to register netdev notifier %d\n", err));
+			goto cfg80211_attach_out;
+		}
+	}
+#if defined(COEX_DHCP)
+	cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev);
+	if (!cfg->btcoex_info)
+		goto cfg80211_attach_out;
+#endif
+
+	g_bcm_cfg = cfg;
+
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+	err = wl_cfg80211_attach_p2p();
+	if (err)
+		goto cfg80211_attach_out;
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+
+	return err;
+
+cfg80211_attach_out:
+	wl_setup_rfkill(cfg, FALSE);
+	wl_free_wdev(cfg);
+	return err;
+}
+
+void wl_cfg80211_detach(void *para)
+{
+	struct bcm_cfg80211 *cfg;
+
+	(void)para;
+	cfg = g_bcm_cfg;
+
+	WL_TRACE(("In\n"));
+
+	wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+
+#if defined(COEX_DHCP)
+	wl_cfg80211_btcoex_deinit();
+	cfg->btcoex_info = NULL;
+#endif
+
+	wl_setup_rfkill(cfg, FALSE);
+#ifdef DEBUGFS_CFG80211
+	wl_free_debugfs(cfg);
+#endif
+	if (cfg->p2p_supported) {
+		if (timer_pending(&cfg->p2p->listen_timer))
+			del_timer_sync(&cfg->p2p->listen_timer);
+		wl_cfgp2p_deinit_priv(cfg);
+	}
+
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF  */
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+	wl_cfg80211_detach_p2p();
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+
+	wl_cfg80211_ibss_vsie_free(cfg);
+	wl_deinit_priv(cfg);
+	g_bcm_cfg = NULL;
+	wl_cfg80211_clear_parent_dev();
+	wl_free_wdev(cfg);
+	/* PLEASE do NOT call any function after wl_free_wdev, the driver's private
+	 * structure "cfg", which is the private part of wiphy, has been freed in
+	 * wl_free_wdev !!!!!!!!!!!
+	 */
+}
+
+static void wl_wakeup_event(struct bcm_cfg80211 *cfg)
+{
+	if (cfg->event_tsk.thr_pid >= 0) {
+		DHD_OS_WAKE_LOCK(cfg->pub);
+		up(&cfg->event_tsk.sema);
+	}
+}
+
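+/* Event dispatcher thread: woken by wl_wakeup_event(), it drains the event
+ * queue and invokes the registered handler for each event, mapping the
+ * firmware ifidx (or a P2P device event) to the corresponding cfg80211 device.
+ */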
+static s32 wl_event_handler(void *data)
+{
+	struct bcm_cfg80211 *cfg = NULL;
+	struct wl_event_q *e;
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	bcm_struct_cfgdev *cfgdev = NULL;
+
+	cfg = (struct bcm_cfg80211 *)tsk->parent;
+
+	WL_ERR(("tsk Enter, tsk = 0x%p\n", tsk));
+
+	while (down_interruptible (&tsk->sema) == 0) {
+		SMP_RD_BARRIER_DEPENDS();
+		if (tsk->terminated)
+			break;
+		while ((e = wl_deq_event(cfg))) {
+			WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx));
+			/* All P2P device address related events come on the primary interface,
+			 * since there is no corresponding bsscfg for the P2P interface. Map them
+			 * to the p2p0 interface.
+			 */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+			if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_wdev)) {
+				cfgdev = bcmcfg_to_p2p_wdev(cfg);
+			} else {
+				struct net_device *ndev = NULL;
+
+				ndev = dhd_idx2net((struct dhd_pub *)(cfg->pub), e->emsg.ifidx);
+				if (ndev)
+					cfgdev = ndev_to_wdev(ndev);
+			}
+#elif defined(WL_ENABLE_P2P_IF)
+			if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_net)) {
+				cfgdev = cfg->p2p_net;
+			} else {
+				cfgdev = dhd_idx2net((struct dhd_pub *)(cfg->pub),
+					e->emsg.ifidx);
+			}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+			if (!cfgdev) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+				cfgdev = bcmcfg_to_prmry_wdev(cfg);
+#elif defined(WL_ENABLE_P2P_IF)
+				cfgdev = bcmcfg_to_prmry_ndev(cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+			}
+			if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
+				cfg->evt_handler[e->etype] (cfg, cfgdev, &e->emsg, e->edata);
+			} else {
+				WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+			}
+			wl_put_event(e);
+		}
+		DHD_OS_WAKE_UNLOCK(cfg->pub);
+	}
+	WL_ERR(("was terminated\n"));
+	complete_and_exit(&tsk->completed, 0);
+	return 0;
+}
+
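+/* Entry point for firmware events from the DHD layer: drop events arriving
+ * while an interface add/change is in progress or for interfaces the driver
+ * does not track, then enqueue the event and wake the dispatcher thread.
+ */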
+void
+wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
+{
+	u32 event_type = ntoh32(e->event_type);
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+#if (WL_DBG_LEVEL > 0)
+	s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ?
+	    wl_dbg_estr[event_type] : (s8 *) "Unknown";
+	WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr));
+#endif /* (WL_DBG_LEVEL > 0) */
+
+	if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) {
+		WL_ERR(("during IF change, ignore event %d\n", event_type));
+		return;
+	}
+
+	if (ndev != bcmcfg_to_prmry_ndev(cfg) && cfg->p2p_supported) {
+		if ((cfg->bss_cfgdev) &&
+			(ndev == cfgdev_to_wlc_ndev(cfg->bss_cfgdev, cfg))) {
+			/* Event is corresponding to the secondary STA interface */
+			WL_DBG(("DualSta event (%d), proceed to enqueue it \n", event_type));
+		} else if (ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) &&
+#if defined(WL_ENABLE_P2P_IF)
+			(ndev != (cfg->p2p_net ? cfg->p2p_net :
+			wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE))) &&
+#else
+			(ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE)) &&
+#endif /* WL_ENABLE_P2P_IF */
+			TRUE) {
+			WL_ERR(("ignore event %d, not interested\n", event_type));
+			return;
+		}
+	}
+
+	if (event_type == WLC_E_PFN_NET_FOUND) {
+		WL_DBG((" PNOEVENT: PNO_NET_FOUND\n"));
+	}
+	else if (event_type == WLC_E_PFN_NET_LOST) {
+		WL_DBG((" PNOEVENT: PNO_NET_LOST\n"));
+	}
+
+	if (likely(!wl_enq_event(cfg, ndev, event_type, e, data)))
+		wl_wakeup_event(cfg);
+}
+
+static void wl_init_eq(struct bcm_cfg80211 *cfg)
+{
+	wl_init_eq_lock(cfg);
+	INIT_LIST_HEAD(&cfg->eq_list);
+}
+
+static void wl_flush_eq(struct bcm_cfg80211 *cfg)
+{
+	struct wl_event_q *e;
+	unsigned long flags;
+
+	flags = wl_lock_eq(cfg);
+	while (!list_empty(&cfg->eq_list)) {
+		e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list);
+		list_del(&e->eq_list);
+		kfree(e);
+	}
+	wl_unlock_eq(cfg, flags);
+}
+
+/*
+ * retrieve first queued event from head
+ */
+
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg)
+{
+	struct wl_event_q *e = NULL;
+	unsigned long flags;
+
+	flags = wl_lock_eq(cfg);
+	if (likely(!list_empty(&cfg->eq_list))) {
+		e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list);
+		list_del(&e->eq_list);
+	}
+	wl_unlock_eq(cfg, flags);
+
+	return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+static s32
+wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 event,
+	const wl_event_msg_t *msg, void *data)
+{
+	struct wl_event_q *e;
+	s32 err = 0;
+	uint32 evtq_size;
+	uint32 data_len;
+	unsigned long flags;
+	gfp_t aflags;
+
+	data_len = 0;
+	if (data)
+		data_len = ntoh32(msg->datalen);
+	evtq_size = sizeof(struct wl_event_q) + data_len;
+	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	e = kzalloc(evtq_size, aflags);
+	if (unlikely(!e)) {
+		WL_ERR(("event alloc failed\n"));
+		return -ENOMEM;
+	}
+	e->etype = event;
+	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+	if (data)
+		memcpy(e->edata, data, data_len);
+	flags = wl_lock_eq(cfg);
+	list_add_tail(&e->eq_list, &cfg->eq_list);
+	wl_unlock_eq(cfg, flags);
+
+	return err;
+}
+
+static void wl_put_event(struct wl_event_q *e)
+{
+	kfree(e);
+}
+
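+/* Map the nl80211 interface type to the driver mode (IBSS/BSS/AP) and program
+ * the matching WLC_SET_INFRA setting in the firmware.
+ */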
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype)
+{
+	s32 infra = 0;
+	s32 err = 0;
+	s32 mode = 0;
+	switch (iftype) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+		WL_ERR(("type (%d) : currently we do not support this mode\n",
+			iftype));
+		err = -EINVAL;
+		return err;
+	case NL80211_IFTYPE_ADHOC:
+		mode = WL_MODE_IBSS;
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		mode = WL_MODE_BSS;
+		infra = 1;
+		break;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+		mode = WL_MODE_AP;
+		infra = 1;
+		break;
+	default:
+		err = -EINVAL;
+		WL_ERR(("invalid type (%d)\n", iftype));
+		return err;
+	}
+	infra = htod32(infra);
+	err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+		return err;
+	}
+
+	wl_set_mode_by_netdev(cfg, ndev, mode);
+
+	return 0;
+}
+
+void wl_cfg80211_add_to_eventbuffer(struct wl_eventmsg_buf *ev, u16 event, bool set)
+{
+	if (!ev || (event > WLC_E_LAST))
+		return;
+
+	if (ev->num < MAX_EVENT_BUF_NUM) {
+		ev->event[ev->num].type = event;
+		ev->event[ev->num].set = set;
+		ev->num++;
+	} else {
+		WL_ERR(("eventbuffer doesn't support > %u events. Update"
+			" the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM));
+		ASSERT(0);
+	}
+}
+
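+/* Apply a batch of event-mask changes collected with
+ * wl_cfg80211_add_to_eventbuffer(): read the firmware "event_msgs" bitmask,
+ * set or clear the requested bits and write it back under event_sync.
+ */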
+s32 wl_cfg80211_apply_eventbuffer(
+	struct net_device *ndev,
+	struct bcm_cfg80211 *cfg,
+	wl_eventmsg_buf_t *ev)
+{
+	char eventmask[WL_EVENTING_MASK_LEN];
+	int i, ret = 0;
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+	if (!ev || (!ev->num))
+		return -EINVAL;
+
+	mutex_lock(&cfg->event_sync);
+
+	/* Read event_msgs mask */
+	bcm_mkiovar("event_msgs", NULL, 0, iovbuf,
+		sizeof(iovbuf));
+	ret = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false);
+	if (unlikely(ret)) {
+		WL_ERR(("Get event_msgs error (%d)\n", ret));
+		goto exit;
+	}
+	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+
+	/* apply the set bits */
+	for (i = 0; i < ev->num; i++) {
+		if (ev->event[i].set)
+			setbit(eventmask, ev->event[i].type);
+		else
+			clrbit(eventmask, ev->event[i].type);
+	}
+
+	/* Write updated Event mask */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+		sizeof(iovbuf));
+	ret = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+	if (unlikely(ret)) {
+		WL_ERR(("Set event_msgs error (%d)\n", ret));
+	}
+
+exit:
+	mutex_unlock(&cfg->event_sync);
+	return ret;
+}
+
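+/* Enable or disable delivery of a single firmware event type by
+ * read-modify-writing the "event_msgs" bitmask.
+ */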
+s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add)
+{
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+	s8 eventmask[WL_EVENTING_MASK_LEN];
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (!ndev || !cfg)
+		return -ENODEV;
+
+	mutex_lock(&cfg->event_sync);
+
+	/* Setup event_msgs */
+	bcm_mkiovar("event_msgs", NULL, 0, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false);
+	if (unlikely(err)) {
+		WL_ERR(("Get event_msgs error (%d)\n", err));
+		goto eventmsg_out;
+	}
+	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+	if (add) {
+		setbit(eventmask, event);
+	} else {
+		clrbit(eventmask, event);
+	}
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+	if (unlikely(err)) {
+		WL_ERR(("Set event_msgs error (%d)\n", err));
+		goto eventmsg_out;
+	}
+
+eventmsg_out:
+	mutex_unlock(&cfg->event_sync);
+	return err;
+}
+
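+/* Query the firmware "chanspecs" list and populate the 2 GHz / 5 GHz channel
+ * tables, flagging HT40 sidebands and, via "per_chan_info", radar and
+ * passive-scan restrictions.
+ */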
+static int wl_construct_reginfo(struct bcm_cfg80211 *cfg, s32 bw_cap)
+{
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	struct ieee80211_channel *band_chan_arr = NULL;
+	wl_uint32_list_t *list;
+	u32 i, j, index, n_2g, n_5g, band, channel, array_size;
+	u32 *n_cnt = NULL;
+	chanspec_t c = 0;
+	s32 err = BCME_OK;
+	bool update;
+	bool ht40_allowed;
+	u8 *pbuf = NULL;
+	bool dfs_radar_disabled = FALSE;
+
+#define LOCAL_BUF_LEN 1024
+	pbuf = kzalloc(LOCAL_BUF_LEN, GFP_KERNEL);
+
+	if (pbuf == NULL) {
+		WL_ERR(("failed to allocate local buf\n"));
+		return -ENOMEM;
+	}
+	list = (wl_uint32_list_t *)(void *)pbuf;
+	list->count = htod32(WL_NUMCHANSPECS);
+
+
+	err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+		0, pbuf, LOCAL_BUF_LEN, 0, &cfg->ioctl_buf_sync);
+	if (err != 0) {
+		WL_ERR(("get chanspecs failed with %d\n", err));
+		kfree(pbuf);
+		return err;
+	}
+#undef LOCAL_BUF_LEN
+
+	list = (wl_uint32_list_t *)(void *)pbuf;
+	band = array_size = n_2g = n_5g = 0;
+	for (i = 0; i < dtoh32(list->count); i++) {
+		index = 0;
+		update = false;
+		ht40_allowed = false;
+		c = (chanspec_t)dtoh32(list->element[i]);
+		c = wl_chspec_driver_to_host(c);
+		channel = wf_chspec_ctlchan(c);
+
+		if (!CHSPEC_IS40(c) && !CHSPEC_IS20(c)) {
+			WL_DBG(("HT80/160/80p80 center channel : %d\n", channel));
+			continue;
+		}
+		if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) &&
+			(channel <= CH_MAX_2G_CHANNEL)) {
+			band_chan_arr = __wl_2ghz_channels;
+			array_size = ARRAYSIZE(__wl_2ghz_channels);
+			n_cnt = &n_2g;
+			band = IEEE80211_BAND_2GHZ;
+			ht40_allowed = (bw_cap == WLC_N_BW_40ALL) ? true : false;
+		} else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) {
+			band_chan_arr = __wl_5ghz_a_channels;
+			array_size = ARRAYSIZE(__wl_5ghz_a_channels);
+			n_cnt = &n_5g;
+			band = IEEE80211_BAND_5GHZ;
+			ht40_allowed = (bw_cap == WLC_N_BW_20ALL) ? false : true;
+		} else {
+			WL_ERR(("Invalid channel spec 0x%x.\n", c));
+			continue;
+		}
+		if (!ht40_allowed && CHSPEC_IS40(c))
+			continue;
+		for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
+			if (band_chan_arr[j].hw_value == channel) {
+				update = true;
+				break;
+			}
+		}
+		if (update)
+			index = j;
+		else
+			index = *n_cnt;
+		if (index <  array_size) {
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+			band_chan_arr[index].center_freq =
+				ieee80211_channel_to_frequency(channel);
+#else
+			band_chan_arr[index].center_freq =
+				ieee80211_channel_to_frequency(channel, band);
+#endif
+			band_chan_arr[index].hw_value = channel;
+
+			if (CHSPEC_IS40(c) && ht40_allowed) {
+				/* assuming the order is HT20, HT40 Upper,
+				 *  HT40 lower from chanspecs
+				 */
+				u32 ht40_flag = band_chan_arr[index].flags & IEEE80211_CHAN_NO_HT40;
+				if (CHSPEC_SB_UPPER(c)) {
+					if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+						band_chan_arr[index].flags &=
+							~IEEE80211_CHAN_NO_HT40;
+					band_chan_arr[index].flags |= IEEE80211_CHAN_NO_HT40PLUS;
+				} else {
+					/* It should be one of
+					 * IEEE80211_CHAN_NO_HT40 or IEEE80211_CHAN_NO_HT40PLUS
+					 */
+					band_chan_arr[index].flags &= ~IEEE80211_CHAN_NO_HT40;
+					if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+						band_chan_arr[index].flags |=
+							IEEE80211_CHAN_NO_HT40MINUS;
+				}
+			} else {
+				band_chan_arr[index].flags = IEEE80211_CHAN_NO_HT40;
+				if (!dfs_radar_disabled) {
+					if (band == IEEE80211_BAND_2GHZ)
+						channel |= WL_CHANSPEC_BAND_2G;
+					else
+						channel |= WL_CHANSPEC_BAND_5G;
+					channel |= WL_CHANSPEC_BW_20;
+					channel = wl_chspec_host_to_driver(channel);
+					err = wldev_iovar_getint(dev, "per_chan_info", &channel);
+					if (!err) {
+						if (channel & WL_CHAN_RADAR) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+							band_chan_arr[index].flags |=
+								(IEEE80211_CHAN_RADAR
+								| IEEE80211_CHAN_NO_IBSS);
+#else
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_RADAR;
+#endif
+						}
+
+						if (channel & WL_CHAN_PASSIVE)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_PASSIVE_SCAN;
+#else
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_NO_IR;
+#endif
+					} else if (err == BCME_UNSUPPORTED) {
+						dfs_radar_disabled = TRUE;
+						WL_ERR(("does not support per_chan_info\n"));
+					}
+				}
+			}
+			if (!update)
+				(*n_cnt)++;
+		}
+
+	}
+	__wl_band_2ghz.n_channels = n_2g;
+	__wl_band_5ghz_a.n_channels = n_5g;
+	kfree(pbuf);
+	return err;
+}
+
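+/* Read the band list, nmode and bandwidth capability from the firmware,
+ * rebuild the channel tables and register the supported bands (with HT
+ * capabilities) with cfg80211.
+ */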
+s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify)
+{
+	struct wiphy *wiphy;
+	struct net_device *dev;
+	u32 bandlist[3];
+	u32 nband = 0;
+	u32 i = 0;
+	s32 err = 0;
+	s32 index = 0;
+	s32 nmode = 0;
+	bool rollback_lock = false;
+	s32 bw_cap = 0;
+	s32 cur_band = -1;
+	struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS] = {NULL, };
+
+	if (cfg == NULL) {
+		cfg = g_bcm_cfg;
+		mutex_lock(&cfg->usr_sync);
+		rollback_lock = true;
+	}
+	dev = bcmcfg_to_prmry_ndev(cfg);
+
+	memset(bandlist, 0, sizeof(bandlist));
+	err = wldev_ioctl(dev, WLC_GET_BANDLIST, bandlist,
+		sizeof(bandlist), false);
+	if (unlikely(err)) {
+		WL_ERR(("error read bandlist (%d)\n", err));
+		goto end_bands;
+	}
+	err = wldev_ioctl(dev, WLC_GET_BAND, &cur_band,
+		sizeof(s32), false);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		goto end_bands;
+	}
+
+	err = wldev_iovar_getint(dev, "nmode", &nmode);
+	if (unlikely(err)) {
+		WL_ERR(("error reading nmode (%d)\n", err));
+	} else {
+		/* For nmode only, check the bw cap */
+		err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+		if (unlikely(err)) {
+			WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+		}
+	}
+
+	err = wl_construct_reginfo(cfg, bw_cap);
+	if (err) {
+		WL_ERR(("wl_construct_reginfo() fails err=%d\n", err));
+		if (err != BCME_UNSUPPORTED)
+			goto end_bands;
+		err = 0;
+	}
+	wiphy = bcmcfg_to_wiphy(cfg);
+	nband = bandlist[0];
+
+	for (i = 1; i <= nband && i < ARRAYSIZE(bandlist); i++) {
+		index = -1;
+		if (bandlist[i] == WLC_BAND_5G && __wl_band_5ghz_a.n_channels > 0) {
+			bands[IEEE80211_BAND_5GHZ] =
+				&__wl_band_5ghz_a;
+			index = IEEE80211_BAND_5GHZ;
+			if (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G)
+				bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+		}
+		else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) {
+			bands[IEEE80211_BAND_2GHZ] =
+				&__wl_band_2ghz;
+			index = IEEE80211_BAND_2GHZ;
+			if (bw_cap == WLC_N_BW_40ALL)
+				bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+		}
+
+		if ((index >= 0) && nmode) {
+			bands[index]->ht_cap.cap |=
+				(IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40);
+			bands[index]->ht_cap.ht_supported = TRUE;
+			bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+			bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+			/* An HT shall support all EQM rates for one spatial stream */
+			bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
+		}
+
+	}
+
+	wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ];
+	wiphy->bands[IEEE80211_BAND_5GHZ] = bands[IEEE80211_BAND_5GHZ];
+
+	/* If no band was populated, make 2GHz the default */
+	if (wiphy->bands[IEEE80211_BAND_2GHZ] == NULL &&
+		wiphy->bands[IEEE80211_BAND_5GHZ] == NULL) {
+		/* Setup 2Ghz band as default */
+		wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+	}
+
+	if (notify)
+		wiphy_apply_custom_regulatory(wiphy, &brcm_regdom);
+
+end_bands:
+	if (rollback_lock)
+		mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
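+/* Bring-up path: configure the dongle, set the interface mode and wiphy
+ * bands, initialize monitor support and the delayed PM-enable work, then mark
+ * the primary interface READY.
+ */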
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+#ifdef WL_HOST_BAND_MGMT
+	s32 ret = 0;
+#endif /* WL_HOST_BAND_MGMT */
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+	WL_DBG(("In\n"));
+
+	err = dhd_config_dongle(cfg);
+	if (unlikely(err))
+		return err;
+
+	err = wl_config_ifmode(cfg, ndev, wdev->iftype);
+	if (unlikely(err && err != -EINPROGRESS)) {
+		WL_ERR(("wl_config_ifmode failed\n"));
+		if (err == -1) {
+			WL_ERR(("return error %d\n", err));
+			return err;
+		}
+	}
+	err = wl_update_wiphybands(cfg, true);
+	if (unlikely(err)) {
+		WL_ERR(("wl_update_wiphybands failed\n"));
+		if (err == -1) {
+			WL_ERR(("return error %d\n", err));
+			return err;
+		}
+	}
+
+	err = dhd_monitor_init(cfg->pub);
+
+#ifdef WL_HOST_BAND_MGMT
+	/* By default the curr_band is initialized to BAND_AUTO */
+	if ((ret = wl_cfg80211_set_band(ndev, WLC_BAND_AUTO)) < 0) {
+		if (ret == BCME_UNSUPPORTED) {
+			/* Don't fail the initialization, let's just
+			 * fall back to the original method
+			 */
+			WL_ERR(("WL_HOST_BAND_MGMT defined, "
+				"but roam_band iovar not supported \n"));
+		} else {
+			WL_ERR(("roam_band failed. ret=%d", ret));
+			err = -1;
+		}
+	}
+#endif /* WL_HOST_BAND_MGMT */
+#if defined(DHCP_SCAN_SUPPRESS)
+	/* wlan scan_supp timer and work thread info */
+	init_timer(&cfg->scan_supp_timer);
+	cfg->scan_supp_timer.data = (ulong)cfg;
+	cfg->scan_supp_timer.function = wl_cfg80211_scan_supp_timerfunc;
+	INIT_WORK(&cfg->wlan_work, wl_cfg80211_work_handler);
+#endif /* DHCP_SCAN_SUPPRESS */
+	INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+	wl_set_drv_status(cfg, READY, ndev);
+	return err;
+}
+
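+/* Tear-down path: cancel pending work and timers, bring down any active BSS,
+ * complete an outstanding scan request, clear all per-interface status bits
+ * and release P2P/AP resources.
+ */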
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+	unsigned long flags;
+	struct net_info *iter, *next;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF)|| \
+	defined(WL_NEWCFG_PRIVCMD_SUPPORT))
+	struct net_device *p2p_net = cfg->p2p_net;
+#endif /* WL_CFG80211 && (WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT) */
+	u32 bssidx = 0;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+	WL_DBG(("In\n"));
+	/* Delete pm_enable_work */
+	wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+
+#ifdef WL_NAN
+	wl_cfgnan_stop_handler(ndev, g_bcm_cfg, NULL, NULL);
+#endif /* WL_NAN */
+
+	if (cfg->p2p_supported) {
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+		if (cfg->p2p->vif_created) {
+			bool enabled = false;
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+				dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+				dhd_wlfc_deinit(dhd);
+				cfg->wlfc_on = false;
+			}
+		}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+	}
+
+#if defined(DHCP_SCAN_SUPPRESS)
+	/* Force clear of scan_suppress */
+	if (cfg->scan_suppressed)
+		wl_cfg80211_scan_suppress(ndev, 0);
+	if (timer_pending(&cfg->scan_supp_timer))
+		del_timer_sync(&cfg->scan_supp_timer);
+	cancel_work_sync(&cfg->wlan_work);
+#endif /* DHCP_SCAN_SUPPRESS */
+
+	/* If the primary BSS is operational (e.g. SoftAP), bring it down */
+	if (!(wl_cfgp2p_find_idx(cfg, ndev, &bssidx)) &&
+		wl_cfgp2p_bss_isup(ndev, bssidx)) {
+		if (wl_cfgp2p_bss(cfg, ndev, bssidx, 0) < 0)
+			WL_ERR(("BSS down failed \n"));
+	}
+
+	/* Check if cfg80211 interface is already down */
+	if (!wl_get_drv_status(cfg, READY, ndev))
+		return err;	/* it is not even ready */
+	for_each_ndev(cfg, iter, next)
+		wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+
+#ifdef WL_SDO
+	wl_cfg80211_sdo_deinit(cfg);
+#endif
+
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+		cfg80211_scan_done(cfg->scan_request, true);
+		cfg->scan_request = NULL;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	for_each_ndev(cfg, iter, next) {
+		wl_clr_drv_status(cfg, READY, iter->ndev);
+		wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+		wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+		wl_clr_drv_status(cfg, CONNECTING, iter->ndev);
+		wl_clr_drv_status(cfg, CONNECTED, iter->ndev);
+		wl_clr_drv_status(cfg, DISCONNECTING, iter->ndev);
+		wl_clr_drv_status(cfg, AP_CREATED, iter->ndev);
+		wl_clr_drv_status(cfg, AP_CREATING, iter->ndev);
+	}
+	bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype =
+		NL80211_IFTYPE_STATION;
+#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF)|| \
+	defined(WL_NEWCFG_PRIVCMD_SUPPORT))
+		if (p2p_net)
+			dev_close(p2p_net);
+#endif /* WL_CFG80211 && (WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT) */
+	wl_flush_eq(cfg);
+	wl_link_down(cfg);
+	if (cfg->p2p_supported)
+		wl_cfgp2p_down(cfg);
+	if (cfg->ap_info) {
+		kfree(cfg->ap_info->wpa_ie);
+		kfree(cfg->ap_info->rsn_ie);
+		kfree(cfg->ap_info->wps_ie);
+		kfree(cfg->ap_info);
+		cfg->ap_info = NULL;
+	}
+	dhd_monitor_uninit();
+#ifdef WLAIBSS_MCHAN
+	bcm_cfg80211_del_ibss_if(cfg->wdev->wiphy, cfg->ibss_cfgdev);
+#endif /* WLAIBSS_MCHAN */
+
+#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
+	/* Clean up if not removed already */
+	if (cfg->bss_cfgdev)
+		wl_cfg80211_del_iface(cfg->wdev->wiphy, cfg->bss_cfgdev);
+#endif /* defined (DUAL_STA) || defined (DUAL_STA_STATIC_IF) */
+
+	DNGL_FUNC(dhd_cfg80211_down, (cfg));
+
+	return err;
+}
+
+s32 wl_cfg80211_up(void *para)
+{
+	struct bcm_cfg80211 *cfg;
+	s32 err = 0;
+	int val = 1;
+	dhd_pub_t *dhd;
+
+	(void)para;
+	WL_DBG(("In\n"));
+	cfg = g_bcm_cfg;
+
+	if ((err = wldev_ioctl(bcmcfg_to_prmry_ndev(cfg), WLC_GET_VERSION, &val,
+		sizeof(int), false)) < 0) {
+		WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err));
+		return err;
+	}
+	val = dtoh32(val);
+	if (val != WLC_IOCTL_VERSION && val != 1) {
+		WL_ERR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+			val, WLC_IOCTL_VERSION));
+		return BCME_VERSION;
+	}
+	ioctl_version = val;
+	WL_TRACE(("WLC_GET_VERSION=%d\n", ioctl_version));
+
+	mutex_lock(&cfg->usr_sync);
+	dhd = (dhd_pub_t *)(cfg->pub);
+	if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg));
+		if (unlikely(err))
+			return err;
+	}
+#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
+	if (dhd->fw_4way_handshake)
+		cfg->wdev->wiphy->features |= NL80211_FEATURE_FW_4WAY_HANDSHAKE;
+#endif
+	err = __wl_cfg80211_up(cfg);
+	if (unlikely(err))
+		WL_ERR(("__wl_cfg80211_up failed\n"));
+	mutex_unlock(&cfg->usr_sync);
+
+#ifdef WLAIBSS_MCHAN
+	bcm_cfg80211_add_ibss_if(cfg->wdev->wiphy, IBSS_IF_NAME);
+#endif /* WLAIBSS_MCHAN */
+
+#ifdef DUAL_STA_STATIC_IF
+#ifdef DUAL_STA
+#error "Both DUAL_STA and DUAL_STA_STATIC_IF can't be enabled together"
+#endif
+	/* Static interfaces are currently supported only in STA-only builds (without P2P) */
+	wl_cfg80211_create_iface(cfg->wdev->wiphy, NL80211_IFTYPE_STATION, NULL, "wlan%d");
+#endif /* DUAL_STA_STATIC_IF */
+
+	return err;
+}
+
+/* Private Event to Supplicant with indication that chip hangs */
+int wl_cfg80211_hang(struct net_device *dev, u16 reason)
+{
+	struct bcm_cfg80211 *cfg;
+	cfg = g_bcm_cfg;
+
+	WL_ERR(("In : chip crash eventing\n"));
+	wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+	cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL);
+	if (cfg != NULL) {
+		wl_link_down(cfg);
+	}
+	return 0;
+}
+
+s32 wl_cfg80211_down(void *para)
+{
+	struct bcm_cfg80211 *cfg;
+	s32 err = 0;
+
+	(void)para;
+	WL_DBG(("In\n"));
+	cfg = g_bcm_cfg;
+	mutex_lock(&cfg->usr_sync);
+	err = __wl_cfg80211_down(cfg);
+	mutex_unlock(&cfg->usr_sync);
+
+	return err;
+}
+
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
+{
+	unsigned long flags;
+	void *rptr = NULL;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	if (!profile)
+		return NULL;
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	switch (item) {
+	case WL_PROF_SEC:
+		rptr = &profile->sec;
+		break;
+	case WL_PROF_ACT:
+		rptr = &profile->active;
+		break;
+	case WL_PROF_BSSID:
+		rptr = profile->bssid;
+		break;
+	case WL_PROF_SSID:
+		rptr = &profile->ssid;
+		break;
+	case WL_PROF_CHAN:
+		rptr = &profile->channel;
+		break;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	if (!rptr)
+		WL_ERR(("invalid item (%d)\n", item));
+	return rptr;
+}
+
+static s32
+wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, s32 item)
+{
+	s32 err = 0;
+	struct wlc_ssid *ssid;
+	unsigned long flags;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	if (!profile)
+		return WL_INVALID;
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	switch (item) {
+	case WL_PROF_SSID:
+		ssid = (wlc_ssid_t *) data;
+		memset(profile->ssid.SSID, 0,
+			sizeof(profile->ssid.SSID));
+		memcpy(profile->ssid.SSID, ssid->SSID, ssid->SSID_len);
+		profile->ssid.SSID_len = ssid->SSID_len;
+		break;
+	case WL_PROF_BSSID:
+		if (data)
+			memcpy(profile->bssid, data, ETHER_ADDR_LEN);
+		else
+			memset(profile->bssid, 0, ETHER_ADDR_LEN);
+		break;
+	case WL_PROF_SEC:
+		memcpy(&profile->sec, data, sizeof(profile->sec));
+		break;
+	case WL_PROF_ACT:
+		profile->active = *(bool *)data;
+		break;
+	case WL_PROF_BEACONINT:
+		profile->beacon_interval = *(u16 *)data;
+		break;
+	case WL_PROF_DTIMPERIOD:
+		profile->dtim_period = *(u8 *)data;
+		break;
+	case WL_PROF_CHAN:
+		profile->channel = *(u32*)data;
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	if (err == -EOPNOTSUPP)
+		WL_ERR(("unsupported item (%d)\n", item));
+
+	return err;
+}
+
+void wl_cfg80211_dbg_level(u32 level)
+{
+	/*
+	 * Prohibit changing the debug level via insmod parameter;
+	 * eventually the debug level will be configured at compile
+	 * time using CONFIG_XXX.
+	 */
+	/* wl_dbg_level = level; */
+}
+
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	return wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS;
+}
+
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg)
+{
+	return cfg->ibss_starter;
+}
+
+static void wl_rst_ie(struct bcm_cfg80211 *cfg)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+
+	ie->offset = 0;
+}
+
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ei crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	ie->buf[ie->offset] = t;
+	ie->buf[ie->offset + 1] = l;
+	memcpy(&ie->buf[ie->offset + 2], v, l);
+	ie->offset += l + 2;
+
+	return err;
+}
+
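+/* Hidden APs advertise a zero-length (or zeroed) SSID IE in beacons; copy the
+ * SSID learned from the probe response into the IE stream and, on roam,
+ * resize the IE if the lengths differ.
+ */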
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam)
+{
+	u8 *ssidie;
+	ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size);
+	if (!ssidie)
+		return;
+	if (ssidie[1] != bi->SSID_len) {
+		if (ssidie[1]) {
+			WL_ERR(("%s: Wrong SSID len: %d != %d\n",
+				__FUNCTION__, ssidie[1], bi->SSID_len));
+		}
+		if (roam) {
+			WL_ERR(("Changing the SSID Info.\n"));
+			memmove(ssidie + bi->SSID_len + 2,
+				(ssidie + 2) + ssidie[1],
+				*ie_size - (ssidie + 2 + ssidie[1] - ie_stream));
+			memcpy(ssidie + 2, bi->SSID, bi->SSID_len);
+			*ie_size = *ie_size + bi->SSID_len - ssidie[1];
+			ssidie[1] = bi->SSID_len;
+		}
+		return;
+	}
+	if (*(ssidie + 2) == '\0')
+		 memcpy(ssidie + 2, bi->SSID, bi->SSID_len);
+	return;
+}
+
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ei_stream crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
+	ie->offset += ie_size;
+
+	return err;
+}
+
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset > dst_size)) {
+		WL_ERR(("dst_size is not enough\n"));
+		return -ENOSPC;
+	}
+	memcpy(dst, &ie->buf[0], ie->offset);
+
+	return err;
+}
+
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+
+	return ie->offset;
+}
+
+static void wl_link_up(struct bcm_cfg80211 *cfg)
+{
+	cfg->link_up = true;
+}
+
+static void wl_link_down(struct bcm_cfg80211 *cfg)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+
+	WL_DBG(("In\n"));
+	cfg->link_up = false;
+	conn_info->req_ie_len = 0;
+	conn_info->resp_ie_len = 0;
+}
+
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cfg->eq_lock, flags);
+	return flags;
+}
+
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags)
+{
+	spin_unlock_irqrestore(&cfg->eq_lock, flags);
+}
+
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg)
+{
+	spin_lock_init(&cfg->eq_lock);
+}
+
+static void wl_delay(u32 ms)
+{
+	if (in_atomic() || (ms < jiffies_to_msecs(1))) {
+		OSL_DELAY(ms*1000);
+	} else {
+		OSL_SLEEP(ms);
+	}
+}
+
+s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct ether_addr p2pif_addr;
+	struct ether_addr primary_mac;
+	if (!cfg->p2p)
+		return -1;
+	if (!p2p_is_on(cfg)) {
+		get_primary_mac(cfg, &primary_mac);
+		wl_cfgp2p_generate_bss_mac(&primary_mac, p2pdev_addr, &p2pif_addr);
+	} else {
+		memcpy(p2pdev_addr->octet,
+			cfg->p2p->dev_addr.octet, ETHER_ADDR_LEN);
+	}
+
+
+	return 0;
+}
+s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg;
+
+	cfg = g_bcm_cfg;
+
+	return wl_cfgp2p_set_p2p_noa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg;
+	cfg = g_bcm_cfg;
+
+	return wl_cfgp2p_get_p2p_noa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg;
+	cfg = g_bcm_cfg;
+
+	return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_channel_to_freq(u32 channel)
+{
+	int freq = 0;
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+	freq = ieee80211_channel_to_frequency(channel);
+#else
+	{
+		u16 band = 0;
+		if (channel <= CH_MAX_2G_CHANNEL)
+			band = IEEE80211_BAND_2GHZ;
+		else
+			band = IEEE80211_BAND_5GHZ;
+		freq = ieee80211_channel_to_frequency(channel, band);
+	}
+#endif
+	return freq;
+}
+
+#ifdef WL_SDO
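+/* Service Discovery Offload (SDO): P2P service discovery is offloaded to the
+ * firmware through the p2po_* iovars; results are delivered to userspace with
+ * wl_genl_send_msg() by the handlers below.
+ */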
+#define MAX_QR_LEN NLMSG_GOODSIZE
+
+typedef struct wl_cfg80211_dev_info {
+	u16 band;
+	u16 freq;
+	s16 rssi;
+	u16 ie_len;
+	u8 bssid[ETH_ALEN];
+} wl_cfg80211_dev_info_t;
+
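+/* WLC_E_P2PO_ADD_DEVICE / WLC_E_P2PO_DEL_DEVICE handler: forwards the peer's
+ * IEs plus a small info header (or just its address on deletion) to userspace
+ * through wl_genl_send_msg().
+ */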
+static s32
+wl_notify_device_discovery(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	int err = 0;
+	u32 event = ntoh32(e->event_type);
+	wl_cfg80211_dev_info_t info;
+	struct wl_bss_info *bi = NULL;
+	struct net_device *ndev = NULL;
+	u8 *buf = NULL;
+	u32 buflen = 0;
+	u16 channel = 0;
+	wl_escan_result_t *escan_result;
+
+	WL_SD(("Enter. type:%d \n", event));
+
+	if ((event != WLC_E_P2PO_ADD_DEVICE) && (event != WLC_E_P2PO_DEL_DEVICE)) {
+		WL_ERR(("Unknown Event\n"));
+		return -EINVAL;
+	}
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	if (event == WLC_E_P2PO_DEL_DEVICE) {
+		WL_SD(("DEV_LOST MAC:"MACDBG" \n", MAC2STRDBG(e->addr.octet)));
+		err = wl_genl_send_msg(ndev, event, (u8 *)e->addr.octet, ETH_ALEN, 0, 0);
+	} else {
+
+		escan_result = (wl_escan_result_t *) data;
+
+		if (dtoh16(escan_result->bss_count) != 1) {
+			WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+			err = -EINVAL;
+			goto exit;
+		}
+
+		bi = escan_result->bss_info;
+		buflen = dtoh32(bi->length);
+		if (unlikely(buflen > WL_BSS_INFO_MAX)) {
+			WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+			err = -EINVAL;
+			goto exit;
+		}
+
+		/* Update sub-header */
+		bzero(&info, sizeof(wl_cfg80211_dev_info_t));
+		channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+		info.freq = wl_cfg80211_channel_to_freq(channel);
+		info.rssi = wl_rssi_offset(dtoh16(bi->RSSI));
+		memcpy(info.bssid, &bi->BSSID, ETH_ALEN);
+		info.ie_len = buflen;
+
+		WL_SD(("DEV_FOUND band:%x Freq:%d rssi:%x "MACDBG" \n",
+			info.band, info.freq, info.rssi, MAC2STRDBG(info.bssid)));
+
+		buf =  ((u8 *) bi) + bi->ie_offset;
+		err = wl_genl_send_msg(ndev, event, buf,
+			buflen, (u8 *)&info, sizeof(wl_cfg80211_dev_info_t));
+	}
+exit:
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+s32
+wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg)
+{
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+	if (cfg->sdo) {
+		WL_SD(("SDO already initialized\n"));
+		return 0;
+	}
+
+	cfg->sdo = kzalloc(sizeof(sd_offload_t), kflags);
+	if (!cfg->sdo) {
+		WL_ERR(("malloc failed for SDO \n"));
+		return -ENOMEM;
+	}
+
+	return  0;
+}
+
+s32
+wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg)
+{
+	s32 bssidx;
+	int ret = 0;
+	int sdo_pause = 0;
+	if (!cfg || !cfg->p2p) {
+		WL_ERR(("Wl %p or cfg->p2p %p is null\n",
+			cfg, cfg ? cfg->p2p : 0));
+		return 0;
+	}
+
+	bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	if (!cfg->sdo) {
+		WL_DBG(("SDO Not Initialized. Do nothing. \n"));
+		return 0;
+	}
+	if (cfg->sdo->dd_state &&
+		(ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg),
+		"p2po_stop", (void*)&sdo_pause, sizeof(sdo_pause),
+		cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL)) < 0) {
+		WL_ERR(("p2po_stop Failed :%d\n", ret));
+	}
+	kfree(cfg->sdo);
+	cfg->sdo = NULL;
+
+	WL_SD(("SDO Deinit Done \n"));
+
+	return  0;
+}
+
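+/* Resume a paused offload: restore the saved find/listen state in firmware
+ * and re-arm the GAS event mask that "p2po_stop" cleared.
+ */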
+s32
+wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg)
+{
+	wl_sd_listen_t sd_listen;
+	int ret = 0;
+	s32 bssidx =  wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+
+	WL_DBG(("Enter\n"));
+
+	if (!cfg->sdo) {
+		return -EINVAL;
+	}
+
+	if (dev == NULL)
+		dev = bcmcfg_to_prmry_ndev(cfg);
+
+	/* Disable the ESCAN events again while the offload is active */
+	wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
+
+	/* Resume according to the saved state */
+	if (cfg->sdo->dd_state == WL_DD_STATE_SEARCH) {
+		if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_find", NULL, 0,
+			cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL)) < 0) {
+			WL_ERR(("p2po_find Failed :%d\n", ret));
+		}
+	} else if (cfg->sdo->dd_state == WL_DD_STATE_LISTEN) {
+		sd_listen.interval = cfg->sdo->sd_listen.interval;
+		sd_listen.period = cfg->sdo->sd_listen.period;
+
+		if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&sd_listen,
+			sizeof(wl_sd_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+			bssidx, NULL)) < 0) {
+			WL_ERR(("p2po_listen Failed :%d\n", ret));
+		}
+
+	}
+
+	/* p2po_stop clears the eventmask for GAS. Set it back */
+	wl_add_remove_eventmsg(dev, WLC_E_SERVICE_FOUND, true);
+	wl_add_remove_eventmsg(dev, WLC_E_GAS_FRAGMENT_RX, true);
+	wl_add_remove_eventmsg(dev, WLC_E_GAS_COMPLETE, true);
+
+	WL_SD(("SDO Resumed \n"));
+
+	return ret;
+}
+
+s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg)
+{
+
+	int ret = 0;
+	s32 bssidx =  wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	int sdo_pause = 1;
+
+	WL_DBG(("Enter \n"));
+
+	if (!cfg->sdo) {
+		WL_ERR(("SDO not initialized \n"));
+		return -EINVAL;
+	}
+
+	if (dev == NULL)
+		dev = bcmcfg_to_prmry_ndev(cfg);
+
+	if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop",
+		(void*)&sdo_pause, sizeof(sdo_pause),
+		cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("p2po_stop Failed :%d\n", ret));
+	}
+
+	/* Enable back the ESCAN events for the SCAN */
+	wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true);
+
+	WL_SD(("SDO Paused \n"));
+
+	return ret;
+}
+
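+/* WLC_E_SERVICE_FOUND handler: validates the GAS response, builds an
+ * sdo_event_t sub-header (freq, tlv count, peer address) and sends the TLVs
+ * to userspace via wl_genl_send_msg().
+ */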
+static s32
+wl_svc_resp_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	u32 event = ntoh32(e->event_type);
+	struct net_device *ndev = NULL;
+	u8 *dst_mac = (u8 *)e->addr.octet;
+	int ret = 0;
+	wl_event_sd_t *gas = NULL;
+	int status = ntoh32(e->status);
+	sdo_event_t sdo_hdr;
+	u32 data_len = ntoh32(e->datalen);
+	u8 *data_ptr = NULL;
+	u32 tot_len = 0;
+
+
+	WL_SD(("Enter event_type:%d status:%d\n", event, status));
+
+	if (!cfg->sdo) {
+		WL_ERR(("SDO Not initialized \n"));
+		return -EINVAL;
+	}
+
+	if (!(cfg->sdo->sd_state & WL_SD_SEARCH_SVC)) {
+		/* We are not searching for any service. Drop
+		 * any bogus Event
+		 */
+		WL_ERR(("Bogus SDO Event. Do nothing.. \n"));
+		return -1;
+	}
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	if (event == WLC_E_SERVICE_FOUND) {
+
+		if ((status != WLC_E_STATUS_SUCCESS) && (status != WLC_E_STATUS_PARTIAL)) {
+			WL_ERR(("WLC_E_SERVICE_FOUND: unknown status \n"));
+			goto exit;
+		}
+
+		gas = (wl_event_sd_t *)data;
+		if (!gas) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		bzero(&sdo_hdr, sizeof(sdo_event_t));
+		sdo_hdr.freq = wl_cfg80211_channel_to_freq(gas->channel);
+		sdo_hdr.count = gas->count;
+		memcpy(sdo_hdr.addr, dst_mac, ETH_ALEN);
+		data_ptr = (char *)gas->tlv;
+		tot_len = data_len - (sizeof(wl_event_sd_t) - sizeof(wl_sd_tlv_t));
+
+		WL_SD(("WLC_E_SERVICE_FOUND "MACDBG" data_len:%d tlv_count:%d \n",
+			MAC2STRDBG(dst_mac), data_len, sdo_hdr.count));
+
+		if (tot_len > NLMSG_DEFAULT_SIZE) {
+			WL_ERR(("size(%u)  > %lu not supported \n", tot_len, NLMSG_DEFAULT_SIZE));
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		if (wl_genl_send_msg(ndev, event, data_ptr,
+			tot_len, (u8 *)&sdo_hdr, sizeof(sdo_event_t)) < 0)
+			WL_ERR(("Couldn't send up the NETLINK Event \n"));
+		else
+			WL_SD(("GAS event sent up \n"));
+	} else {
+		WL_ERR(("Unsupported Event: %d \n", event));
+	}
+
+exit:
+	mutex_unlock(&cfg->usr_sync);
+	return ret;
+}
+
+s32 wl_cfg80211_DsdOffloadParseProto(char* proto_str, u8* proto)
+{
+	s32 len = -1;
+	int i = 0;
+
+	for (i = 0; i < MAX_SDO_PROTO; i++) {
+		if (strncmp(proto_str, wl_sdo_protos[i].str, strlen(wl_sdo_protos[i].str)) == 0) {
+			WL_SD(("Matching proto (%d) found \n", wl_sdo_protos[i].val));
+			*proto = wl_sdo_protos[i].val;
+			len = strlen(wl_sdo_protos[i].str);
+			break;
+		}
+	}
+	return len;
+}
+
+/*
+ * register to search for a UPnP service
+ * ./DRIVER P2P_SD_REQ upnp 0x10urn:schemas-upnporg:device:InternetGatewayDevice:1
+ *
+ * Enable discovery
+ * ./cfg p2po_find
+*/
+#define UPNP_QUERY_VER_OFFSET 3
+s32 wl_sd_handle_sd_req(
+	struct net_device *dev,
+	u8 * buf,
+	int len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 bssidx = 0;
+	wl_sd_qr_t *sdreq;
+	u8 proto = 0;
+	s32 ret = 0;
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	u32 tot_len = len + sizeof(wl_sd_qr_t);
+	u16 version = 0;
+
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("find_idx failed\n"));
+		return -EINVAL;
+	}
+	/* Check for the least arg length expected */
+	if (!buf || (len < strlen("all"))) {
+		WL_ERR(("Wrong Arg\n"));
+		return -EINVAL;
+	}
+
+	if (tot_len > WLC_IOCTL_MAXLEN) {
+		WL_ERR(("Length > %d not supported \n", WLC_IOCTL_MAXLEN));
+		return -EINVAL;
+	}
+
+	sdreq = kzalloc(tot_len, kflags);
+	if (!sdreq) {
+		WL_ERR(("malloc failed\n"));
+		return -ENOMEM;
+	}
+
+	WL_SD(("%s Len: %d\n", buf, len));
+	if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) {
+		WL_ERR(("Unknown proto \n"));
+		goto exit;
+	}
+
+	sdreq->protocol = proto;
+	buf += ret;
+	buf++; /* skip the space */
+	sdreq->transaction_id = simple_strtoul(buf, NULL, 16);
+	WL_SD(("transaction_id:%d\n", sdreq->transaction_id));
+	buf += sizeof(sdreq->transaction_id);
+
+	if (*buf == '\0') {
+		WL_SD(("No Query present. Proto:%d \n", proto));
+		sdreq->query_len = 0;
+	} else {
+		buf++; /* skip the space */
+		/* UPNP version needs to be put in as a binary value */
+		if (sdreq->protocol == SVC_RPOTYPE_UPNP) {
+			/* Extract UPNP version */
+			version = simple_strtoul(buf, NULL, 16);
+			buf = buf + UPNP_QUERY_VER_OFFSET;
+			buf[0] = version;
+			WL_SD(("Upnp version: 0x%x \n", version));
+		}
+
+		len = strlen(buf);
+		WL_SD(("Len after stripping proto: %d Query: %s\n", len, buf));
+		/* copy the query part */
+		memcpy(sdreq->qrbuf, buf, len);
+		sdreq->query_len = len;
+	}
+
+	/* Enable discovery */
+	if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+		WL_ERR(("cfgp2p_enable discovery failed"));
+		goto exit;
+	}
+
+	if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_sd_req_resp", (void*)sdreq,
+		tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+		bssidx, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("Find SVC Failed \n"));
+		goto exit;
+	}
+
+	cfg->sdo->sd_state |= WL_SD_SEARCH_SVC;
+
+exit:
+	kfree(sdreq);
+	return ret;
+}
+
+s32 wl_sd_handle_sd_cancel_req(
+	struct net_device *dev,
+	u8 *buf)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 bssidx =  wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+
+	if (wldev_iovar_setbuf_bsscfg(dev, "p2po_sd_cancel", NULL,
+		0, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+		bssidx, &cfg->ioctl_buf_sync) < 0) {
+		WL_ERR(("Cancel SD Failed \n"));
+		return -EINVAL;
+	}
+
+	cfg->sdo->sd_state &= ~WL_SD_SEARCH_SVC;
+
+	return 0;
+}
+
+/*
+ * register a UPnP service to be discovered
+ * ./cfg P2P_SD_SVC_ADD upnp 0x10urn:schemas-upnporg:device:InternetGatewayDevice:1 0x10uu
+ * id:6859dede-8574-59ab-9332-123456789012::urn:schemas-upnporg:device:InternetGate
+ * wayDevice:1
+*/
+s32 wl_sd_handle_sd_add_svc(
+	struct net_device *dev,
+	u8 * buf,
+	int len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 bssidx = 0;
+	wl_sd_qr_t *sdreq;
+	u8 proto = 0;
+	u16 version = 0;
+	s32 ret = 0;
+	u8 *resp = NULL;
+	u8 *query = NULL;
+	u32 tot_len = len + sizeof(wl_sd_qr_t);
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+	if (!buf || !len)
+		return -EINVAL;
+
+	WL_SD(("%s Len: %d\n", buf, len));
+	if (tot_len > WLC_IOCTL_MAXLEN) {
+		WL_ERR(("Query-Resp length > %d not supported \n", WLC_IOCTL_MAXLEN));
+		return -ENOMEM;
+	}
+
+	sdreq = kzalloc(tot_len, kflags);
+	if (!sdreq) {
+		WL_ERR(("malloc failed\n"));
+		return -ENOMEM;
+	}
+
+	if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) {
+		WL_ERR(("Unknown Proto \n"));
+		goto exit;
+	}
+
+	sdreq->protocol = proto;
+	buf += ret;
+
+	if (*buf == '\0') {
+		WL_ERR(("No Query Resp pair present \n"));
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	buf++; /* Skip the space */
+	len = strlen(buf);
+	query = strsep((char **)&buf, " ");
+	if (!query || !buf) {
+		WL_ERR(("No Query RESP Present\n"));
+		ret = -EINVAL;
+		goto exit;
+	}
+	resp = buf;
+
+	if (sdreq->protocol == SVC_RPOTYPE_UPNP) {
+		/* Extract UPNP version */
+		version = simple_strtoul(query, NULL, 16);
+		query = query + UPNP_QUERY_VER_OFFSET;
+		resp = resp + UPNP_QUERY_VER_OFFSET;
+		query[0] = version;
+		resp[0] = version;
+		WL_SD(("Upnp version: 0x%x \n", version));
+	}
+
+	sdreq->query_len = strlen(query);
+	sdreq->response_len = strlen(buf);
+	WL_SD(("query:%s len:%u \n", query, sdreq->query_len));
+	WL_SD(("resp:%s len:%u \n", buf, sdreq->response_len));
+
+	memcpy(sdreq->qrbuf, query, sdreq->query_len);
+	memcpy((sdreq->qrbuf + sdreq->query_len), resp, sdreq->response_len);
+
+	/* Enable discovery */
+	if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+		WL_ERR(("cfgp2p_enable discovery failed"));
+		goto exit;
+	}
+
+	if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_addsvc", (void*)sdreq,
+		tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+		bssidx, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("FW Failed in doing p2po_addsvc. RET:%d \n", ret));
+		goto exit;
+	}
+
+	cfg->sdo->sd_state |= WL_SD_ADV_SVC;
+
+exit:
+	kfree(sdreq);
+	return ret;
+}
+
+s32 wl_sd_handle_sd_del_svc(
+	struct net_device *dev,
+	u8 * buf,
+	int len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 bssidx = 0;
+	wl_sd_qr_t *sdreq;
+	u8 proto = 0;
+	s32 ret = 0;
+	u32 tot_len = len + sizeof(wl_sd_qr_t);
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	u16 version = 0;
+
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("find_idx failed\n"));
+		return -EINVAL;
+	}
+
+	sdreq = (wl_sd_qr_t *)kzalloc(tot_len, kflags);
+	if (!sdreq) {
+		WL_ERR(("malloc failed\n"));
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/* Check for the least arg length expected */
+	if (buf && len >= strlen("all")) {
+		WL_DBG(("%s Len: %d\n", buf, len));
+		if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) {
+			WL_ERR(("Unknown Proto \n"));
+			goto exit;
+		}
+		sdreq->protocol = proto;
+		buf += ret;
+
+		if (*buf == ' ') {
+			/* Query present */
+			buf++; /* Skip the space */
+			/* UPNP version needs to be put in as a binary value */
+			if (sdreq->protocol == SVC_RPOTYPE_UPNP) {
+				/* Extract UPNP version */
+				version = simple_strtoul(buf, NULL, 16);
+				buf = buf + UPNP_QUERY_VER_OFFSET;
+				buf[0] = version;
+				WL_SD(("Upnp version: 0x%x \n", version));
+			}
+			memcpy(sdreq->qrbuf, buf, strlen(buf));
+			sdreq->query_len = strlen(buf);
+			WL_SD(("Query to be deleted:%s len:%d\n", buf, sdreq->query_len));
+		}
+	} else {
+		/* ALL */
+		proto = 0;
+	}
+
+	sdreq->protocol = proto;
+	WL_SD(("Proto: %d \n", proto));
+
+	if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_delsvc", (void*)sdreq,
+		tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+		bssidx, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("FW Failed in doing sd_delsvc. ret=%d \n", ret));
+		goto exit;
+	}
+
+	cfg->sdo->sd_state &= ~WL_SD_ADV_SVC;
+
+exit:
+	if (sdreq)
+		kfree(sdreq);
+
+	return ret;
+}
+
+s32 wl_sd_handle_sd_stop_discovery(
+	struct net_device *dev,
+	u8 * buf,
+	int len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	int ret = 0;
+	int sdo_pause = 0;
+
+	if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", (void*)&sdo_pause,
+		sizeof(sdo_pause), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+		bssidx, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("p2po_stop Failed :%d\n", ret));
+		return -1;
+	}
+
+	if (wldev_iovar_setint(dev, "mpc", 1) < 0) {
+		/* Setting of MPC failed */
+		WL_ERR(("mpc enabling back failed\n"));
+		return -1;
+	}
+
+	/* clear the states */
+	cfg->sdo->dd_state = WL_DD_STATE_IDLE;
+	wl_clr_p2p_status(cfg, DISC_IN_PROGRESS);
+
+	bzero(&cfg->sdo->sd_listen, sizeof(wl_sd_listen_t));
+
+	/* ofind/olisten is stopped, so enable the ESCAN events back */
+	wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true);
+
+	return ret;
+}
+
+s32 wl_sd_handle_sd_find(
+	struct net_device *dev,
+	u8 * buf,
+	int len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	int ret = 0;
+	s32 disc_bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	vndr_ie_setbuf_t *ie_setbuf;
+	vndr_ie_t *vndrie;
+	vndr_ie_buf_t *vndriebuf;
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	int tot_len = 0;
+	uint channel = 0;
+
+	u8 p2pie_buf[] = {
+				0x09, 0x02, 0x02, 0x00, 0x27, 0x0c, 0x06, 0x05, 0x00,
+				0x55, 0x53, 0x04, 0x51, 0x0b, 0x11, 0x05, 0x00, 0x55,
+				0x53, 0x04, 0x51, 0x0b
+			  };
+
+	/* Enable discovery */
+	if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+		WL_ERR(("cfgp2p_enable discovery failed"));
+		return -1;
+	}
+
+	if (buf && strncmp(buf, "chan=", strlen("chan=")) == 0) {
+		buf += strlen("chan=");
+		channel = simple_strtol(buf, NULL, 10);
+		WL_SD(("listen_chan to be set:%d\n", channel));
+		if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
+			sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+			bssidx, &cfg->ioctl_buf_sync)) < 0) {
+				WL_ERR(("p2po_listen_channel Failed :%d\n", ret));
+				return -1;
+		}
+	}
+
+	tot_len = sizeof(vndr_ie_setbuf_t) + sizeof(p2pie_buf);
+	ie_setbuf = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags);
+	if (!ie_setbuf) {
+		WL_ERR(("IE memory alloc failed\n"));
+		return -ENOMEM;
+	}
+
+	/* Apply the p2p_ie for p2po_find */
+	strcpy(ie_setbuf->cmd, "add");
+
+	vndriebuf = &ie_setbuf->vndr_ie_buffer;
+	vndriebuf->iecount = htod32(1);
+	vndriebuf->vndr_ie_list[0].pktflag =  htod32(16);
+
+	vndrie =  &vndriebuf->vndr_ie_list[0].vndr_ie_data;
+
+	vndrie->id = (uchar) DOT11_MNG_PROPR_ID;
+	vndrie->len = sizeof(p2pie_buf);
+	memcpy(vndrie->oui, WFA_OUI, WFA_OUI_LEN);
+	memcpy(vndrie->data, p2pie_buf, sizeof(p2pie_buf));
+
+	/* Remove ESCAN from waking up the host if SDO is enabled */
+	wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
+
+	if (wldev_iovar_setbuf_bsscfg(dev, "ie", (void*)ie_setbuf,
+		tot_len, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+		disc_bssidx, &cfg->ioctl_buf_sync) < 0) {
+		WL_ERR(("p2p add_ie failed \n"));
+		ret = -EINVAL;
+		goto exit;
+	} else
+		WL_SD(("p2p add_ie applied successfully len:%d \n", tot_len));
+
+	if (wldev_iovar_setint(dev, "mpc", 0) < 0) {
+		/* Setting of MPC failed */
+		WL_ERR(("mpc disabling failed\n"));
+		ret = -1;
+		goto exit;
+	}
+
+	if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_find", NULL, 0,
+		cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("p2po_find Failed :%d\n", ret));
+		ret = -1;
+		goto exit;
+	}
+
+	/* set the states */
+	cfg->sdo->dd_state = WL_DD_STATE_SEARCH;
+	wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
+
+exit:
+	if (ie_setbuf)
+		kfree(ie_setbuf);
+
+	/* In case of failure, enable back the ESCAN event */
+	if (ret)
+		wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true);
+
+	return ret;
+}
+
+s32 wl_sd_handle_sd_listen(
+	struct net_device *dev,
+	u8 *buf,
+	int len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	wl_sd_listen_t sd_listen;
+	int ret = 0;
+	u8 * ptr = NULL;
+	uint channel = 0;
+
+	/* Just in case if it is not enabled */
+	if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+		WL_ERR(("cfgp2p_enable discovery failed"));
+		return -1;
+	}
+
+	if (wldev_iovar_setint(dev, "mpc", 0) < 0) {
+		/* Setting of MPC failed */
+		WL_ERR(("mpc disabling failed\n"));
+		return -1;
+	}
+
+	bzero(&sd_listen, sizeof(wl_sd_listen_t));
+
+	if (len) {
+		ptr = strsep((char **)&buf, " ");
+		if (ptr == NULL) {
+			/* period and duration given wrongly */
+			WL_ERR(("Arguments in wrong format \n"));
+			return -EINVAL;
+		}
+		else if (strncmp(ptr, "chan=", strlen("chan=")) == 0) {
+			sd_listen.interval = 65535;
+			sd_listen.period = 65535;
+			ptr += strlen("chan=");
+			channel = simple_strtol(ptr, NULL, 10);
+		}
+		else {
+			sd_listen.period = simple_strtol(ptr, NULL, 10);
+			ptr = strsep((char **)&buf, " ");
+			if (ptr == NULL) {
+				WL_ERR(("Arguments in wrong format \n"));
+				return -EINVAL;
+			}
+			sd_listen.interval = simple_strtol(ptr, NULL, 10);
+			if (buf && strncmp(buf, "chan=", strlen("chan=")) == 0) {
+				buf += strlen("chan=");
+				channel = simple_strtol(buf, NULL, 10);
+			}
+		}
+		WL_SD(("listen_period:%d, listen_interval:%d and listen_channel:%d\n",
+			sd_listen.period, sd_listen.interval, channel));
+	}
+	if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
+		sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+		bssidx, &cfg->ioctl_buf_sync)) < 0) {
+			WL_ERR(("p2po_listen_channel Failed :%d\n", ret));
+			return -1;
+	}
+
+	WL_SD(("p2po_listen period:%d  interval:%d \n",
+		sd_listen.period, sd_listen.interval));
+	if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&sd_listen,
+		sizeof(wl_sd_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+		bssidx, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("p2po_listen Failed :%d\n", ret));
+		return -1;
+	}
+
+	/* Remove ESCAN from waking up the host if ofind/olisten is enabled */
+	wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
+
+	/* Store the extended listen values for use in sdo_resume */
+	cfg->sdo->sd_listen.interval = sd_listen.interval;
+	cfg->sdo->sd_listen.period = sd_listen.period;
+
+	/* set the states */
+	cfg->sdo->dd_state = WL_DD_STATE_LISTEN;
+	wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
+
+	return 0;
+}
+
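+/* Dispatch point for the P2P_SD_* private commands: initializes the SDO
+ * context on first use and routes each command to its handler above.
+ */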
+s32 wl_cfg80211_sd_offload(struct net_device *dev, char *cmd, char* buf, int len)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	WL_SD(("Entry cmd:%s arg_len:%d \n", cmd, len));
+
+	if (!cfg->sdo) {
+		WL_SD(("Initializing SDO \n"));
+		if ((ret = wl_cfg80211_sdo_init(cfg)) < 0)
+			goto exit;
+	}
+
+	if (strncmp(cmd, "P2P_SD_REQ", strlen("P2P_SD_REQ")) == 0) {
+		ret = wl_sd_handle_sd_req(dev, buf, len);
+	} else if (strncmp(cmd, "P2P_SD_CANCEL_REQ", strlen("P2P_SD_CANCEL_REQ")) == 0) {
+		ret = wl_sd_handle_sd_cancel_req(dev, buf);
+	} else if (strncmp(cmd, "P2P_SD_SVC_ADD", strlen("P2P_SD_SVC_ADD")) == 0) {
+		ret = wl_sd_handle_sd_add_svc(dev, buf, len);
+	} else if (strncmp(cmd, "P2P_SD_SVC_DEL", strlen("P2P_SD_SVC_DEL")) == 0) {
+		ret = wl_sd_handle_sd_del_svc(dev, buf, len);
+	} else if (strncmp(cmd, "P2P_SD_FIND", strlen("P2P_SD_FIND")) == 0) {
+		ret = wl_sd_handle_sd_find(dev, buf, len);
+	} else if (strncmp(cmd, "P2P_SD_LISTEN", strlen("P2P_SD_LISTEN")) == 0) {
+		ret = wl_sd_handle_sd_listen(dev, buf, len);
+	} else if (strncmp(cmd, "P2P_SD_STOP", strlen("P2P_SD_STOP")) == 0) {
+		ret = wl_sd_handle_sd_stop_discovery(dev, buf, len);
+	} else {
+		WL_ERR(("Request for Unsupported CMD:%s \n", cmd));
+		ret = -EINVAL;
+	}
+
+exit:
+	return ret;
+}
+#endif /* WL_SDO */
+
+#ifdef WLTDLS
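+/* Logs TDLS peer discovery/connect/disconnect events and, on full-dongle
+ * PCIe builds, keeps the host peer table in sync via
+ * dhd_tdls_update_peer_info().
+ */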
+static s32
+wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data) {
+
+	struct net_device *ndev = NULL;
+	u32 reason = ntoh32(e->reason);
+	s8 *msg = NULL;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	switch (reason) {
+	case WLC_E_TDLS_PEER_DISCOVERED :
+		msg = " TDLS PEER DISCOVERED ";
+		break;
+	case WLC_E_TDLS_PEER_CONNECTED :
+#ifdef PCIE_FULL_DONGLE
+		dhd_tdls_update_peer_info(ndev, TRUE, (uint8 *)&e->addr.octet[0]);
+#endif /* PCIE_FULL_DONGLE */
+		msg = " TDLS PEER CONNECTED ";
+		break;
+	case WLC_E_TDLS_PEER_DISCONNECTED :
+#ifdef PCIE_FULL_DONGLE
+		dhd_tdls_update_peer_info(ndev, FALSE, (uint8 *)&e->addr.octet[0]);
+#endif /* PCIE_FULL_DONGLE */
+		msg = "TDLS PEER DISCONNECTED ";
+		break;
+	}
+	if (msg) {
+		WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((u8*)(&e->addr)),
+			(bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary"));
+	}
+	return 0;
+
+}
+#endif  /* WLTDLS */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
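+/* cfg80211 tdls_oper callback: maps NL80211 TDLS operations onto
+ * dhd_tdls_enable() and the firmware "tdls_endpoint" iovar.
+ */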
+static s32
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, enum nl80211_tdls_operation oper)
+{
+	s32 ret = 0;
+#ifdef WLTDLS
+	struct bcm_cfg80211 *cfg;
+	tdls_iovar_t info;
+	cfg = g_bcm_cfg;
+	memset(&info, 0, sizeof(tdls_iovar_t));
+	if (peer)
+		memcpy(&info.ea, peer, ETHER_ADDR_LEN);
+	switch (oper) {
+	case NL80211_TDLS_DISCOVERY_REQ:
+		/* turn on TDLS */
+		ret = dhd_tdls_enable(dev, true, false, NULL);
+		if (ret < 0)
+			return ret;
+		info.mode = TDLS_MANUAL_EP_DISCOVERY;
+		break;
+	case NL80211_TDLS_SETUP:
+		/* auto mode on */
+		ret = dhd_tdls_enable(dev, true, true, (struct ether_addr *)peer);
+		if (ret < 0)
+			return ret;
+		break;
+	case NL80211_TDLS_TEARDOWN:
+		info.mode = TDLS_MANUAL_EP_DELETE;
+		/* auto mode off */
+		ret = dhd_tdls_enable(dev, true, false, (struct ether_addr *)peer);
+		if (ret < 0)
+			return ret;
+		break;
+	default:
+		WL_ERR(("Unsupported operation : %d\n", oper));
+		goto out;
+	}
+	if (info.mode) {
+		ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		if (ret) {
+			WL_ERR(("tdls_endpoint error %d\n", ret));
+		}
+	}
+out:
+#endif /* WLTDLS */
+	return ret;
+}
+#endif /* LINUX_VERSION > VERSION(3,2,0) || WL_COMPAT_WIRELESS */
+
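+/* Pushes WPS/P2P vendor IEs supplied by userspace into the firmware for the
+ * beacon, probe response or association response of the matching bsscfg.
+ */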
+s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+	enum wl_management_type type)
+{
+	struct bcm_cfg80211 *cfg;
+	struct net_device *ndev = NULL;
+	struct ether_addr primary_mac;
+	s32 ret = 0;
+	s32 bssidx = 0;
+	s32 pktflag = 0;
+	cfg = g_bcm_cfg;
+
+	if (wl_get_drv_status(cfg, AP_CREATING, net)) {
+		/* Vendor IEs should be set to FW
+		 * after SoftAP interface is brought up
+		 */
+		goto exit;
+	} else if (wl_get_drv_status(cfg, AP_CREATED, net)) {
+		ndev = net;
+		bssidx = 0;
+	} else if (cfg->p2p) {
+		net = ndev_to_wlc_ndev(net, cfg);
+		if (!cfg->p2p->on) {
+			get_primary_mac(cfg, &primary_mac);
+			wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr,
+				&cfg->p2p->int_addr);
+			/* In case of the p2p_listen command, the supplicant sends remain_on_channel
+			 * without turning on P2P
+			 */
+
+			p2p_on(cfg) = true;
+			ret = wl_cfgp2p_enable_discovery(cfg, net, NULL, 0);
+
+			if (unlikely(ret)) {
+				goto exit;
+			}
+		}
+		if (net  != bcmcfg_to_prmry_ndev(cfg)) {
+			if (wl_get_mode_by_netdev(cfg, net) == WL_MODE_AP) {
+				ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+				bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION);
+			}
+		} else {
+				ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+				bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+		}
+	}
+	if (ndev != NULL) {
+		switch (type) {
+			case WL_BEACON:
+				pktflag = VNDR_IE_BEACON_FLAG;
+				break;
+			case WL_PROBE_RESP:
+				pktflag = VNDR_IE_PRBRSP_FLAG;
+				break;
+			case WL_ASSOC_RESP:
+				pktflag = VNDR_IE_ASSOCRSP_FLAG;
+				break;
+		}
+		if (pktflag)
+			ret = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, pktflag, buf, len);
+	}
+exit:
+	return ret;
+}
+
+#ifdef WL_SUPPORT_AUTO_CHANNEL
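+/* Auto channel selection (ACS) support: disable mpc, force the interface up
+ * and abort any pending scan before the channel-selection scan is started.
+ */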
+static s32
+wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev)
+{
+	u32 val = 0;
+	s32 ret = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	/* Disable mpc, to avoid automatic interface down. */
+	val = 0;
+
+	ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
+		sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+		&cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("set 'mpc' failed, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Set interface up, explicitly. */
+	val = 1;
+
+	ret = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true);
+	if (ret < 0) {
+		WL_ERR(("set interface up failed, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Stop all scan explicitly, till auto channel selection complete. */
+	wl_set_drv_status(cfg, SCANNING, ndev);
+	if (cfg->escan_info.ndev == NULL) {
+		ret = BCME_OK;
+		goto done;
+	}
+	ret = wl_notify_escan_complete(cfg, ndev, true, true);
+	if (ret < 0) {
+		WL_ERR(("set scan abort failed, error = %d\n", ret));
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+static bool
+wl_cfg80211_valid_chanspec_p2p(chanspec_t chanspec)
+{
+	bool valid = false;
+	char chanbuf[CHANSPEC_STR_LEN];
+
+	/* channel 1 to 14 */
+	if ((chanspec >= 0x2b01) && (chanspec <= 0x2b0e)) {
+		valid = true;
+	}
+	/* channel 36 to 48 */
+	else if ((chanspec >= 0x1b24) && (chanspec <= 0x1b30)) {
+		valid = true;
+	}
+	/* channel 149 to 161 */
+	else if ((chanspec >= 0x1b95) && (chanspec <= 0x1ba1)) {
+		valid = true;
+	}
+	else {
+		valid = false;
+		WL_INFORM(("invalid P2P chanspec, chanspec = %s\n",
+			wf_chspec_ntoa_ex(chanspec, chanbuf)));
+	}
+
+	return valid;
+}
+
+static s32
+wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen)
+{
+	s32 ret = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = NULL;
+	wl_uint32_list_t *list = NULL;
+	chanspec_t chanspec = 0;
+
+	memset(buf, 0, buflen);
+
+	cfg = g_bcm_cfg;
+	list = (wl_uint32_list_t *)buf;
+	list->count = htod32(WL_NUMCHANSPECS);
+
+	/* Restrict channels to 2.4GHz, 20MHz BW, no SB. */
+	chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 |
+		WL_CHANSPEC_CTL_SB_NONE);
+	chanspec = wl_chspec_host_to_driver(chanspec);
+
+	ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+		sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+	}
+
+	return ret;
+}
+
+static s32
+wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen)
+{
+	u32 channel = 0;
+	s32 ret = BCME_ERROR;
+	s32 i = 0;
+	s32 j = 0;
+	struct bcm_cfg80211 *cfg = NULL;
+	wl_uint32_list_t *list = NULL;
+	chanspec_t chanspec = 0;
+
+	memset(buf, 0, buflen);
+
+	cfg = g_bcm_cfg;
+	list = (wl_uint32_list_t *)buf;
+	list->count = htod32(WL_NUMCHANSPECS);
+
+	/* Restrict channels to 5GHz, 20MHz BW, no SB. */
+	chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 |
+		WL_CHANSPEC_CTL_SB_NONE);
+	chanspec = wl_chspec_host_to_driver(chanspec);
+
+	ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+		sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Skip DFS and invalid P2P channels. */
+	for (i = 0, j = 0; i < dtoh32(list->count); i++) {
+		chanspec = (chanspec_t) dtoh32(list->element[i]);
+		channel = CHSPEC_CHANNEL(chanspec);
+
+		ret = wldev_iovar_getint(ndev, "per_chan_info", &channel);
+		if (ret < 0) {
+			WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret));
+			goto done;
+		}
+
+		if (CHANNEL_IS_RADAR(channel) ||
+			!(wl_cfg80211_valid_chanspec_p2p(chanspec))) {
+			continue;
+		} else {
+			list->element[j] = list->element[i];
+		}
+
+		j++;
+	}
+
+	list->count = j;
+
+done:
+	return ret;
+}
+
+static s32
+wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen,
+	int *channel)
+{
+	s32 ret = BCME_ERROR;
+	int chosen = 0;
+	int retry = 0;
+
+	/* Start auto channel selection scan. */
+	ret = wldev_ioctl(ndev, WLC_START_CHANNEL_SEL, buf, buflen, true);
+	if (ret < 0) {
+		WL_ERR(("can't start auto channel scan, error = %d\n", ret));
+		*channel = 0;
+		goto done;
+	}
+
+	/* Wait for auto channel selection, worst case possible delay is 5250ms. */
+	retry = CHAN_SEL_RETRY_COUNT;
+
+	while (retry--) {
+		OSL_SLEEP(CHAN_SEL_IOCTL_DELAY);
+
+		ret = wldev_ioctl(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen),
+			false);
+		if ((ret == 0) && (dtoh32(chosen) != 0)) {
+			*channel = (u16)(chosen & 0x00FF);
+			WL_INFORM(("selected channel = %d\n", *channel));
+			break;
+		}
+		WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n",
+			(CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen)));
+	}
+
+	if (retry <= 0)	{
+		WL_ERR(("failure, auto channel selection timed out\n"));
+		*channel = 0;
+		ret = BCME_ERROR;
+	}
+
+done:
+	return ret;
+}
+
+static s32
+wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev)
+{
+	u32 val = 0;
+	s32 ret = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	/* Clear scan stop driver status. */
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+
+	/* Enable mpc back to 1, irrespective of initial state. */
+	val = 1;
+
+	ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
+		sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+		&cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("set 'mpc' failed, error = %d\n", ret));
+	}
+
+	return ret;
+}
+
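+/* Runs ACS on the primary interface and writes three space-separated 4-digit
+ * frequencies into cmd: the 2.4GHz best, the 5GHz best and the overall best
+ * (currently the same as the 5GHz result). Returns the output string length.
+ */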
+s32
+wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len)
+{
+	int channel = 0;
+	s32 ret = BCME_ERROR;
+	u8 *buf = NULL;
+	char *pos = cmd;
+	struct bcm_cfg80211 *cfg = NULL;
+	struct net_device *ndev = NULL;
+
+	memset(cmd, 0, total_len);
+
+	buf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
+	if (buf == NULL) {
+		WL_ERR(("failed to allocate chanspec buffer\n"));
+		return -ENOMEM;
+	}
+
+	/*
+	 * Always use primary interface, irrespective of interface on which
+	 * command came.
+	 */
+	cfg = g_bcm_cfg;
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	/*
+	 * Make sure that FW and driver are in right state to do auto channel
+	 * selection scan.
+	 */
+	ret = wl_cfg80211_set_auto_channel_scan_state(ndev);
+	if (ret < 0) {
+		WL_ERR(("can't set auto channel scan state, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Best channel selection in 2.4GHz band. */
+	ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+	if (ret < 0) {
+		WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+		&channel);
+	if (ret < 0) {
+		WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	if (CHANNEL_IS_2G(channel)) {
+		channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+	} else {
+		WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel));
+		channel = 0;
+	}
+
+	sprintf(pos, "%04d ", channel);
+	pos += 5;
+
+	/* Best channel selection in 5GHz band. */
+	ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+	if (ret < 0) {
+		WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+		&channel);
+	if (ret < 0) {
+		WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	if (CHANNEL_IS_5G(channel)) {
+		channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+	} else {
+		WL_ERR(("invalid 5GHz channel, channel = %d\n", channel));
+		channel = 0;
+	}
+
+	sprintf(pos, "%04d ", channel);
+	pos += 5;
+
+	/* Set overall best channel same as 5GHz best channel. */
+	sprintf(pos, "%04d ", channel);
+	pos += 5;
+
+done:
+	if (NULL != buf) {
+		kfree(buf);
+	}
+
+	/* Restore FW and driver back to normal state. */
+	ret = wl_cfg80211_restore_auto_channel_scan_state(ndev);
+	if (ret < 0) {
+		WL_ERR(("can't restore auto channel scan state, error = %d\n", ret));
+	}
+
+	return (pos - cmd);
+}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
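+/* rfkill integration: register a "brcmfmac-wifi" rfkill device and record
+ * block/unblock requests in cfg->rf_blocked.
+ */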
+static const struct rfkill_ops wl_rfkill_ops = {
+	.set_block = wl_rfkill_set
+};
+
+static int wl_rfkill_set(void *data, bool blocked)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+	WL_DBG(("Enter \n"));
+	WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+
+	if (!cfg)
+		return -EINVAL;
+
+	cfg->rf_blocked = blocked;
+
+	return 0;
+}
+
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup)
+{
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	if (!cfg)
+		return -EINVAL;
+	if (setup) {
+		cfg->rfkill = rfkill_alloc("brcmfmac-wifi",
+			wl_cfg80211_get_parent_dev(),
+			RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg);
+
+		if (!cfg->rfkill) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		err = rfkill_register(cfg->rfkill);
+
+		if (err)
+			rfkill_destroy(cfg->rfkill);
+	} else {
+		if (!cfg->rfkill) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		rfkill_unregister(cfg->rfkill);
+		rfkill_destroy(cfg->rfkill);
+	}
+
+err_out:
+	return err;
+}
+
+#ifdef DEBUGFS_CFG80211
+/**
+ * Format: echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level
+ * turns on the SCAN and DBG logs.
+ * To turn off only SCAN: echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level
+ * To see the current debug level setting:
+ * cat /sys/kernel/debug/dhd/debug_level
+ */
+static ssize_t
+wl_debuglevel_write(struct file *file, const char __user *userbuf,
+	size_t count, loff_t *ppos)
+{
+	char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL];
+	char *params, *token, *colon;
+	uint i, tokens, log_on = 0;
+	memset(tbuf, 0, sizeof(tbuf));
+	memset(sublog, 0, sizeof(sublog));
+	if (copy_from_user(&tbuf, userbuf, min_t(size_t, sizeof(tbuf), count)))
+		return -EFAULT;
+
+	params = &tbuf[0];
+	colon = strchr(params, '\n');
+	if (colon != NULL)
+		*colon = '\0';
+	while ((token = strsep(&params, " ")) != NULL) {
+		memset(sublog, 0, sizeof(sublog));
+		if (token == NULL || !*token)
+			break;
+		if (*token == '\0')
+			continue;
+		colon = strchr(token, ':');
+		if (colon != NULL) {
+			*colon = ' ';
+		}
+		tokens = sscanf(token, "%s %u", sublog, &log_on);
+		if (colon != NULL)
+			*colon = ':';
+
+		if (tokens == 2) {
+				for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+					if (!strncmp(sublog, sublogname_map[i].sublogname,
+						strlen(sublogname_map[i].sublogname))) {
+						if (log_on)
+							wl_dbg_level |=
+							(sublogname_map[i].log_level);
+						else
+							wl_dbg_level &=
+							~(sublogname_map[i].log_level);
+					}
+				}
+		} else
+			WL_ERR(("%s: can't parse '%s' as a "
+			       "SUBMODULE:LEVEL (%d tokens)\n",
+			       tbuf, token, tokens));
+
+
+	}
+	return count;
+}
+
+static ssize_t
+wl_debuglevel_read(struct file *file, char __user *user_buf,
+	size_t count, loff_t *ppos)
+{
+	char *param;
+	char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)];
+	uint i;
+	memset(tbuf, 0, sizeof(tbuf));
+	param = &tbuf[0];
+	for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+		param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ",
+			sublogname_map[i].sublogname,
+			(wl_dbg_level & sublogname_map[i].log_level) ? 1 : 0);
+	}
+	*param = '\n';
+	return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0]));
+
+}
+static const struct file_operations fops_debuglevel = {
+	.open = NULL,
+	.write = wl_debuglevel_write,
+	.read = wl_debuglevel_read,
+	.owner = THIS_MODULE,
+	.llseek = NULL,
+};
+
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+	struct dentry *_dentry;
+	if (!cfg)
+		return -EINVAL;
+	cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (!cfg->debugfs || IS_ERR(cfg->debugfs)) {
+		if (cfg->debugfs == ERR_PTR(-ENODEV))
+			WL_ERR(("Debugfs is not enabled on this kernel\n"));
+		else
+			WL_ERR(("Can not create debugfs directory\n"));
+		cfg->debugfs = NULL;
+		goto exit;
+
+	}
+	_dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR,
+		cfg->debugfs, cfg, &fops_debuglevel);
+	if (!_dentry || IS_ERR(_dentry)) {
+		WL_ERR(("failed to create debug_level debug file\n"));
+		wl_free_debugfs(cfg);
+	}
+exit:
+	return err;
+}
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg)
+{
+	if (!cfg)
+		return -EINVAL;
+	if (cfg->debugfs)
+		debugfs_remove_recursive(cfg->debugfs);
+	cfg->debugfs = NULL;
+	return 0;
+}
+#endif /* DEBUGFS_CFG80211 */
+
+struct device *wl_cfg80211_get_parent_dev(void)
+{
+	return cfg80211_parent_dev;
+}
+
+void wl_cfg80211_set_parent_dev(void *dev)
+{
+	cfg80211_parent_dev = dev;
+}
+
+static void wl_cfg80211_clear_parent_dev(void)
+{
+	cfg80211_parent_dev = NULL;
+}
+
+void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL,
+		0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+	memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+}
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	if (((dev_role == NL80211_IFTYPE_AP) &&
+		!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
+		((dev_role == NL80211_IFTYPE_P2P_GO) &&
+		!(dhd->op_mode & DHD_FLAG_P2P_GO_MODE)))
+	{
+		WL_ERR(("device role select failed\n"));
+		return false;
+	}
+	return true;
+}
+
+int wl_cfg80211_do_driver_init(struct net_device *net)
+{
+	struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+
+	if (!cfg || !cfg->wdev)
+		return -EINVAL;
+
+	if (dhd_do_driver_init(cfg->wdev->netdev) < 0)
+		return -1;
+
+	return 0;
+}
+
+void wl_cfg80211_enable_trace(bool set, u32 level)
+{
+	if (set)
+		wl_dbg_level = level & WL_DBG_LEVEL;
+	else
+		wl_dbg_level |= (WL_DBG_LEVEL & level);
+}
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32
+wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+	/* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION
+	 * is passed with CMD_FRAME. This callback is supposed to cancel
+	 * the OFFCHANNEL Wait. Since we are already taking care of that
+	 *  with the tx_mgmt logic, do nothing here.
+	 */
+
+	return 0;
+}
+#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */
+
+#ifdef WL11U
+bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_INTERWORKING_ID))) {
+			return (bcm_tlv_t *)ie;
+	}
+	return NULL;
+}
+
+
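+/* Installs the 802.11u Interworking IE through the "ie" iovar, clearing any
+ * previously set IE first, and sets the "grat_arp" iovar on success.
+ */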
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+            uint8 ie_id, uint8 *data, uint8 data_len)
+{
+	s32 err = BCME_OK;
+	s32 buf_len;
+	s32 iecount;
+	ie_setbuf_t *ie_setbuf;
+
+	if (ie_id != DOT11_MNG_INTERWORKING_ID)
+		return BCME_UNSUPPORTED;
+
+	/* Validate the pktflag parameter */
+	if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+	            VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+	            VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
+	            VNDR_IE_CUSTOM_FLAG))) {
+		WL_ERR(("cfg80211 Add IE: Invalid packet flag 0x%x\n", pktflag));
+		return -1;
+	}
+
+	/* use the VNDR_IE_CUSTOM_FLAG for non-vendor IEs. Currently a fixed value */
+	pktflag = htod32(pktflag);
+
+	buf_len = sizeof(ie_setbuf_t) + data_len - 1;
+	ie_setbuf = (ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL);
+
+	if (!ie_setbuf) {
+		WL_ERR(("Error allocating buffer for IE\n"));
+		return -ENOMEM;
+	}
+
+	if (cfg->iw_ie_len == data_len && !memcmp(cfg->iw_ie, data, data_len)) {
+		WL_ERR(("Previous IW IE is equal to current IE\n"));
+		err = BCME_OK;
+		goto exit;
+	}
+
+	strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1);
+	ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	/* Buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&ie_setbuf->ie_buffer.iecount, &iecount, sizeof(int));
+	memcpy((void *)&ie_setbuf->ie_buffer.ie_list[0].pktflag, &pktflag, sizeof(uint32));
+
+	/* Now, add the IE to the buffer */
+	ie_setbuf->ie_buffer.ie_list[0].ie_data.id = ie_id;
+
+	/* if already set with previous values, delete it first */
+	if (cfg->iw_ie_len != 0) {
+		WL_DBG(("A different IW_IE was already set. Clear it first\n"));
+
+		ie_setbuf->ie_buffer.ie_list[0].ie_data.len = 0;
+
+		err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+		if (err != BCME_OK)
+			goto exit;
+	}
+
+	ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
+	memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len);
+
+	err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	if (err == BCME_OK) {
+		memcpy(cfg->iw_ie, data, data_len);
+		cfg->iw_ie_len = data_len;
+		cfg->wl11u = TRUE;
+
+		err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
+	}
+
+exit:
+	if (ie_setbuf)
+		kfree(ie_setbuf);
+	return err;
+}
+#endif /* WL11U */
+
+#ifdef WL_HOST_BAND_MGMT
+s32
+wl_cfg80211_set_band(struct net_device *ndev, int band)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	int ret = 0;
+	char ioctl_buf[50];
+
+	if ((band < WLC_BAND_AUTO) || (band > WLC_BAND_2G)) {
+		WL_ERR(("Invalid band\n"));
+		return -EINVAL;
+	}
+
+	if ((ret = wldev_iovar_setbuf(ndev, "roam_band", &band,
+		sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+		WL_ERR(("setting roam_band failed, code=%d\n", ret));
+		return ret;
+	}
+
+	WL_DBG(("Setting band to %d\n", band));
+	cfg->curr_band = band;
+
+	return 0;
+}
+#endif /* WL_HOST_BAND_MGMT */
+
+#if defined(DHCP_SCAN_SUPPRESS)
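+/* DHCP scan suppression: scanning is suppressed while DHCP is in progress and
+ * a timer is armed so the suppression is cleared even if the framework never
+ * asks for it again.
+ */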
+static void wl_cfg80211_scan_supp_timerfunc(ulong data)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+	WL_DBG(("Enter \n"));
+	schedule_work(&cfg->wlan_work);
+}
+
+int wl_cfg80211_scan_suppress(struct net_device *dev, int suppress)
+{
+	int ret = 0;
+	struct wireless_dev *wdev;
+	struct bcm_cfg80211 *cfg;
+	if (!dev || ((suppress != 0) && (suppress != 1))) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	wdev = ndev_to_wdev(dev);
+	if (!wdev) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
+	if (!cfg) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (suppress == cfg->scan_suppressed) {
+		WL_DBG(("No change in scan_suppress state. Ignoring cmd..\n"));
+		return 0;
+	}
+
+	if (timer_pending(&cfg->scan_supp_timer))
+		del_timer_sync(&cfg->scan_supp_timer);
+
+	if ((ret = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS,
+		&suppress, sizeof(int), true)) < 0) {
+		WL_ERR(("Scan suppress setting failed ret:%d \n", ret));
+	} else {
+		WL_DBG(("Scan suppress %s \n", suppress ? "Enabled" : "Disabled"));
+		cfg->scan_suppressed = suppress;
+	}
+
+	/* If scan_suppress is set, start a timer to monitor it (just in case) */
+	if (cfg->scan_suppressed) {
+		if (ret) {
+			WL_ERR(("Retry scan_suppress reset at a later time \n"));
+			mod_timer(&cfg->scan_supp_timer,
+				jiffies + msecs_to_jiffies(WL_SCAN_SUPPRESS_RETRY));
+		} else {
+			WL_DBG(("Start wlan_timer to clear of scan_suppress \n"));
+			mod_timer(&cfg->scan_supp_timer,
+				jiffies + msecs_to_jiffies(WL_SCAN_SUPPRESS_TIMEOUT));
+		}
+	}
+exit:
+	return ret;
+}
+#endif /* DHCP_SCAN_SUPPRESS */
+
+int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev)
+{
+	struct bcm_cfg80211 *cfg = NULL;
+	struct net_device *ndev = NULL;
+	unsigned long flags;
+	int clear_flag = 0;
+	int ret = 0;
+
+	WL_TRACE(("Enter\n"));
+
+	cfg = g_bcm_cfg;
+	if (!cfg)
+		return -EINVAL;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_CFG80211_P2P_DEV_IF
+	if (cfg->scan_request && cfg->scan_request->wdev == cfgdev) {
+#else
+	if (cfg->scan_request && cfg->scan_request->dev == cfgdev) {
+#endif
+		cfg80211_scan_done(cfg->scan_request, true);
+		cfg->scan_request = NULL;
+		clear_flag = 1;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	if (clear_flag)
+		wl_clr_drv_status(cfg, SCANNING, ndev);
+
+	return ret;
+}
+
+bool wl_cfg80211_is_vsdb_mode(void)
+{
+	return (g_bcm_cfg && g_bcm_cfg->vsdb_mode);
+}
+
+void* wl_cfg80211_get_dhdp()
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	return cfg->pub;
+}
+
+bool wl_cfg80211_is_p2p_active(void)
+{
+	return (g_bcm_cfg && g_bcm_cfg->p2p);
+}
+
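+/* Deferred work: re-enables PM_FAST on connected STA interfaces once the
+ * pm_enable timer fires, and clears any stale DHCP scan suppression.
+ */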
+static void wl_cfg80211_work_handler(struct work_struct * work)
+{
+	struct bcm_cfg80211 *cfg = NULL;
+	struct net_info *iter, *next;
+	s32 err = BCME_OK;
+	s32 pm = PM_FAST;
+
+	cfg = container_of(work, struct bcm_cfg80211, pm_enable_work.work);
+	WL_DBG(("Enter \n"));
+	if (cfg->pm_enable_work_on) {
+		cfg->pm_enable_work_on = false;
+		for_each_ndev(cfg, iter, next) {
+			if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
+				(wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS))
+				continue;
+			if (iter->ndev) {
+				if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM,
+					&pm, sizeof(pm), true)) != 0) {
+					if (err == -ENODEV)
+						WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+					else
+						WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
+				} else
+					wl_cfg80211_update_power_mode(iter->ndev);
+			}
+		}
+	}
+#if defined(DHCP_SCAN_SUPPRESS)
+	else if (cfg->scan_suppressed) {
+		/* There is a pending scan_suppress. Clear it */
+		WL_ERR(("Clean up from timer after %d msec\n", WL_SCAN_SUPPRESS_TIMEOUT));
+		wl_cfg80211_scan_suppress(bcmcfg_to_prmry_ndev(cfg), 0);
+	}
+#endif /* DHCP_SCAN_SUPPRESS */
+}
+
+u8
+wl_get_action_category(void *frame, u32 frame_len)
+{
+	u8 category;
+	u8 *ptr = (u8 *)frame;
+	if (frame == NULL)
+		return DOT11_ACTION_CAT_ERR_MASK;
+	if (frame_len < DOT11_ACTION_HDR_LEN)
+		return DOT11_ACTION_CAT_ERR_MASK;
+	category = ptr[DOT11_ACTION_CAT_OFF];
+	WL_INFORM(("Action Category: %d\n", category));
+	return category;
+}
+
+int
+wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action)
+{
+	u8 *ptr = (u8 *)frame;
+	if (frame == NULL || ret_action == NULL)
+		return BCME_ERROR;
+	if (frame_len < DOT11_ACTION_HDR_LEN)
+		return BCME_ERROR;
+	if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len))
+		return BCME_ERROR;
+	*ret_action = ptr[DOT11_ACTION_ACT_OFF];
+	WL_INFORM(("Public Action : %d\n", *ret_action));
+	return BCME_OK;
+}
+
+#ifdef WLFBT
+void
+wl_cfg80211_get_fbt_key(uint8 *key)
+{
+	memcpy(key, g_bcm_cfg->fbt_key, FBT_KEYLEN);
+}
+#endif /* WLFBT */
+
+static int
+wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const struct ether_addr *bssid)
+{
+	s32 err;
+	wl_event_msg_t e;
+
+	bzero(&e, sizeof(e));
+	e.event_type = cpu_to_be32(WLC_E_ROAM);
+	memcpy(&e.addr, bssid, ETHER_ADDR_LEN);
+	/* trigger the roam event handler */
+	err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL);
+
+	return err;
+}
+
+#ifdef WL_CFG80211_ACL
+static int
+wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+	const struct cfg80211_acl_data *acl)
+{
+	int i;
+	int ret = 0;
+	int macnum = 0;
+	int macmode = MACLIST_MODE_DISABLED;
+	struct maclist *list;
+
+	/* get the MAC filter mode */
+	if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) {
+		macmode = MACLIST_MODE_ALLOW;
+	} else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
+	acl->n_acl_entries) {
+		macmode = MACLIST_MODE_DENY;
+	}
+
+	/* if acl == NULL, macmode is still disabled.. */
+	if (macmode == MACLIST_MODE_DISABLED) {
+		if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
+			WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+		return ret;
+	}
+
+	macnum = acl->n_acl_entries;
+	if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+		WL_ERR(("%s : invalid number of MAC address entries %d\n",
+			__FUNCTION__, macnum));
+		return -1;
+	}
+
+	/* allocate memory for the MAC list */
+	list = (struct maclist*)kmalloc(sizeof(int) +
+		sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+	if (!list) {
+		WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__));
+		return -1;
+	}
+
+	/* prepare the MAC list */
+	list->count = htod32(macnum);
+	for (i = 0; i < macnum; i++) {
+		memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN);
+	}
+	/* set the list */
+	if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
+		WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+	kfree(list);
+
+	return ret;
+}
+#endif /* WL_CFG80211_ACL */
+
+#ifdef WL_NAN
+int
+wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, int cmd_len)
+{
+	return wl_cfgnan_cmd_handler(ndev, g_bcm_cfg, cmd, cmd_len);
+}
+#endif /* WL_NAN */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
new file mode 100644
index 0000000000000000000000000000000000000000..692b33a31d20443a9dcce248040070341084af67
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
@@ -0,0 +1,1046 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_cfg80211.h 491407 2014-07-16 09:23:04Z $
+ */
+
+/**
+ * Older Linux versions support the 'iw' interface, more recent ones the 'cfg80211' interface.
+ */
+
+#ifndef _wl_cfg80211_h_
+#define _wl_cfg80211_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+
+#include <wl_cfgp2p.h>
+
+struct wl_conf;
+struct wl_iface;
+struct bcm_cfg80211;
+struct wl_security;
+struct wl_ibss;
+
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#define WL_DBG_NONE	0
+#define WL_DBG_P2P_ACTION (1 << 5)
+#define WL_DBG_TRACE	(1 << 4)
+#define WL_DBG_SCAN 	(1 << 3)
+#define WL_DBG_DBG 	(1 << 2)
+#define WL_DBG_INFO	(1 << 1)
+#define WL_DBG_ERR	(1 << 0)
+
+/* 0 invalidates all debug messages; the default (0xFF) enables all levels */
+#define WL_DBG_LEVEL 0xFF
+
+#define CFG80211_ERROR_TEXT		"CFG80211-ERROR) "
+
+#if defined(DHD_DEBUG)
+#define	WL_ERR(args)									\
+do {										\
+	if (wl_dbg_level & WL_DBG_ERR) {				\
+			printf(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__);	\
+			printf args;						\
+		}								\
+} while (0)
+#else /* defined(DHD_DEBUG) */
+#define	WL_ERR(args)									\
+do {										\
+	if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) {				\
+			printf(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__);	\
+			printf args;						\
+		}								\
+} while (0)
+#endif /* defined(DHD_DEBUG) */
+
+#ifdef WL_INFORM
+#undef WL_INFORM
+#endif
+
+#define	WL_INFORM(args)									\
+do {										\
+	if (wl_dbg_level & WL_DBG_INFO) {				\
+			printf(KERN_INFO "CFG80211-INFO) %s : ", __func__);	\
+			printf args;						\
+		}								\
+} while (0)
+
+
+#ifdef WL_SCAN
+#undef WL_SCAN
+#endif
+#define	WL_SCAN(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_SCAN) {			\
+		printf(KERN_INFO "CFG80211-SCAN) %s :", __func__);	\
+		printf args;							\
+	}									\
+} while (0)
+#ifdef WL_TRACE
+#undef WL_TRACE
+#endif
+#define	WL_TRACE(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_TRACE) {			\
+		printf(KERN_INFO "CFG80211-TRACE) %s :", __func__);	\
+		printf args;							\
+	}									\
+} while (0)
+#ifdef WL_TRACE_HW4
+#undef WL_TRACE_HW4
+#endif
+#define	WL_TRACE_HW4			WL_TRACE
+#if (WL_DBG_LEVEL > 0)
+#define	WL_DBG(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_DBG) {			\
+		printf(KERN_DEBUG "CFG80211-DEBUG) %s :", __func__);	\
+		printf args;							\
+	}									\
+} while (0)
+#else				/* !(WL_DBG_LEVEL > 0) */
+#define	WL_DBG(args)
+#endif				/* (WL_DBG_LEVEL > 0) */
+#define WL_PNO(x)
+#define WL_SD(x)
+
+
+#define WL_SCAN_RETRY_MAX	3
+#define WL_NUM_PMKIDS_MAX	MAXPMKID
+#define WL_SCAN_BUF_MAX 	(1024 * 8)
+#define WL_TLV_INFO_MAX 	1500
+#define WL_SCAN_IE_LEN_MAX      2048
+#define WL_BSS_INFO_MAX		2048
+#define WL_ASSOC_INFO_MAX	512
+#define WL_IOCTL_LEN_MAX	2048
+#define WL_EXTRA_BUF_MAX	2048
+#define WL_SCAN_ERSULTS_LAST 	(WL_SCAN_RESULTS_NO_MEM+1)
+#define WL_AP_MAX		256
+#define WL_FILE_NAME_MAX	256
+#define WL_DWELL_TIME 		200
+#define WL_MED_DWELL_TIME       400
+#define WL_MIN_DWELL_TIME	100
+#define WL_LONG_DWELL_TIME 	1000
+#define IFACE_MAX_CNT 		2
+#define WL_SCAN_CONNECT_DWELL_TIME_MS 		200
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 		20
+#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 	320
+#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 	400
+#define WL_AF_TX_MAX_RETRY 	5
+
+#define WL_AF_SEARCH_TIME_MAX           450
+#define WL_AF_TX_EXTRA_TIME_MAX         200
+
+#define WL_SCAN_TIMER_INTERVAL_MS	10000 /* Scan timeout */
+#define WL_CHANNEL_SYNC_RETRY 	5
+#define WL_INVALID 		-1
+
+/* Bring down the SCB timeout to 20 secs from the 60 secs default */
+#ifndef WL_SCB_TIMEOUT
+#define WL_SCB_TIMEOUT 20
+#endif
+
+/* SCAN_SUPPRESS timer values in ms */
+#define WL_SCAN_SUPPRESS_TIMEOUT 31000 /* default Framework DHCP timeout is 30 sec */
+#define WL_SCAN_SUPPRESS_RETRY 3000
+
+#define WL_PM_ENABLE_TIMEOUT 10000
+
+#ifdef WLAIBSS
+/* Custom AIBSS beacon parameters */
+#define AIBSS_INITIAL_MIN_BCN_DUR	500
+#define AIBSS_MIN_BCN_DUR		5000
+#define AIBSS_BCN_FLOOD_DUR		5000
+#endif /* WLAIBSS */
+
+/* driver status */
+enum wl_status {
+	WL_STATUS_READY = 0,
+	WL_STATUS_SCANNING,
+	WL_STATUS_SCAN_ABORTING,
+	WL_STATUS_CONNECTING,
+	WL_STATUS_CONNECTED,
+	WL_STATUS_DISCONNECTING,
+	WL_STATUS_AP_CREATING,
+	WL_STATUS_AP_CREATED,
+	/* the whole action-frame sending procedure:
+	 * includes a) 'finding a common channel' for a public action request frame
+	 * and b) 'sending the af via the 'actframe' iovar'
+	 */
+	WL_STATUS_SENDING_ACT_FRM,
+	/* find a peer to go to a common channel before sending public action req frame */
+	WL_STATUS_FINDING_COMMON_CHANNEL,
+	/* waiting for the next af to sync timing with the supplicant.
+	 * it covers SENDING_ACT_FRM and WAITING_NEXT_ACT_FRM_LISTEN
+	 */
+	WL_STATUS_WAITING_NEXT_ACT_FRM,
+#ifdef WL_CFG80211_SYNC_GON
+	/* go to listen state to wait for next af after SENDING_ACT_FRM */
+	WL_STATUS_WAITING_NEXT_ACT_FRM_LISTEN,
+#endif /* WL_CFG80211_SYNC_GON */
+	/* set when the upper layer requests listen and the driver succeeds in
+	 * entering listen mode. if set, another scan request can abort the
+	 * current listen state
+	 */
+	WL_STATUS_REMAINING_ON_CHANNEL,
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	/* a fake listen state used to keep the current scan state.
+	 * it is set when the upper layer requests listen while a scan is running;
+	 * only an expiry timer runs, without entering the actual listen state.
+	 * if set, another scan request does not need to abort the scan.
+	 */
+	WL_STATUS_FAKE_REMAINING_ON_CHANNEL
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+};
+
+/* wi-fi mode */
+enum wl_mode {
+	WL_MODE_BSS,
+	WL_MODE_IBSS,
+	WL_MODE_AP
+};
+
+/* driver profile list */
+enum wl_prof_list {
+	WL_PROF_MODE,
+	WL_PROF_SSID,
+	WL_PROF_SEC,
+	WL_PROF_IBSS,
+	WL_PROF_BAND,
+	WL_PROF_CHAN,
+	WL_PROF_BSSID,
+	WL_PROF_ACT,
+	WL_PROF_BEACONINT,
+	WL_PROF_DTIMPERIOD
+};
+
+/* dongle escan state */
+enum wl_escan_state {
+    WL_ESCAN_STATE_IDLE,
+    WL_ESCAN_STATE_SCANING
+};
+/* fw downloading status */
+enum wl_fw_status {
+	WL_FW_LOADING_DONE,
+	WL_NVRAM_LOADING_DONE
+};
+
+enum wl_management_type {
+	WL_BEACON = 0x1,
+	WL_PROBE_RESP = 0x2,
+	WL_ASSOC_RESP = 0x4
+};
+
+enum wl_handler_del_type {
+	WL_HANDLER_NOTUSE,
+	WL_HANDLER_DEL,
+	WL_HANDLER_MAINTAIN,
+	WL_HANDLER_PEND
+};
+
+/* beacon / probe_response */
+struct beacon_proberesp {
+	__le64 timestamp;
+	__le16 beacon_int;
+	__le16 capab_info;
+	u8 variable[0];
+} __attribute__ ((packed));
+
+/* driver configuration */
+struct wl_conf {
+	u32 frag_threshold;
+	u32 rts_threshold;
+	u32 retry_short;
+	u32 retry_long;
+	s32 tx_power;
+	struct ieee80211_channel channel;
+};
+
+typedef s32(*EVENT_HANDLER) (struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+                            const wl_event_msg_t *e, void *data);
+
+/* bss inform structure for cfg80211 interface */
+struct wl_cfg80211_bss_info {
+	u16 band;
+	u16 channel;
+	s16 rssi;
+	u16 frame_len;
+	u8 frame_buf[1];
+};
+
+/* basic structure of scan request */
+struct wl_scan_req {
+	struct wlc_ssid ssid;
+};
+
+/* basic structure of information element */
+struct wl_ie {
+	u16 offset;
+	u8 buf[WL_TLV_INFO_MAX];
+};
+
+/* event queue for cfg80211 main event */
+struct wl_event_q {
+	struct list_head eq_list;
+	u32 etype;
+	wl_event_msg_t emsg;
+	s8 edata[1];
+};
+
+/* security information with currently associated ap */
+struct wl_security {
+	u32 wpa_versions;
+	u32 auth_type;
+	u32 cipher_pairwise;
+	u32 cipher_group;
+	u32 wpa_auth;
+	u32 auth_assoc_res_status;
+};
+
+/* ibss information for currently joined ibss network */
+struct wl_ibss {
+	u8 beacon_interval;	/* in millisecond */
+	u8 atim;		/* in millisecond */
+	s8 join_only;
+	u8 band;
+	u8 channel;
+};
+
+/* cfg driver profile */
+struct wl_profile {
+	u32 mode;
+	s32 band;
+	u32 channel;
+	struct wlc_ssid ssid;
+	struct wl_security sec;
+	struct wl_ibss ibss;
+	u8 bssid[ETHER_ADDR_LEN];
+	u16 beacon_interval;
+	u8 dtim_period;
+	bool active;
+};
+
+struct net_info {
+	struct net_device *ndev;
+	struct wireless_dev *wdev;
+	struct wl_profile profile;
+	s32 mode;
+	s32 roam_off;
+	unsigned long sme_state;
+	bool pm_restore;
+	bool pm_block;
+	s32 pm;
+	struct list_head list; /* list of all net_info structure */
+};
+
+/* association inform */
+#define MAX_REQ_LINE 1024
+struct wl_connect_info {
+	u8 req_ie[MAX_REQ_LINE];
+	s32 req_ie_len;
+	u8 resp_ie[MAX_REQ_LINE];
+	s32 resp_ie_len;
+};
+
+/* firmware /nvram downloading controller */
+struct wl_fw_ctrl {
+	const struct firmware *fw_entry;
+	unsigned long status;
+	u32 ptr;
+	s8 fw_name[WL_FILE_NAME_MAX];
+	s8 nvram_name[WL_FILE_NAME_MAX];
+};
+
+/* assoc ie length */
+struct wl_assoc_ielen {
+	u32 req_len;
+	u32 resp_len;
+};
+
+/* wpa2 pmk list */
+struct wl_pmk_list {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID - 1];
+};
+
+
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+struct escan_info {
+	u32 escan_state;
+#if defined(STATIC_WL_PRIV_STRUCT)
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+#error STATIC_WL_PRIV_STRUCT should be used with CONFIG_DHD_USE_STATIC_BUF
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	u8 *escan_buf;
+#else
+	u8 escan_buf[ESCAN_BUF_SIZE];
+#endif /* STATIC_WL_PRIV_STRUCT */
+	struct wiphy *wiphy;
+	struct net_device *ndev;
+};
+
+struct ap_info {
+/* Structure to hold WPS, WPA IEs for an AP */
+	u8   probe_res_ie[VNDR_IES_MAX_BUF_LEN];
+	u8   beacon_ie[VNDR_IES_MAX_BUF_LEN];
+	u8   assoc_res_ie[VNDR_IES_MAX_BUF_LEN];
+	u32 probe_res_ie_len;
+	u32 beacon_ie_len;
+	u32 assoc_res_ie_len;
+	u8 *wpa_ie;
+	u8 *rsn_ie;
+	u8 *wps_ie;
+	bool security_mode;
+};
+
+struct sta_info {
+	/* Structure to hold WPS IE for a STA */
+	u8  probe_req_ie[VNDR_IES_BUF_LEN];
+	u8  assoc_req_ie[VNDR_IES_BUF_LEN];
+	u32 probe_req_ie_len;
+	u32 assoc_req_ie_len;
+};
+
+struct afx_hdl {
+	wl_af_params_t *pending_tx_act_frm;
+	struct ether_addr	tx_dst_addr;
+	struct net_device *dev;
+	struct work_struct work;
+	u32 bssidx;
+	u32 retry;
+	s32 peer_chan;
+	s32 peer_listen_chan; /* search channel: configured by upper layer */
+	s32 my_listen_chan;	/* listen channel: extracted from a probe req or GON req */
+	bool is_listen;
+	bool ack_recv;
+	bool is_active;
+};
+
+struct parsed_ies {
+	wpa_ie_fixed_t *wps_ie;
+	u32 wps_ie_len;
+	wpa_ie_fixed_t *wpa_ie;
+	u32 wpa_ie_len;
+	bcm_tlv_t *wpa2_ie;
+	u32 wpa2_ie_len;
+};
+
+#ifdef WL_SDO
+/* Service discovery */
+typedef struct {
+	uint8	transaction_id; /* Transaction ID */
+	uint8   protocol;       /* Service protocol type */
+	uint16  query_len;      /* Length of query */
+	uint16  response_len;   /* Length of response */
+	uint8   qrbuf[1];
+} wl_sd_qr_t;
+
+typedef struct {
+	uint16	period;                 /* extended listen period */
+	uint16	interval;               /* extended listen interval */
+} wl_sd_listen_t;
+
+#define WL_SD_STATE_IDLE 0x0000
+#define WL_SD_SEARCH_SVC 0x0001
+#define WL_SD_ADV_SVC    0x0002
+
+enum wl_dd_state {
+    WL_DD_STATE_IDLE,
+    WL_DD_STATE_SEARCH,
+    WL_DD_STATE_LISTEN
+};
+
+#define MAX_SDO_PROTO_STR_LEN 20
+typedef struct wl_sdo_proto {
+	char str[MAX_SDO_PROTO_STR_LEN];
+	u32 val;
+} wl_sdo_proto_t;
+
+typedef struct sd_offload {
+	u32 sd_state;
+	enum wl_dd_state dd_state;
+	wl_sd_listen_t sd_listen;
+} sd_offload_t;
+
+typedef struct sdo_event {
+	u8 addr[ETH_ALEN];
+	uint16	freq;        /* channel Freq */
+	uint8	count;       /* Tlv count  */
+	uint16	update_ind;
+} sdo_event_t;
+#endif /* WL_SDO */
+
+#ifdef WL11U
+/* Max length of Interworking element */
+#define IW_IES_MAX_BUF_LEN 		9
+#endif
+#ifdef WLFBT
+#define FBT_KEYLEN		32
+#endif
+#define MAX_EVENT_BUF_NUM 16
+typedef struct wl_eventmsg_buf {
+    u16 num;
+    struct {
+		u16 type;
+		bool set;
+	} event [MAX_EVENT_BUF_NUM];
+} wl_eventmsg_buf_t;
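+
+/*
+ * Note: wl_eventmsg_buf_t batches up to MAX_EVENT_BUF_NUM event enable/disable
+ * requests; they are queued with wl_cfg80211_add_to_eventbuffer() and pushed
+ * to the firmware in one go with wl_cfg80211_apply_eventbuffer() (see the
+ * prototypes further below).
+ */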
+
+typedef struct wl_if_event_info {
+	bool valid;
+	int ifidx;
+	int bssidx;
+	uint8 mac[ETHER_ADDR_LEN];
+	char name[IFNAMSIZ+1];
+} wl_if_event_info;
+
+/* private data of cfg80211 interface */
+struct bcm_cfg80211 {
+	struct wireless_dev *wdev;	/* representing cfg cfg80211 device */
+
+	struct wireless_dev *p2p_wdev;	/* representing cfg cfg80211 device for P2P */
+	struct net_device *p2p_net;    /* reference to p2p0 interface */
+
+	struct wl_conf *conf;
+	struct cfg80211_scan_request *scan_request;	/* scan request object */
+	EVENT_HANDLER evt_handler[WLC_E_LAST];
+	struct list_head eq_list;	/* used for event queue */
+	struct list_head net_list;     /* used for struct net_info */
+	spinlock_t eq_lock;	/* for event queue synchronization */
+	spinlock_t cfgdrv_lock;	/* to protect scan status (and others if needed) */
+	struct completion act_frm_scan;
+	struct completion iface_disable;
+	struct completion wait_next_af;
+	struct mutex usr_sync;	/* mainly for up/down synchronization */
+	struct wl_scan_results *bss_list;
+	struct wl_scan_results *scan_results;
+
+	/* scan request object for internal purpose */
+	struct wl_scan_req *scan_req_int;
+	/* information element object for internal purpose */
+#if defined(STATIC_WL_PRIV_STRUCT)
+	struct wl_ie *ie;
+#else
+	struct wl_ie ie;
+#endif
+
+	/* association information container */
+#if defined(STATIC_WL_PRIV_STRUCT)
+	struct wl_connect_info *conn_info;
+#else
+	struct wl_connect_info conn_info;
+#endif
+#ifdef DEBUGFS_CFG80211
+	struct dentry		*debugfs;
+#endif /* DEBUGFS_CFG80211 */
+	struct wl_pmk_list *pmk_list;	/* wpa2 pmk list */
+	tsk_ctl_t event_tsk;  		/* task of main event handler thread */
+	void *pub;
+	u32 iface_cnt;
+	u32 channel;		/* current channel */
+	u32 af_sent_channel;	/* channel on which the action frame was sent */
+	/* next af subtype used to cancel the remaining dwell time in the rx process */
+	u8 next_af_subtype;
+#ifdef WL_CFG80211_SYNC_GON
+	ulong af_tx_sent_jiffies;
+#endif /* WL_CFG80211_SYNC_GON */
+	struct escan_info escan_info;   /* escan information */
+	bool active_scan;	/* current scan mode */
+	bool ibss_starter;	/* indicates this sta is ibss starter */
+	bool link_up;		/* link/connection up flag */
+
+	/* indicates whether the chip supports power save mode */
+	bool pwr_save;
+	bool roam_on;		/* on/off switch for self-roaming */
+	bool scan_tried;	/* indicates if first scan attempted */
+#if defined(BCMSDIO) || defined(BCMPCIE)
+	bool wlfc_on;
+#endif 
+	bool vsdb_mode;
+	bool roamoff_on_concurrent;
+	u8 *ioctl_buf;		/* ioctl buffer */
+	struct mutex ioctl_buf_sync;
+	u8 *escan_ioctl_buf;
+	u8 *extra_buf;	/* mainly to grab assoc information */
+	struct dentry *debugfsdir;
+	struct rfkill *rfkill;
+	bool rf_blocked;
+	struct ieee80211_channel remain_on_chan;
+	enum nl80211_channel_type remain_on_chan_type;
+	u64 send_action_id;
+	u64 last_roc_id;
+	wait_queue_head_t netif_change_event;
+	wl_if_event_info if_event_info;
+	struct completion send_af_done;
+	struct afx_hdl *afx_hdl;
+	struct ap_info *ap_info;
+	struct sta_info *sta_info;
+	struct p2p_info *p2p;
+	bool p2p_supported;
+	void *btcoex_info;
+	struct timer_list scan_timeout;   /* Timer to catch scan event timeout */
+	s32(*state_notifier) (struct bcm_cfg80211 *cfg,
+		struct net_info *_net_info, enum wl_status state, bool set);
+	unsigned long interrested_state;
+	wlc_ssid_t hostapd_ssid;
+#ifdef WL_SDO
+	sd_offload_t *sdo;
+#endif
+#ifdef WL11U
+	bool wl11u;
+	u8 iw_ie[IW_IES_MAX_BUF_LEN];
+	u32 iw_ie_len;
+#endif /* WL11U */
+	bool sched_scan_running;	/* scheduled scan req status */
+#ifdef WL_SCHED_SCAN
+	struct cfg80211_sched_scan_request *sched_scan_req;	/* scheduled scan req */
+#endif /* WL_SCHED_SCAN */
+#ifdef WL_HOST_BAND_MGMT
+	u8 curr_band;
+#endif /* WL_HOST_BAND_MGMT */
+	bool scan_suppressed;
+	struct timer_list scan_supp_timer;
+	struct work_struct wlan_work;
+	struct mutex event_sync;	/* mainly for up/down synchronization */
+	bool disable_roam_event;
+	bool pm_enable_work_on;
+	struct delayed_work pm_enable_work;
+	vndr_ie_setbuf_t *ibss_vsie;	/* keep the VSIE for IBSS */
+	int ibss_vsie_len;
+#ifdef WLAIBSS
+	u32 aibss_txfail_pid;
+	u32 aibss_txfail_seq;
+#endif /* WLAIBSS */
+	u32 rmc_event_pid;
+	u32 rmc_event_seq;
+#ifdef WLAIBSS_MCHAN
+	struct ether_addr ibss_if_addr;
+	bcm_struct_cfgdev *ibss_cfgdev; /* For AIBSS */
+#endif /* WLAIBSS_MCHAN */
+	bcm_struct_cfgdev *bss_cfgdev;  /* For DUAL STA/STA+AP */
+	s32 cfgdev_bssidx;
+	bool bss_pending_op;		/* indicates whether there is a pending IF operation */
+#ifdef WLFBT
+	uint8 fbt_key[FBT_KEYLEN];
+#endif
+	bool roam_offload;
+	bool nan_running;
+};
+
+
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
+{
+	return bss = bss ?
+		(struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+}
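+
+/*
+ * Note: next_bss() walks the packed scan-results buffer by advancing the
+ * current pointer by each entry's 'length' field (or returns the first entry
+ * when bss is NULL); it is intended to be used through the for_each_bss()
+ * macro defined further below.
+ */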
+static inline s32
+wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct wireless_dev * wdev, s32 mode, bool pm_block)
+{
+	struct net_info *_net_info;
+	s32 err = 0;
+	if (cfg->iface_cnt == IFACE_MAX_CNT)
+		return -ENOMEM;
+	_net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL);
+	if (!_net_info)
+		err = -ENOMEM;
+	else {
+		_net_info->mode = mode;
+		_net_info->ndev = ndev;
+		_net_info->wdev = wdev;
+		_net_info->pm_restore = 0;
+		_net_info->pm = 0;
+		_net_info->pm_block = pm_block;
+		_net_info->roam_off = WL_INVALID;
+		cfg->iface_cnt++;
+		list_add(&_net_info->list, &cfg->net_list);
+	}
+	return err;
+}
+static inline void
+wl_dealloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			list_del(&_net_info->list);
+			cfg->iface_cnt--;
+			kfree(_net_info);
+		}
+	}
+
+}
+static inline void
+wl_delete_all_netinfo(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		list_del(&_net_info->list);
+			if (_net_info->wdev)
+				kfree(_net_info->wdev);
+			kfree(_net_info);
+	}
+	cfg->iface_cnt = 0;
+}
+static inline u32
+wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status)
+
+{
+	struct net_info *_net_info, *next;
+	u32 cnt = 0;
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (_net_info->ndev &&
+			test_bit(status, &_net_info->sme_state))
+			cnt++;
+	}
+	return cnt;
+}
+static inline void
+wl_set_status_all(struct bcm_cfg80211 *cfg, s32 status, u32 op)
+{
+	struct net_info *_net_info, *next;
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		switch (op) {
+			case 1:
+				return; /* set all status is not allowed */
+			case 2:
+				clear_bit(status, &_net_info->sme_state);
+				if (cfg->state_notifier &&
+					test_bit(status, &(cfg->interrested_state)))
+					cfg->state_notifier(cfg, _net_info, status, false);
+				break;
+			case 4:
+				return; /* change all status is not allowed */
+			default:
+				return; /* unknown operation */
+		}
+	}
+}
+static inline void
+wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
+	struct net_device *ndev, u32 op)
+{
+
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			switch (op) {
+				case 1:
+					set_bit(status, &_net_info->sme_state);
+					if (cfg->state_notifier &&
+						test_bit(status, &(cfg->interrested_state)))
+						cfg->state_notifier(cfg, _net_info, status, true);
+					break;
+				case 2:
+					clear_bit(status, &_net_info->sme_state);
+					if (cfg->state_notifier &&
+						test_bit(status, &(cfg->interrested_state)))
+						cfg->state_notifier(cfg, _net_info, status, false);
+					break;
+				case 4:
+					change_bit(status, &_net_info->sme_state);
+					break;
+			}
+		}
+
+	}
+
+}
+
+static inline u32
+wl_get_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
+	struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+				if (ndev && (_net_info->ndev == ndev))
+					return test_bit(status, &_net_info->sme_state);
+	}
+	return 0;
+}
+
+static inline s32
+wl_get_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+				if (ndev && (_net_info->ndev == ndev))
+					return _net_info->mode;
+	}
+	return -1;
+}
+
+
+static inline void
+wl_set_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 mode)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+				if (ndev && (_net_info->ndev == ndev))
+					_net_info->mode = mode;
+	}
+}
+static inline struct wl_profile *
+wl_get_profile_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+				if (ndev && (_net_info->ndev == ndev))
+					return &_net_info->profile;
+	}
+	return NULL;
+}
+static inline struct net_info *
+wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+				if (ndev && (_net_info->ndev == ndev))
+					return _net_info;
+	}
+	return NULL;
+}
+#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy)
+#define bcmcfg_to_prmry_ndev(cfg) (cfg->wdev->netdev)
+#define bcmcfg_to_prmry_wdev(cfg) (cfg->wdev)
+#define bcmcfg_to_p2p_wdev(cfg) (cfg->p2p_wdev)
+#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr))
+#define ndev_to_wdev(ndev) (ndev->ieee80211_ptr)
+#define wdev_to_ndev(wdev) (wdev->netdev)
+
+#if defined(WL_ENABLE_P2P_IF)
+#define ndev_to_wlc_ndev(ndev, cfg)	((ndev == cfg->p2p_net) ? \
+	bcmcfg_to_prmry_ndev(cfg) : ndev)
+#else
+#define ndev_to_wlc_ndev(ndev, cfg)	(ndev)
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define wdev_to_wlc_ndev(wdev, cfg)	\
+	((wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) ? \
+	bcmcfg_to_prmry_ndev(cfg) : wdev_to_ndev(wdev))
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	wdev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_wdev(cfg)
+#elif defined(WL_ENABLE_P2P_IF)
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	ndev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_ndev(cfg)
+#else
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	(cfgdev)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) (cfgdev)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define ndev_to_cfgdev(ndev)	ndev_to_wdev(ndev)
+#define cfgdev_to_ndev(cfgdev)  cfgdev ? (cfgdev->netdev) : NULL
+#define discover_cfgdev(cfgdev, cfg) (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE)
+#else
+#define ndev_to_cfgdev(ndev)	(ndev)
+#define cfgdev_to_ndev(cfgdev)	(cfgdev)
+#define discover_cfgdev(cfgdev, cfg) (cfgdev == cfg->p2p_net)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define scan_req_match(cfg)	(((cfg) && (cfg->scan_request) && \
+	(cfg->scan_request->wdev == cfg->p2p_wdev)) ? true : false)
+#elif defined(WL_ENABLE_P2P_IF)
+#define scan_req_match(cfg)	(((cfg) && (cfg->scan_request) && \
+	(cfg->scan_request->dev == cfg->p2p_net)) ? true : false)
+#else
+#define scan_req_match(cfg)	(((cfg) && p2p_is_on(cfg) && p2p_scan(cfg)) ? \
+	true : false)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#define wl_to_sr(w) (w->scan_req_int)
+#if defined(STATIC_WL_PRIV_STRUCT)
+#define wl_to_ie(w) (w->ie)
+#define wl_to_conn(w) (w->conn_info)
+#else
+#define wl_to_ie(w) (&w->ie)
+#define wl_to_conn(w) (&w->conn_info)
+#endif
+#define wiphy_from_scan(w) (w->escan_info.wiphy)
+#define wl_get_drv_status_all(cfg, stat) \
+	(wl_get_status_all(cfg, WL_STATUS_ ## stat))
+#define wl_get_drv_status(cfg, stat, ndev)  \
+	(wl_get_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev))
+#define wl_set_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 1))
+#define wl_clr_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 2))
+#define wl_clr_drv_status_all(cfg, stat)  \
+	(wl_set_status_all(cfg, WL_STATUS_ ## stat, 2))
+#define wl_chg_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 4))
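+
+/* Note: the numeric 'op' argument used above maps to 1 = set_bit, 2 = clear_bit,
+ * 4 = change_bit in wl_set_status_by_netdev()/wl_set_status_all().
+ */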
+
+#define for_each_bss(list, bss, __i)	\
+	for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
+
+#define for_each_ndev(cfg, iter, next) \
+	list_for_each_entry_safe(iter, next, &cfg->net_list, list)
+
+
+/* In case of WPS from wpa_supplicant, the pairwise suite and group suite are 0.
+ * In addition, wpa_version is WPA_VERSION_1
+ */
+#define is_wps_conn(_sme) \
+	((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \
+	 (!_sme->crypto.n_ciphers_pairwise) && \
+	 (!_sme->crypto.cipher_group))
+extern s32 wl_cfg80211_attach(struct net_device *ndev, void *context);
+extern s32 wl_cfg80211_attach_post(struct net_device *ndev);
+extern void wl_cfg80211_detach(void *para);
+
+extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
+            void *data);
+void wl_cfg80211_set_parent_dev(void *dev);
+struct device *wl_cfg80211_get_parent_dev(void);
+
+extern s32 wl_cfg80211_up(void *para);
+extern s32 wl_cfg80211_down(void *para);
+extern s32 wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern s32 wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern s32 wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx);
+extern int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev);
+extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev);
+extern int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev);
+extern bool wl_cfg80211_is_vsdb_mode(void);
+extern void* wl_cfg80211_get_dhdp(void);
+extern bool wl_cfg80211_is_p2p_active(void);
+extern void wl_cfg80211_dbg_level(u32 level);
+extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+	enum wl_management_type type);
+extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
+
+/* btcoex functions */
+void* wl_cfg80211_btcoex_init(struct net_device *ndev);
+void wl_cfg80211_btcoex_deinit(void);
+
+#ifdef WL_SDO
+extern s32 wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_sd_offload(struct net_device *net, char *cmd, char* buf, int len);
+extern s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg);
+
+#endif
+
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+#define CHANSPEC_BUF_SIZE	1024
+#define CHAN_SEL_IOCTL_DELAY	300
+#define CHAN_SEL_RETRY_COUNT	15
+#define CHANNEL_IS_RADAR(channel)	(((channel & WL_CHAN_RADAR) || \
+	(channel & WL_CHAN_PASSIVE)) ? true : false)
+#define CHANNEL_IS_2G(channel)	(((channel >= 1) && (channel <= 14)) ? \
+	true : false)
+#define CHANNEL_IS_5G(channel)	(((channel >= 36) && (channel <= 165)) ? \
+	true : false)
+extern s32 wl_cfg80211_get_best_channels(struct net_device *dev, char* command,
+	int total_len);
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+extern int wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n);
+extern int wl_cfg80211_hex_str_to_bin(unsigned char *data, int dlen, char *str);
+extern int wl_cfg80211_hang(struct net_device *dev, u16 reason);
+extern s32 wl_mode_to_nl80211_iftype(s32 mode);
+int wl_cfg80211_do_driver_init(struct net_device *net);
+void wl_cfg80211_enable_trace(bool set, u32 level);
+extern s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
+extern s32 wl_cfg80211_if_is_group_owner(void);
+extern  chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
+extern chanspec_t wl_ch_host_to_driver(u16 channel);
+extern s32 wl_set_tx_power(struct net_device *dev,
+	enum nl80211_tx_power_setting type, s32 dbm);
+extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm);
+extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
+extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+#ifdef WL_HOST_BAND_MGMT
+extern s32 wl_cfg80211_set_band(struct net_device *ndev, int band);
+#endif /* WL_HOST_BAND_MGMT */
+#if defined(DHCP_SCAN_SUPPRESS)
+extern int wl_cfg80211_scan_suppress(struct net_device *dev, int suppress);
+#endif /* DHCP_SCAN_SUPPRESS */
+extern void wl_cfg80211_add_to_eventbuffer(wl_eventmsg_buf_t *ev, u16 event, bool set);
+extern s32 wl_cfg80211_apply_eventbuffer(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev);
+extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern void wl_cfg80211_update_power_mode(struct net_device *dev);
+#define SCAN_BUF_CNT	2
+#define SCAN_BUF_NEXT	1
+#define WL_SCANTYPE_LEGACY	0x1
+#define WL_SCANTYPE_P2P		0x2
+#define wl_escan_set_sync_id(a, b) ((a) = htod16(0x1234))
+#define wl_escan_set_type(a, b)
+#define wl_escan_get_buf(a, b) ((wl_scan_results_t *) (a)->escan_info.escan_buf)
+#define wl_escan_check_sync_id(a, b, c) 0
+#define wl_escan_print_sync_id(a, b, c)
+#define wl_escan_increment_sync_id(a, b)
+#define wl_escan_init_sync_id(a)
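+/*
+ * Note: in this build the escan sync id is a fixed value (0x1234) and the
+ * check/print/increment/init helpers above compile to no-ops, so sync-id
+ * mismatches are never reported.
+ */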
+extern void wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len);
+extern s32 wl_cfg80211_ibss_vsie_delete(struct net_device *dev);
+#ifdef WLAIBSS
+extern void wl_cfg80211_set_txfail_pid(int pid);
+#endif /* WLAIBSS */
+extern void wl_cfg80211_set_rmc_pid(int pid);
+
+#ifdef WLFBT
+extern void wl_cfg80211_get_fbt_key(uint8 *key);
+#endif
+
+/* Action frame specific functions */
+extern u8 wl_get_action_category(void *frame, u32 frame_len);
+extern int wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action);
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+struct net_device *wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef WL_SUPPORT_ACS
+#define ACS_MSRMNT_DELAY 1000 /* dump_obss delay in ms */
+#define IOCTL_RETRY_COUNT 5
+#define CHAN_NOISE_DUMMY -80
+#define OBSS_TOKEN_IDX 15
+#define IBSS_TOKEN_IDX 15
+#define TX_TOKEN_IDX 14
+#define CTG_TOKEN_IDX 13
+#define PKT_TOKEN_IDX 15
+#define IDLE_TOKEN_IDX 12
+#endif /* WL_SUPPORT_ACS */
+
+extern int wl_cfg80211_get_ioctl_version(void);
+extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, bool enable);
+
+#ifdef WL_NAN
+extern int wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd,
+	int cmd_len);
+#endif /* WL_NAN */
+
+#endif				/* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c b/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c
new file mode 100644
index 0000000000000000000000000000000000000000..c8a16ce551feba64950880743a27bb9a46b79e0b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c
@@ -0,0 +1,539 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_cfg_btcoex.c 467328 2014-04-03 01:23:40Z $
+ */
+
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+struct btcoex_info {
+	struct timer_list timer;
+	u32 timer_ms;
+	u32 timer_on;
+	u32 ts_dhcp_start;	/* ms timestamp to record time stats */
+	u32 ts_dhcp_ok;		/* ms timestamp to record time stats */
+	bool dhcp_done;	/* flag, indicates that the host finished
+					 * dhcp before t1/t2 expiration
+					 */
+	s32 bt_state;
+	struct work_struct work;
+	struct net_device *dev;
+};
+
+static struct btcoex_info *btcoex_info_loc = NULL;
+
+/* TODO: clean up the BT-Coex code; it still has some legacy ioctl/iovar functions */
+
+/* use New SCO/eSCO smart YG suppression */
+#define BT_DHCP_eSCO_FIX
+/* this flag boosts wifi pkt priority to max; caution: not fair to SCO */
+#define BT_DHCP_USE_FLAGS
+/* T1: start SCO/eSCO priority suppression */
+#define BT_DHCP_OPPR_WIN_TIME	2500
+/* T2: turn off SCO/eSCO suppression (timeout) */
+#define BT_DHCP_FLAG_FORCE_TIME 5500
+
+enum wl_cfg80211_btcoex_status {
+	BT_DHCP_IDLE,
+	BT_DHCP_START,
+	BT_DHCP_OPPR_WIN,
+	BT_DHCP_FLAG_FORCE_TIMEOUT
+};
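+
+/*
+ * State flow (see wl_cfg80211_bt_handler() below):
+ *   BT_DHCP_START -> BT_DHCP_OPPR_WIN (T1 = BT_DHCP_OPPR_WIN_TIME ms)
+ *     -> BT_DHCP_FLAG_FORCE_TIMEOUT (T2 = BT_DHCP_FLAG_FORCE_TIME ms) -> BT_DHCP_IDLE
+ * The machine drops back to BT_DHCP_IDLE early if DHCP completes before a
+ * timer expires.
+ */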
+
+/*
+ * Get a named driver variable as a uint register value and return an error
+ * indication. Calling example: dev_wlc_intvar_get_reg(dev, "btc_params", 66, &reg_value)
+ */
+static int
+dev_wlc_intvar_get_reg(struct net_device *dev, char *name,
+	uint reg, int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	bcm_mkiovar(name, (char *)(&reg), sizeof(reg),
+		(char *)(&var), sizeof(var.buf));
+	error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false);
+
+	*retval = dtoh32(var.val);
+	return (error);
+}
+
+static int
+dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	char ioctlbuf_local[1024];
+#else
+	static char ioctlbuf_local[1024];
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+	bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));
+
+	return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local), true));
+}
+/*
+ * Set a named driver variable's uint register value and return an error
+ * indication. Calling example: dev_wlc_intvar_set_reg(dev, "btc_params", 66, value)
+ */
+static int
+dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val)
+{
+	char reg_addr[8];
+
+	memset(reg_addr, 0, sizeof(reg_addr));
+	memcpy((char *)&reg_addr[0], (char *)addr, 4);
+	memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+	return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+}
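+
+/*
+ * Illustrative call pattern (as used by the callers below): the 8-byte payload
+ * written to "btc_params" is <4-byte register address><4-byte value>, e.g.
+ *     uint32 regaddr = 50;
+ *     dev_wlc_intvar_set_reg(dev, "btc_params", (char *)&regaddr, (char *)&saved_reg50);
+ */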
+
+static bool btcoex_is_sco_active(struct net_device *dev)
+{
+	int ioc_res = 0;
+	bool res = FALSE;
+	int sco_id_cnt = 0;
+	int param27;
+	int i;
+
+	for (i = 0; i < 12; i++) {
+
+		ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+		WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27));
+
+		if (ioc_res < 0) {
+			WL_ERR(("ioc read btc params error\n"));
+			break;
+		}
+
+		if ((param27 & 0x6) == 2) { /* count both sco & esco  */
+			sco_id_cnt++;
+		}
+
+		if (sco_id_cnt > 2) {
+			WL_TRACE(("sco/esco detected, pkt id_cnt:%d  samples:%d\n",
+				sco_id_cnt, i));
+			res = TRUE;
+			break;
+		}
+
+		OSL_SLEEP(5);
+	}
+
+	return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+	static bool saved_status = FALSE;
+
+	char buf_reg50va_dhcp_on[8] =
+		{ 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+	char buf_reg51va_dhcp_on[8] =
+		{ 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg64va_dhcp_on[8] =
+		{ 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg65va_dhcp_on[8] =
+		{ 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg71va_dhcp_on[8] =
+		{ 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	uint32 regaddr;
+	static uint32 saved_reg50;
+	static uint32 saved_reg51;
+	static uint32 saved_reg64;
+	static uint32 saved_reg65;
+	static uint32 saved_reg71;
+
+	if (trump_sco) {
+		/* this should reduce aggressive eSCO retransmission
+		 * w/o breaking it
+		 */
+
+		/* 1st save current */
+		WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+			  "override}\n"));
+		if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+			saved_status = TRUE;
+			WL_TRACE(("saved bt_params[50,51,64,65,71]:"
+				  "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				  saved_reg50, saved_reg51,
+				  saved_reg64, saved_reg65, saved_reg71));
+		} else {
+			WL_ERR((":%s: save btc_params failed\n",
+				__FUNCTION__));
+			saved_status = FALSE;
+			return -1;
+		}
+
+		WL_TRACE(("override with [50,51,64,65,71]:"
+			  "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			  *(u32 *)(buf_reg50va_dhcp_on+4),
+			  *(u32 *)(buf_reg51va_dhcp_on+4),
+			  *(u32 *)(buf_reg64va_dhcp_on+4),
+			  *(u32 *)(buf_reg65va_dhcp_on+4),
+			  *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg50va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg51va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg64va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg65va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg71va_dhcp_on[0], 8);
+
+		saved_status = TRUE;
+	} else if (saved_status) {
+		/* restore previously saved bt params */
+		WL_TRACE(("Do new SCO/eSCO coex algo {restore}\n"));
+
+		regaddr = 50;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg50);
+		regaddr = 51;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg51);
+		regaddr = 64;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg64);
+		regaddr = 65;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg65);
+		regaddr = 71;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg71);
+
+		WL_TRACE(("restore bt_params[50,51,64,65,71]:"
+			"0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			saved_reg50, saved_reg51, saved_reg64,
+			saved_reg65, saved_reg71));
+
+		saved_status = FALSE;
+	} else {
+		WL_ERR((":%s attempt to restore unsaved BTCOEX params\n",
+			__FUNCTION__));
+		return -1;
+	}
+	return 0;
+}
+#endif /* BT_DHCP_eSCO_FIX */
+
+static void
+wl_cfg80211_bt_setflag(struct net_device *dev, bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+	char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+	char buf_flag7_default[8]   = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+
+#if defined(BT_DHCP_eSCO_FIX)
+	/* set = 1: save & turn on; set = 0: turn off & restore previous settings */
+	set_btc_esco_params(dev, set);
+#endif
+
+#if defined(BT_DHCP_USE_FLAGS)
+	WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set));
+	if (set == TRUE)
+		/* Forcing bt_flag7  */
+		dev_wlc_bufvar_set(dev, "btc_flags",
+			(char *)&buf_flag7_dhcp_on[0],
+			sizeof(buf_flag7_dhcp_on));
+	else
+		/* Restoring default bt flag7 */
+		dev_wlc_bufvar_set(dev, "btc_flags",
+			(char *)&buf_flag7_default[0],
+			sizeof(buf_flag7_default));
+#endif
+}
+
+static void wl_cfg80211_bt_timerfunc(ulong data)
+{
+	struct btcoex_info *bt_local = (struct btcoex_info *)data;
+	WL_TRACE(("Enter\n"));
+	bt_local->timer_on = 0;
+	schedule_work(&bt_local->work);
+}
+
+static void wl_cfg80211_bt_handler(struct work_struct *work)
+{
+	struct btcoex_info *btcx_inf;
+
+	btcx_inf = container_of(work, struct btcoex_info, work);
+
+	if (btcx_inf->timer_on) {
+		btcx_inf->timer_on = 0;
+		del_timer_sync(&btcx_inf->timer);
+	}
+
+	switch (btcx_inf->bt_state) {
+		case BT_DHCP_START:
+			/* DHCP started
+			 * provide OPPORTUNITY window to get DHCP address
+			 */
+			WL_TRACE(("bt_dhcp stm: started \n"));
+
+			btcx_inf->bt_state = BT_DHCP_OPPR_WIN;
+			mod_timer(&btcx_inf->timer,
+				jiffies + msecs_to_jiffies(BT_DHCP_OPPR_WIN_TIME));
+			btcx_inf->timer_on = 1;
+			break;
+
+		case BT_DHCP_OPPR_WIN:
+			if (btcx_inf->dhcp_done) {
+				WL_TRACE(("DHCP Done before T1 expiration\n"));
+				goto btc_coex_idle;
+			}
+
+			/* DHCP is not over yet, start lowering BT priority
+			 * enforce btc_params + flags if necessary
+			 */
+			WL_TRACE(("DHCP T1:%d expired\n", BT_DHCP_OPPR_WIN_TIME));
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE);
+			btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+			mod_timer(&btcx_inf->timer,
+				jiffies + msecs_to_jiffies(BT_DHCP_FLAG_FORCE_TIME));
+			btcx_inf->timer_on = 1;
+			break;
+
+		case BT_DHCP_FLAG_FORCE_TIMEOUT:
+			if (btcx_inf->dhcp_done) {
+				WL_TRACE(("DHCP Done before T2 expiration\n"));
+			} else {
+				/* No DHCP during T1+T2, restore BT priority */
+				WL_TRACE(("DHCP wait interval T2:%d msec expired\n",
+					BT_DHCP_FLAG_FORCE_TIME));
+			}
+
+			/* Restoring default bt priority */
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+btc_coex_idle:
+			btcx_inf->bt_state = BT_DHCP_IDLE;
+			btcx_inf->timer_on = 0;
+			break;
+
+		default:
+			WL_ERR(("error g_status=%d !!!\n",	btcx_inf->bt_state));
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+			btcx_inf->bt_state = BT_DHCP_IDLE;
+			btcx_inf->timer_on = 0;
+			break;
+	}
+
+	net_os_wake_unlock(btcx_inf->dev);
+}
+
+void* wl_cfg80211_btcoex_init(struct net_device *ndev)
+{
+	struct btcoex_info *btco_inf = NULL;
+
+	btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL);
+	if (!btco_inf)
+		return NULL;
+
+	btco_inf->bt_state = BT_DHCP_IDLE;
+	btco_inf->ts_dhcp_start = 0;
+	btco_inf->ts_dhcp_ok = 0;
+	/* Set up timer for BT  */
+	btco_inf->timer_ms = 10;
+	init_timer(&btco_inf->timer);
+	btco_inf->timer.data = (ulong)btco_inf;
+	btco_inf->timer.function = wl_cfg80211_bt_timerfunc;
+
+	btco_inf->dev = ndev;
+
+	INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler);
+
+	btcoex_info_loc = btco_inf;
+	return btco_inf;
+}
+
+void wl_cfg80211_btcoex_deinit(void)
+{
+	if (!btcoex_info_loc)
+		return;
+
+	if (btcoex_info_loc->timer_on) {
+		btcoex_info_loc->timer_on = 0;
+		del_timer_sync(&btcoex_info_loc->timer);
+	}
+
+	cancel_work_sync(&btcoex_info_loc->work);
+
+	kfree(btcoex_info_loc);
+}
+
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command)
+{
+
+	struct btcoex_info *btco_inf = btcoex_info_loc;
+	char powermode_val = 0;
+	char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+	char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+	char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+	uint32 regaddr;
+	static uint32 saved_reg66;
+	static uint32 saved_reg41;
+	static uint32 saved_reg68;
+	static bool saved_status = FALSE;
+
+	char buf_flag7_default[8] =   { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+
+	/* Figure out powermode 1 or 0 command */
+	strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
+
+	if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+		WL_TRACE_HW4(("DHCP session starts\n"));
+
+#if defined(DHCP_SCAN_SUPPRESS)
+		/* Suppress scan during the DHCP */
+		wl_cfg80211_scan_suppress(dev, 1);
+#endif /* DHCP_SCAN_SUPPRESS */
+
+#ifdef PKT_FILTER_SUPPORT
+		dhd->dhcp_in_progress = 1;
+
+		if (dhd->early_suspended) {
+			WL_TRACE_HW4(("DHCP in progress, disable packet filter!!!\n"));
+			dhd_enable_packet_filter(0, dhd);
+		}
+#endif
+
+		/* Retrieve and save original register values */
+		if ((saved_status == FALSE) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 66,  &saved_reg66)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 41,  &saved_reg41)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 68,  &saved_reg68)))   {
+				saved_status = TRUE;
+				WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+					saved_reg66, saved_reg41, saved_reg68));
+
+				/* Disable PM mode during DHCP session */
+
+				/* Start BT timer only for SCO connection */
+				if (btcoex_is_sco_active(dev)) {
+					/* btc_params 66 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg66va_dhcp_on[0],
+						sizeof(buf_reg66va_dhcp_on));
+					/* btc_params 41 0x33 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg41va_dhcp_on[0],
+						sizeof(buf_reg41va_dhcp_on));
+					/* btc_params 68 0x190 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg68va_dhcp_on[0],
+						sizeof(buf_reg68va_dhcp_on));
+					saved_status = TRUE;
+
+					btco_inf->bt_state = BT_DHCP_START;
+					btco_inf->timer_on = 1;
+					mod_timer(&btco_inf->timer, btco_inf->timer.expires);
+					WL_TRACE(("enable BT DHCP Timer\n"));
+				}
+		}
+		else if (saved_status == TRUE) {
+			WL_ERR(("was called w/o DHCP OFF. Continue\n"));
+		}
+	}
+	else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+
+#if defined(DHCP_SCAN_SUPPRESS)
+		/* Since DHCP is complete, enable the scan back */
+		wl_cfg80211_scan_suppress(dev, 0);
+#endif /* DHCP_SCAN_SUPPRESS */
+
+#ifdef PKT_FILTER_SUPPORT
+		dhd->dhcp_in_progress = 0;
+		WL_TRACE_HW4(("DHCP is complete \n"));
+
+		/* Enable packet filtering */
+		if (dhd->early_suspended) {
+			WL_TRACE_HW4(("DHCP is complete , enable packet filter!!!\n"));
+			dhd_enable_packet_filter(1, dhd);
+		}
+#endif /* PKT_FILTER_SUPPORT */
+
+		/* Restoring PM mode */
+
+		/* Stop any bt timer because DHCP session is done */
+		WL_TRACE(("disable BT DHCP Timer\n"));
+		if (btco_inf->timer_on) {
+			btco_inf->timer_on = 0;
+			del_timer_sync(&btco_inf->timer);
+
+			if (btco_inf->bt_state != BT_DHCP_IDLE) {
+			/* need to restore original btc flags & extra btc params */
+				WL_TRACE(("bt->bt_state:%d\n", btco_inf->bt_state));
+				/* wake up btcoex thread to restore btlags+params  */
+				schedule_work(&btco_inf->work);
+			}
+		}
+
+		/* Restoring btc_flag parameter anyway */
+		if (saved_status == TRUE)
+			dev_wlc_bufvar_set(dev, "btc_flags",
+				(char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+
+		/* Restore original values */
+		if (saved_status == TRUE) {
+			regaddr = 66;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg66);
+			regaddr = 41;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg41);
+			regaddr = 68;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg68);
+
+			WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+				saved_reg66, saved_reg41, saved_reg68));
+		}
+		saved_status = FALSE;
+
+	}
+	else {
+		WL_ERR(("Unknown power setting, ignored\n"));
+	}
+
+	snprintf(command, 3, "OK");
+
+	return (strlen("OK"));
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
new file mode 100644
index 0000000000000000000000000000000000000000..21208172c53047e9924ce8e8446846bde3356698
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
@@ -0,0 +1,2678 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_cfgp2p.c 490694 2014-07-11 14:37:00Z $
+ *
+ */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <net/rtnetlink.h>
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wldev_common.h>
+#include <wl_android.h>
+
+static s8 scanparambuf[WLC_IOCTL_SMLEN];
+static s8 g_mgmt_ie_buf[2048];
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+
+static u32
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
+            s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd);
+static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct wireless_dev *wdev, bool notify);
+
+#if defined(WL_ENABLE_P2P_IF)
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
+static int wl_cfgp2p_if_open(struct net_device *net);
+static int wl_cfgp2p_if_stop(struct net_device *net);
+
+static const struct net_device_ops wl_cfgp2p_if_ops = {
+	.ndo_open       = wl_cfgp2p_if_open,
+	.ndo_stop       = wl_cfgp2p_if_stop,
+	.ndo_do_ioctl   = wl_cfgp2p_do_ioctl,
+	.ndo_start_xmit = wl_cfgp2p_start_xmit,
+};
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
+
+static int wl_cfgp2p_if_dummy(struct net_device *net)
+{
+	return 0;
+}
+
+static const struct net_device_ops wl_cfgp2p_if_ops = {
+	.ndo_open       = wl_cfgp2p_if_dummy,
+	.ndo_stop       = wl_cfgp2p_if_dummy,
+	.ndo_do_ioctl   = wl_cfgp2p_do_ioctl,
+	.ndo_start_xmit = wl_cfgp2p_start_xmit,
+};
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len)
+{
+	wifi_p2p_pub_act_frame_t *pact_frm;
+
+	if (frame == NULL)
+		return false;
+	pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+	if (frame_len < sizeof(wifi_p2p_pub_act_frame_t) -1)
+		return false;
+
+	if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
+		pact_frm->action == P2P_PUB_AF_ACTION &&
+		pact_frm->oui_type == P2P_VER &&
+		memcmp(pact_frm->oui, P2P_OUI, sizeof(pact_frm->oui)) == 0) {
+		return true;
+	}
+
+	return false;
+}
+
+bool wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len)
+{
+	wifi_p2p_action_frame_t *act_frm;
+
+	if (frame == NULL)
+		return false;
+	act_frm = (wifi_p2p_action_frame_t *)frame;
+	if (frame_len < sizeof(wifi_p2p_action_frame_t) -1)
+		return false;
+
+	if (act_frm->category == P2P_AF_CATEGORY &&
+		act_frm->type  == P2P_VER &&
+		memcmp(act_frm->OUI, P2P_OUI, DOT11_OUI_LEN) == 0) {
+		return true;
+	}
+
+	return false;
+}
+
+#define GAS_RESP_LEN		2
+#define DOUBLE_TLV_BODY_OFF	4
+#define GAS_RESP_OFFSET		4
+#define GAS_CRESP_OFFSET	5
+
+bool wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len)
+{
+	bcm_tlv_t *ie = (bcm_tlv_t *)data;
+	u8 *frame = NULL;
+	u16 id, flen;
+
+	/* Skip the first ANQP element, if the frame has an ANQP element */
+	ie = bcm_parse_tlvs(ie, (int)len, DOT11_MNG_ADVERTISEMENT_ID);
+
+	if (ie == NULL)
+		return false;
+
+	frame = (uint8 *)ie + ie->len + TLV_HDR_LEN + GAS_RESP_LEN;
+	id = ((u16) (((frame)[1] << 8) | (frame)[0]));
+	flen = ((u16) (((frame)[3] << 8) | (frame)[2]));
+
+	/* If the contents match the OUI and the type */
+	if (flen >= WFA_OUI_LEN + 1 &&
+		id ==  P2PSD_GAS_NQP_INFOID &&
+		!bcmp(&frame[DOUBLE_TLV_BODY_OFF], (const uint8*)WFA_OUI, WFA_OUI_LEN) &&
+		subtype == frame[DOUBLE_TLV_BODY_OFF+WFA_OUI_LEN]) {
+		return true;
+	}
+
+	return false;
+}
+
+bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len)
+{
+
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+
+	if (frame == NULL)
+		return false;
+
+	sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+	if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1))
+		return false;
+	if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+		return false;
+
+	if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+		return true;
+	else
+		return false;
+}
+void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel)
+{
+	wifi_p2p_pub_act_frame_t *pact_frm;
+	wifi_p2p_action_frame_t *act_frm;
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+	if (!frame || frame_len <= 2)
+		return;
+
+	if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
+		pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+		switch (pact_frm->subtype) {
+			case P2P_PAF_GON_REQ:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Req Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_GON_RSP:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Rsp Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_GON_CONF:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Confirm Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_INVITE_REQ:
+				CFGP2P_ACTION(("%s P2P Invitation Request  Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_INVITE_RSP:
+				CFGP2P_ACTION(("%s P2P Invitation Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_DEVDIS_REQ:
+				CFGP2P_ACTION(("%s P2P Device Discoverability Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_DEVDIS_RSP:
+				CFGP2P_ACTION(("%s P2P Device Discoverability Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_PROVDIS_REQ:
+				CFGP2P_ACTION(("%s P2P Provision Discovery Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_PROVDIS_RSP:
+				CFGP2P_ACTION(("%s P2P Provision Discovery Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown P2P Public Action Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+
+		}
+
+	} else if (wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+		act_frm = (wifi_p2p_action_frame_t *)frame;
+		switch (act_frm->subtype) {
+			case P2P_AF_NOTICE_OF_ABSENCE:
+				CFGP2P_ACTION(("%s P2P Notice of Absence Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_PRESENCE_REQ:
+				CFGP2P_ACTION(("%s P2P Presence Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_PRESENCE_RSP:
+				CFGP2P_ACTION(("%s P2P Presence Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_GO_DISC_REQ:
+				CFGP2P_ACTION(("%s P2P Discoverability Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown P2P Action Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+		}
+
+	} else if (wl_cfgp2p_is_gas_action(frame, frame_len)) {
+		sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+		switch (sd_act_frm->action) {
+			case P2PSD_ACTION_ID_GAS_IREQ:
+				CFGP2P_ACTION(("%s P2P GAS Initial Request,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_IRESP:
+				CFGP2P_ACTION(("%s P2P GAS Initial Response,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_CREQ:
+				CFGP2P_ACTION(("%s P2P GAS Comeback Request,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_CRESP:
+				CFGP2P_ACTION(("%s P2P GAS Comeback Response,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown P2P GAS Frame,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+		}
+
+
+	}
+}
+
+/*
+ *  Initialize variables related to P2P
+ *
+ */
+s32
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg)
+{
+	if (!(cfg->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
+		CFGP2P_ERR(("struct p2p_info allocation failed\n"));
+		return -ENOMEM;
+	}
+#define INIT_IE(IE_TYPE, BSS_TYPE)		\
+	do {							\
+		memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+		   sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+		wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+	} while (0);
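+	/*
+	 * Note: INIT_IE is a function-local helper that clears the saved IE
+	 * buffer and resets its length for the given IE type and bsscfg type;
+	 * it is expanded for every combination below and #undef'd once the
+	 * table is initialized.
+	 */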
+
+	INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_CONNECTION);
+#undef INIT_IE
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg);
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = 0;
+	return BCME_OK;
+
+}
+/*
+ *  Deinitialize variables related to P2P
+ *
+ */
+void
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+	CFGP2P_DBG(("In\n"));
+	if (cfg->p2p) {
+		kfree(cfg->p2p);
+		cfg->p2p = NULL;
+	}
+	cfg->p2p_supported = 0;
+}
+/*
+ * Set P2P functions into firmware
+ */
+s32
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
+	s32 ret = BCME_OK;
+	s32 val = 0;
+	/* Do we have to check whether APSTA is enabled or not ? */
+	ret = wldev_iovar_getint(ndev, "apsta", &val);
+	if (ret < 0) {
+		CFGP2P_ERR(("get apsta error %d\n", ret));
+		return ret;
+	}
+	if (val == 0) {
+		val = 1;
+		ret = wldev_ioctl(ndev, WLC_DOWN, &val, sizeof(s32), true);
+		if (ret < 0) {
+			CFGP2P_ERR(("WLC_DOWN error %d\n", ret));
+			return ret;
+		}
+		wldev_iovar_setint(ndev, "apsta", val);
+		ret = wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true);
+		if (ret < 0) {
+			CFGP2P_ERR(("WLC_UP error %d\n", ret));
+			return ret;
+		}
+	}
+
+	/* In case of COB type, the firmware has a default mac address.
+	 * After initializing the firmware, we have to set the current mac
+	 * address in the firmware as the P2P device address
+	 */
+	ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr,
+		sizeof(null_eth_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync);
+	if (ret && ret != BCME_UNSUPPORTED) {
+		CFGP2P_ERR(("failed to update device address ret %d\n", ret));
+	}
+	return ret;
+}
+
+/* Create a new P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to create
+ * @if_type  : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT
+ * @chspec   : chspec to use if creating a GO BSS.
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec)
+{
+	wl_p2p_if_t ifreq;
+	s32 err;
+	u32 scb_timeout = WL_SCB_TIMEOUT;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	ifreq.type = if_type;
+	ifreq.chspec = chspec;
+	memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+	CFGP2P_DBG(("---cfg p2p_ifadd "MACDBG" %s %u\n",
+		MAC2STRDBG(ifreq.addr.octet),
+		(if_type == WL_P2P_IF_GO) ? "go" : "client",
+	        (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+	err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (unlikely(err < 0))
+		printf("'cfg p2p_ifadd' error %d\n", err);
+	else if (if_type == WL_P2P_IF_GO) {
+		err = wldev_ioctl(ndev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
+		if (unlikely(err < 0))
+			printf("'cfg scb_timeout' error %d\n", err);
+	}
+	return err;
+}
+
+/* Disable a P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to disable
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	s32 ret;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdis "MACDBG"\n",
+		netdev->ifindex, MAC2STRDBG(mac->octet)));
+	ret = wldev_iovar_setbuf(netdev, "p2p_ifdis", mac, sizeof(*mac),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (unlikely(ret < 0)) {
+		printf("'cfg p2p_ifdis' error %d\n", ret);
+	}
+	return ret;
+}
+
+/* Delete a P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to delete
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	s32 ret;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdel "MACDBG"\n",
+	    netdev->ifindex, MAC2STRDBG(mac->octet)));
+	ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (unlikely(ret < 0)) {
+		printf("'cfg p2p_ifdel' error %d\n", ret);
+	}
+	return ret;
+}
+
+/* Change a P2P Role.
+ * Parameters:
+ * @mac      : MAC address of the BSS to change a role
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec)
+{
+	wl_p2p_if_t ifreq;
+	s32 err;
+	u32 scb_timeout = WL_SCB_TIMEOUT;
+
+	struct net_device *netdev =  wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+
+	ifreq.type = if_type;
+	ifreq.chspec = chspec;
+	memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+	CFGP2P_INFO(("---cfg p2p_ifchange "MACDBG" %s %u"
+		" chanspec 0x%04x\n", MAC2STRDBG(ifreq.addr.octet),
+		(if_type == WL_P2P_IF_GO) ? "go" : "client",
+		(chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT,
+		ifreq.chspec));
+
+	err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (unlikely(err < 0)) {
+		printf("'cfg p2p_ifupd' error %d\n", err);
+	} else if (if_type == WL_P2P_IF_GO) {
+		err = wldev_ioctl(netdev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
+		if (unlikely(err < 0))
+			printf("'cfg scb_timeout' error %d\n", err);
+	}
+	return err;
+}
+
+
+/* Get the index of a created P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the created BSS
+ * @index    : output: index of created BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index)
+{
+	s32 ret;
+	u8 getbuf[64];
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_INFO(("---cfg p2p_if "MACDBG"\n", MAC2STRDBG(mac->octet)));
+
+	ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf,
+		sizeof(getbuf), wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY), NULL);
+
+	if (ret == 0) {
+		memcpy(index, getbuf, sizeof(s32));
+		CFGP2P_INFO(("---cfg p2p_if   ==> %d\n", *index));
+	}
+
+	return ret;
+}
+
+static s32
+wl_cfgp2p_set_discovery(struct bcm_cfg80211 *cfg, s32 on)
+{
+	s32 ret = BCME_OK;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	CFGP2P_DBG(("enter\n"));
+
+	ret = wldev_iovar_setint(ndev, "p2p_disc", on);
+
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret));
+	}
+
+	return ret;
+}
+
+/* Set the WL driver's P2P mode.
+ * Parameters :
+ * @mode      : one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}.
+ * @channel   : the channel to listen on
+ * @listen_ms : the time (in milliseconds) to wait
+ * @bssidx    : bss index for BSSCFG
+ * Returns 0 if success
+ */
+
+s32
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode, u32 channel, u16 listen_ms, int bssidx)
+{
+	wl_p2p_disc_st_t discovery_mode;
+	s32 ret;
+	struct net_device *dev;
+	CFGP2P_DBG(("enter\n"));
+
+	if (unlikely(bssidx == WL_INVALID)) {
+		CFGP2P_ERR((" %d index out of range\n", bssidx));
+		return -1;
+	}
+
+	dev = wl_cfgp2p_find_ndev(cfg, bssidx);
+	if (unlikely(dev == NULL)) {
+		CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
+		return BCME_NOTFOUND;
+	}
+
+	/* Set the requested P2P discovery state (e.g. Listen mode, in which the
+	 * driver responds to P2P probe requests)
+	 */
+	discovery_mode.state = mode;
+	discovery_mode.chspec = wl_ch_host_to_driver(channel);
+	discovery_mode.dwell = listen_ms;
+	ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
+		sizeof(discovery_mode), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+		bssidx, &cfg->ioctl_buf_sync);
+
+	return ret;
+}
+
+/* Get the index of the P2P Discovery BSS */
+static s32
+wl_cfgp2p_get_disc_idx(struct bcm_cfg80211 *cfg, s32 *index)
+{
+	s32 ret;
+	struct net_device *dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+
+	ret = wldev_iovar_getint(dev, "p2p_dev", index);
+	CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret));
+
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("'p2p_dev' error %d\n", ret));
+		return ret;
+	}
+	return ret;
+}
+
+s32
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg)
+{
+
+	s32 index = 0;
+	s32 ret = BCME_OK;
+
+	CFGP2P_DBG(("enter\n"));
+
+	if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) > 0) {
+		CFGP2P_ERR(("do nothing, already initialized\n"));
+		return ret;
+	}
+
+	ret = wl_cfgp2p_set_discovery(cfg, 1);
+	if (ret < 0) {
+		CFGP2P_ERR(("set discover error\n"));
+		return ret;
+	}
+	/* Enable P2P Discovery in the WL Driver */
+	ret = wl_cfgp2p_get_disc_idx(cfg, &index);
+
+	if (ret < 0) {
+		return ret;
+	}
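+	/* The discovery BSSCFG reuses the primary interface's net_device; only its
+	 * bsscfg index differs.
+	 */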
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) =
+	    wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = index;
+
+	/* Set the initial discovery state to SCAN */
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret != 0)) {
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+		wl_cfgp2p_set_discovery(cfg, 0);
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+		wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+		return 0;
+	}
+	return ret;
+}
+
+/* Deinitialize P2P Discovery
+ * Parameters :
+ * @cfg        : wl_private data
+ * Returns 0 if success.
+ */
+static s32
+wl_cfgp2p_deinit_discovery(struct bcm_cfg80211 *cfg)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG(("enter\n"));
+
+	if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) == 0) {
+		CFGP2P_ERR(("do nothing, not initialized\n"));
+		return -1;
+	}
+	/* Set the discovery state to SCAN */
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+	            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	/* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
+	ret = wl_cfgp2p_set_discovery(cfg, 0);
+
+	/* Clear our saved WPS and P2P IEs for the discovery BSS.  The driver
+	 * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery
+	 * BSS.
+	 */
+
+	/* Clear the saved bsscfg index of the discovery BSSCFG to indicate we
+	 * have no discovery BSS.
+	 */
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = WL_INVALID;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+
+	return ret;
+
+}
+/* Enable P2P Discovery
+ * Parameters:
+ * @cfg	: wl_private data
+ * @ie  : probe request ie (WPS IE + P2P IE)
+ * @ie_len   : probe request ie length
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	const u8 *ie, u32 ie_len)
+{
+	s32 ret = BCME_OK;
+	s32 bssidx;
+
+	if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+		CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
+		goto set_ie;
+	}
+
+	wl_set_p2p_status(cfg, DISCOVERY_ON);
+
+	CFGP2P_DBG(("enter\n"));
+
+	ret = wl_cfgp2p_init_discovery(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" init discovery error %d\n", ret));
+		goto exit;
+	}
+	/* Set wsec to any non-zero value in the discovery bsscfg to ensure our
+	 * P2P probe responses have the privacy bit set in the 802.11 WPA IE.
+	 * Some peer devices may not initiate WPS with us if this bit is not set.
+	 */
+	ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE),
+			"wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" wsec error %d\n", ret));
+	}
+set_ie:
+	if (ie_len) {
+		if (bcmcfg_to_prmry_ndev(cfg) == dev) {
+			bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+		} else if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			return BCME_ERROR;
+		}
+
+		ret = wl_cfgp2p_set_management_ie(cfg, dev,
+			bssidx,
+			VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+
+		if (unlikely(ret < 0)) {
+			CFGP2P_ERR(("set probreq ie occurs error %d\n", ret));
+			goto exit;
+		}
+	}
+exit:
+	return ret;
+}
+
+/* Disable P2P Discovery
+ * Parameters:
+ * @cfg       : wl_private_data
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG((" enter\n"));
+	wl_clr_p2p_status(cfg, DISCOVERY_ON);
+
+	if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) == 0) {
+		CFGP2P_ERR((" do nothing, not initialized\n"));
+		goto exit;
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+	            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret < 0)) {
+
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+	}
+	/* Do a scan abort to stop the driver's scan engine in case it is still
+	 * waiting out an action frame tx dwell time.
+	 */
+	wl_clr_p2p_status(cfg, DISCOVERY_ON);
+	ret = wl_cfgp2p_deinit_discovery(cfg);
+
+exit:
+	return ret;
+}
+
+s32
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active,
+	u32 num_chans, u16 *channels,
+	s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
+	p2p_scan_purpose_t p2p_scan_purpose)
+{
+	s32 ret = BCME_OK;
+	s32 memsize;
+	s32 eparams_size;
+	u32 i;
+	s8 *memblk;
+	wl_p2p_scan_t *p2p_params;
+	wl_escan_params_t *eparams;
+	wlc_ssid_t ssid;
+	/* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+
+	struct net_device *pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+	/* Allocate scan params with space for num_chans channels and 0 SSIDs */
+	eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+	    OFFSETOF(wl_escan_params_t, params)) +
+		num_chans * sizeof(eparams->params.channel_list[0]);
+
+	memsize = sizeof(wl_p2p_scan_t) + eparams_size;
+	memblk = scanparambuf;
+	if (memsize > sizeof(scanparambuf)) {
+		CFGP2P_ERR((" scanpar buf too small (%u > %zu)\n",
+		    memsize, sizeof(scanparambuf)));
+		return -1;
+	}
+	memset(memblk, 0, memsize);
+	memset(cfg->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
+	if (search_state == WL_P2P_DISC_ST_SEARCH) {
+		/*
+		 * In SEARCH state we don't need to set the SSID explicitly,
+		 * because the dongle uses the P2P wildcard SSID internally by default.
+		 */
+		wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
+		/* use null ssid */
+		ssid.SSID_len = 0;
+		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+	} else if (search_state == WL_P2P_DISC_ST_SCAN) {
+		/* SCAN state: plain 802.11 scan.
+		 * The WFD supplicant issues p2p_find with type=progressive or type=full.
+		 * For a progressive p2p_find we have to set the SSID to the P2P wildcard,
+		 * because without an SSID we would just do a broadcast scan.
+		 */
+		wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+		/* use wild card ssid */
+		ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN;
+		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+		memcpy(&ssid.SSID, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN);
+	} else {
+		CFGP2P_ERR((" invalid search state %d\n", search_state));
+		return -1;
+	}
+
+
+	/* Fill in the P2P scan structure at the start of the iovar param block */
+	p2p_params = (wl_p2p_scan_t*) memblk;
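+	/* 'E' selects the escan (event-driven scan) form of the p2p_scan iovar */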
+	p2p_params->type = 'E';
+	/* Fill in the Scan structure that follows the P2P scan structure */
+	eparams = (wl_escan_params_t*) (p2p_params + 1);
+	eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+	if (active)
+		eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
+	else
+		eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+
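+	/* Target the scan at the peer when a destination address is given,
+	 * otherwise use the broadcast BSSID.
+	 */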
+	if (tx_dst_addr == NULL)
+		memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+	else
+		memcpy(&eparams->params.bssid, tx_dst_addr, ETHER_ADDR_LEN);
+
+	if (ssid.SSID_len)
+		memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
+
+	eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+
+	switch (p2p_scan_purpose) {
+		case P2P_SCAN_SOCIAL_CHANNEL:
+			eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
+			break;
+		case P2P_SCAN_AFX_PEER_NORMAL:
+		case P2P_SCAN_AFX_PEER_REDUCED:
+			eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS);
+			break;
+		case P2P_SCAN_CONNECT_TRY:
+			eparams->params.active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+			break;
+		default:
+			if (wl_get_drv_status_all(cfg, CONNECTED))
+				eparams->params.active_time = -1;
+			else
+				eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
+			break;
+	}
+
+	if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY)
+		eparams->params.nprobes = htod32(eparams->params.active_time /
+			WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+	else
+		eparams->params.nprobes = htod32(eparams->params.active_time /
+			P2PAPI_SCAN_NPROBS_TIME_MS);
+
+
+	if (eparams->params.nprobes <= 0)
+		eparams->params.nprobes = 1;
+	CFGP2P_DBG(("nprobes # %d, active_time %d\n",
+		eparams->params.nprobes, eparams->params.active_time));
+	eparams->params.passive_time = htod32(-1);
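+	/* channel_num packs the SSID count (upper half) and the channel count
+	 * (lower half) into a single word.
+	 */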
+	eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	    (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	for (i = 0; i < num_chans; i++) {
+		eparams->params.channel_list[i] = wl_ch_host_to_driver(channels[i]);
+	}
+	eparams->version = htod32(ESCAN_REQ_VERSION);
+	eparams->action =  htod16(action);
+	wl_escan_set_sync_id(eparams->sync_id, cfg);
+	wl_escan_set_type(cfg, WL_SCANTYPE_P2P);
+	CFGP2P_INFO(("SCAN CHANNELS : "));
+
+	for (i = 0; i < num_chans; i++) {
+		if (i == 0) CFGP2P_INFO(("%d", channels[i]));
+		else CFGP2P_INFO((",%d", channels[i]));
+	}
+
+	CFGP2P_INFO(("\n"));
+
+	ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
+		memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (ret == BCME_OK)
+		wl_set_p2p_status(cfg, SCANNING);
+	return ret;
+}
+
+/* Search function used to reach a common channel for sending an action frame.
+ * Parameters:
+ * @cfg      : wl_private data
+ * @ndev     : net device for bssidx
+ * @bssidx   : bssidx for BSS
+ * @channel  : channel to search on (0 means scan the social channels)
+ * @tx_dst_addr : destination address of the pending action frame
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr)
+{
+	s32 ret = 0;
+	u32 chan_cnt = 0;
+	u16 *default_chan_list = NULL;
+	p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_AFX_PEER_NORMAL;
+	if (!p2p_is_on(cfg) || ndev == NULL || bssidx == WL_INVALID)
+		return -BCME_ERROR;
+	WL_TRACE_HW4((" Enter\n"));
+	if (bssidx == wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY))
+		bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	if (channel)
+		chan_cnt = AF_PEER_SEARCH_CNT;
+	else
+		chan_cnt = SOCIAL_CHAN_CNT;
+	default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL);
+	if (default_chan_list == NULL) {
+		CFGP2P_ERR(("channel list allocation failed \n"));
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (channel) {
+		u32 i;
+		/* insert same channel to the chan_list */
+		for (i = 0; i < chan_cnt; i++) {
+			default_chan_list[i] = channel;
+		}
+	} else {
+		default_chan_list[0] = SOCIAL_CHAN_1;
+		default_chan_list[1] = SOCIAL_CHAN_2;
+		default_chan_list[2] = SOCIAL_CHAN_3;
+	}
+	ret = wl_cfgp2p_escan(cfg, ndev, true, chan_cnt,
+		default_chan_list, WL_P2P_DISC_ST_SEARCH,
+		WL_SCAN_ACTION_START, bssidx, NULL, p2p_scan_purpose);
+	kfree(default_chan_list);
+exit:
+	return ret;
+}
+
+/* Check whether pointed-to IE looks like WPA. */
+#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE)
+/* Check whether pointed-to IE looks like WPS. */
+#define wl_cfgp2p_is_wps_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
+/* Check whether the given IE looks like WFA P2P IE. */
+#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P)
+/* Check whether the given IE looks like WFA WFDisplay IE. */
+#ifndef WFA_OUI_TYPE_WFD
+#define WFA_OUI_TYPE_WFD	0x0a			/* WiFi Display OUI TYPE */
+#endif
+#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD)
+
+static s32
+wl_cfgp2p_parse_vndr_ies(u8 *parse, u32 len,
+	struct parsed_vndr_ies *vndr_ies)
+{
+	s32 err = BCME_OK;
+	vndr_ie_t *vndrie;
+	bcm_tlv_t *ie;
+	struct parsed_vndr_ie_info *parsed_info;
+	u32	count = 0;
+	s32 remained_len;
+
+	remained_len = (s32)len;
+	memset(vndr_ies, 0, sizeof(*vndr_ies));
+
+	WL_INFORM(("---> len %d\n", len));
+	ie = (bcm_tlv_t *) parse;
+	if (!bcm_valid_tlv(ie, remained_len))
+		ie = NULL;
+	while (ie) {
+		if (count >= MAX_VNDR_IE_NUMBER)
+			break;
+		if (ie->id == DOT11_MNG_VS_ID) {
+			vndrie = (vndr_ie_t *) ie;
+			/* length must be at least the OUI length plus one byte of data */
+			if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+				CFGP2P_ERR(("%s: invalid vndr ie. length is too small %d\n",
+					__FUNCTION__, vndrie->len));
+				goto end;
+			}
+			/* if wpa or wme ie, do not add ie */
+			if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
+				((vndrie->data[0] == WPA_OUI_TYPE) ||
+				(vndrie->data[0] == WME_OUI_TYPE))) {
+				CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n"));
+				goto end;
+			}
+
+			parsed_info = &vndr_ies->ie_info[count++];
+
+			/* save vndr ie information */
+			parsed_info->ie_ptr = (char *)vndrie;
+			parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
+			memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
+
+			vndr_ies->count = count;
+
+			CFGP2P_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x \n",
+				parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1],
+				parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0]));
+		}
+end:
+		ie = bcm_next_tlv(ie, &remained_len);
+	}
+	return err;
+}
+
+
+/* Delete and set a management vendor IE (vndr ie) in the firmware
+ * Parameters:
+ * @cfg       : wl_private data
+ * @ndev     : net device for bssidx
+ * @bssidx   : bssidx for BSS
+ * @pktflag  : packet flag for IE (VNDR_IE_PRBREQ_FLAG,VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
+ *                                 VNDR_IE_ASSOCREQ_FLAG)
+ * @ie       :  VNDR IE (such as P2P IE , WPS IE)
+ * @ie_len   : VNDR IE Length
+ * Returns 0 if success.
+ */
+
+s32
+wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
+    s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
+{
+	s32 ret = BCME_OK;
+	u8  *curr_ie_buf = NULL;
+	u8  *mgmt_ie_buf = NULL;
+	u32 mgmt_ie_buf_len = 0;
+	u32 *mgmt_ie_len = NULL;
+	u32 del_add_ie_buf_len = 0;
+	u32 total_ie_buf_len = 0;
+	u32 parsed_ie_buf_len = 0;
+	struct parsed_vndr_ies old_vndr_ies;
+	struct parsed_vndr_ies new_vndr_ies;
+	s32 i;
+	u8 *ptr;
+	s32 type = -1;
+	s32 remained_buf_len;
+#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie)
+#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie_len)
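+	/* IE_TYPE()/IE_TYPE_LEN() resolve to the IE buffer (and its length) saved
+	 * per P2P BSS type in the driver's private data.
+	 */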
+	memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf));
+	curr_ie_buf = g_mgmt_ie_buf;
+	CFGP2P_DBG((" bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag));
+
+#ifdef DUAL_STA
+	if ((cfg->p2p != NULL) && (bssidx != cfg->cfgdev_bssidx)) {
+#else
+	if (cfg->p2p != NULL) {
+#endif
+		if (wl_cfgp2p_find_type(cfg, bssidx, &type)) {
+			CFGP2P_ERR(("cannot find type from bssidx : %d\n", bssidx));
+			return BCME_ERROR;
+		}
+
+		switch (pktflag) {
+			case VNDR_IE_PRBREQ_FLAG :
+				mgmt_ie_buf = IE_TYPE(probe_req, type);
+				mgmt_ie_len = &IE_TYPE_LEN(probe_req, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, type));
+				break;
+			case VNDR_IE_PRBRSP_FLAG :
+				mgmt_ie_buf = IE_TYPE(probe_res, type);
+				mgmt_ie_len = &IE_TYPE_LEN(probe_res, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, type));
+				break;
+			case VNDR_IE_ASSOCREQ_FLAG :
+				mgmt_ie_buf = IE_TYPE(assoc_req, type);
+				mgmt_ie_len = &IE_TYPE_LEN(assoc_req, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, type));
+				break;
+			case VNDR_IE_ASSOCRSP_FLAG :
+				mgmt_ie_buf = IE_TYPE(assoc_res, type);
+				mgmt_ie_len = &IE_TYPE_LEN(assoc_res, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, type));
+				break;
+			case VNDR_IE_BEACON_FLAG :
+				mgmt_ie_buf = IE_TYPE(beacon, type);
+				mgmt_ie_len = &IE_TYPE_LEN(beacon, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, type));
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return BCME_ERROR;
+		}
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+		switch (pktflag) {
+			case VNDR_IE_PRBRSP_FLAG :
+				mgmt_ie_buf = cfg->ap_info->probe_res_ie;
+				mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->ap_info->probe_res_ie);
+				break;
+			case VNDR_IE_BEACON_FLAG :
+				mgmt_ie_buf = cfg->ap_info->beacon_ie;
+				mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
+				break;
+			case VNDR_IE_ASSOCRSP_FLAG :
+				/* WPS-AP WSC2.0 assoc res includes wps_ie */
+				mgmt_ie_buf = cfg->ap_info->assoc_res_ie;
+				mgmt_ie_len = &cfg->ap_info->assoc_res_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->ap_info->assoc_res_ie);
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return BCME_ERROR;
+		}
+		bssidx = 0;
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
+		switch (pktflag) {
+			case VNDR_IE_PRBREQ_FLAG :
+				mgmt_ie_buf = cfg->sta_info->probe_req_ie;
+				mgmt_ie_len = &cfg->sta_info->probe_req_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->sta_info->probe_req_ie);
+				break;
+			case VNDR_IE_ASSOCREQ_FLAG :
+				mgmt_ie_buf = cfg->sta_info->assoc_req_ie;
+				mgmt_ie_len = &cfg->sta_info->assoc_req_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->sta_info->assoc_req_ie);
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return BCME_ERROR;
+		}
+		bssidx = 0;
+	} else {
+		CFGP2P_ERR(("not suitable type\n"));
+		return BCME_ERROR;
+	}
+
+	if (vndr_ie_len > mgmt_ie_buf_len) {
+		CFGP2P_ERR(("extra IE size too big\n"));
+		ret = -ENOMEM;
+	} else {
+		/* parse and save new vndr_ie in curr_ie_buff before comparing it */
+		if (vndr_ie && vndr_ie_len && curr_ie_buf) {
+			ptr = curr_ie_buf;
+
+			wl_cfgp2p_parse_vndr_ies((u8*)vndr_ie,
+				vndr_ie_len, &new_vndr_ies);
+
+			for (i = 0; i < new_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+					&new_vndr_ies.ie_info[i];
+
+				memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
+					vndrie_info->ie_len);
+				parsed_ie_buf_len += vndrie_info->ie_len;
+			}
+		}
+
+		if (mgmt_ie_buf != NULL) {
+			if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
+			     (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) {
+				CFGP2P_INFO(("Previous mgmt IE is equal to the current IE"));
+				goto exit;
+			}
+
+			/* parse old vndr_ie */
+			wl_cfgp2p_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len,
+				&old_vndr_ies);
+
+			/* make a command to delete old ie */
+			for (i = 0; i < old_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+					&old_vndr_ies.ie_info[i];
+
+				CFGP2P_INFO(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
+					vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+					vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+					vndrie_info->vndrie.oui[2]));
+
+				del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+					pktflag, vndrie_info->vndrie.oui,
+					vndrie_info->vndrie.id,
+					vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+					vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+					"del");
+
+				curr_ie_buf += del_add_ie_buf_len;
+				total_ie_buf_len += del_add_ie_buf_len;
+			}
+		}
+
+		*mgmt_ie_len = 0;
+		/* Add if there is any extra IE */
+		if (mgmt_ie_buf && parsed_ie_buf_len) {
+			ptr = mgmt_ie_buf;
+
+			remained_buf_len = mgmt_ie_buf_len;
+
+			/* make a command to add new ie */
+			for (i = 0; i < new_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+					&new_vndr_ies.ie_info[i];
+
+				CFGP2P_INFO(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n",
+					vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+					vndrie_info->ie_len - 2,
+					vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+					vndrie_info->vndrie.oui[2]));
+
+				del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+					pktflag, vndrie_info->vndrie.oui,
+					vndrie_info->vndrie.id,
+					vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+					vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+					"add");
+
+				/* verify remained buf size before copy data */
+				if (remained_buf_len >= vndrie_info->ie_len) {
+					remained_buf_len -= vndrie_info->ie_len;
+				} else {
+					CFGP2P_ERR(("no space in mgmt_ie_buf: pktflag = %d, "
+						"found vndr ies # = %d(cur %d), remained len %d, "
+						"cur mgmt_ie_len %d, new ie len = %d\n",
+						pktflag, new_vndr_ies.count, i, remained_buf_len,
+						*mgmt_ie_len, vndrie_info->ie_len));
+					break;
+				}
+
+				/* save the parsed IE in cfg struct */
+				memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
+					vndrie_info->ie_len);
+				*mgmt_ie_len += vndrie_info->ie_len;
+
+				curr_ie_buf += del_add_ie_buf_len;
+				total_ie_buf_len += del_add_ie_buf_len;
+			}
+		}
+		if (total_ie_buf_len) {
+			ret  = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf,
+				total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+				bssidx, &cfg->ioctl_buf_sync);
+			if (ret)
+				CFGP2P_ERR(("vndr ie set error : %d\n", ret));
+		}
+	}
+#undef IE_TYPE
+#undef IE_TYPE_LEN
+exit:
+	return ret;
+}
+
+/* Clear the management IE buffer of a BSSCFG
+ * Parameters:
+ * @cfg       : wl_private data
+ * @bssidx   : bssidx for BSS
+ *
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+
+	s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
+		VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
+	s32 index = -1;
+	s32 type = -1;
+	struct net_device *ndev = wl_cfgp2p_find_ndev(cfg, bssidx);
+#define INIT_IE(IE_TYPE, BSS_TYPE)		\
+	do {							\
+		memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+		   sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+		wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+	} while (0);
+
+	if (bssidx < 0 || ndev == NULL) {
+		CFGP2P_ERR(("invalid %s\n", (bssidx < 0) ? "bssidx" : "ndev"));
+		return BCME_BADARG;
+	}
+
+	if (wl_cfgp2p_find_type(cfg, bssidx, &type)) {
+		CFGP2P_ERR(("invalid argument\n"));
+		return BCME_BADARG;
+	}
+	for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
+		/* clean up vndr ies in dongle */
+		wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, vndrie_flag[index], NULL, 0);
+	}
+	INIT_IE(probe_req, type);
+	INIT_IE(probe_res, type);
+	INIT_IE(assoc_req, type);
+	INIT_IE(assoc_res, type);
+	INIT_IE(beacon, type);
+	return BCME_OK;
+}
+
+
+/* Is any of the tlvs the expected entry? If
+ * not update the tlvs buffer pointer/length.
+ */
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+{
+	/* If the contents match the OUI and the type */
+	if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+		!bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+		type == ie[TLV_BODY_OFF + oui_len]) {
+		return TRUE;
+	}
+
+	if (tlvs == NULL)
+		return FALSE;
+	/* point to the next ie */
+	ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+
+	return FALSE;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) {
+			return (wpa_ie_fixed_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) {
+			return (wpa_ie_fixed_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) {
+			return (wifi_p2p_ie_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wfd_ie((uint8*)ie, &parse, &len)) {
+			return (wifi_wfd_ie_t *)ie;
+		}
+	}
+	return NULL;
+}
+static u32
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
+            s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd)
+{
+	vndr_ie_setbuf_t hdr;	/* aligned temporary vndr_ie buffer header */
+	s32 iecount;
+	u32 data_offset;
+
+	/* Validate the pktflag parameter */
+	if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+	            VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+	            VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
+		CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
+		return -1;
+	}
+
+	/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+	strncpy(hdr.cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+	hdr.cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	/* Set the IE count - the buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&hdr.vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+	/* Copy packet flags that indicate which packets will contain this IE */
+	pktflag = htod32(pktflag);
+	memcpy((void *)&hdr.vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+		sizeof(u32));
+
+	/* Add the IE ID to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id;
+
+	/* Add the IE length to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len =
+		(uint8) VNDR_IE_MIN_LEN + datalen;
+
+	/* Add the IE OUI to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[0] = oui[0];
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[1] = oui[1];
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[2] = oui[2];
+
+	/* Copy the aligned temporary vndr_ie buffer header to the IE buffer */
+	memcpy(iebuf, &hdr, sizeof(hdr) - 1);
+
+	/* Copy the IE data to the IE buffer */
+	data_offset =
+		(u8*)&hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data[0] -
+		(u8*)&hdr;
+	memcpy(iebuf + data_offset, data, datalen);
+	return data_offset + datalen;
+
+}
+
+/*
+ * Search the bssidx based on dev argument
+ * Parameters:
+ * @cfg       : wl_private data
+ * @ndev     : net device to search bssidx
+ * @bssidx  : output arg to store bssidx of the bsscfg of firmware.
+ * Returns error
+ */
+s32
+wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *bssidx)
+{
+	u32 i;
+	if (ndev == NULL || bssidx == NULL) {
+		CFGP2P_ERR((" argument is invalid\n"));
+		return BCME_BADARG;
+	}
+	if (!cfg->p2p_supported) {
+		*bssidx = P2PAPI_BSSCFG_PRIMARY;
+		return BCME_OK;
+	}
+	/* we cannot find the bssidx of DISCOVERY BSS
+	 *  because the ndev is same with ndev of PRIMARY BSS.
+	 */
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (ndev == wl_to_p2p_bss_ndev(cfg, i)) {
+			*bssidx = wl_to_p2p_bss_bssidx(cfg, i);
+			return BCME_OK;
+		}
+	}
+
+#ifdef DUAL_STA
+	if (cfg->bss_cfgdev && (cfg->bss_cfgdev == ndev_to_cfgdev(ndev))) {
+		CFGP2P_INFO(("cfgdev is present, return the bssidx"));
+		*bssidx = cfg->cfgdev_bssidx;
+		return BCME_OK;
+	}
+#endif
+
+	return BCME_BADARG;
+
+}
+struct net_device *
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+	u32 i;
+	struct net_device *ndev = NULL;
+	if (bssidx < 0) {
+		CFGP2P_ERR((" bsscfg idx is invalid\n"));
+		goto exit;
+	}
+
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
+			ndev = wl_to_p2p_bss_ndev(cfg, i);
+			break;
+		}
+	}
+
+exit:
+	return ndev;
+}
+/*
+ * Search the driver array idx based on bssidx argument
+ * Parameters:
+ * @cfg     : wl_private data
+ * @bssidx : bssidx which indicate bsscfg->idx of firmware.
+ * @type   : output arg to store array idx of p2p->bss.
+ * Returns error
+ */
+
+s32
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type)
+{
+	u32 i;
+	if (bssidx < 0 || type == NULL) {
+		CFGP2P_ERR((" argument is invalid\n"));
+		goto exit;
+	}
+
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
+			*type = i;
+			return BCME_OK;
+		}
+	}
+
+#ifdef DUAL_STA
+	if (bssidx == cfg->cfgdev_bssidx) {
+		CFGP2P_DBG(("bssidx matching with the virtual I/F \n"));
+		*type = 1;
+		return BCME_OK;
+	}
+#endif
+
+exit:
+	return BCME_BADARG;
+}
+
+/*
+ * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE
+ */
+s32
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 ret = BCME_OK;
+	struct net_device *ndev = NULL;
+
+	if (!cfg || !cfg->p2p)
+		return BCME_ERROR;
+
+	CFGP2P_DBG((" Enter\n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (wl_get_p2p_status(cfg, LISTEN_EXPIRED) == 0) {
+		wl_set_p2p_status(cfg, LISTEN_EXPIRED);
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+
+		if (cfg->afx_hdl->is_listen == TRUE &&
+			wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_DBG(("Listen DONE for action frame\n"));
+			complete(&cfg->act_frm_scan);
+		}
+#ifdef WL_CFG80211_SYNC_GON
+		else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+			wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, ndev);
+			WL_DBG(("Listen DONE and wake up wait_next_af !!(%d)\n",
+				jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies)));
+
+			if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM))
+				wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+			complete(&cfg->wait_next_af);
+		}
+#endif /* WL_CFG80211_SYNC_GON */
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL)) {
+#else
+		if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL) ||
+			wl_get_drv_status_all(cfg, FAKE_REMAINING_ON_CHANNEL)) {
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+			WL_DBG(("Listen DONE for remain-on-channel expired\n"));
+			wl_clr_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+			wl_clr_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+			if (ndev && (ndev->ieee80211_ptr != NULL)) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+				cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+					&cfg->remain_on_chan, GFP_KERNEL);
+#else
+				cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+					&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+			}
+		}
+		if (wl_add_remove_eventmsg(bcmcfg_to_prmry_ndev(cfg),
+			WLC_E_P2P_PROBREQ_MSG, false) != BCME_OK) {
+			CFGP2P_ERR((" failed to unset WLC_E_P2P_PROBREQ_MSG\n"));
+		}
+	} else
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+
+	return ret;
+
+}
+
+/*
+ *  Timer expire callback function for LISTEN
+ *  We can't report cfg80211_remain_on_channel_expired from Timer ISR context,
+ *  so let's do it from thread context.
+ */
+void
+wl_cfgp2p_listen_expired(unsigned long data)
+{
+	wl_event_msg_t msg;
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *) data;
+	CFGP2P_DBG((" Enter\n"));
+	bzero(&msg, sizeof(wl_event_msg_t));
+	msg.event_type =  hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
+#if defined(WL_ENABLE_P2P_IF)
+	wl_cfg80211_event(cfg->p2p_net ? cfg->p2p_net :
+		wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
+#else
+	wl_cfg80211_event(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg,
+		NULL);
+#endif /* WL_ENABLE_P2P_IF */
+}
+/*
+ *  Routine for cancelling the P2P LISTEN
+ */
+static s32
+wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+                         struct wireless_dev *wdev, bool notify)
+{
+	WL_DBG(("Enter \n"));
+	/* Irrespective of whether timer is running or not, reset
+	 * the LISTEN state.
+	 */
+	if (timer_pending(&cfg->p2p->listen_timer)) {
+		del_timer_sync(&cfg->p2p->listen_timer);
+		if (notify) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+			if (wdev)
+				cfg80211_remain_on_channel_expired(wdev, cfg->last_roc_id,
+					&cfg->remain_on_chan, GFP_KERNEL);
+#else
+			if (ndev && ndev->ieee80211_ptr)
+				cfg80211_remain_on_channel_expired(ndev, cfg->last_roc_id,
+					&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		}
+	}
+	return 0;
+}
+/*
+ * Do a P2P Listen on the given channel for the given duration.
+ * A listen consists of sitting idle and responding to P2P probe requests
+ * with a P2P probe response.
+ *
+ * This fn assumes dongle p2p device discovery is already enabled.
+ * Parameters   :
+ * @cfg          : wl_private data
+ * @channel     : channel to listen
+ * @duration_ms : the time (milli seconds) to wait
+ */
+s32
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms)
+{
+#define EXTRA_DELAY_TIME	100
+	s32 ret = BCME_OK;
+	struct timer_list *_timer;
+	s32 extra_delay;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_DBG((" Enter Listen Channel : %d, Duration : %d\n", channel, duration_ms));
+	if (unlikely(wl_get_p2p_status(cfg, DISCOVERY_ON) == 0)) {
+
+		CFGP2P_ERR((" Discovery is not set, so we have nothing to do\n"));
+
+		ret = BCME_NOTREADY;
+		goto exit;
+	}
+	if (timer_pending(&cfg->p2p->listen_timer)) {
+		CFGP2P_DBG(("previous LISTEN is not completed yet\n"));
+		goto exit;
+
+	}
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	else
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+	if (wl_add_remove_eventmsg(netdev, WLC_E_P2P_PROBREQ_MSG, true) != BCME_OK) {
+		CFGP2P_ERR((" failed to set WLC_E_P2P_PROBREQ_MSG\n"));
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
+	            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	_timer = &cfg->p2p->listen_timer;
+
+	/*  We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from the dongle;
+	 *  otherwise we wait up to duration_ms + 100ms + duration_ms / 10.
+	 */
+	if (ret == BCME_OK) {
+		extra_delay = EXTRA_DELAY_TIME + (duration_ms / 10);
+	} else {
+		/* if failed to set listen, it doesn't need to wait whole duration. */
+		duration_ms = 100 + duration_ms / 20;
+		extra_delay = 0;
+	}
+
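+	/* Arm the listen timer as a fallback in case the
+	 * WLC_E_P2P_DISC_LISTEN_COMPLETE event never arrives.
+	 */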
+	INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, extra_delay);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#undef EXTRA_DELAY_TIME
+exit:
+	return ret;
+}
+
+
+s32
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG((" Enter\n"));
+	if (!wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+
+		CFGP2P_DBG((" do nothing, discovery is off\n"));
+		return ret;
+	}
+	if (wl_get_p2p_status(cfg, SEARCH_ENABLED) == enable) {
+		CFGP2P_DBG(("already : %d\n", enable));
+		return ret;
+	}
+
+	wl_chg_p2p_status(cfg, SEARCH_ENABLED);
+	/* When disabling Search, reset the WL driver's p2p discovery state to
+	 * WL_P2P_DISC_ST_SCAN.
+	 */
+	if (!enable) {
+		wl_clr_p2p_status(cfg, SCANNING);
+		ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+		            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	}
+
+	return ret;
+}
+
+/*
+ * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE
+ */
+s32
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+            const wl_event_msg_t *e, void *data)
+{
+	s32 ret = BCME_OK;
+	u32 event_type = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+	struct net_device *ndev = NULL;
+	CFGP2P_DBG((" Enter\n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+		if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
+
+			CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
+			if (status == WLC_E_STATUS_SUCCESS) {
+				wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
+				CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n"));
+			}
+			else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+				wl_set_p2p_status(cfg, ACTION_TX_NOACK);
+				CFGP2P_INFO(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
+				wl_stop_wait_next_action_frame(cfg, ndev);
+			}
+		} else {
+			CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE is received, "
+						"status : %d\n", status));
+
+			if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+				complete(&cfg->send_af_done);
+		}
+	}
+	return ret;
+}
+/* Send an action frame immediately without doing channel synchronization.
+ *
+ * This function does not wait for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event will be received when an
+ * 802.11 ack has been received for the sent action frame.
+ */
+s32
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	wl_af_params_t *af_params, s32 bssidx)
+{
+	s32 ret = BCME_OK;
+	s32 evt_ret = BCME_OK;
+	s32 timeout = 0;
+	wl_eventmsg_buf_t buf;
+
+
+	CFGP2P_INFO(("\n"));
+	CFGP2P_INFO(("channel : %u , dwell time : %u\n",
+	    af_params->channel, af_params->dwell_time));
+
+	wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+	wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
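+	/* Enable both TX-completion events in the firmware before sending the frame */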
+	bzero(&buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, true);
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, true);
+	if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0)
+		return evt_ret;
+
+	cfg->af_sent_channel  = af_params->channel;
+#ifdef WL_CFG80211_SYNC_GON
+	cfg->af_tx_sent_jiffies = jiffies;
+#endif /* WL_CFG80211_SYNC_GON */
+
+	ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	if (ret < 0) {
+		CFGP2P_ERR((" sending action frame is failed\n"));
+		goto exit;
+	}
+
+	timeout = wait_for_completion_timeout(&cfg->send_af_done,
+		msecs_to_jiffies(af_params->dwell_time + WL_AF_TX_EXTRA_TIME_MAX));
+
+	if (timeout >= 0 && wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+		CFGP2P_INFO(("tx action frame operation is completed\n"));
+		ret = BCME_OK;
+	} else if (ETHER_ISBCAST(&cfg->afx_hdl->tx_dst_addr)) {
+		CFGP2P_INFO(("bcast tx action frame operation is completed\n"));
+		ret = BCME_OK;
+	} else {
+		ret = BCME_ERROR;
+		CFGP2P_INFO(("tx action frame operation is failed\n"));
+	}
+	/* clear status bit for action tx */
+	wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+	wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
+exit:
+	CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
+
+	bzero(&buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, false);
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, false);
+	if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0) {
+		WL_ERR(("TX frame events revert back failed \n"));
+		return evt_ret;
+	}
+
+	return ret;
+}
+
+/* Generate our P2P Device Address and P2P Interface Address from our primary
+ * MAC address.
+ */
+void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr,
+            struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr)
+{
+	memset(out_dev_addr, 0, sizeof(*out_dev_addr));
+	memset(out_int_addr, 0, sizeof(*out_int_addr));
+
+	/* Generate the P2P Device Address.  This consists of the device's
+	 * primary MAC address with the locally administered bit set.
+	 */
+	memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr));
+	out_dev_addr->octet[0] |= 0x02;
+
+	/* Generate the P2P Interface Address.  If the discovery and connection
+	 * BSSCFGs need to simultaneously co-exist, then this address must be
+	 * different from the P2P Device Address.
+	 */
+	memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr));
+	out_int_addr->octet[4] ^= 0x80;
+
+}
+
+/* P2P IF Address change to Virtual Interface MAC Address */
+void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id)
+{
+	wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf;
+	u16 len = ie->len;
+	u8 *subel;
+	u8 subelt_id;
+	u16 subelt_len;
+	CFGP2P_DBG((" Enter\n"));
+
+	/* Point subel to the P2P IE's subelt field.
+	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+	 */
+	subel = ie->subelts;
+	len -= 4;	/* exclude OUI + OUI_TYPE */
+
+	while (len >= 3) {
+		/* attribute id */
+		subelt_id = *subel;
+		subel += 1;
+		len -= 1;
+
+		/* 2-byte little endian */
+		subelt_len = *subel++;
+		subelt_len |= *subel++ << 8;
+
+		len -= 2;
+		len -= subelt_len;	/* for the remaining subelt fields */
+
+		if (subelt_id == element_id) {
+			if (subelt_id == P2P_SEID_INTINTADDR) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device ID ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_INFO) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device INFO ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_GROUP_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("GROUP ID ATTR FOUND\n"));
+			}
+			return;
+		} else {
+			CFGP2P_DBG(("OTHER id : %d\n", subelt_id));
+		}
+		subel += subelt_len;
+	}
+}
+/*
+ * Check if a BSS is up.
+ * This is a common implementation called by most OSL implementations of
+ * p2posl_bss_isup().  DO NOT call this function directly from the
+ * common code -- call p2posl_bss_isup() instead to allow the OSL to
+ * override the common implementation if necessary.
+ */
+bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx)
+{
+	s32 result, val;
+	bool isup = false;
+	s8 getbuf[64];
+
+	/* Check if the BSS is up */
+	*(int*)getbuf = -1;
+	result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+		sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
+	if (result != 0) {
+		CFGP2P_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result));
+		CFGP2P_ERR(("NOTE: this ioctl error is normal "
+					"when the BSS has not been created yet.\n"));
+	} else {
+		val = *(int*)getbuf;
+		val = dtoh32(val);
+		CFGP2P_INFO(("---cfg bss -C %d   ==> %d\n", bsscfg_idx, val));
+		isup = (val ? TRUE : FALSE);
+	}
+	return isup;
+}
+
+
+/* Bring up or down a BSS */
+s32
+wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up)
+{
+	s32 ret = BCME_OK;
+	s32 val = up ? 1 : 0;
+
+	struct {
+		s32 cfg;
+		s32 val;
+	} bss_setbuf;
+
+	bss_setbuf.cfg = htod32(bsscfg_idx);
+	bss_setbuf.val = htod32(val);
+	CFGP2P_INFO(("---cfg bss -C %d %s\n", bsscfg_idx, up ? "up" : "down"));
+	ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (ret != 0) {
+		CFGP2P_ERR(("'bss %d' failed with %d\n", up, ret));
+	}
+
+	return ret;
+}
+
+/* Check if 'p2p' is supported in the driver */
+s32
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	s32 ret = BCME_OK;
+	s32 p2p_supported = 0;
+	ret = wldev_iovar_getint(ndev, "p2p",
+	               &p2p_supported);
+	if (ret < 0) {
+		if (ret == BCME_UNSUPPORTED) {
+			CFGP2P_INFO(("p2p is unsupported\n"));
+			return 0;
+		} else {
+			CFGP2P_ERR(("cfg p2p error %d\n", ret));
+			return ret;
+		}
+	}
+	if (p2p_supported == 1) {
+		CFGP2P_INFO(("p2p is supported\n"));
+	} else {
+		CFGP2P_INFO(("p2p is unsupported\n"));
+		p2p_supported = 0;
+	}
+	return p2p_supported;
+}
+/* Cleanup P2P resources */
+s32
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev = NULL;
+	struct wireless_dev *wdev = NULL;
+	s32 i = 0, index = -1;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+	wdev = bcmcfg_to_p2p_wdev(cfg);
+#elif defined(WL_ENABLE_P2P_IF)
+	ndev = cfg->p2p_net ? cfg->p2p_net : bcmcfg_to_prmry_ndev(cfg);
+	wdev = ndev_to_wdev(ndev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	wl_cfgp2p_cancel_listen(cfg, ndev, wdev, TRUE);
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+			index = wl_to_p2p_bss_bssidx(cfg, i);
+			if (index != WL_INVALID)
+				wl_cfgp2p_clear_management_ie(cfg, index);
+	}
+	wl_cfgp2p_deinit_priv(cfg);
+	return 0;
+}
+s32
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	s32 ret = -1;
+	int count, start, duration;
+	wl_p2p_sched_t dongle_noa;
+
+	CFGP2P_DBG((" Enter\n"));
+
+	memset(&dongle_noa, 0, sizeof(dongle_noa));
+
+	if (cfg->p2p && cfg->p2p->vif_created) {
+
+		cfg->p2p->noa.desc[0].start = 0;
+
+		sscanf(buf, "%10d %10d %10d", &count, &start, &duration);
+		CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n",
+			count, start, duration));
+		if (count != -1)
+			cfg->p2p->noa.desc[0].count = count;
+
+		/* supplicant gives interval as start */
+		if (start != -1)
+			cfg->p2p->noa.desc[0].interval = start;
+
+		if (duration != -1)
+			cfg->p2p->noa.desc[0].duration = duration;
+
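+		/* Translate the requested NoA into a dongle schedule: a count of
+		 * 1..254 requests a one-shot absence, 0 cancels the current schedule,
+		 * and 255 sets up a continuous NoA interval.
+		 */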
+		if (cfg->p2p->noa.desc[0].count != 255 && cfg->p2p->noa.desc[0].count != 0) {
+			cfg->p2p->noa.desc[0].start = 200;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS;
+			dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF;
+			dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS;
+		}
+		else if (cfg->p2p->noa.desc[0].count == 0) {
+			cfg->p2p->noa.desc[0].start = 0;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+			dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+			dongle_noa.action = WL_P2P_SCHED_ACTION_RESET;
+		}
+		else {
+			/* Continuous NoA interval. */
+			dongle_noa.action = WL_P2P_SCHED_ACTION_NONE;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+			if ((cfg->p2p->noa.desc[0].interval == 102) ||
+				(cfg->p2p->noa.desc[0].interval == 100)) {
+				cfg->p2p->noa.desc[0].start = 100 -
+					cfg->p2p->noa.desc[0].duration;
+				dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT;
+			}
+			else {
+				dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+			}
+		}
+		/* Put the noa descriptor in dongle format for dongle */
+		dongle_noa.desc[0].count = htod32(cfg->p2p->noa.desc[0].count);
+		if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) {
+			dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start);
+			dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration);
+		}
+		else {
+			dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start*1000);
+			dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration*1000);
+		}
+		dongle_noa.desc[0].interval = htod32(cfg->p2p->noa.desc[0].interval*1000);
+
+		ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION),
+			"p2p_noa", &dongle_noa, sizeof(dongle_noa), cfg->ioctl_buf,
+			WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+		if (ret < 0) {
+			CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret));
+		}
+	}
+	else {
+		CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n"));
+	}
+	return ret;
+}
+s32
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int buf_len)
+{
+
+	wifi_p2p_noa_desc_t *noa_desc;
+	int len = 0, i;
+	char _buf[200];
+
+	CFGP2P_DBG((" Enter\n"));
+	buf[0] = '\0';
+	if (cfg->p2p && cfg->p2p->vif_created) {
+		if (cfg->p2p->noa.desc[0].count || cfg->p2p->ops.ops) {
+			_buf[0] = 1; /* noa index */
+			_buf[1] = (cfg->p2p->ops.ops ? 0x80: 0) |
+				(cfg->p2p->ops.ctw & 0x7f); /* ops + ctw */
+			len += 2;
+			if (cfg->p2p->noa.desc[0].count) {
+				noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len];
+				noa_desc->cnt_type = cfg->p2p->noa.desc[0].count;
+				noa_desc->duration = cfg->p2p->noa.desc[0].duration;
+				noa_desc->interval = cfg->p2p->noa.desc[0].interval;
+				noa_desc->start = cfg->p2p->noa.desc[0].start;
+				len += sizeof(wifi_p2p_noa_desc_t);
+			}
+			if (buf_len <= len * 2) {
+				CFGP2P_ERR(("ERROR: buf_len %d is not enough for "
+					"returning noa in string format\n", buf_len));
+				return -1;
+			}
+			/* We have to convert the buffer data into ASCII strings */
+			for (i = 0; i < len; i++) {
+				snprintf(buf, 3, "%02x", _buf[i]);
+				buf += 2;
+			}
+			*buf = '\0';
+		}
+	}
+	else {
+		CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n"));
+		return -1;
+	}
+	return len * 2;
+}
+s32
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	int ps, ctw;
+	int ret = -1;
+	s32 legacy_ps;
+	struct net_device *dev;
+
+	CFGP2P_DBG((" Enter\n"));
+	if (cfg->p2p && cfg->p2p->vif_created) {
+		sscanf(buf, "%10d %10d %10d", &legacy_ps, &ps, &ctw);
+		CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
+		dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+		if (ctw != -1) {
+			cfg->p2p->ops.ctw = ctw;
+			ret = 0;
+		}
+		if (ps != -1) {
+			cfg->p2p->ops.ops = ps;
+			ret = wldev_iovar_setbuf(dev,
+				"p2p_ops", &cfg->p2p->ops, sizeof(cfg->p2p->ops),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+			if (ret < 0) {
+				CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret));
+			}
+		}
+
+		if ((legacy_ps != -1) && ((legacy_ps == PM_MAX) || (legacy_ps == PM_OFF))) {
+			ret = wldev_ioctl(dev,
+				WLC_SET_PM, &legacy_ps, sizeof(legacy_ps), true);
+			if (unlikely(ret))
+				CFGP2P_ERR(("error (%d)\n", ret));
+			wl_cfg80211_update_power_mode(dev);
+		}
+		else
+			CFGP2P_ERR(("illegal setting\n"));
+	}
+	else {
+		CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n"));
+		ret = -1;
+	}
+	return ret;
+}
+
+u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id)
+{
+	wifi_p2p_ie_t *ie = NULL;
+	u16 len = 0;
+	u8 *subel;
+	u8 subelt_id;
+	u16 subelt_len;
+
+	if (!buf) {
+		WL_ERR(("P2P IE not present"));
+		return 0;
+	}
+
+	ie = (wifi_p2p_ie_t*) buf;
+	len = ie->len;
+
+	/* Point subel to the P2P IE's subelt field.
+	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+	 */
+	subel = ie->subelts;
+	len -= 4;	/* exclude OUI + OUI_TYPE */
+
+	while (len >= 3) {
+		/* attribute id */
+		subelt_id = *subel;
+		subel += 1;
+		len -= 1;
+
+		/* 2-byte little endian */
+		subelt_len = *subel++;
+		subelt_len |= *subel++ << 8;
+
+		len -= 2;
+		len -= subelt_len;	/* for the remaining subelt fields */
+
+		if (subelt_id == element_id) {
+			/* This will point to start of subelement attrib after
+			 * attribute id & len
+			 */
+			return subel;
+		}
+
+		/* Go to next subelement */
+		subel += subelt_len;
+	}
+
+	/* Not Found */
+	return NULL;
+}
+
+#define P2P_GROUP_CAPAB_GO_BIT	0x01
+
+u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib)
+{
+	bcm_tlv_t *ie;
+	u8* pAttrib;
+
+	CFGP2P_INFO(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len));
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len) == TRUE) {
+			/* Have the P2p ie. Now check for attribute */
+			if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(parse, attrib)) != NULL) {
+				CFGP2P_INFO(("P2P attribute %d was found at parse %p",
+					attrib, parse));
+				return pAttrib;
+			}
+			else {
+				parse += (ie->len + TLV_HDR_LEN);
+				len -= (ie->len + TLV_HDR_LEN);
+				CFGP2P_INFO(("P2P Attribute %d not found Moving parse"
+					" to %p len to %d", attrib, parse, len));
+			}
+		}
+		else {
+			/* It was not p2p IE. parse will get updated automatically to next TLV */
+			CFGP2P_INFO(("It was NOT a P2P IE; parse %p len %d", parse, len));
+		}
+	}
+	CFGP2P_ERR(("P2P attribute %d was NOT found", attrib));
+	return NULL;
+}
+
+u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length)
+{
+	u8 *capability = NULL;
+	bool p2p_go	= 0;
+	u8 *ptr = NULL;
+
+	if ((capability = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+	bi->ie_length, P2P_SEID_P2P_INFO)) == NULL) {
+		WL_ERR(("P2P Capability attribute not found"));
+		return NULL;
+	}
+
+	/* Check Group capability for Group Owner bit */
+	p2p_go = capability[1] & P2P_GROUP_CAPAB_GO_BIT;
+	if (!p2p_go) {
+		return bi->BSSID.octet;
+	}
+
+	/* In probe responses, DEVICE INFO attribute will be present */
+	if (!(ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+	bi->ie_length,  P2P_SEID_DEV_INFO))) {
+		/* If DEVICE_INFO is not found, this might be a beacon frame.
+		 * check for DEVICE_ID in the beacon frame.
+		 */
+		ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+		bi->ie_length,  P2P_SEID_DEV_ID);
+	}
+
+	if (!ptr)
+		WL_ERR((" Neither DEVICE_ID nor DEVICE_INFO attribute present in P2P IE "));
+
+	return ptr;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+wl_cfgp2p_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	snprintf(info->driver, sizeof(info->driver), "p2p");
+	snprintf(info->version, sizeof(info->version), "%lu", (unsigned long)(0));
+}
+
+struct ethtool_ops cfgp2p_ethtool_ops = {
+	.get_drvinfo = wl_cfgp2p_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg)
+{
+	int ret = 0;
+	struct net_device* net = NULL;
+#ifndef	WL_NEWCFG_PRIVCMD_SUPPORT
+	struct wireless_dev *wdev = NULL;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };
+
+	if (cfg->p2p_net) {
+		CFGP2P_ERR(("p2p_net defined already.\n"));
+		return -EINVAL;
+	}
+
+	/* Allocate etherdev, including space for private structure */
+	if (!(net = alloc_etherdev(sizeof(struct bcm_cfg80211 *)))) {
+		CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+#ifndef	WL_NEWCFG_PRIVCMD_SUPPORT
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		free_netdev(net);
+		return -ENOMEM;
+	}
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+	strncpy(net->name, "p2p%d", sizeof(net->name) - 1);
+	net->name[IFNAMSIZ - 1] = '\0';
+
+	/* Copy the reference to bcm_cfg80211 */
+	memcpy((void *)netdev_priv(net), &cfg, sizeof(struct bcm_cfg80211 *));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->do_ioctl = wl_cfgp2p_do_ioctl;
+	net->hard_start_xmit = wl_cfgp2p_start_xmit;
+	net->open = wl_cfgp2p_if_open;
+	net->stop = wl_cfgp2p_if_stop;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &wl_cfgp2p_if_ops;
+#endif
+
+	/* Register with a dummy MAC addr */
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+#ifndef	WL_NEWCFG_PRIVCMD_SUPPORT
+	wdev->wiphy = cfg->wdev->wiphy;
+
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+
+	net->ieee80211_ptr = wdev;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &cfgp2p_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#ifndef	WL_NEWCFG_PRIVCMD_SUPPORT
+	SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy));
+
+	/* Associate p2p0 network interface with new wdev */
+	wdev->netdev = net;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+	ret = register_netdev(net);
+	if (ret) {
+		CFGP2P_ERR((" register_netdev failed (%d)\n", ret));
+		free_netdev(net);
+#ifndef	WL_NEWCFG_PRIVCMD_SUPPORT
+		kfree(wdev);
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+		return -ENODEV;
+	}
+
+	/* Store the p2p net ptr for further reference. Note that iflist won't have
+	 * this entry, as the corresponding firmware interface is a "hidden" interface.
+	 */
+#ifndef	WL_NEWCFG_PRIVCMD_SUPPORT
+	cfg->p2p_wdev = wdev;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+	cfg->p2p_net = net;
+
+	printf("%s: P2P Interface Registered\n", net->name);
+
+	return ret;
+}
+
+s32
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg)
+{
+
+	if (!cfg || !cfg->p2p_net) {
+		CFGP2P_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	}
+
+	unregister_netdev(cfg->p2p_net);
+	free_netdev(cfg->p2p_net);
+
+	return 0;
+}
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+
+	if (skb)
+	{
+		CFGP2P_DBG(("(%s) is not used for data operations. Dropping the packet.\n",
+			ndev->name));
+		dev_kfree_skb_any(skb);
+	}
+
+	return 0;
+}
+
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	/* There is no ifidx corresponding to p2p0 in our firmware, so we should
+	 * not handle any IOCTL cmds on p2p0 other than Android private commands.
+	 * For Android priv cmd handling, map the request to the primary I/F.
+	 */
+	if (cmd == SIOCDEVPRIVATE+1) {
+		ret = wl_android_priv_cmd(ndev, ifr, cmd);
+
+	} else {
+		CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring. \n",
+		__FUNCTION__, cmd));
+		return -1;
+	}
+
+	return ret;
+}
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT  */
+
+#if defined(WL_ENABLE_P2P_IF)
+static int wl_cfgp2p_if_open(struct net_device *net)
+{
+	struct wireless_dev *wdev = net->ieee80211_ptr;
+
+	if (!wdev || !wl_cfg80211_is_p2p_active())
+		return -EINVAL;
+	WL_TRACE(("Enter\n"));
+#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
+	/* If suppose F/W download (ifconfig wlan0 up) hasn't been done by now,
+	 * do it here. This will make sure that in concurrent mode, supplicant
+	 * is not dependent on a particular order of interface initialization.
+	 * i.e you may give wpa_supp -iwlan0 -N -ip2p0 or wpa_supp -ip2p0 -N
+	 * -iwlan0.
+	 */
+	wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT)
+		| BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
+	wl_cfg80211_do_driver_init(net);
+
+	return 0;
+}
+
+static int wl_cfgp2p_if_stop(struct net_device *net)
+{
+	struct wireless_dev *wdev = net->ieee80211_ptr;
+
+	if (!wdev)
+		return -EINVAL;
+
+	wl_cfg80211_scan_stop(net);
+
+#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
+	wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes)
+					& (~(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+					BIT(NL80211_IFTYPE_P2P_GO)));
+#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
+	return 0;
+}
+
+bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops)
+{
+	return (if_ops == &wl_cfgp2p_if_ops);
+}
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+struct wireless_dev *
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev = NULL;
+	struct ether_addr primary_mac;
+
+	if (!cfg)
+		return ERR_PTR(-EINVAL);
+
+	WL_TRACE(("Enter\n"));
+
+	if (cfg->p2p_wdev) {
+		CFGP2P_ERR(("p2p_wdev defined already.\n"));
+#if (defined(CUSTOMER_HW10) && defined(CONFIG_ARCH_ODIN))
+		wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+		CFGP2P_ERR(("p2p_wdev deleted.\n"));
+#else
+		return ERR_PTR(-ENFILE);
+#endif /* CUSTOMER_HW10 && CONFIG_ARCH_ODIN */
+	}
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		return ERR_PTR(-ENOMEM);
+	}
+
+	memset(&primary_mac, 0, sizeof(primary_mac));
+	get_primary_mac(cfg, &primary_mac);
+	wl_cfgp2p_generate_bss_mac(&primary_mac,
+		&cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+
+	wdev->wiphy = cfg->wdev->wiphy;
+	wdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
+	memcpy(wdev->address, &cfg->p2p->dev_addr, ETHER_ADDR_LEN);
+
+#if defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+	if (cfg->p2p_net)
+		memcpy(cfg->p2p_net->dev_addr, &cfg->p2p->dev_addr, ETHER_ADDR_LEN);
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
+	/* store p2p wdev ptr for further reference. */
+	cfg->p2p_wdev = wdev;
+
+	CFGP2P_ERR(("P2P interface registered\n"));
+
+	return wdev;
+}
+
+int
+wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	if (!cfg)
+		return -EINVAL;
+
+	WL_TRACE(("Enter\n"));
+
+	ret = wl_cfgp2p_set_firm_p2p(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("Set P2P in firmware failed, ret=%d\n", ret));
+		goto exit;
+	}
+
+	ret = wl_cfgp2p_enable_discovery(cfg, bcmcfg_to_prmry_ndev(cfg), NULL, 0);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P enable discovery failed, ret=%d\n", ret));
+		goto exit;
+	}
+
+	p2p_on(cfg) = true;
+
+	CFGP2P_DBG(("P2P interface started\n"));
+
+exit:
+	return ret;
+}
+
+void
+wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	if (!cfg)
+		return;
+
+	WL_TRACE(("Enter\n"));
+
+	ret = wl_cfg80211_scan_stop(wdev);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
+	}
+
+	if (!cfg->p2p)
+		return;
+
+	ret = wl_cfgp2p_disable_discovery(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P disable discovery failed, ret=%d\n", ret));
+	}
+
+	p2p_on(cfg) = false;
+
+	CFGP2P_DBG(("P2P interface stopped\n"));
+
+	return;
+}
+
+int
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg)
+{
+	bool rollback_lock = false;
+
+	if (!wdev)
+		return -EINVAL;
+
+	WL_TRACE(("Enter\n"));
+
+	if (!rtnl_is_locked()) {
+		rtnl_lock();
+		rollback_lock = true;
+	}
+
+	cfg80211_unregister_wdev(wdev);
+
+	if (rollback_lock)
+		rtnl_unlock();
+
+	kfree(wdev);
+
+	if (cfg)
+		cfg->p2p_wdev = NULL;
+
+	CFGP2P_ERR(("P2P interface unregistered\n"));
+
+	return 0;
+}
+#endif /* WL_CFG80211_P2P_DEV_IF */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
new file mode 100644
index 0000000000000000000000000000000000000000..d35780c41f54e222aec47edcf3f22c8c0f5005f5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
@@ -0,0 +1,398 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_cfgp2p.h 472818 2014-04-25 08:07:56Z $
+ */
+#ifndef _wl_cfgp2p_h_
+#define _wl_cfgp2p_h_
+#include <proto/802.11.h>
+#include <proto/p2p.h>
+
+struct bcm_cfg80211;
+extern u32 wl_dbg_level;
+
+typedef struct wifi_p2p_ie wifi_wfd_ie_t;
+/* Enumeration of the usages of the BSSCFGs used by the P2P library. Do not
+ * confuse this with a bsscfg index. This value is an index into the bss[]
+ * array of p2p_bss structures, each of which carries its own bsscfg index.
+ */
+typedef enum {
+	P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+	P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+	P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+	P2PAPI_BSSCFG_MAX
+} p2p_bsscfg_type_t;
+
+typedef enum {
+	P2P_SCAN_PURPOSE_MIN,
+	P2P_SCAN_SOCIAL_CHANNEL, /* scan for social channel */
+	P2P_SCAN_AFX_PEER_NORMAL, /* scan for action frame search */
+	P2P_SCAN_AFX_PEER_REDUCED, /* scan for action frame search with short time */
+	P2P_SCAN_DURING_CONNECTED, /* scan during connected status */
+	P2P_SCAN_CONNECT_TRY, /* scan for connecting */
+	P2P_SCAN_NORMAL, /* scan during not-connected status */
+	P2P_SCAN_PURPOSE_MAX
+} p2p_scan_purpose_t;
+
+/* vendor ies max buffer length for probe response or beacon */
+#define VNDR_IES_MAX_BUF_LEN	1400
+/* normal vendor ies buffer length */
+#define VNDR_IES_BUF_LEN 		512
+
+/* Structure to hold all saved P2P and WPS IEs for a BSSCFG */
+struct p2p_saved_ie {
+	u8  p2p_probe_req_ie[VNDR_IES_BUF_LEN];
+	u8  p2p_probe_res_ie[VNDR_IES_MAX_BUF_LEN];
+	u8  p2p_assoc_req_ie[VNDR_IES_BUF_LEN];
+	u8  p2p_assoc_res_ie[VNDR_IES_BUF_LEN];
+	u8  p2p_beacon_ie[VNDR_IES_MAX_BUF_LEN];
+	u32 p2p_probe_req_ie_len;
+	u32 p2p_probe_res_ie_len;
+	u32 p2p_assoc_req_ie_len;
+	u32 p2p_assoc_res_ie_len;
+	u32 p2p_beacon_ie_len;
+};
+
+struct p2p_bss {
+	u32 bssidx;
+	struct net_device *dev;
+	struct p2p_saved_ie saved_ie;
+	void *private_data;
+};
+
+struct p2p_info {
+	bool on;    /* p2p on/off switch */
+	bool scan;
+	int16 search_state;
+	bool vif_created;
+	s8 vir_ifname[IFNAMSIZ];
+	unsigned long status;
+	struct ether_addr dev_addr;
+	struct ether_addr int_addr;
+	struct p2p_bss bss[P2PAPI_BSSCFG_MAX];
+	struct timer_list listen_timer;
+	wl_p2p_sched_t noa;
+	wl_p2p_ops_t ops;
+	wlc_ssid_t ssid;
+};
+
+#define MAX_VNDR_IE_NUMBER	5
+
+struct parsed_vndr_ie_info {
+	char *ie_ptr;
+	u32 ie_len;	/* total length including id & length field */
+	vndr_ie_t vndrie;
+};
+
+struct parsed_vndr_ies {
+	u32 count;
+	struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+};
+
+/* dongle status */
+enum wl_cfgp2p_status {
+	WLP2P_STATUS_DISCOVERY_ON = 0,
+	WLP2P_STATUS_SEARCH_ENABLED,
+	WLP2P_STATUS_IF_ADDING,
+	WLP2P_STATUS_IF_DELETING,
+	WLP2P_STATUS_IF_CHANGING,
+	WLP2P_STATUS_IF_CHANGED,
+	WLP2P_STATUS_LISTEN_EXPIRED,
+	WLP2P_STATUS_ACTION_TX_COMPLETED,
+	WLP2P_STATUS_ACTION_TX_NOACK,
+	WLP2P_STATUS_SCANNING,
+	WLP2P_STATUS_GO_NEG_PHASE,
+	WLP2P_STATUS_DISC_IN_PROGRESS
+};
+
+
+#define wl_to_p2p_bss_ndev(cfg, type)		((cfg)->p2p->bss[type].dev)
+#define wl_to_p2p_bss_bssidx(cfg, type)		((cfg)->p2p->bss[type].bssidx)
+#define wl_to_p2p_bss_saved_ie(cfg, type)	((cfg)->p2p->bss[type].saved_ie)
+#define wl_to_p2p_bss_private(cfg, type)		((cfg)->p2p->bss[type].private_data)
+#define wl_to_p2p_bss(cfg, type)			((cfg)->p2p->bss[type])
+#define wl_get_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		test_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_set_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		set_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_clr_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		clear_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_chg_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+	change_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define p2p_on(cfg) ((cfg)->p2p->on)
+#define p2p_scan(cfg) ((cfg)->p2p->scan)
+#define p2p_is_on(cfg) ((cfg)->p2p && (cfg)->p2p->on)
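+
+/* Usage sketch: the status helpers wrap atomic bitops on cfg->p2p->status and
+ * token-paste the stat argument onto WLP2P_STATUS_, so only the enum names
+ * above (minus that prefix) are valid, e.g.
+ *
+ *	wl_set_p2p_status(cfg, DISCOVERY_ON);
+ *	if (wl_get_p2p_status(cfg, DISCOVERY_ON))
+ *		wl_clr_p2p_status(cfg, DISCOVERY_ON);
+ */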
+
+/* dword align allocation */
+#define WLC_IOCTL_MAXLEN 8192
+
+#define CFGP2P_ERROR_TEXT		"CFGP2P-ERROR) "
+
+
+#define CFGP2P_ERR(args)									\
+	do {										\
+		if (wl_dbg_level & WL_DBG_ERR) {				\
+			printf(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__);	\
+			printf args;						\
+		}									\
+	} while (0)
+#define	CFGP2P_INFO(args)									\
+	do {										\
+		if (wl_dbg_level & WL_DBG_INFO) {				\
+			printf(KERN_INFO "CFGP2P-INFO) %s : ", __func__);	\
+			printf args;						\
+		}									\
+	} while (0)
+#define	CFGP2P_DBG(args)								\
+	do {									\
+		if (wl_dbg_level & WL_DBG_DBG) {			\
+			printf(KERN_DEBUG "CFGP2P-DEBUG) %s :", __func__);	\
+			printf args;							\
+		}									\
+	} while (0)
+
+#define	CFGP2P_ACTION(args)								\
+	do {									\
+		if (wl_dbg_level & WL_DBG_P2P_ACTION) {			\
+			printf(KERN_DEBUG "CFGP2P-ACTION) %s :", __func__);	\
+			printf args;							\
+		}									\
+	} while (0)
+#define INIT_TIMER(timer, func, duration, extra_delay)	\
+	do {				   \
+		init_timer(timer); \
+		timer->function = func; \
+		timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
+		timer->data = (unsigned long) cfg; \
+		add_timer(timer); \
+	} while (0);
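+
+/* Note: INIT_TIMER stores a local variable named 'cfg' from the calling scope
+ * in timer->data, so it can only be used where such a variable is in scope.
+ * A sketched call site (the duration value is illustrative):
+ *
+ *	struct timer_list *timer = &cfg->p2p->listen_timer;
+ *	INIT_TIMER(timer, wl_cfgp2p_listen_expired, duration_ms, 0);
+ */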
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && !defined(WL_CFG80211_P2P_DEV_IF)
+#define WL_CFG80211_P2P_DEV_IF
+
+#ifdef WL_ENABLE_P2P_IF
+#undef WL_ENABLE_P2P_IF
+#endif
+
+#ifdef WL_SUPPORT_BACKPORTED_KPATCHES
+#undef WL_SUPPORT_BACKPORTED_KPATCHES
+#endif
+#else
+#ifdef WLP2P
+#ifndef WL_ENABLE_P2P_IF
+/* Enable the P2P network interface if P2P support is enabled */
+#define WL_ENABLE_P2P_IF
+#endif /* WL_ENABLE_P2P_IF */
+#endif /* WLP2P */
+#endif /* (LINUX_VERSION >= VERSION(3, 8, 0)) */
+
+#ifndef WL_CFG80211_P2P_DEV_IF
+#ifdef WL_NEWCFG_PRIVCMD_SUPPORT
+#undef WL_NEWCFG_PRIVCMD_SUPPORT
+#endif
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_ENABLE_P2P_IF) && (defined(WL_CFG80211_P2P_DEV_IF) || \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)))
+#error Disable 'WL_ENABLE_P2P_IF', if 'WL_CFG80211_P2P_DEV_IF' is enabled \
+	or kernel version is 3.8.0 or above
+#endif /* WL_ENABLE_P2P_IF && (WL_CFG80211_P2P_DEV_IF || (LINUX_VERSION >= VERSION(3, 8, 0))) */
+
+#if !defined(WLP2P) && (defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF))
+#error WLP2P not defined
+#endif /* !WLP2P && (WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF) */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define bcm_struct_cfgdev	struct wireless_dev
+#else
+#define bcm_struct_cfgdev	struct net_device
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+extern void
+wl_cfgp2p_listen_expired(unsigned long data);
+extern bool
+wl_cfgp2p_is_pub_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_gas_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len);
+extern void
+wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel);
+extern s32
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg);
+extern void
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode,
+            u32 channel, u16 listen_ms, int bssidx);
+extern s32
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec);
+extern s32
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, chanspec_t chspec);
+
+extern s32
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index);
+
+extern s32
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev, const u8 *ie,
+	u32 ie_len);
+extern s32
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active, u32 num_chans,
+	u16 *channels,
+	s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
+	p2p_scan_purpose_t p2p_scan_purpose);
+
+extern s32
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len);
+
+extern wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len);
+
+extern wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len);
+extern s32
+wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
+            s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len);
+extern s32
+wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx);
+
+extern s32
+wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *index);
+extern struct net_device *
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx);
+extern s32
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type);
+
+
+extern s32
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms);
+
+extern s32
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable);
+
+extern s32
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+
+extern s32
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	wl_af_params_t *af_params, s32 bssidx);
+
+extern void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, struct ether_addr *out_dev_addr,
+            struct ether_addr *out_int_addr);
+
+extern void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id);
+extern bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx);
+
+extern s32
+wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up);
+
+
+extern s32
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+
+extern s32
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg);
+
+extern s32
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id);
+
+extern u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib);
+
+extern u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length);
+
+extern s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg);
+
+extern s32
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg);
+
+extern bool
+wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops);
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+extern struct wireless_dev *
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg);
+
+extern int
+wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+extern void
+wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+extern int
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+/* WiFi Direct */
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
+					(channel == SOCIAL_CHAN_2) || \
+					(channel == SOCIAL_CHAN_3))
+#define SOCIAL_CHAN_CNT 3
+#define AF_PEER_SEARCH_CNT 2
+#define WL_P2P_WILDCARD_SSID "DIRECT-"
+#define WL_P2P_WILDCARD_SSID_LEN 7
+#define WL_P2P_INTERFACE_PREFIX "p2p"
+#define WL_P2P_TEMP_CHAN 11
+
+/* If the provision discovery is for JOIN operations,
+ * or the device discoverability frame is destined to the GO,
+ * then we need not do an internal scan to find the GO.
+ */
+#define IS_ACTPUB_WITHOUT_GROUP_ID(p2p_ie, len) \
+	(wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_GROUP_ID) == NULL)
+
+#define IS_GAS_REQ(frame, len) (wl_cfgp2p_is_gas_action(frame, len) && \
+					((frame->action == P2PSD_ACTION_ID_GAS_IREQ) || \
+					(frame->action == P2PSD_ACTION_ID_GAS_CREQ)))
+
+#define IS_P2P_PUB_ACT_RSP_SUBTYPE(subtype) ((subtype == P2P_PAF_GON_RSP) || \
+							((subtype == P2P_PAF_GON_CONF) || \
+							(subtype == P2P_PAF_INVITE_RSP) || \
+							(subtype == P2P_PAF_PROVDIS_RSP)))
+#define IS_P2P_SOCIAL(ch) ((ch == SOCIAL_CHAN_1) || (ch == SOCIAL_CHAN_2) || (ch == SOCIAL_CHAN_3))
+#define IS_P2P_SSID(ssid, len) (!memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) && \
+					(len == WL_P2P_WILDCARD_SSID_LEN))
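+
+/* Sketched example of the helper above: a wildcard P2P SSID is exactly the
+ * 7-byte prefix "DIRECT-", so
+ *
+ *	IS_P2P_SSID(ssid->SSID, ssid->SSID_len)
+ *
+ * holds only for that exact prefix and length; longer per-group SSIDs such as
+ * "DIRECT-xy-name" fail the length check.
+ */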
+#endif				/* _wl_cfgp2p_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgvendor.h b/drivers/net/wireless/bcmdhd/wl_cfgvendor.h
new file mode 100644
index 0000000000000000000000000000000000000000..a98c2c2b1f2a4ff93743d35fd214a2721bc03565
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgvendor.h
@@ -0,0 +1,37 @@
+/*
+ * Linux cfg80211 Vendor Extension Code
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_cfgvendor.h 455257 2014-02-20 08:10:24Z $
+ */
+
+/*
+ * New vendor interface addition to nl80211/cfg80211 to allow vendors
+ * to implement proprietary features over the cfg80211 stack.
+ */
+
+#ifndef _wl_cfgvendor_h_
+#define _wl_cfgvendor_h_
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) && !defined(VENDOR_EXT_SUPPORT)
+#define VENDOR_EXT_SUPPORT
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0) && !VENDOR_EXT_SUPPORT */
+
+enum wl_vendor_event {
+	BRCM_VENDOR_EVENT_UNSPEC,
+	BRCM_VENDOR_EVENT_PRIV_STR
+};
+
+/* Capture the BRCM_VENDOR_SUBCMD_PRIV_STRINGS* here */
+#define BRCM_VENDOR_SCMD_CAPA	"cap"
+
+#ifdef VENDOR_EXT_SUPPORT
+extern int cfgvendor_attach(struct wiphy *wiphy);
+extern int cfgvendor_detach(struct wiphy *wiphy);
+#else
+static INLINE int cfgvendor_attach(struct wiphy *wiphy) { return 0; }
+static INLINE int cfgvendor_detach(struct wiphy *wiphy) { return 0; }
+#endif /*  VENDOR_EXT_SUPPORT */
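+
+/* With VENDOR_EXT_SUPPORT undefined, the attach/detach calls above compile to
+ * inline no-ops, so callers may invoke them unconditionally, e.g. (sketch):
+ *
+ *	if (cfgvendor_attach(wiphy) != 0)
+ *		WL_ERR(("vendor extension attach failed\n"));
+ */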
+
+#endif /* _wl_cfgvendor_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_dbg.h b/drivers/net/wireless/bcmdhd/wl_dbg.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e36495ff469e6fe6cc5fb36c1b230e6a64f21c8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_dbg.h
@@ -0,0 +1,392 @@
+/*
+ * Minimal debug/trace/assert driver definitions for
+ * Broadcom 802.11 Networking Adapter.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_dbg.h 472390 2014-04-23 23:32:01Z $
+ */
+
+
+#ifndef _wl_dbg_h_
+#define _wl_dbg_h_
+
+/* wl_msg_level is a bit vector with defs in wlioctl.h */
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+
+#define WL_TIMESTAMP()
+
+#if 0 && (VERSION_MAJOR > 9)
+extern int osl_printf(const char *fmt, ...);
+#include <IOKit/apple80211/IO8Log.h>
+#define WL_PRINT(args)		do { osl_printf args; } while (0)
+#define RELEASE_PRINT(args)	do { WL_PRINT(args); IO8Log args; } while (0)
+#else
+#define WL_PRINT(args)		do { WL_TIMESTAMP(); printf args; } while (0)
+#endif 
+
+#if defined(EVENT_LOG_COMPILE) && defined(WLMSG_SRSCAN)
+#define _WL_SRSCAN(fmt, ...)	EVENT_LOG(EVENT_LOG_TAG_SRSCAN, fmt, ##__VA_ARGS__)
+#define WL_SRSCAN(args)		_WL_SRSCAN args
+#else
+#define WL_SRSCAN(args)
+#endif
+
+#if defined(BCMCONDITIONAL_LOGGING)
+
+/* Ideally this should be some include file that vendors can include to conditionalize logging */
+
+/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when
+ * BCMDBG is defined.
+ */
+#define DBGONLY(x)
+
+/* To disable a message completely ... until you need it again */
+#define WL_NONE(args)
+#define WL_ERROR(args)		do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args);} while (0)
+#define WL_TRACE(args)
+#define WL_PRHDRS_MSG(args)
+#define WL_PRHDRS(i, p, f, t, r, l)
+#define WL_PRPKT(m, b, n)
+#define WL_INFORM(args)
+#define WL_TMP(args)
+#define WL_OID(args)
+#define WL_RATE(args)		do {if (wl_msg_level & WL_RATE_VAL) WL_PRINT(args);} while (0)
+#define WL_ASSOC(args)		do {if (wl_msg_level & WL_ASSOC_VAL) WL_PRINT(args);} while (0)
+#define WL_PRUSR(m, b, n)
+#define WL_PS(args)		do {if (wl_msg_level & WL_PS_VAL) WL_PRINT(args);} while (0)
+
+#define WL_PORT(args)
+#define WL_DUAL(args)
+#define WL_REGULATORY(args)	do {if (wl_msg_level & WL_REGULATORY_VAL) WL_PRINT(args);} while (0)
+
+#define WL_MPC(args)
+#define WL_APSTA(args)
+#define WL_APSTA_BCN(args)
+#define WL_APSTA_TX(args)
+#define WL_APSTA_TSF(args)
+#define WL_APSTA_BSSID(args)
+#define WL_BA(args)
+#define WL_MBSS(args)
+#define WL_PROTO(args)
+
+#define	WL_CAC(args)		do {if (wl_msg_level & WL_CAC_VAL) WL_PRINT(args);} while (0)
+#define WL_AMSDU(args)
+#define WL_AMPDU(args)
+#define WL_FFPLD(args)
+#define WL_MCHAN(args)
+
+#define WL_DFS(args)
+#define WL_WOWL(args)
+#define WL_DPT(args)
+#define WL_ASSOC_OR_DPT(args)
+#define WL_SCAN(args)		do {if (wl_msg_level2 & WL_SCAN_VAL) WL_PRINT(args);} while (0)
+#define WL_COEX(args)
+#define WL_RTDC(w, s, i, j)
+#define WL_RTDC2(w, s, i, j)
+#define WL_CHANINT(args)
+#define WL_BTA(args)
+#define WL_P2P(args)
+#define WL_ITFR(args)
+#define WL_TDLS(args)
+#define WL_MCNX(args)
+#define WL_PROT(args)
+#define WL_PSTA(args)
+#define WL_TRF_MGMT(args)
+#define WL_L2FILTER(args)
+#define WL_MQ(args)
+#define WL_TXBF(args)
+#define WL_P2PO(args)
+#define WL_NET_DETECT(args)
+#define WL_ROAM(args)
+#define WL_WNM(args)
+
+
+#define WL_AMPDU_UPDN(args)
+#define WL_AMPDU_RX(args)
+#define WL_AMPDU_ERR(args)
+#define WL_AMPDU_TX(args)
+#define WL_AMPDU_CTL(args)
+#define WL_AMPDU_HW(args)
+#define WL_AMPDU_HWTXS(args)
+#define WL_AMPDU_HWDBG(args)
+#define WL_AMPDU_STAT(args)
+#define WL_AMPDU_ERR_ON()       0
+#define WL_AMPDU_HW_ON()        0
+#define WL_AMPDU_HWTXS_ON()     0
+
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#define WL_PCIE(args)
+#define WL_CHANLOG(w, s, i, j)
+
+#define WL_ERROR_ON()		(wl_msg_level & WL_ERROR_VAL)
+#define WL_TRACE_ON()		0
+#define WL_PRHDRS_ON()		0
+#define WL_PRPKT_ON()		0
+#define WL_INFORM_ON()		0
+#define WL_TMP_ON()		0
+#define WL_OID_ON()		0
+#define WL_RATE_ON()		(wl_msg_level & WL_RATE_VAL)
+#define WL_ASSOC_ON()		(wl_msg_level & WL_ASSOC_VAL)
+#define WL_PRUSR_ON()		0
+#define WL_PS_ON()		(wl_msg_level & WL_PS_VAL)
+#define WL_PORT_ON()		0
+#define WL_WSEC_ON()		0
+#define WL_WSEC_DUMP_ON()	0
+#define WL_MPC_ON()		0
+#define WL_REGULATORY_ON()	(wl_msg_level & WL_REGULATORY_VAL)
+#define WL_APSTA_ON()		0
+#define WL_DFS_ON()		0
+#define WL_MBSS_ON()		0
+#define WL_CAC_ON()		(wl_msg_level & WL_CAC_VAL)
+#define WL_AMPDU_ON()		0
+#define WL_DPT_ON()		0
+#define WL_WOWL_ON()		0
+#define WL_SCAN_ON()		(wl_msg_level2 & WL_SCAN_VAL)
+#define WL_BTA_ON()		0
+#define WL_P2P_ON()		0
+#define WL_ITFR_ON()		0
+#define WL_MCHAN_ON()		0
+#define WL_TDLS_ON()		0
+#define WL_MCNX_ON()		0
+#define WL_PROT_ON()		0
+#define WL_PSTA_ON()		0
+#define WL_TRF_MGMT_ON()	0
+#define WL_LPC_ON()		0
+#define WL_L2FILTER_ON()	0
+#define WL_TXBF_ON()		0
+#define WL_P2PO_ON()		0
+#define WL_CHANLOG_ON()		0
+#define WL_NET_DETECT_ON()	0
+#define WL_WNM_ON()		0
+#define WL_PCIE_ON()		0
+
+#else /* !BCMCONDITIONAL_LOGGING */
+
+/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when
+ * BCMDBG is defined.
+ */
+#define DBGONLY(x)
+
+/* To disable a message completely ... until you need it again */
+#define WL_NONE(args)
+
+#define	WL_ERROR(args)
+#define	WL_TRACE(args)
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#ifdef WLMSG_PRHDRS
+#define	WL_PRHDRS_MSG(args)		WL_PRINT(args)
+#define WL_PRHDRS(i, p, f, t, r, l)	wlc_print_hdrs(i, p, f, t, r, l)
+#else
+#define	WL_PRHDRS_MSG(args)
+#define	WL_PRHDRS(i, p, f, t, r, l)
+#endif
+#ifdef WLMSG_PRPKT
+#define	WL_PRPKT(m, b, n)	prhex(m, b, n)
+#else
+#define	WL_PRPKT(m, b, n)
+#endif
+#ifdef WLMSG_INFORM
+#define	WL_INFORM(args)		WL_PRINT(args)
+#else
+#define	WL_INFORM(args)
+#endif
+#define	WL_TMP(args)
+#ifdef WLMSG_OID
+#define WL_OID(args)		WL_PRINT(args)
+#else
+#define WL_OID(args)
+#endif
+#define	WL_RATE(args)
+#ifdef WLMSG_ASSOC
+#define	WL_ASSOC(args)		WL_PRINT(args)
+#else
+#define	WL_ASSOC(args)
+#endif
+#define	WL_PRUSR(m, b, n)
+#ifdef WLMSG_PS
+#define WL_PS(args)		WL_PRINT(args)
+#else
+#define WL_PS(args)
+#endif
+#ifdef WLMSG_ROAM
+#define WL_ROAM(args)	WL_PRINT(args)
+#else
+#define WL_ROAM(args)
+#endif
+#define WL_PORT(args)
+#define WL_DUAL(args)
+#define WL_REGULATORY(args)
+
+#ifdef WLMSG_MPC
+#define WL_MPC(args)		WL_PRINT(args)
+#else
+#define WL_MPC(args)
+#endif
+#define WL_APSTA(args)
+#define WL_APSTA_BCN(args)
+#define WL_APSTA_TX(args)
+#define WL_APSTA_TSF(args)
+#define WL_APSTA_BSSID(args)
+#define WL_BA(args)
+#define WL_MBSS(args)
+#define WL_MODE_SWITCH(args)
+#define	WL_PROTO(args)
+
+#define	WL_CAC(args)
+#define WL_AMSDU(args)
+#define WL_AMPDU(args)
+#define WL_FFPLD(args)
+#define WL_MCHAN(args)
+
+/* Define WLMSG_DFS automatically for WLTEST builds */
+
+#ifdef WLMSG_DFS
+#define WL_DFS(args)		do {if (wl_msg_level & WL_DFS_VAL) WL_PRINT(args);} while (0)
+#else /* WLMSG_DFS */
+#define WL_DFS(args)
+#endif /* WLMSG_DFS */
+#define WL_WOWL(args)
+#ifdef WLMSG_SCAN
+#define WL_SCAN(args)		WL_PRINT(args)
+#else
+#define WL_SCAN(args)
+#endif
+#define	WL_COEX(args)
+#define WL_RTDC(w, s, i, j)
+#define WL_RTDC2(w, s, i, j)
+#define WL_CHANINT(args)
+#ifdef WLMSG_BTA
+#define WL_BTA(args)		WL_PRINT(args)
+#else
+#define WL_BTA(args)
+#endif
+#define WL_WMF(args)
+#define WL_P2P(args)
+#define WL_ITFR(args)
+#define WL_TDLS(args)
+#define WL_MCNX(args)
+#define WL_PROT(args)
+#define WL_PSTA(args)
+#define WL_TBTT(args)
+#define WL_TRF_MGMT(args)
+#define WL_L2FILTER(args)
+#define WL_MQ(args)
+#define WL_P2PO(args)
+#define WL_WNM(args)
+#define WL_TXBF(args)
+#define WL_CHANLOG(w, s, i, j)
+#define WL_NET_DETECT(args)
+
+#define WL_ERROR_ON()		0
+#define WL_TRACE_ON()		0
+#ifdef WLMSG_PRHDRS
+#define WL_PRHDRS_ON()		1
+#else
+#define WL_PRHDRS_ON()		0
+#endif
+#ifdef WLMSG_PRPKT
+#define WL_PRPKT_ON()		1
+#else
+#define WL_PRPKT_ON()		0
+#endif
+#ifdef WLMSG_INFORM
+#define WL_INFORM_ON()		1
+#else
+#define WL_INFORM_ON()		0
+#endif
+#ifdef WLMSG_OID
+#define WL_OID_ON()		1
+#else
+#define WL_OID_ON()		0
+#endif
+#define WL_TMP_ON()		0
+#define WL_RATE_ON()		0
+#ifdef WLMSG_ASSOC
+#define WL_ASSOC_ON()		1
+#else
+#define WL_ASSOC_ON()		0
+#endif
+#define WL_PORT_ON()		0
+#ifdef WLMSG_WSEC
+#define WL_WSEC_ON()		1
+#define WL_WSEC_DUMP_ON()	1
+#else
+#define WL_WSEC_ON()		0
+#define WL_WSEC_DUMP_ON()	0
+#endif
+#ifdef WLMSG_MPC
+#define WL_MPC_ON()		1
+#else
+#define WL_MPC_ON()		0
+#endif
+#define WL_REGULATORY_ON()	0
+
+#define WL_APSTA_ON()		0
+#define WL_BA_ON()		0
+#define WL_MBSS_ON()		0
+#define WL_MODE_SWITCH_ON()		0
+#ifdef WLMSG_DFS
+#define WL_DFS_ON()		1
+#else /* WLMSG_DFS */
+#define WL_DFS_ON()		0
+#endif /* WLMSG_DFS */
+#ifdef WLMSG_SCAN
+#define WL_SCAN_ON()            1
+#else
+#define WL_SCAN_ON()            0
+#endif
+#ifdef WLMSG_BTA
+#define WL_BTA_ON()		1
+#else
+#define WL_BTA_ON()		0
+#endif
+#define WL_WMF_ON()		0
+#define WL_P2P_ON()		0
+#define WL_MCHAN_ON()		0
+#define WL_TDLS_ON()		0
+#define WL_MCNX_ON()		0
+#define WL_PROT_ON()		0
+#define WL_TBTT_ON()		0
+#define WL_PWRSEL_ON()		0
+#define WL_L2FILTER_ON()	0
+#define WL_MQ_ON()		0
+#define WL_P2PO_ON()		0
+#define WL_TXBF_ON()            0
+#define WL_CHANLOG_ON()		0
+
+#define WL_AMPDU_UPDN(args)
+#define WL_AMPDU_RX(args)
+#define WL_AMPDU_ERR(args)
+#define WL_AMPDU_TX(args)
+#define WL_AMPDU_CTL(args)
+#define WL_AMPDU_HW(args)
+#define WL_AMPDU_HWTXS(args)
+#define WL_AMPDU_HWDBG(args)
+#define WL_AMPDU_STAT(args)
+#define WL_AMPDU_ERR_ON()       0
+#define WL_AMPDU_HW_ON()        0
+#define WL_AMPDU_HWTXS_ON()     0
+
+#define WL_WNM_ON()		0
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+#ifdef WLMSG_WSEC
+#define WL_WSEC(args)		WL_PRINT(args)
+#define WL_WSEC_DUMP(args)	WL_PRINT(args)
+#else
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#endif
+#define WL_PCIE(args)		do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0)
+#define WL_PCIE_ON()		(wl_msg_level2 & WL_PCIE_VAL)
+#endif /* BCMCONDITIONAL_LOGGING */
+
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+#endif /* _wl_dbg_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/net/wireless/bcmdhd/wl_iw.c
new file mode 100644
index 0000000000000000000000000000000000000000..32bd8e59d270dd97d2975a9ec3eafe846b6bfc13
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.c
@@ -0,0 +1,3841 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_iw.c 467328 2014-04-03 01:23:40Z $
+ */
+
+#if defined(USE_IW)
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+typedef const struct si_pub	si_t;
+#include <wlioctl.h>
+
+
+#include <wl_dbg.h>
+#include <wl_iw.h>
+
+#ifdef BCMWAPI_WPI
+/* These items should eventually go into wireless.h in the Linux system header file dir */
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1	0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4	0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif /* BCMWAPI_WPI */
+
+/* Broadcom extensions to WEXT; Linux upstream has deprecated WEXT */
+#ifndef IW_AUTH_KEY_MGMT_FT_802_1X
+#define IW_AUTH_KEY_MGMT_FT_802_1X 0x04
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_FT_PSK
+#define IW_AUTH_KEY_MGMT_FT_PSK 0x08
+#endif
+
+#ifndef IW_ENC_CAPA_FW_ROAM_ENABLE
+#define IW_ENC_CAPA_FW_ROAM_ENABLE	0x00000020
+#endif
+
+
+/* FC9: wireless.h 2.6.25-14.fc9.i686 is missing these, even though WIRELESS_EXT is set to latest
+ * version 22.
+ */
+#ifndef IW_ENCODE_ALG_PMK
+#define IW_ENCODE_ALG_PMK 4
+#endif
+#ifndef IW_ENC_CAPA_4WAY_HANDSHAKE
+#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010
+#endif
+/* End FC9. */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/rtnetlink.h>
+#endif
+#if defined(SOFTAP)
+struct net_device *ap_net_dev = NULL;
+tsk_ctl_t ap_eth_ctl;  /* apsta AP netdev waiter thread */
+#endif /* SOFTAP */
+
+extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
+	uint32 reason, char* stringBuf, uint buflen);
+
+uint wl_msg_level = WL_ERROR_VAL;
+
+#define MAX_WLIW_IOCTL_LEN 1024
+
+/* IOCTL swapping mode for Big Endian host with Little Endian dongle.  Default to off */
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
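+
+/* Sketch of what these would look like with swapping enabled on a big-endian
+ * host (assuming the usual bcmendian.h little-endian helpers); the identity
+ * macros above are the "swapping off" defaults:
+ *
+ *	#define htod32(i) htol32(i)
+ *	#define dtoh32(i) ltoh32(i)
+ */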
+
+extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#if WIRELESS_EXT < 19
+#define IW_IOCTL_IDX(cmd)	((cmd) - SIOCIWFIRST)
+#define IW_EVENT_IDX(cmd)	((cmd) - IWEVFIRST)
+#endif /* WIRELESS_EXT < 19 */
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define DAEMONIZE(a)	do { \
+		allow_signal(SIGKILL);	\
+		allow_signal(SIGTERM);	\
+	} while (0)
+#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
+#define DAEMONIZE(a) daemonize(a); \
+	allow_signal(SIGKILL); \
+	allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+	do { if (a) \
+		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+	} while (0);
+#endif /* LINUX_VERSION_CODE  */
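+
+/* Sketched usage (the thread name and helpers are illustrative): a driver
+ * worker thread calls DAEMONIZE once at entry so it can later be stopped via
+ * SIGTERM/SIGKILL:
+ *
+ *	static int example_sysioc_thread(void *data)
+ *	{
+ *		DAEMONIZE("example_sysioc");
+ *		while (down_interruptible(&sem) == 0)
+ *			handle_one_request();
+ *		complete_and_exit(&exited, 0);
+ *	}
+ */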
+
+#define ISCAN_STATE_IDLE   0
+#define ISCAN_STATE_SCANING 1
+
+/* the buf length can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */
+#define WLC_IW_ISCAN_MAXLEN   2048
+typedef struct iscan_buf {
+	struct iscan_buf * next;
+	char   iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+
+typedef struct iscan_info {
+	struct net_device *dev;
+	struct timer_list timer;
+	uint32 timer_ms;
+	uint32 timer_on;
+	int    iscan_state;
+	iscan_buf_t * list_hdr;
+	iscan_buf_t * list_cur;
+
+	/* Thread to work on iscan */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	struct task_struct *kthread;
+#endif
+	long sysioc_pid;
+	struct semaphore sysioc_sem;
+	struct completion sysioc_exited;
+
+
+	char ioctlbuf[WLC_IOCTL_SMLEN];
+} iscan_info_t;
+iscan_info_t *g_iscan = NULL;
+static void wl_iw_timerfunc(ulong data);
+static void wl_iw_set_event_mask(struct net_device *dev);
+static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
+
+/* priv_link becomes netdev->priv and is the link between netdev and wlif struct */
+typedef struct priv_link {
+	wl_iw_t *wliw;
+} priv_link_t;
+
+/* dev to priv_link */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define WL_DEV_LINK(dev)       (priv_link_t*)(dev->priv)
+#else
+#define WL_DEV_LINK(dev)       (priv_link_t*)netdev_priv(dev)
+#endif
+
+/* dev to wl_iw_t */
+#define IW_DEV_IF(dev)          ((wl_iw_t*)(WL_DEV_LINK(dev))->wliw)
+
+static void swap_key_from_BE(
+	        wl_wsec_key_t *key
+)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(
+	        wl_wsec_key_t *key
+)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+static int
+dev_wlc_ioctl(
+	struct net_device *dev,
+	int cmd,
+	void *arg,
+	int len
+)
+{
+	struct ifreq ifr;
+	wl_ioctl_t ioc;
+	mm_segment_t fs;
+	int ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+
+	strcpy(ifr.ifr_name, dev->name);
+	ifr.ifr_data = (caddr_t) &ioc;
+
+	fs = get_fs();
+	set_fs(get_ds());
+#if defined(WL_USE_NETDEV_OPS)
+	ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+	ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif
+	set_fs(fs);
+
+	return ret;
+}
+
+/*
+set named driver variable to int value and return error indication
+calling example: dev_wlc_intvar_set(dev, "arate", rate)
+*/
+
+static int
+dev_wlc_intvar_set(
+	struct net_device *dev,
+	char *name,
+	int val)
+{
+	char buf[WLC_IOCTL_SMLEN];
+	uint len;
+
+	val = htod32(val);
+	len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+	ASSERT(len);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len));
+}
+
+static int
+dev_iw_iovar_setbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+	BCM_REFERENCE(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen));
+}
+
+static int
+dev_iw_iovar_getbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+	BCM_REFERENCE(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen));
+}
+
+#if WIRELESS_EXT > 17
+static int
+dev_wlc_bufvar_set(
+	struct net_device *dev,
+	char *name,
+	char *buf, int len)
+{
+	char *ioctlbuf;
+	uint buflen;
+	int error;
+
+	ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+	if (!ioctlbuf)
+		return -ENOMEM;
+
+	buflen = bcm_mkiovar(name, buf, len, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	ASSERT(buflen);
+	error = dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen);
+
+	kfree(ioctlbuf);
+	return error;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+/*
+get named driver variable into a caller-supplied buffer and return error indication
+calling example: dev_wlc_bufvar_get(dev, "arate", buf, buflen)
+*/
+
+static int
+dev_wlc_bufvar_get(
+	struct net_device *dev,
+	char *name,
+	char *buf, int buflen)
+{
+	char *ioctlbuf;
+	int error;
+
+	uint len;
+
+	ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+	if (!ioctlbuf)
+		return -ENOMEM;
+	len = bcm_mkiovar(name, NULL, 0, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	ASSERT(len);
+	BCM_REFERENCE(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	if (!error)
+		bcopy(ioctlbuf, buf, buflen);
+
+	kfree(ioctlbuf);
+	return (error);
+}
+
+/*
+get named driver variable to int value and return error indication
+calling example: dev_wlc_intvar_get(dev, "arate", &rate)
+*/
+
+static int
+dev_wlc_intvar_get(
+	struct net_device *dev,
+	char *name,
+	int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	uint len;
+	uint data_null;
+
+	len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf));
+	ASSERT(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+	*retval = dtoh32(var.val);
+
+	return (error);
+}
+
+/* Maintain backward compatibility */
+#if WIRELESS_EXT < 13
+struct iw_request_info
+{
+	__u16		cmd;		/* Wireless Extension command */
+	__u16		flags;		/* More to come ;-) */
+};
+
+typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
+	void *wrqu, char *extra);
+#endif /* WIRELESS_EXT < 13 */
+
+#if WIRELESS_EXT > 12
+static int
+wl_iw_set_leddc(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int dc = *(int *)extra;
+	int error;
+
+	error = dev_wlc_intvar_set(dev, "leddc", dc);
+	return error;
+}
+
+static int
+wl_iw_set_vlanmode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int mode = *(int *)extra;
+	int error;
+
+	mode = htod32(mode);
+	error = dev_wlc_intvar_set(dev, "vlan_mode", mode);
+	return error;
+}
+
+static int
+wl_iw_set_pm(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int pm = *(int *)extra;
+	int error;
+
+	pm = htod32(pm);
+	error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+	return error;
+}
+
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
+#endif /* WIRELESS_EXT > 12 */
+
+int
+wl_iw_send_priv_event(
+	struct net_device *dev,
+	char *flag
+)
+{
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd;
+
+	cmd = IWEVCUSTOM;
+	memset(&wrqu, 0, sizeof(wrqu));
+	if (strlen(flag) >= sizeof(extra))
+		return -1;
+
+	strcpy(extra, flag);
+	wrqu.data.length = strlen(extra);
+	wireless_send_event(dev, cmd, &wrqu, extra);
+	WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
+
+	return 0;
+}
+
+static int
+wl_iw_config_commit(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	void *zwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+	struct sockaddr bssid;
+
+	WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid))))
+		return error;
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	if (!ssid.SSID_len)
+		return 0;
+
+	bzero(&bssid, sizeof(struct sockaddr));
+	if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
+		WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error));
+		return error;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_name(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *cwrq,
+	char *extra
+)
+{
+	int phytype, err;
+	uint band[3];
+	char cap[5];
+
+	WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+
+	cap[0] = 0;
+	if ((err = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))) < 0)
+		goto done;
+	if ((err = dev_wlc_ioctl(dev, WLC_GET_BANDLIST, band, sizeof(band))) < 0)
+		goto done;
+
+	band[0] = dtoh32(band[0]);
+	switch (phytype) {
+		case WLC_PHY_TYPE_A:
+			strcpy(cap, "a");
+			break;
+		case WLC_PHY_TYPE_B:
+			strcpy(cap, "b");
+			break;
+		case WLC_PHY_TYPE_LP:
+		case WLC_PHY_TYPE_G:
+			if (band[0] >= 2)
+				strcpy(cap, "abg");
+			else
+				strcpy(cap, "bg");
+			break;
+		case WLC_PHY_TYPE_N:
+			if (band[0] >= 2)
+				strcpy(cap, "abgn");
+			else
+				strcpy(cap, "bgn");
+			break;
+	}
+done:
+	snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap);
+	return 0;
+}
+
+static int
+wl_iw_set_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	int error, chan;
+	uint sf = 0;
+
+	WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name));
+
+	/* Setting by channel number */
+	if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
+		chan = fwrq->m;
+	}
+
+	/* Setting by frequency */
+	else {
+		/* Convert to MHz as best we can */
+		if (fwrq->e >= 6) {
+			fwrq->e -= 6;
+			while (fwrq->e--)
+				fwrq->m *= 10;
+		} else if (fwrq->e < 6) {
+			while (fwrq->e++ < 6)
+				fwrq->m /= 10;
+		}
+		/* handle 4.9GHz frequencies as Japan 4 GHz based channelization */
+		if (fwrq->m > 4000 && fwrq->m < 5000)
+			sf = WF_CHAN_FACTOR_4_G; /* start factor for 4 GHz */
+
+		chan = wf_mhz2channel(fwrq->m, sf);
+	}
+	chan = htod32(chan);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan))))
+		return error;
+
+	/* -EINPROGRESS: Call commit handler */
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+
+	/* Return radio channel in channel form */
+	fwrq->m = dtoh32(ci.hw_channel);
+	fwrq->e = dtoh32(0);
+	return 0;
+}
+
+static int
+wl_iw_set_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int infra = 0, ap = 0, error = 0;
+
+	WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+
+	switch (*uwrq) {
+	case IW_MODE_MASTER:
+		infra = ap = 1;
+		break;
+	case IW_MODE_ADHOC:
+	case IW_MODE_AUTO:
+		break;
+	case IW_MODE_INFRA:
+		infra = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	infra = htod32(infra);
+	ap = htod32(ap);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap))))
+		return error;
+
+	/* -EINPROGRESS: Call commit handler */
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int error, infra = 0, ap = 0;
+
+	WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap))))
+		return error;
+
+	infra = dtoh32(infra);
+	ap = dtoh32(ap);
+	*uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
+
+	return 0;
+}
+
+static int
+wl_iw_get_range(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	struct iw_range *range = (struct iw_range *) extra;
+	static int channels[MAXCHANNEL+1];
+	wl_uint32_list_t *list = (wl_uint32_list_t *) channels;
+	wl_rateset_t rateset;
+	int error, i, k;
+	uint sf, ch;
+
+	int phytype;
+	int bw_cap = 0, sgi_tx = 0, nmode = 0;
+	channel_info_t ci;
+	uint8 nrate_list2copy = 0;
+	uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
+		{14, 29, 43, 58, 87, 116, 130, 144},
+		{27, 54, 81, 108, 162, 216, 243, 270},
+		{30, 60, 90, 120, 180, 240, 270, 300}};
+	int fbt_cap = 0;
+
+	WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = sizeof(struct iw_range);
+	memset(range, 0, sizeof(*range));
+
+	/* We don't use nwids */
+	range->min_nwid = range->max_nwid = 0;
+
+	/* Set available channels/frequencies */
+	list->count = htod32(MAXCHANNEL);
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, sizeof(channels))))
+		return error;
+	for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) {
+		range->freq[i].i = dtoh32(list->element[i]);
+
+		ch = dtoh32(list->element[i]);
+		if (ch <= CH_MAX_2G_CHANNEL)
+			sf = WF_CHAN_FACTOR_2_4_G;
+		else
+			sf = WF_CHAN_FACTOR_5_G;
+
+		range->freq[i].m = wf_channel2mhz(ch, sf);
+		range->freq[i].e = 6;
+	}
+	range->num_frequency = range->num_channels = i;
+
+	/* Link quality (use NDIS cutoffs) */
+	range->max_qual.qual = 5;
+	/* Signal level (use RSSI) */
+	range->max_qual.level = 0x100 - 200;	/* -200 dBm */
+	/* Noise level (use noise) */
+	range->max_qual.noise = 0x100 - 200;	/* -200 dBm */
+	/* Signal level threshold range (?) */
+	range->sensitivity = 65535;
+
+#if WIRELESS_EXT > 11
+	/* Link quality (use NDIS cutoffs) */
+	range->avg_qual.qual = 3;
+	/* Signal level (use RSSI) */
+	range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
+	/* Noise level (use noise) */
+	range->avg_qual.noise = 0x100 - 75;	/* -75 dBm */
+#endif /* WIRELESS_EXT > 11 */
+
+	/* Set available bitrates */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+		return error;
+	rateset.count = dtoh32(rateset.count);
+	range->num_bitrates = rateset.count;
+	for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
+		range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000; /* convert to bps */
+	if ((error = dev_wlc_intvar_get(dev, "nmode", &nmode)))
+		return error;
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))))
+		return error;
+	if (nmode == 1 && ((phytype == WLC_PHY_TYPE_SSN) || (phytype == WLC_PHY_TYPE_LCN) ||
+		(phytype == WLC_PHY_TYPE_LCN40))) {
+		if ((error = dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap)))
+			return error;
+		if ((error = dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx)))
+			return error;
+		if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t))))
+			return error;
+		ci.hw_channel = dtoh32(ci.hw_channel);
+
+		if (bw_cap == 0 ||
+			(bw_cap == 2 && ci.hw_channel <= 14)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 0;
+			else
+				nrate_list2copy = 1;
+		}
+		if (bw_cap == 1 ||
+			(bw_cap == 2 && ci.hw_channel >= 36)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 2;
+			else
+				nrate_list2copy = 3;
+		}
+		range->num_bitrates += 8;
+		ASSERT(range->num_bitrates < IW_MAX_BITRATES);
+		for (k = 0; i < range->num_bitrates; k++, i++) {
+			/* convert to bps */
+			range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000;
+		}
+	}
+
+	/* Set an indication of the max TCP throughput
+	 * in bit/s that we can expect using this interface.
+	 * May be use for QoS stuff... Jean II
+	 */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i))))
+		return error;
+	i = dtoh32(i);
+	if (i == WLC_PHY_TYPE_A)
+		range->throughput = 24000000;	/* 24 Mbits/s */
+	else
+		range->throughput = 1500000;	/* 1.5 Mbits/s */
+
+	/* RTS and fragmentation thresholds */
+	range->min_rts = 0;
+	range->max_rts = 2347;
+	range->min_frag = 256;
+	range->max_frag = 2346;
+
+	range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
+	range->num_encoding_sizes = 4;
+	range->encoding_size[0] = WEP1_KEY_SIZE;
+	range->encoding_size[1] = WEP128_KEY_SIZE;
+#if WIRELESS_EXT > 17
+	range->encoding_size[2] = TKIP_KEY_SIZE;
+#else
+	range->encoding_size[2] = 0;
+#endif
+	range->encoding_size[3] = AES_KEY_SIZE;
+
+	/* Do not support power micro-management */
+	range->min_pmp = 0;
+	range->max_pmp = 0;
+	range->min_pmt = 0;
+	range->max_pmt = 0;
+	range->pmp_flags = 0;
+	range->pm_capa = 0;
+
+	/* Transmit Power - values are in mW */
+	range->num_txpower = 2;
+	range->txpower[0] = 1;
+	range->txpower[1] = 255;
+	range->txpower_capa = IW_TXPOW_MWATT;
+
+#if WIRELESS_EXT > 10
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 19;
+
+	/* Only support retry limits */
+	range->retry_capa = IW_RETRY_LIMIT;
+	range->retry_flags = IW_RETRY_LIMIT;
+	range->r_time_flags = 0;
+	/* SRL and LRL limits */
+	range->min_retry = 1;
+	range->max_retry = 255;
+	/* Retry lifetime limits unsupported */
+	range->min_r_time = 0;
+	range->max_r_time = 0;
+#endif /* WIRELESS_EXT > 10 */
+
+#if WIRELESS_EXT > 17
+	range->enc_capa = IW_ENC_CAPA_WPA;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
+	range->enc_capa |= IW_ENC_CAPA_WPA2;
+
+	/* Determine driver FBT capability. */
+	if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+		if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
+			/* Tell the host (e.g. wpa_supplicant) to let driver do the handshake */
+			range->enc_capa |= IW_ENC_CAPA_4WAY_HANDSHAKE;
+		}
+	}
+
+#ifdef BCMFW_ROAM_ENABLE_WEXT
+	/* Advertise firmware roam capability to the external supplicant */
+	range->enc_capa |= IW_ENC_CAPA_FW_ROAM_ENABLE;
+#endif /* BCMFW_ROAM_ENABLE_WEXT */
+
+	/* Event capability (kernel) */
+	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+	/* Event capability (driver) */
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
+
+#if WIRELESS_EXT >= 22 && defined(IW_SCAN_CAPA_ESSID)
+	/* FC7 wireless.h defines EXT 22 but doesn't define scan_capa bits */
+	range->scan_capa = IW_SCAN_CAPA_ESSID;
+#endif
+#endif /* WIRELESS_EXT > 17 */
+
+	return 0;
+}
+
+static int
+rssi_to_qual(int rssi)
+{
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		return 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		return 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		return 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		return 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		return 4;
+	else
+		return 5;
+}
+
+static int
+wl_iw_set_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	int i;
+
+	WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length);
+	for (i = 0; i < iw->spy_num; i++)
+		memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+	memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
+
+	return 0;
+}
+
+static int
+wl_iw_get_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num];
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = iw->spy_num;
+	for (i = 0; i < iw->spy_num; i++) {
+		memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+		addr[i].sa_family = AF_UNIX;
+		memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
+		iw->spy_qual[i].updated = 0;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_set_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	int error = -EINVAL;
+
+	WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+
+	if (awrq->sa_family != ARPHRD_ETHER) {
+		WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	/* Ignore "auto" or "off" */
+	if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+		scb_val_t scbval;
+		bzero(&scbval, sizeof(scb_val_t));
+		if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
+			WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error));
+		}
+		return 0;
+	}
+	/* WL_ASSOC(("Assoc to %s\n", bcm_ether_ntoa((struct ether_addr *)&(awrq->sa_data),
+	 * eabuf)));
+	 */
+	/* Reassociate to the specified AP */
+	if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) {
+		WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error));
+		return error;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+
+	awrq->sa_family = ARPHRD_ETHER;
+	memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+
+	/* Ignore error (may be down or disassociated) */
+	(void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_mlme(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	struct iw_mlme *mlme;
+	scb_val_t scbval;
+	int error  = -EINVAL;
+
+	WL_TRACE(("%s: SIOCSIWMLME\n", dev->name));
+
+	mlme = (struct iw_mlme *)extra;
+	if (mlme == NULL) {
+		WL_ERROR(("Invalid ioctl data.\n"));
+		return error;
+	}
+
+	scbval.val = mlme->reason_code;
+	bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+
+	if (mlme->cmd == IW_MLME_DISASSOC) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+	}
+	else if (mlme->cmd == IW_MLME_DEAUTH) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+			sizeof(scb_val_t));
+	}
+	else {
+		WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__));
+		return error;
+	}
+
+	return error;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static int
+wl_iw_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int error, i;
+	uint buflen = dwrq->length;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Get scan results (too large to put on the stack) */
+	list = kmalloc(buflen, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	memset(list, 0, buflen);
+	list->buflen = htod32(buflen);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+		WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+		kfree(list);
+		return error;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+	ASSERT(list->version == WL_BSS_INFO_VERSION);
+
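+	/* wl_bss_info_t records are variable length; bi->length gives the offset to the next one */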
+	for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			buflen));
+
+		/* Infrastructure only */
+		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+			continue;
+
+		/* BSSID */
+		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		addr[dwrq->length].sa_family = ARPHRD_ETHER;
+		qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+		qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+		qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+		/* Updated qual, level, and noise */
+#if WIRELESS_EXT > 18
+		qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+		qual[dwrq->length].updated = 7;
+#endif /* WIRELESS_EXT > 18 */
+
+		dwrq->length++;
+	}
+
+	kfree(list);
+
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		/* Provided qual */
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	iscan_buf_t * buf;
+	iscan_info_t *iscan = g_iscan;
+
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_get_aplist(dev, info, dwrq, extra);
+	}
+
+	buf = iscan->list_hdr;
+	/* Get scan results (too large to put on the stack) */
+	while (buf) {
+	    list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+	    ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+	    bi = NULL;
+	    for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			WLC_IW_ISCAN_MAXLEN));
+
+		/* Infrastructure only */
+		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+			continue;
+
+		/* BSSID */
+		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		addr[dwrq->length].sa_family = ARPHRD_ETHER;
+		qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+		qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+		qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+		/* Updated qual, level, and noise */
+#if WIRELESS_EXT > 18
+		qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+		qual[dwrq->length].updated = 7;
+#endif /* WIRELESS_EXT > 18 */
+
+		dwrq->length++;
+	    }
+	    buf = buf->next;
+	}
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		/* Provided qual */
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 13
+static int
+wl_iw_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+
+	WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+	/* default Broadcast scan */
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	/* check for given essid */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+		}
+	}
+#endif
+	/* Ignore error (most likely scan in progress) */
+	(void) dev_wlc_ioctl(dev, WLC_SCAN, &ssid, sizeof(ssid));
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	iscan_info_t *iscan = g_iscan;
+
+	WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+	/* Fall back to the synchronous scan path if the iscan thread is not running */
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_set_scan(dev, info, wrqu, extra);
+	}
+	if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+		return 0;
+	}
+
+	/* default Broadcast scan */
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	/* check for given essid */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+		}
+	}
+#endif
+
+	iscan->list_cur = iscan->list_hdr;
+	iscan->iscan_state = ISCAN_STATE_SCANING;
+
+
+	wl_iw_set_event_mask(dev);
+	wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+
+	iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+	add_timer(&iscan->timer);
+	iscan->timer_on = 1;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static bool
+ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
+{
+/* Is the body of this TLV entry a WPA IE? If not, advance the
+ * TLV buffer pointer and remaining length past it.
+ */
+	uint8 *ie = *wpaie;
+
+	/* If the contents match the WPA_OUI and type=1 */
+	if ((ie[1] >= 6) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+		return TRUE;
+	}
+
+	/* point to the next ie */
+	ie += ie[1] + 2;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+	return FALSE;
+}
+
+static bool
+ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
+{
+/* Is the body of this TLV entry a WPS IE? If not, advance the
+ * TLV buffer pointer and remaining length past it.
+ */
+	uint8 *ie = *wpsie;
+
+	/* If the contents match the WPA_OUI and type=4 */
+	if ((ie[1] >= 4) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+		return TRUE;
+	}
+
+	/* point to the next ie */
+	ie += ie[1] + 2;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+	return FALSE;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+#ifdef BCMWAPI_WPI
+static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
+	size_t len, int uppercase)
+{
+	size_t i;
+	char *pos = buf, *end = buf + buf_size;
+	int ret;
+	if (buf_size == 0)
+		return 0;
+	for (i = 0; i < len; i++) {
+		ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
+			data[i]);
+		if (ret < 0 || ret >= end - pos) {
+			end[-1] = '\0';
+			return pos - buf;
+		}
+		pos += ret;
+	}
+	end[-1] = '\0';
+	return pos - buf;
+}
+
+/**
+ * wpa_snprintf_hex - Print data as a hex string into a buffer
+ * @buf: Memory area to use as the output buffer
+ * @buf_size: Maximum buffer size in bytes (should be at least 2 * len + 1)
+ * @data: Data to be printed
+ * @len: Length of data in bytes
+ * Returns: Number of bytes written
+ */
+static int
+wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
+{
+	return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
+}
+#endif /* BCMWAPI_WPI */
+
+static int
+wl_iw_handle_scanresults_ies(char **event_p, char *end,
+	struct iw_request_info *info, wl_bss_info_t *bi)
+{
+#if WIRELESS_EXT > 17
+	struct iw_event	iwe;
+	char *event;
+#ifdef BCMWAPI_WPI
+	char *buf;
+	int custom_event_len;
+#endif
+
+	event = *event_p;
+	if (bi->ie_length) {
+		/* look for wpa/rsn ies in the ie list... */
+		bcm_tlv_t *ie;
+		uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		int ptr_len = bi->ie_length;
+
+		/* OSEN IE */
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_VS_ID)) &&
+			ie->len > WFA_OUI_LEN + 1 &&
+			!bcmp((const void *)&ie->data[0], (const void *)WFA_OUI, WFA_OUI_LEN) &&
+			ie->data[WFA_OUI_LEN] == WFA_OUI_TYPE_OSEN) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			/* look for WPS IE */
+			if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		ptr_len = bi->ie_length;
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+#ifdef BCMWAPI_WPI
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		ptr_len = bi->ie_length;
+
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
+			WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__));
+#ifdef WAPI_IE_USE_GENIE
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+#else /* using CUSTOM event */
+			iwe.cmd = IWEVCUSTOM;
+			custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
+			iwe.u.data.length = custom_event_len;
+
+			buf = kmalloc(custom_event_len+1, GFP_KERNEL);
+			if (buf == NULL)
+			{
+				WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len));
+				break;
+			}
+
+			memcpy(buf, "wapi_ie=", 8);
+			wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
+			wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
+			wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
+			kfree(buf);
+#endif /* WAPI_IE_USE_GENIE */
+			break;
+		}
+#endif /* BCMWAPI_WPI */
+	*event_p = event;
+	}
+
+#endif /* WIRELESS_EXT > 17 */
+	return 0;
+}
+static int
+wl_iw_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	wl_scan_results_t *list;
+	struct iw_event	iwe;
+	wl_bss_info_t *bi = NULL;
+	int error, i, j;
+	char *event = extra, *end = extra + dwrq->length, *value;
+	uint buflen = dwrq->length;
+
+	WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Check for scan in progress */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+	ci.scan_channel = dtoh32(ci.scan_channel);
+	if (ci.scan_channel)
+		return -EAGAIN;
+
+	/* Get scan results (too large to put on the stack) */
+	list = kmalloc(buflen, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	memset(list, 0, buflen);
+	list->buflen = htod32(buflen);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+		kfree(list);
+		return error;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+
+	ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+	for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			buflen));
+
+		/* First entry must be the BSSID */
+		iwe.cmd = SIOCGIWAP;
+		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+		/* SSID */
+		iwe.u.data.length = dtoh32(bi->SSID_len);
+		iwe.cmd = SIOCGIWESSID;
+		iwe.u.data.flags = 1;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+		/* Mode */
+		if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+			iwe.cmd = SIOCGIWMODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+				iwe.u.mode = IW_MODE_INFRA;
+			else
+				iwe.u.mode = IW_MODE_ADHOC;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+		}
+
+		/* Channel */
+		iwe.cmd = SIOCGIWFREQ;
+
+		iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+			(CHSPEC_IS2G(bi->chanspec)) ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+		iwe.u.freq.e = 6;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+		/* Channel quality */
+		iwe.cmd = IWEVQUAL;
+		iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+		iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+		iwe.u.qual.noise = 0x100 + bi->phy_noise;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+		/* WPA, WPA2, WPS, WAPI IEs */
+		 wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+		/* Encryption */
+		iwe.cmd = SIOCGIWENCODE;
+		if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+		else
+			iwe.u.data.flags = IW_ENCODE_DISABLED;
+		iwe.u.data.length = 0;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+		/* Rates */
+		if (bi->rateset.count) {
+			value = event + IW_EV_LCP_LEN;
+			iwe.cmd = SIOCGIWRATE;
+			/* Those two flags are ignored... */
+			iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+			for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+				iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+				value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+					IW_EV_PARAM_LEN);
+			}
+			event = value;
+		}
+	}
+
+	kfree(list);
+
+	dwrq->length = event - extra;
+	dwrq->flags = 0;	/* todo */
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct iw_event	iwe;
+	wl_bss_info_t *bi = NULL;
+	int ii, j;
+	int apcnt;
+	char *event = extra, *end = extra + dwrq->length, *value;
+	iscan_info_t *iscan = g_iscan;
+	iscan_buf_t * p_buf;
+
+	WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Fall back to the synchronous scan path if the iscan thread is not running */
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_get_scan(dev, info, dwrq, extra);
+	}
+
+	/* Check for scan in progress */
+	if (iscan->iscan_state == ISCAN_STATE_SCANING)
+		return -EAGAIN;
+
+	apcnt = 0;
+	p_buf = iscan->list_hdr;
+	/* Get scan results */
+	while (p_buf != iscan->list_cur) {
+	    list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+
+	    if (list->version != WL_BSS_INFO_VERSION) {
+		WL_ERROR(("list->version %d != WL_BSS_INFO_VERSION\n", list->version));
+	    }
+
+	    bi = NULL;
+	    for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			WLC_IW_ISCAN_MAXLEN));
+
+		/* Overflow check covering the fixed fields that precede the WPA IEs */
+		if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+			IW_EV_QUAL_LEN >= end)
+			return -E2BIG;
+		/* First entry must be the BSSID */
+		iwe.cmd = SIOCGIWAP;
+		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+		/* SSID */
+		iwe.u.data.length = dtoh32(bi->SSID_len);
+		iwe.cmd = SIOCGIWESSID;
+		iwe.u.data.flags = 1;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+		/* Mode */
+		if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+			iwe.cmd = SIOCGIWMODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+				iwe.u.mode = IW_MODE_INFRA;
+			else
+				iwe.u.mode = IW_MODE_ADHOC;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+		}
+
+		/* Channel */
+		iwe.cmd = SIOCGIWFREQ;
+
+		iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+			(CHSPEC_IS2G(bi->chanspec)) ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+		iwe.u.freq.e = 6;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+		/* Channel quality */
+		iwe.cmd = IWEVQUAL;
+		iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+		iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+		iwe.u.qual.noise = 0x100 + bi->phy_noise;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+		/* WPA, WPA2, WPS, WAPI IEs */
+		wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+		/* Encryption */
+		iwe.cmd = SIOCGIWENCODE;
+		if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+		else
+			iwe.u.data.flags = IW_ENCODE_DISABLED;
+		iwe.u.data.length = 0;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+		/* Rates */
+		if (bi->rateset.count <= sizeof(bi->rateset.rates)) {
+			if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
+				return -E2BIG;
+
+			value = event + IW_EV_LCP_LEN;
+			iwe.cmd = SIOCGIWRATE;
+			/* Those two flags are ignored... */
+			iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+			for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+				iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+				value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+					IW_EV_PARAM_LEN);
+			}
+			event = value;
+		}
+	    }
+	    p_buf = p_buf->next;
+	} /* while (p_buf) */
+
+	dwrq->length = event - extra;
+	dwrq->flags = 0;	/* todo */
+
+	return 0;
+}
+
+#endif /* WIRELESS_EXT > 13 */
+
+
+static int
+wl_iw_set_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+
+	WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+
+	/* default Broadcast SSID */
+	memset(&ssid, 0, sizeof(ssid));
+	if (dwrq->length && extra) {
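+		/* WE-21 and later pass the ESSID length without the trailing NUL */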
+#if WIRELESS_EXT > 20
+		ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length);
+#else
+		ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length-1);
+#endif
+		memcpy(ssid.SSID, extra, ssid.SSID_len);
+		ssid.SSID_len = htod32(ssid.SSID_len);
+
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid))))
+			return error;
+	}
+	/* If the ESSID is null, this is an "iwconfig <interface> essid off" command */
+	else {
+		scb_val_t scbval;
+		bzero(&scbval, sizeof(scb_val_t));
+		if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t))))
+			return error;
+	}
+	return 0;
+}
+
+static int
+wl_iw_get_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
+		WL_ERROR(("Error getting the SSID\n"));
+		return error;
+	}
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	/* Get the current SSID */
+	memcpy(extra, ssid.SSID, ssid.SSID_len);
+
+	dwrq->length = ssid.SSID_len;
+
+	dwrq->flags = 1; /* active */
+
+	return 0;
+}
+
+static int
+wl_iw_set_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Check the size of the string */
+	if (dwrq->length > sizeof(iw->nickname))
+		return -E2BIG;
+
+	memcpy(iw->nickname, extra, dwrq->length);
+	iw->nickname[dwrq->length - 1] = '\0';
+
+	return 0;
+}
+
+static int
+wl_iw_get_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	strcpy(extra, iw->nickname);
+	dwrq->length = strlen(extra) + 1;
+
+	return 0;
+}
+
+static int wl_iw_set_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	wl_rateset_t rateset;
+	int error, rate, i, error_bg, error_a;
+
+	WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+
+	/* Get current rateset */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+		return error;
+
+	rateset.count = dtoh32(rateset.count);
+
+	if (vwrq->value < 0) {
+		/* Select maximum rate */
+		rate = rateset.rates[rateset.count - 1] & 0x7f;
+	} else if (vwrq->value < rateset.count) {
+		/* Select rate by rateset index */
+		rate = rateset.rates[vwrq->value] & 0x7f;
+	} else {
+		/* Specified rate in bps */
+		rate = vwrq->value / 500000;
+	}
+
+	if (vwrq->fixed) {
+		/*
+			Set rate override.
+			Since this is a/b/g-blind, both a_rate and bg_rate are enforced.
+		*/
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
+		error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+	} else {
+		/*
+			Clear rate override.
+			Since this is a/b/g-blind, both a_rate and bg_rate are enforced.
+		*/
+		/* 0 is for clearing rate override */
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
+		/* 0 is for clearing rate override */
+		error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+
+		/* Remove rates above selected rate */
+		for (i = 0; i < rateset.count; i++)
+			if ((rateset.rates[i] & 0x7f) > rate)
+				break;
+		rateset.count = htod32(i);
+
+		/* Set current rateset */
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
+			return error;
+	}
+
+	return 0;
+}
+
+static int wl_iw_get_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rate;
+
+	WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+
+	/* Report the current tx rate */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
+		return error;
+	rate = dtoh32(rate);
+	vwrq->value = rate * 500000;
+
+	return 0;
+}
+
+static int
+wl_iw_set_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+
+	if (vwrq->disabled)
+		rts = DOT11_DEFAULT_RTS_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
+		return -EINVAL;
+	else
+		rts = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
+		return error;
+
+	vwrq->value = rts;
+	vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, frag;
+
+	WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+
+	if (vwrq->disabled)
+		frag = DOT11_DEFAULT_FRAG_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
+		return -EINVAL;
+	else
+		frag = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, fragthreshold;
+
+	WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold)))
+		return error;
+
+	vwrq->value = fragthreshold;
+	vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable;
+	uint16 txpwrmw;
+	WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+
+	/* Make sure radio is off or on as far as software is concerned */
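+	/* Build the WLC_SET_RADIO argument: the new value in the low 16 bits and
+	 * the mask of bits being changed (WL_RADIO_SW_DISABLE) in the upper 16 bits.
+	 */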
+	disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
+	disable += WL_RADIO_SW_DISABLE << 16;
+
+	disable = htod32(disable);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
+		return error;
+
+	/* If Radio is off, nothing more to do */
+	if (disable & WL_RADIO_SW_DISABLE)
+		return 0;
+
+	/* Only handle mW */
+	if (!(vwrq->flags & IW_TXPOW_MWATT))
+		return -EINVAL;
+
+	/* Value < 0 means just "on" or "off" */
+	if (vwrq->value < 0)
+		return 0;
+
+	if (vwrq->value > 0xffff) txpwrmw = 0xffff;
+	else txpwrmw = (uint16)vwrq->value;
+
+
+	error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
+	return error;
+}
+
+static int
+wl_iw_get_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable, txpwrdbm;
+	uint8 result;
+
+	WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) ||
+	    (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm)))
+		return error;
+
+	disable = dtoh32(disable);
+	result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE);
+	vwrq->value = (int32)bcm_qdbm_to_mw(result);
+	vwrq->fixed = 0;
+	vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
+	vwrq->flags = IW_TXPOW_MWATT;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 10
+static int
+wl_iw_set_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+
+	/* Do not handle "off" or "lifetime" */
+	if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
+		return -EINVAL;
+
+	/* Handle "[min|max] limit" */
+	if (vwrq->flags & IW_RETRY_LIMIT) {
+		/* "max limit" or just "limit" */
+#if WIRELESS_EXT > 20
+		if ((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX) ||
+			!((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) {
+#else
+		if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) {
+#endif /* WIRELESS_EXT > 20 */
+
+			lrl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
+				return error;
+		}
+		/* "min limit" or just "limit" */
+#if WIRELESS_EXT > 20
+		if ((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN) ||
+			!((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) {
+#else
+		if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) {
+#endif /* WIRELESS_EXT > 20 */
+
+			srl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
+				return error;
+		}
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+
+	vwrq->disabled = 0;      /* Can't be disabled */
+
+	/* Do not handle lifetime queries */
+	if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+		return -EINVAL;
+
+	/* Get retry limits */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl))))
+		return error;
+
+	lrl = dtoh32(lrl);
+	srl = dtoh32(srl);
+
+	/* Note: by default, report the min retry limit */
+	if (vwrq->flags & IW_RETRY_MAX) {
+		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+		vwrq->value = lrl;
+	} else {
+		vwrq->flags = IW_RETRY_LIMIT;
+		vwrq->value = srl;
+		if (srl != lrl)
+			vwrq->flags |= IW_RETRY_MIN;
+	}
+
+	return 0;
+}
+#endif /* WIRELESS_EXT > 10 */
+
+static int
+wl_iw_set_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec;
+
+	WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));
+
+	memset(&key, 0, sizeof(key));
+
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		/* Find the current key */
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = htod32(key.index);
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+		/* Default to 0 */
+		if (key.index == DOT11_MAX_DEFAULT_KEYS)
+			key.index = 0;
+	} else {
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+		if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+			return -EINVAL;
+	}
+
+	/* Interpret "off" to mean no encryption */
+	wsec = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
+
+	if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
+		return error;
+
+	/* Old API used to pass a NULL pointer instead of IW_ENCODE_NOKEY */
+	if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
+		/* Just select a new current key */
+		val = htod32(key.index);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
+			return error;
+	} else {
+		key.len = dwrq->length;
+
+		if (dwrq->length > sizeof(key.data))
+			return -EINVAL;
+
+		memcpy(key.data, extra, dwrq->length);
+
+		key.flags = WL_PRIMARY_KEY;
+		switch (key.len) {
+		case WEP1_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP1;
+			break;
+		case WEP128_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP128;
+			break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+		case TKIP_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_TKIP;
+			break;
+#endif
+		case AES_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		/* Set the new key/index */
+		swap_key_from_BE(&key);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
+			return error;
+	}
+
+	/* Interpret "restricted" to mean shared key authentication */
+	val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
+	val = htod32(val);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec, auth;
+
+	WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+
+	/* Ensure default values of zero for fields we don't touch */
+	bzero(&key, sizeof(wl_wsec_key_t));
+
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		/* Find the current key */
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = key.index;
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+	} else
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+		key.index = 0;
+
+	/* Get info */
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
+		return error;
+
+	swap_key_to_BE(&key);
+
+	wsec = dtoh32(wsec);
+	auth = dtoh32(auth);
+	/* Get key length */
+	dwrq->length = MIN(IW_ENCODING_TOKEN_MAX, key.len);
+
+	/* Get flags */
+	dwrq->flags = key.index + 1;
+	if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
+		/* Interpret "off" to mean no encryption */
+		dwrq->flags |= IW_ENCODE_DISABLED;
+	}
+	if (auth) {
+		/* Interpret "restricted" to mean shared key authentication */
+		dwrq->flags |= IW_ENCODE_RESTRICTED;
+	}
+
+	/* Get key */
+	if (dwrq->length && extra)
+		memcpy(extra, key.data, dwrq->length);
+
+	return 0;
+}
+
+static int
+wl_iw_set_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+
+	pm = vwrq->disabled ? PM_OFF : PM_MAX;
+
+	pm = htod32(pm);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))))
+		return error;
+
+	pm = dtoh32(pm);
+	vwrq->disabled = pm ? 0 : 1;
+	vwrq->flags = IW_POWER_ALL_R;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_set_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+#if defined(BCMWAPI_WPI)
+	uchar buf[WLC_IOCTL_SMLEN] = {0};
+	uchar *p = buf;
+	int wapi_ie_size;
+
+	WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+
+	if (extra[0] == DOT11_MNG_WAPI_ID)
+	{
+		wapi_ie_size = iwp->length;
+		memcpy(p, extra, iwp->length);
+		dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
+	}
+	else
+#endif
+		dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+	return 0;
+}
+
+static int
+wl_iw_get_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+	iwp->length = 64;
+	dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
+	return 0;
+}
+
+static int
+wl_iw_set_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error;
+	struct iw_encode_ext *iwe;
+
+	WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+	memset(&key, 0, sizeof(key));
+	iwe = (struct iw_encode_ext *)extra;
+
+	/* disable encryption completely  */
+	if (dwrq->flags & IW_ENCODE_DISABLED) {
+
+	}
+
+	/* get the key index */
+	key.index = 0;
+	if (dwrq->flags & IW_ENCODE_INDEX)
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	key.len = iwe->key_len;
+
+	/* For default WEP keys the driver expects a null ea address rather than broadcast */
+	if (!ETHER_ISMULTI(iwe->addr.sa_data))
+		bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+	/* check for key index change */
+	if (key.len == 0) {
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("Changing the the primary Key to %d\n", key.index));
+			/* change the key index .... */
+			key.index = htod32(key.index);
+			error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+				&key.index, sizeof(key.index));
+			if (error)
+				return error;
+		}
+		/* key delete */
+		else {
+			swap_key_from_BE(&key);
+			error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+			if (error)
+				return error;
+		}
+	}
+	/* This case is used to allow an external 802.1x supplicant
+	 * to pass the PMK to the in-driver supplicant for use in
+	 * the 4-way handshake.
+	 */
+	else if (iwe->alg == IW_ENCODE_ALG_PMK) {
+		int j;
+		wsec_pmk_t pmk;
+		char keystring[WSEC_MAX_PSK_LEN + 1];
+		char* charptr = keystring;
+		uint len;
+
+		/* copy the raw hex key to the appropriate format */
+		for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
+			sprintf(charptr, "%02x", iwe->key[j]);
+			charptr += 2;
+		}
+		len = strlen(keystring);
+		pmk.key_len = htod16(len);
+		bcopy(keystring, pmk.key, len);
+		pmk.flags = htod16(WSEC_PASSPHRASE);
+
+		error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+		if (error)
+			return error;
+	}
+
+	else {
+		if (iwe->key_len > sizeof(key.data))
+			return -EINVAL;
+
+		WL_WSEC(("Setting the key index %d\n", key.index));
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("key is a Primary Key\n"));
+			key.flags = WL_PRIMARY_KEY;
+		}
+
+		bcopy((void *)iwe->key, key.data, iwe->key_len);
+
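+		/* For TKIP, swap the two 8-byte MIC key halves (bytes 16-23 and 24-31);
+		 * the supplicant and the firmware use opposite TX/RX MIC key ordering.
+		 */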
+		if (iwe->alg == IW_ENCODE_ALG_TKIP) {
+			uint8 keybuf[8];
+			bcopy(&key.data[24], keybuf, sizeof(keybuf));
+			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+			bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		}
+
+		/* rx iv */
+		if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+			uchar *ivptr;
+			ivptr = (uchar *)iwe->rx_seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+				(ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = TRUE;
+		}
+
+		switch (iwe->alg) {
+			case IW_ENCODE_ALG_NONE:
+				key.algo = CRYPTO_ALGO_OFF;
+				break;
+			case IW_ENCODE_ALG_WEP:
+				if (iwe->key_len == WEP1_KEY_SIZE)
+					key.algo = CRYPTO_ALGO_WEP1;
+				else
+					key.algo = CRYPTO_ALGO_WEP128;
+				break;
+			case IW_ENCODE_ALG_TKIP:
+				key.algo = CRYPTO_ALGO_TKIP;
+				break;
+			case IW_ENCODE_ALG_CCMP:
+				key.algo = CRYPTO_ALGO_AES_CCM;
+				break;
+#ifdef BCMWAPI_WPI
+			case IW_ENCODE_ALG_SM4:
+				key.algo = CRYPTO_ALGO_SMS4;
+				if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+					key.flags &= ~WL_PRIMARY_KEY;
+				}
+				break;
+#endif
+			default:
+				break;
+		}
+		swap_key_from_BE(&key);
+
+		dhd_wait_pend8021x(dev);
+
+		error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+		if (error)
+			return error;
+	}
+	return 0;
+}
+
+
+#if WIRELESS_EXT > 17
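+/* Driver-local PMKID cache (up to MAXPMKID entries); wl_iw_set_pmksa() pushes
+ * it to the firmware through the "pmkid_info" iovar.
+ */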
+struct {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID-1];
+} pmkid_list;
+static int
+wl_iw_set_pmksa(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	struct iw_pmksa *iwpmksa;
+	uint i;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid;
+
+	WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name));
+	iwpmksa = (struct iw_pmksa *)extra;
+	bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+	if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
+		WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
+		bzero((char *)&pmkid_list, sizeof(pmkid_list));
+	}
+	if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
+		pmkid_list_t pmkid, *pmkidptr;
+		pmkidptr = &pmkid;
+		bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN);
+		bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN);
+		{
+			uint j;
+			WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
+			WL_TRACE(("\n"));
+		}
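+		/* Find the matching BSSID, then shift the remaining entries down over it */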
+		for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+			if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID,
+				ETHER_ADDR_LEN))
+				break;
+		for (; i < pmkid_list.pmkids.npmkid; i++) {
+			bcopy(&pmkid_array[i+1].BSSID,
+				&pmkid_array[i].BSSID,
+				ETHER_ADDR_LEN);
+			bcopy(&pmkid_array[i+1].PMKID,
+				&pmkid_array[i].PMKID,
+				WPA2_PMKID_LEN);
+		}
+		pmkid_list.pmkids.npmkid--;
+	}
+	if (iwpmksa->cmd == IW_PMKSA_ADD) {
+		bcopy(&iwpmksa->bssid.sa_data[0],
+			&pmkid_array[pmkid_list.pmkids.npmkid].BSSID,
+			ETHER_ADDR_LEN);
+		bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmkid_list.pmkids.npmkid].PMKID,
+			WPA2_PMKID_LEN);
+		{
+			uint j;
+			uint k;
+			k = pmkid_list.pmkids.npmkid;
+			BCM_REFERENCE(k);
+			WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkid_array[k].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_TRACE(("%02x ", pmkid_array[k].PMKID[j]));
+			WL_TRACE(("\n"));
+		}
+		pmkid_list.pmkids.npmkid++;
+	}
+	WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmkid_list.pmkids.npmkid));
+	for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
+		uint j;
+		WL_TRACE(("PMKID[%d]: %s = ", i,
+			bcm_ether_ntoa(&pmkid_array[i].BSSID,
+			eabuf)));
+		for (j = 0; j < WPA2_PMKID_LEN; j++)
+			WL_TRACE(("%02x ", pmkid_array[i].PMKID[j]));
+		printf("\n");
+	}
+	WL_TRACE(("\n"));
+	dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list));
+	return 0;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static int
+wl_iw_get_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+	return 0;
+}
+
+static int
+wl_iw_set_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error = 0;
+	int paramid;
+	int paramval;
+	uint32 cipher_combined;
+	int val = 0;
+	wl_iw_t *iw = IW_DEV_IF(dev);
+
+	WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+	paramval = vwrq->value;
+
+	WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+		dev->name, paramid, paramval));
+
+	switch (paramid) {
+
+	case IW_AUTH_WPA_VERSION:
+		/* Supported WPA version: disabled, WPA, or WPA2 */
+		if (paramval & IW_AUTH_WPA_VERSION_DISABLED)
+			val = WPA_AUTH_DISABLED;
+		else if (paramval & (IW_AUTH_WPA_VERSION_WPA))
+			val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+		else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
+			val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+#ifdef BCMWAPI_WPI
+		else if (paramval & IW_AUTH_WAPI_VERSION_1)
+			val = WAPI_AUTH_UNSPECIFIED;
+#endif
+		WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP: {
+		int fbt_cap = 0;
+
+		if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
+			iw->pwsec = paramval;
+		}
+		else {
+			iw->gwsec = paramval;
+		}
+
+		if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+			return error;
+
+		cipher_combined = iw->gwsec | iw->pwsec;
+		val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
+		if (cipher_combined & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+			val |= WEP_ENABLED;
+		if (cipher_combined & IW_AUTH_CIPHER_TKIP)
+			val |= TKIP_ENABLED;
+		if (cipher_combined & IW_AUTH_CIPHER_CCMP)
+			val |= AES_ENABLED;
+#ifdef BCMWAPI_WPI
+		val &= ~SMS4_ENABLED;
+		if (cipher_combined & IW_AUTH_CIPHER_SMS4)
+			val |= SMS4_ENABLED;
+#endif
+
+		if (iw->privacy_invoked && !val) {
+			WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+			         "we're a WPS enrollee\n", dev->name, __FUNCTION__));
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+				WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		} else if (val) {
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+				WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		}
+
+		if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+			return error;
+
+		/* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
+		 * handshake.
+		 */
+		if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+			if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
+				if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) {
+					if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1)))
+						return error;
+				}
+				else if (val == 0) {
+					if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0)))
+						return error;
+				}
+			}
+		}
+		break;
+	}
+
+	case IW_AUTH_KEY_MGMT:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+
+		if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+				val = WPA_AUTH_PSK;
+			else
+				val = WPA_AUTH_UNSPECIFIED;
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+				val |= WPA2_AUTH_FT;
+		}
+		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+				val = WPA2_AUTH_PSK;
+			else
+				val = WPA2_AUTH_UNSPECIFIED;
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+				val |= WPA2_AUTH_FT;
+		}
+#ifdef BCMWAPI_WPI
+		if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
+			val = WAPI_AUTH_UNSPECIFIED;
+#endif
+		WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		/* open shared */
+		WL_ERROR(("Setting the D11auth %d\n", paramval));
+		if (paramval & IW_AUTH_ALG_OPEN_SYSTEM)
+			val = 0;
+		else if (paramval & IW_AUTH_ALG_SHARED_KEY)
+			val = 1;
+		else
+			error = 1;
+		if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		if (paramval == 0) {
+			val = 0;
+			WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+			error = dev_wlc_intvar_set(dev, "wpa_auth", val);
+			return error;
+		}
+		else {
+			/* If WPA is enabled, wpa_auth is set elsewhere */
+		}
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+#if WIRELESS_EXT > 17
+
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		/* driver control or user space app control */
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED: {
+		int wsec;
+
+		if (paramval == 0) {
+			iw->privacy_invoked = FALSE;
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+				WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		} else {
+			iw->privacy_invoked = TRUE;
+			if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+				return error;
+
+			if (!WSEC_ENABLED(wsec)) {
+				/* if privacy is true, but wsec is false, we are a WPS enrollee */
+				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+					WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+					return error;
+				}
+			} else {
+				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+					WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+					return error;
+				}
+			}
+		}
+		break;
+	}
+
+
+#endif /* WIRELESS_EXT > 17 */
+
+#ifdef BCMWAPI_WPI
+
+	case IW_AUTH_WAPI_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+			return error;
+		if (paramval) {
+			val |= SMS4_ENABLED;
+			if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+				WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n",
+					__FUNCTION__, val, error));
+				return error;
+			}
+			if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) {
+				WL_ERROR(("%s: setting wpa_auth(%d) returned %d\n",
+					__FUNCTION__, WAPI_AUTH_UNSPECIFIED,
+					error));
+				return error;
+			}
+		}
+
+		break;
+
+#endif /* BCMWAPI_WPI */
+
+	default:
+		break;
+	}
+	return 0;
+}
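+
+/* TRUE when the wpa_auth value selects PSK key management (WPA or WPA2) */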
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+
+static int
+wl_iw_get_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error;
+	int paramid;
+	int paramval = 0;
+	int val;
+	wl_iw_t *iw = IW_DEV_IF(dev);
+
+	WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+
+	switch (paramid) {
+	case IW_AUTH_WPA_VERSION:
+		/* Supported WPA version: disabled, WPA, or WPA2 */
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
+			paramval = IW_AUTH_WPA_VERSION_DISABLED;
+		else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA;
+		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA2;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+		paramval = iw->pwsec;
+		break;
+
+	case IW_AUTH_CIPHER_GROUP:
+		paramval = iw->gwsec;
+		break;
+
+	case IW_AUTH_KEY_MGMT:
+		/* psk, 1x */
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (VAL_PSK(val))
+			paramval = IW_AUTH_KEY_MGMT_PSK;
+		else
+			paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		/* open, shared, leap */
+		if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+			return error;
+		if (!val)
+			paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+		else
+			paramval = IW_AUTH_ALG_SHARED_KEY;
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val)
+			paramval = TRUE;
+		else
+			paramval = FALSE;
+		break;
+
+#if WIRELESS_EXT > 17
+
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		/* driver control or user space app control */
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED:
+		paramval = iw->privacy_invoked;
+		break;
+
+#endif /* WIRELESS_EXT > 17 */
+	}
+	vwrq->value = paramval;
+	return 0;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static const iw_handler wl_iw_handler[] =
+{
+	(iw_handler) wl_iw_config_commit,	/* SIOCSIWCOMMIT */
+	(iw_handler) wl_iw_get_name,		/* SIOCGIWNAME */
+	(iw_handler) NULL,			/* SIOCSIWNWID */
+	(iw_handler) NULL,			/* SIOCGIWNWID */
+	(iw_handler) wl_iw_set_freq,		/* SIOCSIWFREQ */
+	(iw_handler) wl_iw_get_freq,		/* SIOCGIWFREQ */
+	(iw_handler) wl_iw_set_mode,		/* SIOCSIWMODE */
+	(iw_handler) wl_iw_get_mode,		/* SIOCGIWMODE */
+	(iw_handler) NULL,			/* SIOCSIWSENS */
+	(iw_handler) NULL,			/* SIOCGIWSENS */
+	(iw_handler) NULL,			/* SIOCSIWRANGE */
+	(iw_handler) wl_iw_get_range,		/* SIOCGIWRANGE */
+	(iw_handler) NULL,			/* SIOCSIWPRIV */
+	(iw_handler) NULL,			/* SIOCGIWPRIV */
+	(iw_handler) NULL,			/* SIOCSIWSTATS */
+	(iw_handler) NULL,			/* SIOCGIWSTATS */
+	(iw_handler) wl_iw_set_spy,		/* SIOCSIWSPY */
+	(iw_handler) wl_iw_get_spy,		/* SIOCGIWSPY */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_wap,		/* SIOCSIWAP */
+	(iw_handler) wl_iw_get_wap,		/* SIOCGIWAP */
+#if WIRELESS_EXT > 17
+	(iw_handler) wl_iw_mlme,		/* SIOCSIWMLME */
+#else
+	(iw_handler) NULL,			/* -- hole -- */
+#endif
+	(iw_handler) wl_iw_iscan_get_aplist,	/* SIOCGIWAPLIST */
+#if WIRELESS_EXT > 13
+	(iw_handler) wl_iw_iscan_set_scan,	/* SIOCSIWSCAN */
+	(iw_handler) wl_iw_iscan_get_scan,	/* SIOCGIWSCAN */
+#else	/* WIRELESS_EXT > 13 */
+	(iw_handler) NULL,			/* SIOCSIWSCAN */
+	(iw_handler) NULL,			/* SIOCGIWSCAN */
+#endif	/* WIRELESS_EXT > 13 */
+	(iw_handler) wl_iw_set_essid,		/* SIOCSIWESSID */
+	(iw_handler) wl_iw_get_essid,		/* SIOCGIWESSID */
+	(iw_handler) wl_iw_set_nick,		/* SIOCSIWNICKN */
+	(iw_handler) wl_iw_get_nick,		/* SIOCGIWNICKN */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_rate,		/* SIOCSIWRATE */
+	(iw_handler) wl_iw_get_rate,		/* SIOCGIWRATE */
+	(iw_handler) wl_iw_set_rts,		/* SIOCSIWRTS */
+	(iw_handler) wl_iw_get_rts,		/* SIOCGIWRTS */
+	(iw_handler) wl_iw_set_frag,		/* SIOCSIWFRAG */
+	(iw_handler) wl_iw_get_frag,		/* SIOCGIWFRAG */
+	(iw_handler) wl_iw_set_txpow,		/* SIOCSIWTXPOW */
+	(iw_handler) wl_iw_get_txpow,		/* SIOCGIWTXPOW */
+#if WIRELESS_EXT > 10
+	(iw_handler) wl_iw_set_retry,		/* SIOCSIWRETRY */
+	(iw_handler) wl_iw_get_retry,		/* SIOCGIWRETRY */
+#endif /* WIRELESS_EXT > 10 */
+	(iw_handler) wl_iw_set_encode,		/* SIOCSIWENCODE */
+	(iw_handler) wl_iw_get_encode,		/* SIOCGIWENCODE */
+	(iw_handler) wl_iw_set_power,		/* SIOCSIWPOWER */
+	(iw_handler) wl_iw_get_power,		/* SIOCGIWPOWER */
+#if WIRELESS_EXT > 17
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_wpaie,		/* SIOCSIWGENIE */
+	(iw_handler) wl_iw_get_wpaie,		/* SIOCGIWGENIE */
+	(iw_handler) wl_iw_set_wpaauth,		/* SIOCSIWAUTH */
+	(iw_handler) wl_iw_get_wpaauth,		/* SIOCGIWAUTH */
+	(iw_handler) wl_iw_set_encodeext,	/* SIOCSIWENCODEEXT */
+	(iw_handler) wl_iw_get_encodeext,	/* SIOCGIWENCODEEXT */
+	(iw_handler) wl_iw_set_pmksa,		/* SIOCSIWPMKSA */
+#endif /* WIRELESS_EXT > 17 */
+};
+
+#if WIRELESS_EXT > 12
+enum {
+	WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV,
+	WL_IW_SET_VLANMODE,
+	WL_IW_SET_PM,
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
+	WL_IW_SET_LAST
+};
+
+static iw_handler wl_iw_priv_handler[] = {
+	wl_iw_set_leddc,
+	wl_iw_set_vlanmode,
+	wl_iw_set_pm,
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
+	NULL
+};
+
+static struct iw_priv_args wl_iw_priv_args[] = {
+	{
+		WL_IW_SET_LEDDC,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_leddc"
+	},
+	{
+		WL_IW_SET_VLANMODE,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_vlanmode"
+	},
+	{
+		WL_IW_SET_PM,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_pm"
+	},
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
+	{ 0, 0, 0, { 0 } }
+};
+
+const struct iw_handler_def wl_iw_handler_def =
+{
+	.num_standard = ARRAYSIZE(wl_iw_handler),
+	.num_private = ARRAY_SIZE(wl_iw_priv_handler),
+	.num_private_args = ARRAY_SIZE(wl_iw_priv_args),
+	.standard = (iw_handler *) wl_iw_handler,
+	.private = wl_iw_priv_handler,
+	.private_args = wl_iw_priv_args,
+#if WIRELESS_EXT >= 19
+	.get_wireless_stats = dhd_get_wireless_stats,
+#endif /* WIRELESS_EXT >= 19 */
+};
+#endif /* WIRELESS_EXT > 12 */
+
+int
+wl_iw_ioctl(
+	struct net_device *dev,
+	struct ifreq *rq,
+	int cmd
+)
+{
+	struct iwreq *wrq = (struct iwreq *) rq;
+	struct iw_request_info info;
+	iw_handler handler;
+	char *extra = NULL;
+	size_t token_size = 1;
+	int max_tokens = 0, ret = 0;
+
+	if (cmd < SIOCIWFIRST ||
+		IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
+		!(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)]))
+		return -EOPNOTSUPP;
+
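+	/* Work out the size of the kernel bounce buffer (token_size * max_tokens)
+	 * needed for this request's user-space payload.
+	 */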
+	switch (cmd) {
+
+	case SIOCSIWESSID:
+	case SIOCGIWESSID:
+	case SIOCSIWNICKN:
+	case SIOCGIWNICKN:
+		max_tokens = IW_ESSID_MAX_SIZE + 1;
+		break;
+
+	case SIOCSIWENCODE:
+	case SIOCGIWENCODE:
+#if WIRELESS_EXT > 17
+	case SIOCSIWENCODEEXT:
+	case SIOCGIWENCODEEXT:
+#endif
+		max_tokens = IW_ENCODING_TOKEN_MAX;
+		break;
+
+	case SIOCGIWRANGE:
+		max_tokens = sizeof(struct iw_range);
+		break;
+
+	case SIOCGIWAPLIST:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_AP;
+		break;
+
+#if WIRELESS_EXT > 13
+	case SIOCGIWSCAN:
+		if (g_iscan)
+			max_tokens = wrq->u.data.length;
+		else
+			max_tokens = IW_SCAN_MAX_DATA;
+		break;
+#endif /* WIRELESS_EXT > 13 */
+
+	case SIOCSIWSPY:
+		token_size = sizeof(struct sockaddr);
+		max_tokens = IW_MAX_SPY;
+		break;
+
+	case SIOCGIWSPY:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_SPY;
+		break;
+	default:
+		break;
+	}
+
+	if (max_tokens && wrq->u.data.pointer) {
+		if (wrq->u.data.length > max_tokens)
+			return -E2BIG;
+
+		if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL)))
+			return -ENOMEM;
+
+		if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+	}
+
+	info.cmd = cmd;
+	info.flags = 0;
+
+	ret = handler(dev, &info, &wrq->u, extra);
+
+	if (extra) {
+		if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+
+		kfree(extra);
+	}
+
+	return ret;
+}
+
+/* Convert a connection status event into a connection status string.
+ * Returns TRUE if a matching connection status string was found.
+ */
+bool
+wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason,
+	char* stringBuf, uint buflen)
+{
+	typedef struct conn_fail_event_map_t {
+		uint32 inEvent;			/* input: event type to match */
+		uint32 inStatus;		/* input: event status code to match */
+		uint32 inReason;		/* input: event reason code to match */
+		const char* outName;	/* output: failure type */
+		const char* outCause;	/* output: failure cause */
+	} conn_fail_event_map_t;
+
+	/* Map of WLC_E events to connection failure strings */
+#	define WL_IW_DONT_CARE	9999
+	const conn_fail_event_map_t event_map [] = {
+		/* inEvent           inStatus                inReason         */
+		/* outName outCause                                           */
+		{WLC_E_SET_SSID,     WLC_E_STATUS_SUCCESS,   WL_IW_DONT_CARE,
+		"Conn", "Success"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
+		"Conn", "NoNetworks"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ConfigMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_PRUNE_ENCR_MISMATCH,
+		"Conn", "EncrypMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_RSN_MISMATCH,
+		"Conn", "RsnMismatch"},
+		{WLC_E_AUTH,         WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "AuthTimeout"},
+		{WLC_E_AUTH,         WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "AuthFail"},
+		{WLC_E_AUTH,         WLC_E_STATUS_NO_ACK,    WL_IW_DONT_CARE,
+		"Conn", "AuthNoAck"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ReassocFail"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "ReassocTimeout"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_ABORT,     WL_IW_DONT_CARE,
+		"Conn", "ReassocAbort"},
+		{WLC_E_PSK_SUP,      WLC_SUP_KEYED,          WL_IW_DONT_CARE,
+		"Sup", "ConnSuccess"},
+		{WLC_E_PSK_SUP,      WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Sup", "WpaHandshakeFail"},
+		{WLC_E_DEAUTH_IND,   WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Deauth"},
+		{WLC_E_DISASSOC_IND, WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "DisassocInd"},
+		{WLC_E_DISASSOC,     WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Disassoc"}
+	};
+
+	const char* name = "";
+	const char* cause = NULL;
+	int i;
+
+	/* Search the event map table for a matching event */
+	for (i = 0;  i < sizeof(event_map)/sizeof(event_map[0]);  i++) {
+		const conn_fail_event_map_t* row = &event_map[i];
+		if (row->inEvent == event_type &&
+		    (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+		    (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+			name = row->outName;
+			cause = row->outCause;
+			break;
+		}
+	}
+
+	/* If found, generate a connection failure string and return TRUE */
+	if (cause) {
+		memset(stringBuf, 0, buflen);
+		snprintf(stringBuf, buflen, "%s %s %02d %02d",
+			name, cause, status, reason);
+		WL_TRACE(("Connection status: %s\n", stringBuf));
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
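+/* Illustrative use (the caller and buffer below are hypothetical):
+ *
+ *	char report[64];
+ *	if (wl_iw_conn_status_str(WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, 0,
+ *		report, sizeof(report)))
+ *		WL_TRACE(("connection failure: %s\n", report));
+ *
+ * With the table above this yields a string of the form
+ * "Conn AuthTimeout <status> <reason>".
+ */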
+
+#if (WIRELESS_EXT > 14)
+/* Check if we have received an event that indicates connection failure
+ * If so, generate a connection failure report string.
+ * The caller supplies a buffer to hold the generated string.
+ */
+static bool
+wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen)
+{
+	uint32 event = ntoh32(e->event_type);
+	uint32 status =  ntoh32(e->status);
+	uint32 reason =  ntoh32(e->reason);
+
+	if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen))
+		return TRUE;
+	else
+		return FALSE;
+}
+#endif /* WIRELESS_EXT > 14 */
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256 /* size of extra buffer used for translation of events */
+#endif /* IW_CUSTOM_MAX */
+
+void
+wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+{
+#if WIRELESS_EXT > 13
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd = 0;
+	uint32 event_type = ntoh32(e->event_type);
+	uint16 flags =  ntoh16(e->flags);
+	uint32 datalen = ntoh32(e->datalen);
+	uint32 status =  ntoh32(e->status);
+
+	memset(&wrqu, 0, sizeof(wrqu));
+	memset(extra, 0, sizeof(extra));
+
+	memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+	wrqu.addr.sa_family = ARPHRD_ETHER;
+
+	switch (event_type) {
+	case WLC_E_TXFAIL:
+		cmd = IWEVTXDROP;
+		break;
+#if WIRELESS_EXT > 14
+	case WLC_E_JOIN:
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+		cmd = IWEVREGISTERED;
+		break;
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		cmd = SIOCGIWAP;
+		wrqu.data.length = strlen(extra);
+		bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+		bzero(&extra, ETHER_ADDR_LEN);
+		break;
+
+	case WLC_E_LINK:
+	case WLC_E_NDIS_LINK:
+		cmd = SIOCGIWAP;
+		wrqu.data.length = strlen(extra);
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+			bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+			bzero(&extra, ETHER_ADDR_LEN);
+		}
+		break;
+	case WLC_E_ACTION_FRAME:
+		cmd = IWEVCUSTOM;
+		if (datalen + 1 <= sizeof(extra)) {
+			wrqu.data.length = datalen + 1;
+			extra[0] = WLC_E_ACTION_FRAME;
+			memcpy(&extra[1], data, datalen);
+			WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length));
+		}
+		break;
+
+	case WLC_E_ACTION_FRAME_COMPLETE:
+		cmd = IWEVCUSTOM;
+		if (sizeof(status) + 1 <= sizeof(extra)) {
+			wrqu.data.length = sizeof(status) + 1;
+			extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
+			memcpy(&extra[1], &status, sizeof(status));
+			WL_TRACE(("wl_iw_event status %d  \n", status));
+		}
+		break;
+#endif /* WIRELESS_EXT > 14 */
+#if WIRELESS_EXT > 17
+	case WLC_E_MIC_ERROR: {
+		struct	iw_michaelmicfailure  *micerrevt = (struct  iw_michaelmicfailure  *)&extra;
+		cmd = IWEVMICHAELMICFAILURE;
+		wrqu.data.length = sizeof(struct iw_michaelmicfailure);
+		if (flags & WLC_EVENT_MSG_GROUP)
+			micerrevt->flags |= IW_MICFAILURE_GROUP;
+		else
+			micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
+		memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		micerrevt->src_addr.sa_family = ARPHRD_ETHER;
+
+		break;
+	}
+
+	case WLC_E_ASSOC_REQ_IE:
+		cmd = IWEVASSOCREQIE;
+		wrqu.data.length = datalen;
+		if (datalen < sizeof(extra))
+			memcpy(extra, data, datalen);
+		break;
+
+	case WLC_E_ASSOC_RESP_IE:
+		cmd = IWEVASSOCRESPIE;
+		wrqu.data.length = datalen;
+		if (datalen < sizeof(extra))
+			memcpy(extra, data, datalen);
+		break;
+
+	case WLC_E_PMKID_CACHE: {
+		struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra;
+		pmkid_cand_list_t *pmkcandlist;
+		pmkid_cand_t	*pmkidcand;
+		int count;
+
+		if (data == NULL)
+			break;
+
+		cmd = IWEVPMKIDCAND;
+		pmkcandlist = data;
+		count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand);
+		wrqu.data.length = sizeof(struct iw_pmkid_cand);
+		pmkidcand = pmkcandlist->pmkid_cand;
+		while (count) {
+			bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
+			if (pmkidcand->preauth)
+				iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
+			bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
+			      ETHER_ADDR_LEN);
+			wireless_send_event(dev, cmd, &wrqu, extra);
+			pmkidcand++;
+			count--;
+		}
+		break;
+	}
+#endif /* WIRELESS_EXT > 17 */
+
+	case WLC_E_SCAN_COMPLETE:
+#if WIRELESS_EXT > 14
+		cmd = SIOCGIWSCAN;
+#endif
+		WL_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
+		if ((g_iscan) && (g_iscan->sysioc_pid >= 0) &&
+			(g_iscan->iscan_state != ISCAN_STATE_IDLE))
+			up(&g_iscan->sysioc_sem);
+		break;
+
+	default:
+		/* Cannot translate event */
+		break;
+	}
+
+	if (cmd) {
+		if (cmd == SIOCGIWSCAN)
+			wireless_send_event(dev, cmd, &wrqu, NULL);
+		else
+			wireless_send_event(dev, cmd, &wrqu, extra);
+	}
+
+#if WIRELESS_EXT > 14
+	/* Look for WLC events that indicate a connection failure.
+	 * If found, generate an IWEVCUSTOM event.
+	 */
+	memset(extra, 0, sizeof(extra));
+	if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
+		cmd = IWEVCUSTOM;
+		wrqu.data.length = strlen(extra);
+		wireless_send_event(dev, cmd, &wrqu, extra);
+	}
+#endif /* WIRELESS_EXT > 14 */
+
+#endif /* WIRELESS_EXT > 13 */
+}
+
+int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
+{
+	int res = 0;
+	wl_cnt_t cnt;
+	int phy_noise;
+	int rssi;
+	scb_val_t scb_val;
+
+	phy_noise = 0;
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise))))
+		goto done;
+
+	phy_noise = dtoh32(phy_noise);
+	WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d *****\n", phy_noise));
+
+	scb_val.val = 0;
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t))))
+		goto done;
+
+	rssi = dtoh32(scb_val.val);
+	WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi));
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		wstats->qual.qual = 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		wstats->qual.qual = 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		wstats->qual.qual = 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		wstats->qual.qual = 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		wstats->qual.qual = 4;
+	else
+		wstats->qual.qual = 5;
+
+	/* Wraps to 0 if RSSI is 0 */
+	wstats->qual.level = 0x100 + rssi;
+	wstats->qual.noise = 0x100 + phy_noise;
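+	/* Note: the 0x100 offset stores the negative dBm values in the u8
+	 * qual fields; tools that honour the IW_QUAL_DBM flag set below
+	 * (WIRELESS_EXT > 18) can subtract 0x100 again to recover the signed
+	 * dBm readings.
+	 */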
+#if WIRELESS_EXT > 18
+	wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
+#else
+	wstats->qual.updated |= 7;
+#endif /* WIRELESS_EXT > 18 */
+
+#if WIRELESS_EXT > 11
+	WL_TRACE(("wl_iw_get_wireless_stats counters=%d *****\n", (int)sizeof(wl_cnt_t)));
+
+	memset(&cnt, 0, sizeof(wl_cnt_t));
+	res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
+	if (res)
+	{
+		WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res));
+		goto done;
+	}
+
+	cnt.version = dtoh16(cnt.version);
+	if (cnt.version != WL_CNT_T_VERSION) {
+		WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
+			WL_CNT_T_VERSION, cnt.version));
+		goto done;
+	}
+
+	wstats->discard.nwid = 0;
+	wstats->discard.code = dtoh32(cnt.rxundec);
+	wstats->discard.fragment = dtoh32(cnt.rxfragerr);
+	wstats->discard.retries = dtoh32(cnt.txfail);
+	wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
+	wstats->miss.beacon = 0;
+
+	WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+		dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant)));
+
+#endif /* WIRELESS_EXT > 11 */
+
+done:
+	return res;
+}
+
+static void
+wl_iw_timerfunc(ulong data)
+{
+	iscan_info_t *iscan = (iscan_info_t *)data;
+	iscan->timer_on = 0;
+	if (iscan->iscan_state != ISCAN_STATE_IDLE) {
+		WL_TRACE(("timer trigger\n"));
+		up(&iscan->sysioc_sem);
+	}
+}
+
+static void
+wl_iw_set_event_mask(struct net_device *dev)
+{
+	char eventmask[WL_EVENTING_MASK_LEN];
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/* Room for "event_msgs" + '\0' + bitvec */
+
+	dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+	setbit(eventmask, WLC_E_SCAN_COMPLETE);
+	dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+		iovbuf, sizeof(iovbuf));
+
+}
+
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+	int err = 0;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+	if (ssid && ssid->SSID_len)
+		memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+	return err;
+}
+
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+	int params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params));
+	wl_iscan_params_t *params;
+	int err = 0;
+
+	if (ssid && ssid->SSID_len) {
+		params_size += sizeof(wlc_ssid_t);
+	}
+	params = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		return -ENOMEM;
+	}
+	memset(params, 0, params_size);
+	ASSERT(params_size < WLC_IOCTL_SMLEN);
+
+	err = wl_iw_iscan_prep(&params->params, ssid);
+
+	if (!err) {
+		params->version = htod32(ISCAN_REQ_VERSION);
+		params->action = htod16(action);
+		params->scan_duration = htod16(0);
+
+		/* params_size += OFFSETOF(wl_iscan_params_t, params); */
+		(void) dev_iw_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+			iscan->ioctlbuf, WLC_IOCTL_SMLEN);
+	}
+
+	kfree(params);
+	return err;
+}
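+/* Illustrative call sequence (names not shown in this file are assumptions):
+ * a scan is normally kicked off with the start action, e.g.
+ *
+ *	wl_iw_iscan(g_iscan, &ssid, WL_SCAN_ACTION_START);
+ *
+ * and then continued from _iscan_sysioc_thread() below with
+ * WL_SCAN_ACTION_CONTINUE; WL_SCAN_ACTION_START is assumed to be defined
+ * next to WL_SCAN_ACTION_CONTINUE in wlioctl.h.
+ */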
+
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+	iscan_buf_t * buf;
+	iscan_buf_t * ptr;
+	wl_iscan_results_t * list_buf;
+	wl_iscan_results_t list;
+	wl_scan_results_t *results;
+	uint32 status;
+
+	/* buffers are allocated on demand */
+	if (iscan->list_cur) {
+		buf = iscan->list_cur;
+		iscan->list_cur = buf->next;
+	}
+	else {
+		buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
+		if (!buf)
+			return WL_SCAN_RESULTS_ABORTED;
+		buf->next = NULL;
+		if (!iscan->list_hdr)
+			iscan->list_hdr = buf;
+		else {
+			ptr = iscan->list_hdr;
+			while (ptr->next) {
+				ptr = ptr->next;
+			}
+			ptr->next = buf;
+		}
+	}
+	memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+	list_buf = (wl_iscan_results_t*)buf->iscan_buf;
+	results = &list_buf->results;
+	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+	results->version = 0;
+	results->count = 0;
+
+	memset(&list, 0, sizeof(list));
+	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+	(void) dev_iw_iovar_getbuf(
+		iscan->dev,
+		"iscanresults",
+		&list,
+		WL_ISCAN_RESULTS_FIXED_SIZE,
+		buf->iscan_buf,
+		WLC_IW_ISCAN_MAXLEN);
+	results->buflen = dtoh32(results->buflen);
+	results->version = dtoh32(results->version);
+	results->count = dtoh32(results->count);
+	WL_TRACE(("results->count = %d\n", results->count));
+
+	WL_TRACE(("results->buflen = %d\n", results->buflen));
+	status = dtoh32(list_buf->status);
+	return status;
+}
+
+static void wl_iw_send_scan_complete(iscan_info_t *iscan)
+{
+	union iwreq_data wrqu;
+
+	memset(&wrqu, 0, sizeof(wrqu));
+
+	/* WEXT expects no data for the SIOCGIWSCAN event */
+	wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
+}
+
+static int
+_iscan_sysioc_thread(void *data)
+{
+	uint32 status;
+	iscan_info_t *iscan = (iscan_info_t *)data;
+
+	DAEMONIZE("iscan_sysioc");
+
+	status = WL_SCAN_RESULTS_PARTIAL;
+	while (down_interruptible(&iscan->sysioc_sem) == 0) {
+		if (iscan->timer_on) {
+			del_timer(&iscan->timer);
+			iscan->timer_on = 0;
+		}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_lock();
+#endif
+		status = wl_iw_iscan_get(iscan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_unlock();
+#endif
+
+		switch (status) {
+			case WL_SCAN_RESULTS_PARTIAL:
+				WL_TRACE(("iscanresults incomplete\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_lock();
+#endif
+				/* make sure our buffer size is large enough before the next round */
+				wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_unlock();
+#endif
+				/* Reschedule the timer */
+				iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+				add_timer(&iscan->timer);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_SUCCESS:
+				WL_TRACE(("iscanresults complete\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				wl_iw_send_scan_complete(iscan);
+				break;
+			case WL_SCAN_RESULTS_PENDING:
+				WL_TRACE(("iscanresults pending\n"));
+				/* Reschedule the timer */
+				iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+				add_timer(&iscan->timer);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_ABORTED:
+				WL_TRACE(("iscanresults aborted\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				wl_iw_send_scan_complete(iscan);
+				break;
+			default:
+				WL_TRACE(("iscanresults returned unknown status %d\n", status));
+				break;
+		 }
+	}
+	complete_and_exit(&iscan->sysioc_exited, 0);
+}
+
+int
+wl_iw_attach(struct net_device *dev, void * dhdp)
+{
+	iscan_info_t *iscan = NULL;
+
+	if (!dev)
+		return 0;
+
+	iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+	if (!iscan)
+		return -ENOMEM;
+	memset(iscan, 0, sizeof(iscan_info_t));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	iscan->kthread = NULL;
+#endif
+	iscan->sysioc_pid = -1;
+	/* we only care about the main interface, so save it in a global here */
+	g_iscan = iscan;
+	iscan->dev = dev;
+	iscan->iscan_state = ISCAN_STATE_IDLE;
+
+
+	/* Set up the timer */
+	iscan->timer_ms    = 2000;
+	init_timer(&iscan->timer);
+	iscan->timer.data = (ulong)iscan;
+	iscan->timer.function = wl_iw_timerfunc;
+
+	sema_init(&iscan->sysioc_sem, 0);
+	init_completion(&iscan->sysioc_exited);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	iscan->kthread = kthread_run(_iscan_sysioc_thread, iscan, "iscan_sysioc");
+	if (IS_ERR(iscan->kthread)) {
+		/* kthread_run() returns an ERR_PTR on failure, not NULL */
+		iscan->kthread = NULL;
+		return -ENOMEM;
+	}
+	iscan->sysioc_pid = iscan->kthread->pid;
+#else
+	iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
+#endif
+	if (iscan->sysioc_pid < 0)
+		return -ENOMEM;
+	return 0;
+}
+
+void wl_iw_detach(void)
+{
+	iscan_buf_t  *buf;
+	iscan_info_t *iscan = g_iscan;
+	if (!iscan)
+		return;
+	if (iscan->sysioc_pid >= 0) {
+		KILL_PROC(iscan->sysioc_pid, SIGTERM);
+		wait_for_completion(&iscan->sysioc_exited);
+	}
+
+	while (iscan->list_hdr) {
+		buf = iscan->list_hdr->next;
+		kfree(iscan->list_hdr);
+		iscan->list_hdr = buf;
+	}
+	kfree(iscan);
+	g_iscan = NULL;
+}
+
+#endif /* USE_IW */
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.h b/drivers/net/wireless/bcmdhd/wl_iw.h
new file mode 100644
index 0000000000000000000000000000000000000000..dfadb0c3951d6ffe6ccd46127079a612e2091565
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.h
@@ -0,0 +1,143 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_iw.h 488316 2014-06-30 15:22:21Z $
+ */
+
+#ifndef _wl_iw_h_
+#define _wl_iw_h_
+
+#include <linux/wireless.h>
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+
+#define WL_SCAN_PARAMS_SSID_MAX 	10
+#define GET_SSID			"SSID="
+#define GET_CHANNEL			"CH="
+#define GET_NPROBE 			"NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL  	"ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL  	"PASSIVE="
+#define GET_HOME_DWELL  		"HOME="
+#define GET_SCAN_TYPE			"TYPE="
+
+#define BAND_GET_CMD				"GETBAND"
+#define BAND_SET_CMD				"SETBAND"
+#define DTIM_SKIP_GET_CMD			"DTIMSKIPGET"
+#define DTIM_SKIP_SET_CMD			"DTIMSKIPSET"
+#define SETSUSPEND_CMD				"SETSUSPENDOPT"
+#define PNOSSIDCLR_SET_CMD			"PNOSSIDCLR"
+#define PNOSETUP_SET_CMD			"PNOSETUP " /* TLV command has extra end space */
+#define PNOENABLE_SET_CMD			"PNOFORCE"
+#define PNODEBUG_SET_CMD			"PNODEBUG"
+#define TXPOWER_SET_CMD			"TXPOWER"
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02X:%02X:%02X:%02X:%02X:%02X"
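+/* Typical use (illustrative; "ea" is a hypothetical struct ether_addr):
+ *	WL_TRACE(("BSSID " MACSTR "\n", MAC2STR(ea.octet)));
+ */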
+
+/* Structure to keep global parameters */
+typedef struct wl_iw_extra_params {
+	int 	target_channel; /* target channel */
+} wl_iw_extra_params_t;
+
+struct cntry_locales_custom {
+	char iso_abbrev[WLC_CNTRY_BUF_SZ];	/* ISO 3166-1 country abbreviation */
+	char custom_locale[WLC_CNTRY_BUF_SZ];	/* Custom firmware locale */
+	int32 custom_locale_rev;		/* Custom locale revision, default -1 */
+};
+/* ============================================== */
+/* Defines from wlc_pub.h */
+#define	WL_IW_RSSI_MINVAL		-200	/* Low value, e.g. for forcing roam */
+#define	WL_IW_RSSI_NO_SIGNAL	-91	/* NDIS RSSI link quality cutoffs */
+#define	WL_IW_RSSI_VERY_LOW	-80	/* Very low quality cutoffs */
+#define	WL_IW_RSSI_LOW		-70	/* Low quality cutoffs */
+#define	WL_IW_RSSI_GOOD		-68	/* Good quality cutoffs */
+#define	WL_IW_RSSI_VERY_GOOD	-58	/* Very good quality cutoffs */
+#define	WL_IW_RSSI_EXCELLENT	-57	/* Excellent quality cutoffs */
+#define	WL_IW_RSSI_INVALID	 0	/* invalid RSSI value */
+#define MAX_WX_STRING 80
+#define SSID_FMT_BUF_LEN	((4 * 32) + 1)
+#define isprint(c) bcm_isprint(c)
+#define WL_IW_SET_ACTIVE_SCAN	(SIOCIWFIRSTPRIV+1)
+#define WL_IW_GET_RSSI			(SIOCIWFIRSTPRIV+3)
+#define WL_IW_SET_PASSIVE_SCAN	(SIOCIWFIRSTPRIV+5)
+#define WL_IW_GET_LINK_SPEED	(SIOCIWFIRSTPRIV+7)
+#define WL_IW_GET_CURR_MACADDR	(SIOCIWFIRSTPRIV+9)
+#define WL_IW_SET_STOP				(SIOCIWFIRSTPRIV+11)
+#define WL_IW_SET_START			(SIOCIWFIRSTPRIV+13)
+
+#define G_SCAN_RESULTS		(8 * 1024)
+#define WE_ADD_EVENT_FIX	0x80
+#define G_WLAN_SET_ON		0
+#define G_WLAN_SET_OFF		1
+
+
+typedef struct wl_iw {
+	char nickname[IW_ESSID_MAX_SIZE];
+
+	struct iw_statistics wstats;
+
+	int spy_num;
+	uint32 pwsec;			/* pairwise wsec setting */
+	uint32 gwsec;			/* group wsec setting  */
+	bool privacy_invoked; 		/* IW_AUTH_PRIVACY_INVOKED setting */
+	struct ether_addr spy_addr[IW_MAX_SPY];
+	struct iw_quality spy_qual[IW_MAX_SPY];
+	void  *wlinfo;
+} wl_iw_t;
+
+struct wl_ctrl {
+	struct timer_list *timer;
+	struct net_device *dev;
+	long sysioc_pid;
+	struct semaphore sysioc_sem;
+	struct completion sysioc_exited;
+};
+
+
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+extern const struct iw_handler_def wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+
+extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
+extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_attach(struct net_device *dev, void * dhdp);
+int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+
+void wl_iw_detach(void);
+
+#define CSCAN_COMMAND				"CSCAN "
+#define CSCAN_TLV_PREFIX 			'S'
+#define CSCAN_TLV_VERSION			1
+#define CSCAN_TLV_SUBVERSION			0
+#define CSCAN_TLV_TYPE_SSID_IE		'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE	'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE	'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE	'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE	'P'
+#define CSCAN_TLV_TYPE_HOME_IE		'H'
+#define CSCAN_TLV_TYPE_STYPE_IE		'T'
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(info, stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(info, event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(info, stream, ends, iwe, extra)
+#else
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(stream, ends, iwe, extra)
+#endif
+
+#endif /* _wl_iw_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_linux_mon.c b/drivers/net/wireless/bcmdhd/wl_linux_mon.c
new file mode 100644
index 0000000000000000000000000000000000000000..ccf2c8ddd878895f6188090c1dc0495219428773
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_linux_mon.c
@@ -0,0 +1,385 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux monitor network interface
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_linux_mon.c 467328 2014-04-03 01:23:40Z $
+ */
+
+#include <osl.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ieee80211.h>
+#include <linux/rtnetlink.h>
+#include <net/ieee80211_radiotap.h>
+
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+typedef enum monitor_states
+{
+	MONITOR_STATE_DEINIT = 0x0,
+	MONITOR_STATE_INIT = 0x1,
+	MONITOR_STATE_INTERFACE_ADDED = 0x2,
+	MONITOR_STATE_INTERFACE_DELETED = 0x4
+} monitor_states_t;
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+/**
+ * Local declarations and definitions (not exposed)
+ */
+#ifndef DHD_MAX_IFS
+#define DHD_MAX_IFS 16
+#endif
+#define MON_PRINT(format, ...) printf("DHD-MON: %s " format, __func__, ##__VA_ARGS__)
+#define MON_TRACE MON_PRINT
+
+typedef struct monitor_interface {
+	int radiotap_enabled;
+	struct net_device* real_ndev;	/* The real interface that the monitor is on */
+	struct net_device* mon_ndev;
+} monitor_interface;
+
+typedef struct dhd_linux_monitor {
+	void *dhd_pub;
+	monitor_states_t monitor_state;
+	monitor_interface mon_if[DHD_MAX_IFS];
+	struct mutex lock;		/* lock to protect mon_if */
+} dhd_linux_monitor_t;
+
+static dhd_linux_monitor_t g_monitor;
+
+static struct net_device* lookup_real_netdev(char *name);
+static monitor_interface* ndev_to_monif(struct net_device *ndev);
+static int dhd_mon_if_open(struct net_device *ndev);
+static int dhd_mon_if_stop(struct net_device *ndev);
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev);
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr);
+
+static const struct net_device_ops dhd_mon_if_ops = {
+	.ndo_open		= dhd_mon_if_open,
+	.ndo_stop		= dhd_mon_if_stop,
+	.ndo_start_xmit		= dhd_mon_if_subif_start_xmit,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_mon_if_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_mon_if_set_multicast_list,
+#endif
+	.ndo_set_mac_address 	= dhd_mon_if_change_mac,
+};
+
+/**
+ * Local static function definitions
+ */
+
+/* Look up dhd's net device table to find a match (e.g. interface "eth0" is a match for "mon.eth0"
+ * "p2p-eth0-0" is a match for "mon.p2p-eth0-0")
+ */
+static struct net_device* lookup_real_netdev(char *name)
+{
+	struct net_device *ndev_found = NULL;
+
+	int i;
+	int len = 0;
+	int last_name_len = 0;
+	struct net_device *ndev;
+
+	/* We need to find the interface "p2p-p2p-0" corresponding to the monitor
+	 * interface "mon-p2p-0". Once the mon iface name reaches IFNAMSIZ it is
+	 * reset to "p2p0-0" and the corresponding mon iface becomes "mon-p2p0-0".
+	 */
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		ndev = dhd_idx2net(g_monitor.dhd_pub, i);
+
+		/* Skip "p2p" and look for "-p2p0-x" in the monitor interface name.
+		 * If it matches, then this netdev is the corresponding real_netdev.
+		 */
+		if (ndev && strstr(ndev->name, "p2p-p2p0")) {
+			len = strlen("p2p");
+		} else {
+			/* If "p2p-" is not present, IFNAMSIZ has been reached and the
+			 * name has been reset. In this case, look for "p2p0-x" in
+			 * "mon-p2p0-x".
+			 */
+			len = 0;
+		}
+		if (ndev && strstr(name, (ndev->name + len))) {
+			if (strlen(ndev->name) > last_name_len) {
+				ndev_found = ndev;
+				last_name_len = strlen(ndev->name);
+			}
+		}
+	}
+
+	return ndev_found;
+}
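+/* Note: if more than one interface name matches (e.g. a short name that is a
+ * substring of a longer one), the loop above keeps the interface with the
+ * longest name, i.e. the most specific match.
+ */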
+
+static monitor_interface* ndev_to_monif(struct net_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		if (g_monitor.mon_if[i].mon_ndev == ndev)
+			return &g_monitor.mon_if[i];
+	}
+
+	return NULL;
+}
+
+static int dhd_mon_if_open(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+static int dhd_mon_if_stop(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	int ret = 0;
+	int rtap_len;
+	int qos_len = 0;
+	int dot11_hdr_len = 24;
+	int snap_len = 6;
+	unsigned char *pdata;
+	unsigned short frame_ctl;
+	unsigned char src_mac_addr[6];
+	unsigned char dst_mac_addr[6];
+	struct ieee80211_hdr *dot11_hdr;
+	struct ieee80211_radiotap_header *rtap_hdr;
+	monitor_interface* mon_if;
+
+	MON_PRINT("enter\n");
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+		goto fail;
+	}
+
+	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+		goto fail;
+
+	rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
+	if (unlikely(rtap_hdr->it_version))
+		goto fail;
+
+	rtap_len = ieee80211_get_radiotap_len(skb->data);
+	if (unlikely(skb->len < rtap_len))
+		goto fail;
+
+	MON_PRINT("radiotap len (should be 14): %d\n", rtap_len);
+
+	/* Skip the radiotap header */
+	skb_pull(skb, rtap_len);
+
+	dot11_hdr = (struct ieee80211_hdr *)skb->data;
+	frame_ctl = le16_to_cpu(dot11_hdr->frame_control);
+	/* Only data frames are converted and forwarded */
+	if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
+		/* A QoS data frame carries a 2-byte QoS control field */
+		if (dot11_hdr->frame_control & 0x0080)
+			qos_len = 2;
+		/* A Wireless Distribution System (WDS) frame has 4 MAC addresses */
+		if ((dot11_hdr->frame_control & 0x0300) == 0x0300)
+			dot11_hdr_len += 6;
+
+		memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
+		memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
+
+		/* Skip the 802.11 header, QoS (if any) and SNAP, but leave space
+		 * for two MAC addresses
+		 */
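+		/* The arithmetic below appears to leave exactly 12 bytes in front
+		 * of the LLC/SNAP EtherType (snap_len counts the 6 LLC/SNAP bytes
+		 * that precede it); the two MAC addresses copied in afterwards
+		 * overwrite those 12 bytes, so the frame reaches dhd_start_xmit()
+		 * shaped like an Ethernet II frame.
+		 */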
+		skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
+		pdata = (unsigned char*)skb->data;
+		memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
+		memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
+		PKTSETPRIO(skb, 0);
+
+		MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name);
+
+		/* Use the real net device to transmit the packet */
+		ret = dhd_start_xmit(skb, mon_if->real_ndev);
+
+		return ret;
+	}
+fail:
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev)
+{
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+	} else {
+		MON_PRINT("enter, if name: %s, matched if name %s\n",
+		ndev->name, mon_if->real_ndev->name);
+	}
+}
+
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr)
+{
+	int ret = 0;
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+	} else {
+		MON_PRINT("enter, if name: %s, matched if name %s\n",
+		ndev->name, mon_if->real_ndev->name);
+	}
+	return ret;
+}
+
+/**
+ * Global function definitions (declared in dhd_linux_mon.h)
+ */
+
+int dhd_add_monitor(char *name, struct net_device **new_ndev)
+{
+	int i;
+	int idx = -1;
+	int ret = 0;
+	struct net_device* ndev = NULL;
+	dhd_linux_monitor_t **dhd_mon;
+
+	mutex_lock(&g_monitor.lock);
+
+	MON_TRACE("enter, if name: %s\n", name);
+	if (!name || !new_ndev) {
+		MON_PRINT("invalid parameters\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Find a vacancy
+	 */
+	for (i = 0; i < DHD_MAX_IFS; i++)
+		if (g_monitor.mon_if[i].mon_ndev == NULL) {
+			idx = i;
+			break;
+		}
+	if (idx == -1) {
+		MON_PRINT("exceeds maximum interfaces\n");
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*));
+	if (!ndev) {
+		MON_PRINT("failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+	strncpy(ndev->name, name, IFNAMSIZ);
+	ndev->name[IFNAMSIZ - 1] = 0;
+	ndev->netdev_ops = &dhd_mon_if_ops;
+
+	ret = register_netdevice(ndev);
+	if (ret) {
+		MON_PRINT(" register_netdevice failed (%d)\n", ret);
+		goto out;
+	}
+
+	*new_ndev = ndev;
+	g_monitor.mon_if[idx].radiotap_enabled = TRUE;
+	g_monitor.mon_if[idx].mon_ndev = ndev;
+	g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name);
+	dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev);
+	*dhd_mon = &g_monitor;
+	g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED;
+	MON_PRINT("net device returned: 0x%p\n", ndev);
+	if (g_monitor.mon_if[idx].real_ndev)
+		MON_PRINT("found a matched net device, name %s\n",
+			g_monitor.mon_if[idx].real_ndev->name);
+
+out:
+	if (ret && ndev)
+		free_netdev(ndev);
+
+	mutex_unlock(&g_monitor.lock);
+	return ret;
+
+}
+
+int dhd_del_monitor(struct net_device *ndev)
+{
+	int i;
+	if (!ndev)
+		return -EINVAL;
+	mutex_lock(&g_monitor.lock);
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		if (g_monitor.mon_if[i].mon_ndev == ndev ||
+			g_monitor.mon_if[i].real_ndev == ndev) {
+
+			g_monitor.mon_if[i].real_ndev = NULL;
+			unregister_netdevice(g_monitor.mon_if[i].mon_ndev);
+			free_netdev(g_monitor.mon_if[i].mon_ndev);
+			g_monitor.mon_if[i].mon_ndev = NULL;
+			g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED;
+			break;
+		}
+	}
+
+	if (g_monitor.monitor_state != MONITOR_STATE_INTERFACE_DELETED)
+		MON_PRINT("IF not found in monitor array, is this a monitor IF? 0x%p\n", ndev);
+	mutex_unlock(&g_monitor.lock);
+
+	return 0;
+}
+
+int dhd_monitor_init(void *dhd_pub)
+{
+	if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) {
+		g_monitor.dhd_pub = dhd_pub;
+		mutex_init(&g_monitor.lock);
+		g_monitor.monitor_state = MONITOR_STATE_INIT;
+	}
+	return 0;
+}
+
+int dhd_monitor_uninit(void)
+{
+	int i;
+	struct net_device *ndev;
+	mutex_lock(&g_monitor.lock);
+	if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) {
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			ndev = g_monitor.mon_if[i].mon_ndev;
+			if (ndev) {
+				unregister_netdevice(ndev);
+				free_netdev(ndev);
+				g_monitor.mon_if[i].real_ndev = NULL;
+				g_monitor.mon_if[i].mon_ndev = NULL;
+			}
+		}
+		g_monitor.monitor_state = MONITOR_STATE_DEINIT;
+	}
+	mutex_unlock(&g_monitor.lock);
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/net/wireless/bcmdhd/wldev_common.c
new file mode 100644
index 0000000000000000000000000000000000000000..29d1d9566054513cc3df9116bfc83700716d6508
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.c
@@ -0,0 +1,367 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wldev_common.c 467328 2014-04-03 01:23:40Z $
+ */
+
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+
+#include <wldev_common.h>
+#include <bcmutils.h>
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#define	WLDEV_ERROR(args)						\
+	do {										\
+		printf(KERN_ERR "WLDEV-ERROR) %s : ", __func__);	\
+		printf args;							\
+	} while (0)
+
+extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd);
+
+s32 wldev_ioctl(
+	struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+	s32 ret = 0;
+	struct wl_ioctl ioc;
+
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+	ioc.set = set;
+
+	ret = dhd_ioctl_entry_local(dev, &ioc, cmd);
+
+	return ret;
+}
+
+/* Format an iovar buffer, not bsscfg indexed. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+static s32 wldev_mkiovar(
+	s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, u32 buflen)
+{
+	s32 iolen = 0;
+
+	iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen);
+	return iolen;
+}
+
+s32 wldev_iovar_getbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+	ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+	if (buf_sync)
+		mutex_unlock(buf_sync);
+	return ret;
+}
+
+
+s32 wldev_iovar_setbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+	if (iovar_len > 0)
+		ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+	else
+		ret = BCME_BUFTOOSHORT;
+
+	if (buf_sync)
+		mutex_unlock(buf_sync);
+	return ret;
+}
+
+s32 wldev_iovar_setint(
+	struct net_device *dev, s8 *iovar, s32 val)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf), NULL);
+}
+
+
+s32 wldev_iovar_getint(
+	struct net_device *dev, s8 *iovar, s32 *pval)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf), NULL);
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
+
+/** Format a bsscfg indexed iovar buffer. The bsscfg index will be
+ *  taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ *  wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+s32 wldev_mkiovar_bsscfg(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, s32 buflen, s32 bssidx)
+{
+	const s8 *prefix = "bsscfg:";
+	s8 *p;
+	u32 prefixlen;
+	u32 namelen;
+	u32 iolen;
+
+	if (bssidx == 0) {
+		return wldev_mkiovar((s8*)iovar_name, (s8 *)param, paramlen,
+			(s8 *) iovar_buf, buflen);
+	}
+
+	prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */
+	namelen = (u32) strlen(iovar_name) + 1; /* length of iovar name + null */
+	iolen = prefixlen + namelen + sizeof(u32) + paramlen;
+
+	if (buflen < 0 || iolen > (u32)buflen)
+	{
+		WLDEV_ERROR(("%s: buffer is too short\n", __FUNCTION__));
+		return BCME_BUFTOOSHORT;
+	}
+
+	p = (s8 *)iovar_buf;
+
+	/* copy prefix, no null */
+	memcpy(p, prefix, prefixlen);
+	p += prefixlen;
+
+	/* copy iovar name including null */
+	memcpy(p, iovar_name, namelen);
+	p += namelen;
+
+	/* bss config index as first param */
+	bssidx = htod32(bssidx);
+	memcpy(p, &bssidx, sizeof(u32));
+	p += sizeof(u32);
+
+	/* parameter buffer follows */
+	if (paramlen)
+		memcpy(p, param, paramlen);
+
+	return iolen;
+
+}
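+/* For reference, the buffer assembled above for a non-zero bssidx is laid out
+ * as
+ *
+ *	"bsscfg:" <iovar_name + '\0'> <32-bit bssidx in dongle byte order> <param bytes>
+ *
+ * and the returned length covers all four parts.
+ */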
+
+s32 wldev_iovar_getbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+
+	wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+	if (buf_sync) {
+		mutex_unlock(buf_sync);
+	}
+	return ret;
+
+}
+
+s32 wldev_iovar_setbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	if (iovar_len > 0)
+		ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+	else {
+		ret = BCME_BUFTOOSHORT;
+	}
+
+	if (buf_sync) {
+		mutex_unlock(buf_sync);
+	}
+	return ret;
+}
+
+s32 wldev_iovar_setint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 val, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf), bssidx, NULL);
+}
+
+
+s32 wldev_iovar_getint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf), bssidx, NULL);
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
+
+int wldev_get_link_speed(
+	struct net_device *dev, int *plink_speed)
+{
+	int error;
+
+	if (!plink_speed)
+		return -ENOMEM;
+	error = wldev_ioctl(dev, WLC_GET_RATE, plink_speed, sizeof(int), 0);
+	if (unlikely(error))
+		return error;
+
+	/* Convert from internal units of 500 kbit/s to kbit/s */
+	*plink_speed *= 500;
+	return error;
+}
+
+int wldev_get_rssi(
+	struct net_device *dev, int *prssi)
+{
+	scb_val_t scb_val;
+	int error;
+
+	if (!prssi)
+		return -ENOMEM;
+	bzero(&scb_val, sizeof(scb_val_t));
+
+	error = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0);
+	if (unlikely(error))
+		return error;
+
+	*prssi = dtoh32(scb_val.val);
+	return error;
+}
+
+int wldev_get_ssid(
+	struct net_device *dev, wlc_ssid_t *pssid)
+{
+	int error;
+
+	if (!pssid)
+		return -ENOMEM;
+	error = wldev_ioctl(dev, WLC_GET_SSID, pssid, sizeof(wlc_ssid_t), 0);
+	if (unlikely(error))
+		return error;
+	pssid->SSID_len = dtoh32(pssid->SSID_len);
+	return error;
+}
+
+int wldev_get_band(
+	struct net_device *dev, uint *pband)
+{
+	int error;
+
+	error = wldev_ioctl(dev, WLC_GET_BAND, pband, sizeof(uint), 0);
+	return error;
+}
+
+int wldev_set_band(
+	struct net_device *dev, uint band)
+{
+	int error = -1;
+
+	if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+		error = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true);
+		if (!error)
+			dhd_bus_band_set(dev, band);
+	}
+	return error;
+}
+
+int wldev_set_country(
+	struct net_device *dev, char *country_code, bool notify, bool user_enforced)
+{
+	int error = -1;
+	wl_country_t cspec = {{0}, 0, {0}};
+	scb_val_t scbval;
+	char smbuf[WLC_IOCTL_SMLEN];
+
+	if (!country_code)
+		return error;
+
+	bzero(&scbval, sizeof(scb_val_t));
+	error = wldev_iovar_getbuf(dev, "country", NULL, 0, &cspec, sizeof(cspec), NULL);
+	if (error < 0) {
+		WLDEV_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error));
+		return error;
+	}
+
+	if ((error < 0) ||
+	    (strncmp(country_code, cspec.country_abbrev, WLC_CNTRY_BUF_SZ) != 0)) {
+
+		if (user_enforced) {
+			bzero(&scbval, sizeof(scb_val_t));
+			error = wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true);
+			if (error < 0) {
+				WLDEV_ERROR(("%s: set country failed due to Disassoc error %d\n",
+					__FUNCTION__, error));
+				return error;
+			}
+		}
+
+		cspec.rev = -1;
+		memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+		memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+		dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec);
+		error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec),
+			smbuf, sizeof(smbuf), NULL);
+		if (error < 0) {
+			WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n",
+				__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+			return error;
+		}
+		dhd_bus_country_set(dev, &cspec, notify);
+		WLDEV_ERROR(("%s: set country for %s as %s rev %d\n",
+			__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+	}
+	return 0;
+}
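+/* Typical use (illustrative): wldev_set_country(ndev, "US", true, true), where
+ * "US" is an ISO 3166-1 code; the two flags request a notification to the bus
+ * layer and a user-enforced disassociation before the change, respectively.
+ */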
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.h b/drivers/net/wireless/bcmdhd/wldev_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..6de214845f928f55252a4ad3a6e785be965190ef
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.h
@@ -0,0 +1,100 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wldev_common.h 467328 2014-04-03 01:23:40Z $
+ */
+#ifndef __WLDEV_COMMON_H__
+#define __WLDEV_COMMON_H__
+
+#include <wlioctl.h>
+
+/* wldev_ioctl - get/set IOCTLs, will call net_device's do_ioctl (or
+ *  netdev_ops->ndo_do_ioctl in new kernels)
+ *  @dev: the net_device handle
+ */
+s32 wldev_ioctl(
+	struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set);
+
+/** Retrieve named IOVARs; this function calls wldev_ioctl() with the
+ *  WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+/** Set named IOVARs; this function calls wldev_ioctl() with the
+ *  WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+s32 wldev_iovar_setint(
+	struct net_device *dev, s8 *iovar, s32 val);
+
+s32 wldev_iovar_getint(
+	struct net_device *dev, s8 *iovar, s32 *pval);
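+/* Example (illustrative only; "roam_off" is just a sample iovar name):
+ *
+ *	s32 val;
+ *	wldev_iovar_setint(dev, "roam_off", 1);
+ *	wldev_iovar_getint(dev, "roam_off", &val);
+ */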
+
+/** The following functions are used when bsscfg-indexed IOVARs are needed
+ */
+
+s32 wldev_mkiovar_bsscfg(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, s32 buflen, s32 bssidx);
+
+/** Retrieve named and bsscfg-indexed IOVARs; this function calls wldev_ioctl()
+ *  with the WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+	void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+/** Set named and bsscfg-indexed IOVARs; this function calls wldev_ioctl()
+ *  with the WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+	void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+s32 wldev_iovar_getint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx);
+
+s32 wldev_iovar_setint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 val, s32 bssidx);
+
+extern int dhd_net_set_fw_path(struct net_device *dev, char *fw);
+extern int dhd_net_bus_suspend(struct net_device *dev);
+extern int dhd_net_bus_resume(struct net_device *dev, uint8 stage);
+extern int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on,
+	unsigned long delay_msec);
+extern void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+	wl_country_t *cspec);
+extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify);
+extern void dhd_bus_band_set(struct net_device *dev, uint band);
+extern int wldev_set_country(struct net_device *dev, char *country_code, bool notify,
+	bool user_enforced);
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val, int force);
+extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid,
+	int max, int *bytes_left);
+
+/* Get the link speed from the dongle; speed is in kbps */
+int wldev_get_link_speed(struct net_device *dev, int *plink_speed);
+
+int wldev_get_rssi(struct net_device *dev, int *prssi);
+
+int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid);
+
+int wldev_get_band(struct net_device *dev, uint *pband);
+
+int wldev_set_band(struct net_device *dev, uint band);
+
+#endif /* __WLDEV_COMMON_H__ */