diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index 352e872c631fa098e2b04eb1c4b3e5ed6f2d89f6..d61dc20dbb15e9c77edaa03b5697aac7eafd8367 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1677,7 +1677,7 @@
 	vdd-1.8-xo-supply = <&pm660_l9_pin_ctrl>;
 	vdd-1.3-rfa-supply = <&pm660_l6_pin_ctrl>;
 	vdd-3.3-ch0-supply = <&pm660_l19_pin_ctrl>;
-	qcom,vdd-0.8-cx-mx-config = <525000 950000>;
+	qcom,vdd-0.8-cx-mx-config = <848000 848000>;
 	qcom,vdd-1.8-xo-config = <1750000 1900000>;
 	qcom,vdd-1.3-rfa-config = <1200000 1370000>;
 	qcom,vdd-3.3-ch0-config = <3200000 3400000>;
diff --git a/arch/arm/boot/dts/qcom/sdm636.dtsi b/arch/arm/boot/dts/qcom/sdm636.dtsi
index 8250a2a4645352eea79a5538c77e9fd99a984481..e053e191e98245f4cb7152bf5eed99dd4fb6449e 100644
--- a/arch/arm/boot/dts/qcom/sdm636.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm636.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -71,3 +71,9 @@
 		};
 	};
 };
+
+/* GPU overrides */
+&msm_gpu {
+	/* Update GPU chip ID*/
+	qcom,chipid = <0x05000900>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
index 5a571c2db6346ba6cb94f8910a9e8016bf0e3fad..6b820942b6bfea09b199da6a3e3a7323e272ddc3 100644
--- a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -465,8 +465,8 @@
 		qcom,gpu-pwrlevel@2 {
 			reg = <2>;
 			qcom,gpu-freq = <465000000>;
-			qcom,bus-freq = <9>;
-			qcom,bus-min = <8>;
+			qcom,bus-freq = <10>;
+			qcom,bus-min = <9>;
 			qcom,bus-max = <11>;
 		};
 
@@ -474,18 +474,18 @@
 		qcom,gpu-pwrlevel@3 {
 			reg = <3>;
 			qcom,gpu-freq = <370000000>;
-			qcom,bus-freq = <8>;
-			qcom,bus-min = <6>;
-			qcom,bus-max = <9>;
+			qcom,bus-freq = <9>;
+			qcom,bus-min = <9>;
+			qcom,bus-max = <11>;
 		};
 
 		/* Low SVS */
 		qcom,gpu-pwrlevel@4 {
 			reg = <4>;
 			qcom,gpu-freq = <266000000>;
-			qcom,bus-freq = <3>;
+			qcom,bus-freq = <4>;
 			qcom,bus-min = <3>;
-			qcom,bus-max = <6>;
+			qcom,bus-max = <8>;
 		};
 
 		/* Min SVS */
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index c626698ffd510431db8ff5802ae385d5960c4c2f..b39e7cff71ca36630f23c3627db3c1c19d784906 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1975,7 +1975,7 @@
 	vdd-1.8-xo-supply = <&pm660_l9_pin_ctrl>;
 	vdd-1.3-rfa-supply = <&pm660_l6_pin_ctrl>;
 	vdd-3.3-ch0-supply = <&pm660_l19_pin_ctrl>;
-	qcom,vdd-0.8-cx-mx-config = <525000 950000>;
+	qcom,vdd-0.8-cx-mx-config = <848000 848000>;
 	qcom,vdd-1.8-xo-config = <1750000 1900000>;
 	qcom,vdd-1.3-rfa-config = <1200000 1370000>;
 	qcom,vdd-3.3-ch0-config = <3200000 3400000>;
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 796e271bed69c95b3cd2feae7f78b7ed9ad37d6f..9a98b012471324fd966b236eb105e913bc78a5d4 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1162,18 +1162,31 @@ void extract_dci_events(unsigned char *buf, int len, int data_source,
 	struct list_head *start, *temp;
 	struct diag_dci_client_tbl *entry = NULL;
 
-	length = *(uint16_t *)(buf + 1); /* total length of event series */
-	if (length == 0) {
-		pr_err("diag: Incoming dci event length is invalid\n");
+	if (!buf) {
+		pr_err("diag: In %s buffer is NULL\n", __func__);
 		return;
 	}
 	/*
-	 * Move directly to the start of the event series. 1 byte for
-	 * event code and 2 bytes for the length field.
+	 * 1 byte for event code and 2 bytes for the length field.
 	 * The length field indicates the total length removing the cmd_code
 	 * and the lenght field. The event parsing in that case should happen
 	 * till the end.
 	 */
+	if (len < 3) {
+		pr_err("diag: In %s invalid len: %d\n", __func__, len);
+		return;
+	}
+	length = *(uint16_t *)(buf + 1); /* total length of event series */
+	if ((length == 0) || (len != (length + 3))) {
+		pr_err("diag: Incoming dci event length: %d is invalid\n",
+			length);
+		return;
+	}
+	/*
+	 * Move directly to the start of the event series.
+	 * The event parsing should happen from start of event
+	 * series till the end.
+	 */
 	temp_len = 3;
 	while (temp_len < length) {
 		event_id_packet = *(uint16_t *)(buf + temp_len);
@@ -1190,30 +1203,60 @@ void extract_dci_events(unsigned char *buf, int len, int data_source,
 			 * necessary.
 			 */
 			timestamp_len = 8;
-			memcpy(timestamp, buf + temp_len + 2, timestamp_len);
+			if ((temp_len + timestamp_len + 2) <= len)
+				memcpy(timestamp, buf + temp_len + 2,
+					timestamp_len);
+			else {
+				pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
+					__func__, len, temp_len);
+				return;
+			}
 		}
 		/* 13th and 14th bit represent the payload length */
 		if (((event_id_packet & 0x6000) >> 13) == 3) {
 			payload_len_field = 1;
-			payload_len = *(uint8_t *)
+			if ((temp_len + timestamp_len + 3) <= len) {
+				payload_len = *(uint8_t *)
 				(buf + temp_len + 2 + timestamp_len);
-			if (payload_len < (MAX_EVENT_SIZE - 13)) {
-				/* copy the payload length and the payload */
+			} else {
+				pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
+					__func__, len, temp_len);
+				return;
+			}
+			if ((payload_len < (MAX_EVENT_SIZE - 13)) &&
+			((temp_len + timestamp_len + payload_len + 3) <= len)) {
+				/*
+				 * Copy the payload length and the payload
+				 * after skipping temp_len bytes for already
+				 * parsed packet, timestamp_len for timestamp
+				 * buffer, 2 bytes for event_id_packet.
+ */ memcpy(event_data + 12, buf + temp_len + 2 + timestamp_len, 1); memcpy(event_data + 13, buf + temp_len + 2 + timestamp_len + 1, payload_len); } else { - pr_err("diag: event > %d, payload_len = %d\n", - (MAX_EVENT_SIZE - 13), payload_len); + pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n", + (MAX_EVENT_SIZE - 13), payload_len, temp_len); return; } } else { payload_len_field = 0; payload_len = (event_id_packet & 0x6000) >> 13; - /* copy the payload */ - memcpy(event_data + 12, buf + temp_len + 2 + + /* + * Copy the payload after skipping temp_len bytes + * for already parsed packet, timestamp_len for + * timestamp buffer, 2 bytes for event_id_packet. + */ + if ((payload_len < (MAX_EVENT_SIZE - 12)) && + ((temp_len + timestamp_len + payload_len + 2) <= len)) + memcpy(event_data + 12, buf + temp_len + 2 + timestamp_len, payload_len); + else { + pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n", + (MAX_EVENT_SIZE - 12), payload_len, temp_len); + return; + } } /* Before copying the data to userspace, check if we are still @@ -1337,19 +1380,19 @@ void extract_dci_log(unsigned char *buf, int len, int data_source, int token, pr_err("diag: In %s buffer is NULL\n", __func__); return; } - - /* The first six bytes for the incoming log packet contains - * Command code (2), the length of the packet (2) and the length - * of the log (2) + /* + * The first eight bytes for the incoming log packet contains + * Command code (2), the length of the packet (2), the length + * of the log (2) and log code (2) */ - log_code = *(uint16_t *)(buf + 6); - read_bytes += sizeof(uint16_t) + 6; - if (read_bytes > len) { - pr_err("diag: Invalid length in %s, len: %d, read: %d", - __func__, len, read_bytes); + if (len < 8) { + pr_err("diag: In %s invalid len: %d\n", __func__, len); return; } + log_code = *(uint16_t *)(buf + 6); + read_bytes += sizeof(uint16_t) + 6; + /* parse through log mask table of each client and check mask */ mutex_lock(&driver->dci_mutex); list_for_each_safe(start, temp, &driver->dci_client_list) { @@ -1376,6 +1419,10 @@ void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source, pr_err("diag: In %s buffer is NULL\n", __func__); return; } + if (len < (EXT_HDR_LEN + sizeof(uint8_t))) { + pr_err("diag: In %s invalid len: %d\n", __func__, len); + return; + } version = *(uint8_t *)buf + 1; if (version < EXT_HDR_VERSION) { @@ -1387,10 +1434,6 @@ void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source, pkt = buf + EXT_HDR_LEN; pkt_cmd_code = *(uint8_t *)pkt; len -= EXT_HDR_LEN; - if (len < 0) { - pr_err("diag: %s, Invalid length len: %d\n", __func__, len); - return; - } switch (pkt_cmd_code) { case LOG_CMD_CODE: diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index e1e86f6e74dca6d382289ffbcaf717013e75c6c2..c9af1e7f848a555834ed3a405c7eb93c6a69178a 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -537,8 +537,7 @@ static void diag_send_feature_mask_update(uint8_t peripheral) } static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + unsigned char *dest_buf, int dest_len, int pid) { int i; int write_len = 0; @@ -546,23 +545,30 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len, struct diag_msg_ssid_query_t rsp; struct diag_ssid_range_t ssid_range; struct diag_mask_info *mask_info = NULL; + struct diag_md_session_t *info = NULL; + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); mask_info = (!info) ? &msg_mask : info->msg_mask; if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 || !mask_info) { pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n", __func__, src_buf, src_len, dest_buf, dest_len, mask_info); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } if (!mask_info->ptr) { pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", __func__, mask_info->ptr); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } - if (!diag_apps_responds()) + if (!diag_apps_responds()) { + mutex_unlock(&driver->md_session_lock); return 0; + } mutex_lock(&driver->msg_mask_lock); rsp.cmd_code = DIAG_CMD_MSG_CONFIG; rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE; @@ -584,12 +590,12 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len, write_len += sizeof(ssid_range); } mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&driver->md_session_lock); return write_len; } static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + unsigned char *dest_buf, int dest_len, int pid) { int i = 0; int write_len = 0; @@ -642,8 +648,7 @@ static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len, } static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + unsigned char *dest_buf, int dest_len, int pid) { int i; int write_len = 0; @@ -652,6 +657,10 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, struct diag_build_mask_req_t *req = NULL; struct diag_msg_build_mask_t rsp; struct diag_mask_info *mask_info = NULL; + struct diag_md_session_t *info = NULL; + + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); mask_info = (!info) ? 
&msg_mask : info->msg_mask; if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 || @@ -659,15 +668,19 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n", __func__, src_buf, src_len, dest_buf, dest_len, mask_info); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } if (!mask_info->ptr) { pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", __func__, mask_info->ptr); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } - if (!diag_apps_responds()) + if (!diag_apps_responds()) { + mutex_unlock(&driver->md_session_lock); return 0; + } mutex_lock(&driver->msg_mask_lock); req = (struct diag_build_mask_req_t *)src_buf; @@ -682,6 +695,7 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", __func__, mask->ptr); mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { @@ -701,12 +715,12 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, memcpy(dest_buf, &rsp, sizeof(rsp)); write_len += sizeof(rsp); mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&driver->md_session_lock); return write_len; } static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + unsigned char *dest_buf, int dest_len, int pid) { int i; int write_len = 0; @@ -720,6 +734,10 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, struct diag_mask_info *mask_info = NULL; struct diag_msg_mask_t *mask_next = NULL; uint32_t *temp = NULL; + struct diag_md_session_t *info = NULL; + + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); mask_info = (!info) ? 
&msg_mask : info->msg_mask; if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 || @@ -727,11 +745,13 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n", __func__, src_buf, src_len, dest_buf, dest_len, mask_info); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } if (!mask_info->ptr) { pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", __func__, mask_info->ptr); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } @@ -744,6 +764,7 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, __func__, mask->ptr); mutex_unlock(&driver->msg_mask_lock); mutex_unlock(&mask_info->lock); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { @@ -786,6 +807,7 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, mutex_unlock(&mask->lock); mutex_unlock(&driver->msg_mask_lock); mutex_unlock(&mask_info->lock); + mutex_unlock(&driver->md_session_lock); return -ENOMEM; } mask->ptr = temp; @@ -806,6 +828,7 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, } mutex_unlock(&driver->msg_mask_lock); mutex_unlock(&mask_info->lock); + mutex_unlock(&driver->md_session_lock); if (diag_check_update(APPS_DATA)) diag_update_userspace_clients(MSG_MASKS_TYPE); @@ -839,8 +862,7 @@ end: } static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + unsigned char *dest_buf, int dest_len, int pid) { int i; int write_len = 0; @@ -849,6 +871,10 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, struct diag_msg_config_rsp_t *req = NULL; struct diag_msg_mask_t *mask = NULL; struct diag_mask_info *mask_info = NULL; + struct diag_md_session_t *info = NULL; + + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); mask_info = (!info) ? &msg_mask : info->msg_mask; if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 || @@ -856,11 +882,13 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n", __func__, src_buf, src_len, dest_buf, dest_len, mask_info); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } if (!mask_info->ptr) { pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", __func__, mask_info->ptr); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } @@ -875,6 +903,7 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, __func__, mask->ptr); mutex_unlock(&driver->msg_mask_lock); mutex_unlock(&mask_info->lock); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } mask_info->status = (req->rt_mask) ? 
DIAG_CTRL_MASK_ALL_ENABLED : @@ -887,7 +916,7 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, } mutex_unlock(&driver->msg_mask_lock); mutex_unlock(&mask_info->lock); - + mutex_unlock(&driver->md_session_lock); if (diag_check_update(APPS_DATA)) diag_update_userspace_clients(MSG_MASKS_TYPE); @@ -915,8 +944,7 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, } static int diag_cmd_get_event_mask(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + unsigned char *dest_buf, int dest_len, int pid) { int write_len = 0; uint32_t mask_size; @@ -951,8 +979,7 @@ static int diag_cmd_get_event_mask(unsigned char *src_buf, int src_len, } static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + unsigned char *dest_buf, int dest_len, int pid) { int i; int write_len = 0; @@ -961,18 +988,23 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len, struct diag_event_mask_config_t rsp; struct diag_event_mask_config_t *req; struct diag_mask_info *mask_info = NULL; + struct diag_md_session_t *info = NULL; + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); mask_info = (!info) ? &event_mask : info->event_mask; if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 || !mask_info) { pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n", __func__, src_buf, src_len, dest_buf, dest_len, mask_info); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } if (!mask_info->ptr) { pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", __func__, mask_info->ptr); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } req = (struct diag_event_mask_config_t *)src_buf; @@ -980,6 +1012,7 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len, if (mask_len <= 0 || mask_len > event_mask.mask_len) { pr_err("diag: In %s, invalid event mask len: %d\n", __func__, mask_len); + mutex_unlock(&driver->md_session_lock); return -EIO; } @@ -987,6 +1020,7 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len, memcpy(mask_info->ptr, src_buf + header_len, mask_len); mask_info->status = DIAG_CTRL_MASK_VALID; mutex_unlock(&mask_info->lock); + mutex_unlock(&driver->md_session_lock); if (diag_check_update(APPS_DATA)) diag_update_userspace_clients(EVENT_MASKS_TYPE); @@ -1015,26 +1049,30 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len, } static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + unsigned char *dest_buf, int dest_len, int pid) { int i; int write_len = 0; uint8_t toggle = 0; struct diag_event_report_t header; struct diag_mask_info *mask_info = NULL; + struct diag_md_session_t *info = NULL; + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); mask_info = (!info) ? 
&event_mask : info->event_mask; if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 || !mask_info) { pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n", __func__, src_buf, src_len, dest_buf, dest_len, mask_info); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } if (!mask_info->ptr) { pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", __func__, mask_info->ptr); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } @@ -1048,6 +1086,7 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len, memset(mask_info->ptr, 0, mask_info->mask_len); } mutex_unlock(&mask_info->lock); + mutex_unlock(&driver->md_session_lock); if (diag_check_update(APPS_DATA)) diag_update_userspace_clients(EVENT_MASKS_TYPE); @@ -1071,8 +1110,7 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len, } static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + unsigned char *dest_buf, int dest_len, int pid) { int i; int status = LOG_STATUS_INVALID; @@ -1085,6 +1123,10 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len, struct diag_log_config_req_t *req; struct diag_log_config_rsp_t rsp; struct diag_mask_info *mask_info = NULL; + struct diag_md_session_t *info = NULL; + + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); mask_info = (!info) ? &log_mask : info->log_mask; if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 || @@ -1092,16 +1134,20 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len, pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n", __func__, src_buf, src_len, dest_buf, dest_len, mask_info); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } if (!mask_info->ptr) { pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", __func__, mask_info->ptr); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } - if (!diag_apps_responds()) + if (!diag_apps_responds()) { + mutex_unlock(&driver->md_session_lock); return 0; + } req = (struct diag_log_config_req_t *)src_buf; read_len += req_header_len; @@ -1121,6 +1167,7 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len, if (!log_item->ptr) { pr_err("diag: Invalid input in %s, mask: %pK\n", __func__, log_item); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) { @@ -1162,28 +1209,27 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len, rsp.status = status; memcpy(dest_buf, &rsp, rsp_header_len); + mutex_unlock(&driver->md_session_lock); return write_len; } static int diag_cmd_get_log_range(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + unsigned char *dest_buf, int dest_len, int pid) { int i; int write_len = 0; struct diag_log_config_rsp_t rsp; - struct diag_mask_info *mask_info = NULL; struct diag_log_mask_t *mask = (struct diag_log_mask_t *)log_mask.ptr; + if (!mask) + return -EINVAL; + if (!diag_apps_responds()) return 0; - mask_info = (!info) ? 
&log_mask : info->log_mask; - if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 || - !mask_info) { - pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n", - __func__, src_buf, src_len, dest_buf, dest_len, - mask_info); + if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) { + pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n", + __func__, src_buf, src_len, dest_buf, dest_len); return -EINVAL; } @@ -1206,7 +1252,7 @@ static int diag_cmd_get_log_range(unsigned char *src_buf, int src_len, static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len, unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + int pid) { int i; int write_len = 0; @@ -1221,6 +1267,10 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len, struct diag_log_mask_t *mask = NULL; unsigned char *temp_buf = NULL; struct diag_mask_info *mask_info = NULL; + struct diag_md_session_t *info = NULL; + + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); mask_info = (!info) ? &log_mask : info->log_mask; if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 || @@ -1228,11 +1278,13 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len, pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n", __func__, src_buf, src_len, dest_buf, dest_len, mask_info); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } if (!mask_info->ptr) { pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", __func__, mask_info->ptr); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } @@ -1242,6 +1294,7 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len, if (!mask->ptr) { pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", __func__, mask->ptr); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } if (req->equip_id >= MAX_EQUIP_ID) { @@ -1304,6 +1357,7 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len, break; } mutex_unlock(&mask_info->lock); + mutex_unlock(&driver->md_session_lock); if (diag_check_update(APPS_DATA)) diag_update_userspace_clients(LOG_MASKS_TYPE); @@ -1344,14 +1398,16 @@ end: } static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) + unsigned char *dest_buf, int dest_len, int pid) { struct diag_mask_info *mask_info = NULL; struct diag_log_mask_t *mask = NULL; struct diag_log_config_rsp_t header; - int write_len = 0; - int i; + int write_len = 0, i; + struct diag_md_session_t *info = NULL; + + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); mask_info = (!info) ? 
&log_mask : info->log_mask; if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 || @@ -1359,17 +1415,20 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len, pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n", __func__, src_buf, src_len, dest_buf, dest_len, mask_info); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } if (!mask_info->ptr) { pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", __func__, mask_info->ptr); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } mask = (struct diag_log_mask_t *)mask_info->ptr; if (!mask->ptr) { pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", __func__, mask->ptr); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } for (i = 0; i < MAX_EQUIP_ID; i++, mask++) { @@ -1378,6 +1437,7 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len, mutex_unlock(&mask->lock); } mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED; + mutex_unlock(&driver->md_session_lock); if (diag_check_update(APPS_DATA)) diag_update_userspace_clients(LOG_MASKS_TYPE); @@ -2109,14 +2169,12 @@ void diag_send_updates_peripheral(uint8_t peripheral) &driver->buffering_mode[peripheral]); } -int diag_process_apps_masks(unsigned char *buf, int len, - struct diag_md_session_t *info) +int diag_process_apps_masks(unsigned char *buf, int len, int pid) { int size = 0; int sub_cmd = 0; int (*hdlr)(unsigned char *src_buf, int src_len, - unsigned char *dest_buf, int dest_len, - struct diag_md_session_t *info) = NULL; + unsigned char *dest_buf, int dest_len, int pid) = NULL; if (!buf || len <= 0) return -EINVAL; @@ -2166,7 +2224,7 @@ int diag_process_apps_masks(unsigned char *buf, int len, if (hdlr) size = hdlr(buf, len, driver->apps_rsp_buf, - DIAG_MAX_RSP_SIZE, info); + DIAG_MAX_RSP_SIZE, pid); return (size > 0) ? size : 0; } diff --git a/drivers/char/diag/diag_masks.h b/drivers/char/diag/diag_masks.h index 1a52f946bb09ceabb08d6fcffac6e8fab3bb5d95..6edeee954d74baedccd5b7dc877bd61522dbdb7e 100644 --- a/drivers/char/diag/diag_masks.h +++ b/drivers/char/diag/diag_masks.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2015, 2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -167,8 +167,7 @@ int diag_event_mask_copy(struct diag_mask_info *dest, void diag_log_mask_free(struct diag_mask_info *mask_info); void diag_msg_mask_free(struct diag_mask_info *mask_info); void diag_event_mask_free(struct diag_mask_info *mask_info); -int diag_process_apps_masks(unsigned char *buf, int len, - struct diag_md_session_t *info); +int diag_process_apps_masks(unsigned char *buf, int len, int pid); void diag_send_updates_peripheral(uint8_t peripheral); extern int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask, diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c index 986aeed169f5e9d79baeea4f6ce61f65a042da4c..83a2c70fba0295947c9364a05f0bb00c49e2ffb5 100644 --- a/drivers/char/diag/diag_memorydevice.c +++ b/drivers/char/diag/diag_memorydevice.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -131,7 +131,7 @@ void diag_md_close_all() int diag_md_write(int id, unsigned char *buf, int len, int ctx) { - int i; + int i, pid = 0; uint8_t found = 0; unsigned long flags; struct diag_md_info *ch = NULL; @@ -149,10 +149,14 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx) if (peripheral < 0) return -EINVAL; - session_info = - diag_md_session_get_peripheral(peripheral); - if (!session_info) + mutex_lock(&driver->md_session_lock); + session_info = diag_md_session_get_peripheral(peripheral); + if (!session_info) { + mutex_unlock(&driver->md_session_lock); return -EIO; + } + pid = session_info->pid; + mutex_unlock(&driver->md_session_lock); ch = &diag_md[id]; if (!ch) @@ -195,13 +199,13 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx) found = 0; for (i = 0; i < driver->num_clients && !found; i++) { - if ((driver->client_map[i].pid != - session_info->pid) || + if ((driver->client_map[i].pid != pid) || (driver->client_map[i].pid == 0)) continue; found = 1; driver->data_ready[i] |= USER_SPACE_DATA_TYPE; + atomic_inc(&driver->data_ready_notif[i]); pr_debug("diag: wake up logging process\n"); wake_up_interruptible(&driver->wait_q); } diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c index 0a0fc4400de5044a75f4c6c82e21793ac4d12303..87d021f6a95642fb0d43ceb02f7c1e2942bb505a 100644 --- a/drivers/char/diag/diag_usb.c +++ b/drivers/char/diag/diag_usb.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2016, 2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -221,7 +221,7 @@ static void usb_disconnect(struct diag_usb_info *ch) if (!atomic_read(&ch->connected) && driver->usb_connected && diag_mask_param()) - diag_clear_masks(NULL); + diag_clear_masks(0); if (ch && ch->ops && ch->ops->close) ch->ops->close(ch->ctxt, DIAG_USB_MODE); diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h index d81a39e2c6376431626d8bb73095b46232bb8d01..66d85eb2a0263b1e91ed188735085d09526eefb9 100644 --- a/drivers/char/diag/diagchar.h +++ b/drivers/char/diag/diagchar.h @@ -26,6 +26,8 @@ #include <asm/atomic.h> #include "diagfwd_bridge.h" +#define THRESHOLD_CLIENT_LIMIT 50 + /* Size of the USB buffers used for read and write*/ #define USB_MAX_OUT_BUF 4096 #define APPS_BUF_SIZE 4096 @@ -508,6 +510,7 @@ struct diagchar_dev { wait_queue_head_t wait_q; struct diag_client_map *client_map; int *data_ready; + atomic_t data_ready_notif[THRESHOLD_CLIENT_LIMIT]; int num_clients; int polling_reg_flag; int use_device_tree; @@ -673,7 +676,7 @@ void diag_cmd_remove_reg_by_pid(int pid); void diag_cmd_remove_reg_by_proc(int proc); int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry); int diag_mask_param(void); -void diag_clear_masks(struct diag_md_session_t *info); +void diag_clear_masks(int pid); uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask); void diag_record_stats(int type, int flag); diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index 4111e599877a152d654421329d32442e1557efe9..61f62cd49029f28625dd2fd9a37bdfcf1f03eee2 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2017, The Linux Foundation. 
All rights reserved. +/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -139,7 +139,6 @@ module_param(poolsize_qsc_usb, uint, 0); /* This is the max number of user-space clients supported at initialization*/ static unsigned int max_clients = 15; -static unsigned int threshold_client_limit = 50; module_param(max_clients, uint, 0); /* Timer variables */ @@ -171,7 +170,7 @@ uint16_t diag_debug_mask; void *diag_ipc_log; #endif -static void diag_md_session_close(struct diag_md_session_t *session_info); +static void diag_md_session_close(int pid); /* * Returns the next delayed rsp id. If wrapping is enabled, @@ -210,6 +209,16 @@ do { \ ret += length; \ } while (0) +#define COPY_USER_SPACE_OR_ERR(buf, data, length) \ +do { \ + if ((count < ret+length) || (copy_to_user(buf, \ + (void *)&data, length))) { \ + ret = -EFAULT; \ + break; \ + } \ + ret += length; \ +} while (0) + static void drain_timer_func(unsigned long data) { queue_work(driver->diag_wq , &(driver->diag_drain_work)); @@ -248,12 +257,13 @@ void diag_drain_work_fn(struct work_struct *work) timer_in_progress = 0; mutex_lock(&apps_data_mutex); + mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_peripheral(APPS_DATA); if (session_info) hdlc_disabled = session_info->hdlc_disabled; else hdlc_disabled = driver->hdlc_disabled; - + mutex_unlock(&driver->md_session_lock); if (!hdlc_disabled) diag_drain_apps_data(&hdlc_data); else @@ -328,7 +338,7 @@ static int diagchar_open(struct inode *inode, struct file *file) if (i < driver->num_clients) { diag_add_client(i, file); } else { - if (i < threshold_client_limit) { + if (i < THRESHOLD_CLIENT_LIMIT) { driver->num_clients++; temp = krealloc(driver->client_map , (driver->num_clients) * sizeof(struct @@ -358,11 +368,17 @@ static int diagchar_open(struct inode *inode, struct file *file) } } driver->data_ready[i] = 0x0; + atomic_set(&driver->data_ready_notif[i], 0); driver->data_ready[i] |= MSG_MASKS_TYPE; + atomic_inc(&driver->data_ready_notif[i]); driver->data_ready[i] |= EVENT_MASKS_TYPE; + atomic_inc(&driver->data_ready_notif[i]); driver->data_ready[i] |= LOG_MASKS_TYPE; + atomic_inc(&driver->data_ready_notif[i]); driver->data_ready[i] |= DCI_LOG_MASKS_TYPE; + atomic_inc(&driver->data_ready_notif[i]); driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE; + atomic_inc(&driver->data_ready_notif[i]); if (driver->ref_count == 0) diag_mempool_init(); @@ -373,8 +389,8 @@ static int diagchar_open(struct inode *inode, struct file *file) return -ENOMEM; fail: - mutex_unlock(&driver->diagchar_mutex); driver->num_clients--; + mutex_unlock(&driver->diagchar_mutex); pr_err_ratelimited("diag: Insufficient memory for new client"); return -ENOMEM; } @@ -433,7 +449,7 @@ int diag_mask_param(void) { return diag_mask_clear_param; } -void diag_clear_masks(struct diag_md_session_t *info) +void diag_clear_masks(int pid) { int ret; char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0}; @@ -442,14 +458,14 @@ void diag_clear_masks(struct diag_md_session_t *info) DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: %s: masks clear request upon %s\n", __func__, - ((info) ? "ODL exit" : "USB Disconnection")); + ((pid) ? 
"ODL exit" : "USB Disconnection")); ret = diag_process_apps_masks(cmd_disable_log_mask, - sizeof(cmd_disable_log_mask), info); + sizeof(cmd_disable_log_mask), pid); ret = diag_process_apps_masks(cmd_disable_msg_mask, - sizeof(cmd_disable_msg_mask), info); + sizeof(cmd_disable_msg_mask), pid); ret = diag_process_apps_masks(cmd_disable_event_mask, - sizeof(cmd_disable_event_mask), info); + sizeof(cmd_disable_event_mask), pid); DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag:%s: masks cleared successfully\n", __func__); } @@ -462,12 +478,17 @@ static void diag_close_logging_process(const int pid) struct diag_md_session_t *session_info = NULL; struct diag_logging_mode_param_t params; + mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_pid(pid); - if (!session_info) + if (!session_info) { + mutex_unlock(&driver->md_session_lock); return; + } + session_mask = session_info->peripheral_mask; + mutex_unlock(&driver->md_session_lock); if (diag_mask_clear_param) - diag_clear_masks(session_info); + diag_clear_masks(pid); mutex_lock(&driver->diag_maskclear_mutex); driver->mask_clear = 1; @@ -475,9 +496,6 @@ static void diag_close_logging_process(const int pid) mutex_lock(&driver->diagchar_mutex); - session_mask = session_info->peripheral_mask; - diag_md_session_close(session_info); - p_mask = diag_translate_kernel_to_user_mask(session_mask); @@ -501,7 +519,9 @@ static void diag_close_logging_process(const int pid) } } } - + mutex_lock(&driver->md_session_lock); + diag_md_session_close(pid); + mutex_unlock(&driver->md_session_lock); diag_switch_logging(¶ms); mutex_unlock(&driver->diagchar_mutex); @@ -1035,11 +1055,13 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len, if (driver->hdlc_encode_buf_len != 0) return -EAGAIN; + mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_peripheral(APPS_DATA); if (session_info) hdlc_disabled = session_info->hdlc_disabled; else hdlc_disabled = driver->hdlc_disabled; + mutex_unlock(&driver->md_session_lock); if (hdlc_disabled) { if (len < 4) { pr_err("diag: In %s, invalid len: %d of non_hdlc pkt", @@ -1403,15 +1425,16 @@ fail_peripheral: return err; } -static void diag_md_session_close(struct diag_md_session_t *session_info) +static void diag_md_session_close(int pid) { int i; uint8_t found = 0; + struct diag_md_session_t *session_info = NULL; + session_info = diag_md_session_get_pid(pid); if (!session_info) return; - mutex_lock(&driver->md_session_lock); for (i = 0; i < NUM_MD_SESSIONS; i++) { if (driver->md_session_map[i] != session_info) continue; @@ -1437,13 +1460,14 @@ static void diag_md_session_close(struct diag_md_session_t *session_info) driver->md_session_mode = (found) ? 
DIAG_MD_PERIPHERAL : DIAG_MD_NONE; kfree(session_info); session_info = NULL; - mutex_unlock(&driver->md_session_lock); DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n"); } struct diag_md_session_t *diag_md_session_get_pid(int pid) { int i; + if (pid <= 0) + return NULL; for (i = 0; i < NUM_MD_SESSIONS; i++) { if (driver->md_session_map[i] && driver->md_session_map[i]->pid == pid) @@ -1459,10 +1483,12 @@ struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral) return driver->md_session_map[peripheral]; } -static int diag_md_peripheral_switch(struct diag_md_session_t *session_info, +static int diag_md_peripheral_switch(int pid, int peripheral_mask, int req_mode) { int i, bit = 0; + struct diag_md_session_t *session_info = NULL; + session_info = diag_md_session_get_pid(pid); if (!session_info) return -EINVAL; if (req_mode != DIAG_USB_MODE || req_mode != DIAG_MEMORY_DEVICE_MODE) @@ -1472,25 +1498,20 @@ static int diag_md_peripheral_switch(struct diag_md_session_t *session_info, * check that md_session_map for i == session_info, * if not then race condition occurred and bail */ - mutex_lock(&driver->md_session_lock); for (i = 0; i < NUM_MD_SESSIONS; i++) { bit = MD_PERIPHERAL_MASK(i) & peripheral_mask; if (!bit) continue; if (req_mode == DIAG_USB_MODE) { - if (driver->md_session_map[i] != session_info) { - mutex_unlock(&driver->md_session_lock); + if (driver->md_session_map[i] != session_info) return -EINVAL; - } driver->md_session_map[i] = NULL; driver->md_session_mask &= ~bit; session_info->peripheral_mask &= ~bit; } else { - if (driver->md_session_map[i] != NULL) { - mutex_unlock(&driver->md_session_lock); + if (driver->md_session_map[i] != NULL) return -EINVAL; - } driver->md_session_map[i] = session_info; driver->md_session_mask |= bit; session_info->peripheral_mask |= bit; @@ -1499,7 +1520,6 @@ static int diag_md_peripheral_switch(struct diag_md_session_t *session_info, } driver->md_session_mode = DIAG_MD_PERIPHERAL; - mutex_unlock(&driver->md_session_lock); DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n", peripheral_mask, req_mode); } @@ -1508,7 +1528,7 @@ static int diag_md_session_check(int curr_mode, int req_mode, const struct diag_logging_mode_param_t *param, uint8_t *change_mode) { - int i, bit = 0, err = 0; + int i, bit = 0, err = 0, peripheral_mask = 0; int change_mask = 0; struct diag_md_session_t *session_info = NULL; @@ -1532,12 +1552,13 @@ static int diag_md_session_check(int curr_mode, int req_mode, if (req_mode == DIAG_USB_MODE) { if (curr_mode == DIAG_USB_MODE) return 0; + mutex_lock(&driver->md_session_lock); if (driver->md_session_mode == DIAG_MD_NONE && driver->md_session_mask == 0 && driver->logging_mask) { *change_mode = 1; + mutex_unlock(&driver->md_session_lock); return 0; } - /* * curr_mode is either DIAG_MULTI_MODE or DIAG_MD_MODE * Check if requested peripherals are already in usb mode @@ -1549,8 +1570,10 @@ static int diag_md_session_check(int curr_mode, int req_mode, if (bit & driver->logging_mask) change_mask |= bit; } - if (!change_mask) + if (!change_mask) { + mutex_unlock(&driver->md_session_lock); return 0; + } /* * Change is needed. 
Check if this md_session has set all the @@ -1559,29 +1582,29 @@ static int diag_md_session_check(int curr_mode, int req_mode, * If this session owns all the requested peripherals, then * call function to switch the modes/masks for the md_session */ - mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_pid(current->tgid); - mutex_unlock(&driver->md_session_lock); - if (!session_info) { *change_mode = 1; + mutex_unlock(&driver->md_session_lock); return 0; } - if ((change_mask & session_info->peripheral_mask) + peripheral_mask = session_info->peripheral_mask; + if ((change_mask & peripheral_mask) != change_mask) { DIAG_LOG(DIAG_DEBUG_USERSPACE, "Another MD Session owns a requested peripheral\n"); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } *change_mode = 1; /* If all peripherals are being set to USB Mode, call close */ - if (~change_mask & session_info->peripheral_mask) { - err = diag_md_peripheral_switch(session_info, + if (~change_mask & peripheral_mask) { + err = diag_md_peripheral_switch(current->tgid, change_mask, DIAG_USB_MODE); } else - diag_md_session_close(session_info); - + diag_md_session_close(current->tgid); + mutex_unlock(&driver->md_session_lock); return err; } else if (req_mode == DIAG_MEMORY_DEVICE_MODE) { @@ -1590,21 +1613,23 @@ static int diag_md_session_check(int curr_mode, int req_mode, * been set. Check that requested peripherals already set are * owned by this md session */ - change_mask = driver->md_session_mask & param->peripheral_mask; mutex_lock(&driver->md_session_lock); + change_mask = driver->md_session_mask & param->peripheral_mask; session_info = diag_md_session_get_pid(current->tgid); - mutex_unlock(&driver->md_session_lock); if (session_info) { if ((session_info->peripheral_mask & change_mask) != change_mask) { DIAG_LOG(DIAG_DEBUG_USERSPACE, "Another MD Session owns a requested peripheral\n"); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } - err = diag_md_peripheral_switch(session_info, + err = diag_md_peripheral_switch(current->tgid, change_mask, DIAG_USB_MODE); + mutex_unlock(&driver->md_session_lock); } else { + mutex_unlock(&driver->md_session_lock); if (change_mask) { DIAG_LOG(DIAG_DEBUG_USERSPACE, "Another MD Session owns a requested peripheral\n"); @@ -1866,6 +1891,7 @@ static int diag_ioctl_lsm_deinit(void) } driver->data_ready[i] |= DEINIT_TYPE; + atomic_inc(&driver->data_ready_notif[i]); mutex_unlock(&driver->diagchar_mutex); wake_up_interruptible(&driver->wait_q); @@ -2059,19 +2085,17 @@ static int diag_ioctl_hdlc_toggle(unsigned long ioarg) { uint8_t hdlc_support; struct diag_md_session_t *session_info = NULL; - mutex_lock(&driver->md_session_lock); - session_info = diag_md_session_get_pid(current->tgid); - mutex_unlock(&driver->md_session_lock); if (copy_from_user(&hdlc_support, (void __user *)ioarg, sizeof(uint8_t))) return -EFAULT; mutex_lock(&driver->hdlc_disable_mutex); - if (session_info) { - mutex_lock(&driver->md_session_lock); + mutex_lock(&driver->md_session_lock); + session_info = diag_md_session_get_pid(current->tgid); + if (session_info) session_info->hdlc_disabled = hdlc_support; - mutex_unlock(&driver->md_session_lock); - } else + else driver->hdlc_disabled = hdlc_support; + mutex_unlock(&driver->md_session_lock); mutex_unlock(&driver->hdlc_disable_mutex); diag_update_md_clients(HDLC_SUPPORT_TYPE); @@ -2783,7 +2807,6 @@ static int diag_user_process_raw_data(const char __user *buf, int len) int remote_proc = 0; const int mempool = POOL_TYPE_COPY; unsigned char *user_space_data = 
NULL; - struct diag_md_session_t *info = NULL; if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) { pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n", @@ -2834,13 +2857,11 @@ static int diag_user_process_raw_data(const char __user *buf, int len) } else { wait_event_interruptible(driver->wait_q, (driver->in_busy_pktdata == 0)); - mutex_lock(&driver->md_session_lock); - info = diag_md_session_get_pid(current->tgid); - mutex_unlock(&driver->md_session_lock); - ret = diag_process_apps_pkt(user_space_data, len, info); + ret = diag_process_apps_pkt(user_space_data, len, + current->tgid); if (ret == 1) diag_send_error_rsp((void *)(user_space_data), len, - info); + current->tgid); } fail: diagmem_free(driver, user_space_data, mempool); @@ -2906,24 +2927,25 @@ static int diag_user_process_userspace_data(const char __user *buf, int len) if (!remote_proc) { mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_pid(current->tgid); - mutex_unlock(&driver->md_session_lock); if (!session_info) { pr_err("diag:In %s request came from invalid md session pid:%d", __func__, current->tgid); + mutex_unlock(&driver->md_session_lock); return -EINVAL; } if (session_info) hdlc_disabled = session_info->hdlc_disabled; else hdlc_disabled = driver->hdlc_disabled; + mutex_unlock(&driver->md_session_lock); if (!hdlc_disabled) diag_process_hdlc_pkt((void *) (driver->user_space_data_buf), - len, session_info); + len, current->tgid); else diag_process_non_hdlc_pkt((char *) (driver->user_space_data_buf), - len, session_info); + len, current->tgid); return 0; } @@ -3000,11 +3022,13 @@ static int diag_user_process_apps_data(const char __user *buf, int len, mutex_lock(&apps_data_mutex); mutex_lock(&driver->hdlc_disable_mutex); + mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_peripheral(APPS_DATA); if (session_info) hdlc_disabled = session_info->hdlc_disabled; else hdlc_disabled = driver->hdlc_disabled; + mutex_unlock(&driver->md_session_lock); if (hdlc_disabled) ret = diag_process_apps_data_non_hdlc(user_space_data, len, pkt_type); @@ -3029,16 +3053,6 @@ static int diag_user_process_apps_data(const char __user *buf, int len, return 0; } -static int check_data_ready(int index) -{ - int data_type = 0; - - mutex_lock(&driver->diagchar_mutex); - data_type = driver->data_ready[index]; - mutex_unlock(&driver->diagchar_mutex); - return data_type; -} - static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -3065,7 +3079,8 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, pr_err("diag: bad address from user side\n"); return -EFAULT; } - wait_event_interruptible(driver->wait_q, (check_data_ready(index)) > 0); + wait_event_interruptible(driver->wait_q, + atomic_read(&driver->data_ready_notif[index]) > 0); mutex_lock(&driver->diagchar_mutex); @@ -3076,32 +3091,40 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, /*Copy the type of data being passed*/ data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE; driver->data_ready[index] ^= USER_SPACE_DATA_TYPE; + atomic_dec(&driver->data_ready_notif[index]); COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int)); /* place holder for number of data field */ ret += sizeof(int); mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_pid(current->tgid); - mutex_unlock(&driver->md_session_lock); exit_stat = diag_md_copy_to_user(buf, &ret, count, session_info); + mutex_unlock(&driver->md_session_lock); goto exit; } else 
if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) { /* In case, the thread wakes up and the logging mode is not memory device any more, the condition needs to be cleared */ driver->data_ready[index] ^= USER_SPACE_DATA_TYPE; + atomic_dec(&driver->data_ready_notif[index]); } if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) { data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE; driver->data_ready[index] ^= HDLC_SUPPORT_TYPE; + atomic_dec(&driver->data_ready_notif[index]); COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int)); mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_pid(current->tgid); - mutex_unlock(&driver->md_session_lock); - if (session_info) - COPY_USER_SPACE_OR_EXIT(buf+4, + if (session_info) { + COPY_USER_SPACE_OR_ERR(buf+4, session_info->hdlc_disabled, sizeof(uint8_t)); + if (ret == -EFAULT) { + mutex_unlock(&driver->md_session_lock); + goto exit; + } + } + mutex_unlock(&driver->md_session_lock); goto exit; } @@ -3110,6 +3133,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, data_type = driver->data_ready[index] & DEINIT_TYPE; COPY_USER_SPACE_OR_EXIT(buf, data_type, 4); driver->data_ready[index] ^= DEINIT_TYPE; + atomic_dec(&driver->data_ready_notif[index]); mutex_unlock(&driver->diagchar_mutex); diag_remove_client_entry(file); return ret; @@ -3118,45 +3142,74 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, if (driver->data_ready[index] & MSG_MASKS_TYPE) { /*Copy the type of data being passed*/ data_type = driver->data_ready[index] & MSG_MASKS_TYPE; + mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_peripheral(APPS_DATA); - COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int)); + COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int)); + if (ret == -EFAULT) { + mutex_unlock(&driver->md_session_lock); + goto exit; + } write_len = diag_copy_to_user_msg_mask(buf + ret, count, session_info); + mutex_unlock(&driver->md_session_lock); if (write_len > 0) ret += write_len; driver->data_ready[index] ^= MSG_MASKS_TYPE; + atomic_dec(&driver->data_ready_notif[index]); goto exit; } if (driver->data_ready[index] & EVENT_MASKS_TYPE) { /*Copy the type of data being passed*/ data_type = driver->data_ready[index] & EVENT_MASKS_TYPE; + mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_peripheral(APPS_DATA); - COPY_USER_SPACE_OR_EXIT(buf, data_type, 4); + COPY_USER_SPACE_OR_ERR(buf, data_type, 4); + if (ret == -EFAULT) { + mutex_unlock(&driver->md_session_lock); + goto exit; + } if (session_info && session_info->event_mask && session_info->event_mask->ptr) { - COPY_USER_SPACE_OR_EXIT(buf + sizeof(int), + COPY_USER_SPACE_OR_ERR(buf + sizeof(int), *(session_info->event_mask->ptr), session_info->event_mask->mask_len); + if (ret == -EFAULT) { + mutex_unlock(&driver->md_session_lock); + goto exit; + } } else { - COPY_USER_SPACE_OR_EXIT(buf + sizeof(int), + COPY_USER_SPACE_OR_ERR(buf + sizeof(int), *(event_mask.ptr), event_mask.mask_len); + if (ret == -EFAULT) { + mutex_unlock(&driver->md_session_lock); + goto exit; + } } + mutex_unlock(&driver->md_session_lock); driver->data_ready[index] ^= EVENT_MASKS_TYPE; + atomic_dec(&driver->data_ready_notif[index]); goto exit; } if (driver->data_ready[index] & LOG_MASKS_TYPE) { /*Copy the type of data being passed*/ data_type = driver->data_ready[index] & LOG_MASKS_TYPE; + mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_peripheral(APPS_DATA); - COPY_USER_SPACE_OR_EXIT(buf, data_type, 
sizeof(int)); + COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int)); + if (ret == -EFAULT) { + mutex_unlock(&driver->md_session_lock); + goto exit; + } write_len = diag_copy_to_user_log_mask(buf + ret, count, session_info); + mutex_unlock(&driver->md_session_lock); if (write_len > 0) ret += write_len; driver->data_ready[index] ^= LOG_MASKS_TYPE; + atomic_dec(&driver->data_ready_notif[index]); goto exit; } @@ -3168,6 +3221,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, *(driver->apps_req_buf), driver->apps_req_buf_len); driver->data_ready[index] ^= PKT_TYPE; + atomic_dec(&driver->data_ready_notif[index]); driver->in_busy_pktdata = 0; goto exit; } @@ -3179,6 +3233,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->dci_pkt_buf), driver->dci_pkt_length); driver->data_ready[index] ^= DCI_PKT_TYPE; + atomic_dec(&driver->data_ready_notif[index]); driver->in_busy_dcipktdata = 0; goto exit; } @@ -3191,6 +3246,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, COPY_USER_SPACE_OR_EXIT(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC]. event_mask_composite), DCI_EVENT_MASK_SIZE); driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE; + atomic_dec(&driver->data_ready_notif[index]); goto exit; } @@ -3202,6 +3258,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, COPY_USER_SPACE_OR_EXIT(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC]. log_mask_composite), DCI_LOG_MASK_SIZE); driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE; + atomic_dec(&driver->data_ready_notif[index]); goto exit; } @@ -3233,6 +3290,7 @@ exit: exit_stat = diag_copy_dci(buf, count, entry, &ret); mutex_lock(&driver->diagchar_mutex); driver->data_ready[index] ^= DCI_DATA_TYPE; + atomic_dec(&driver->data_ready_notif[index]); mutex_unlock(&driver->diagchar_mutex); if (exit_stat == 1) { mutex_unlock(&driver->dci_mutex); diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c index ef08f939c36e0f616ca1238c47d54c73bd35c702..ba18db4bbdc96393edab13fdccb8e8b6689ecc8a 100644 --- a/drivers/char/diag/diagfwd.c +++ b/drivers/char/diag/diagfwd.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -226,6 +226,7 @@ void chk_logging_wakeup(void) * situation. */ driver->data_ready[i] |= USER_SPACE_DATA_TYPE; + atomic_inc(&driver->data_ready_notif[i]); pr_debug("diag: Force wakeup of logging process\n"); wake_up_interruptible(&driver->wait_q); break; @@ -241,7 +242,7 @@ void chk_logging_wakeup(void) } static void pack_rsp_and_send(unsigned char *buf, int len, - struct diag_md_session_t *info) + int pid) { int err; int retry_count = 0, i, rsp_ctxt; @@ -249,6 +250,7 @@ static void pack_rsp_and_send(unsigned char *buf, int len, unsigned long flags; unsigned char *rsp_ptr = driver->encoded_rsp_buf; struct diag_pkt_frame_t header; + struct diag_md_session_t *session_info = NULL, *info = NULL; if (!rsp_ptr || !buf) return; @@ -259,6 +261,11 @@ static void pack_rsp_and_send(unsigned char *buf, int len, return; } + mutex_lock(&driver->md_session_lock); + session_info = diag_md_session_get_pid(pid); + info = (session_info) ? 
session_info : + diag_md_session_get_peripheral(APPS_DATA); + if (info && info->peripheral_mask) { if (info->peripheral_mask == DIAG_CON_ALL || (info->peripheral_mask & (1 << APPS_DATA)) || @@ -273,6 +280,7 @@ static void pack_rsp_and_send(unsigned char *buf, int len, } } else rsp_ctxt = driver->rsp_buf_ctxt; + mutex_unlock(&driver->md_session_lock); /* * Keep trying till we get the buffer back. It should probably @@ -296,8 +304,11 @@ static void pack_rsp_and_send(unsigned char *buf, int len, * draining responses when we are in Memory Device Mode. */ if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE || - driver->logging_mode == DIAG_MULTI_MODE) + driver->logging_mode == DIAG_MULTI_MODE) { + mutex_lock(&driver->md_session_lock); chk_logging_wakeup(); + mutex_unlock(&driver->md_session_lock); + } } if (driver->rsp_buf_busy) { pr_err("diag: unable to get hold of response buffer\n"); @@ -326,13 +337,14 @@ static void pack_rsp_and_send(unsigned char *buf, int len, } static void encode_rsp_and_send(unsigned char *buf, int len, - struct diag_md_session_t *info) + int pid) { struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 }; struct diag_hdlc_dest_type enc = { NULL, NULL, 0 }; unsigned char *rsp_ptr = driver->encoded_rsp_buf; int err, i, rsp_ctxt, retry_count = 0; unsigned long flags; + struct diag_md_session_t *session_info = NULL, *info = NULL; if (!rsp_ptr || !buf) return; @@ -343,6 +355,11 @@ static void encode_rsp_and_send(unsigned char *buf, int len, return; } + mutex_lock(&driver->md_session_lock); + session_info = diag_md_session_get_pid(pid); + info = (session_info) ? session_info : + diag_md_session_get_peripheral(APPS_DATA); + if (info && info->peripheral_mask) { if (info->peripheral_mask == DIAG_CON_ALL || (info->peripheral_mask & (1 << APPS_DATA)) || @@ -357,7 +374,7 @@ static void encode_rsp_and_send(unsigned char *buf, int len, } } else rsp_ctxt = driver->rsp_buf_ctxt; - + mutex_unlock(&driver->md_session_lock); /* * Keep trying till we get the buffer back. It should probably * take one or two iterations. When this loops till UINT_MAX, it @@ -380,8 +397,11 @@ static void encode_rsp_and_send(unsigned char *buf, int len, * draining responses when we are in Memory Device Mode. */ if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE || - driver->logging_mode == DIAG_MULTI_MODE) + driver->logging_mode == DIAG_MULTI_MODE) { + mutex_lock(&driver->md_session_lock); chk_logging_wakeup(); + mutex_unlock(&driver->md_session_lock); + } } if (driver->rsp_buf_busy) { @@ -412,22 +432,23 @@ static void encode_rsp_and_send(unsigned char *buf, int len, memset(buf, '\0', DIAG_MAX_RSP_SIZE); } -void diag_send_rsp(unsigned char *buf, int len, struct diag_md_session_t *info) +static void diag_send_rsp(unsigned char *buf, int len, int pid) { - struct diag_md_session_t *session_info = NULL; + struct diag_md_session_t *session_info = NULL, *info = NULL; uint8_t hdlc_disabled; - + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); session_info = (info) ? 
info : diag_md_session_get_peripheral(APPS_DATA); if (session_info) hdlc_disabled = session_info->hdlc_disabled; else hdlc_disabled = driver->hdlc_disabled; - + mutex_unlock(&driver->md_session_lock); if (hdlc_disabled) - pack_rsp_and_send(buf, len, session_info); + pack_rsp_and_send(buf, len, pid); else - encode_rsp_and_send(buf, len, session_info); + encode_rsp_and_send(buf, len, pid); } void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type) @@ -480,8 +501,10 @@ void diag_update_userspace_clients(unsigned int type) mutex_lock(&driver->diagchar_mutex); for (i = 0; i < driver->num_clients; i++) - if (driver->client_map[i].pid != 0) + if (driver->client_map[i].pid != 0) { driver->data_ready[i] |= type; + atomic_inc(&driver->data_ready_notif[i]); + } wake_up_interruptible(&driver->wait_q); mutex_unlock(&driver->diagchar_mutex); } @@ -491,6 +514,7 @@ void diag_update_md_clients(unsigned int type) int i, j; mutex_lock(&driver->diagchar_mutex); + mutex_lock(&driver->md_session_lock); for (i = 0; i < NUM_MD_SESSIONS; i++) { if (driver->md_session_map[i] != NULL) for (j = 0; j < driver->num_clients; j++) { @@ -498,10 +522,13 @@ void diag_update_md_clients(unsigned int type) driver->client_map[j].pid == driver->md_session_map[i]->pid) { driver->data_ready[j] |= type; + atomic_inc( + &driver->data_ready_notif[j]); break; } } } + mutex_unlock(&driver->md_session_lock); wake_up_interruptible(&driver->wait_q); mutex_unlock(&driver->diagchar_mutex); } @@ -513,6 +540,7 @@ void diag_update_sleeping_process(int process_id, int data_type) for (i = 0; i < driver->num_clients; i++) if (driver->client_map[i].pid == process_id) { driver->data_ready[i] |= data_type; + atomic_inc(&driver->data_ready_notif[i]); break; } wake_up_interruptible(&driver->wait_q); @@ -899,7 +927,7 @@ static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len, } void diag_send_error_rsp(unsigned char *buf, int len, - struct diag_md_session_t *info) + int pid) { /* -1 to accomodate the first byte 0x13 */ if (len > (DIAG_MAX_RSP_SIZE - 1)) { @@ -909,27 +937,27 @@ void diag_send_error_rsp(unsigned char *buf, int len, *(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR; memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len); - diag_send_rsp(driver->apps_rsp_buf, len + 1, info); + diag_send_rsp(driver->apps_rsp_buf, len + 1, pid); } -int diag_process_apps_pkt(unsigned char *buf, int len, - struct diag_md_session_t *info) +int diag_process_apps_pkt(unsigned char *buf, int len, int pid) { - int i; + int i, p_mask = 0; int mask_ret; int write_len = 0; unsigned char *temp = NULL; struct diag_cmd_reg_entry_t entry; struct diag_cmd_reg_entry_t *temp_entry = NULL; struct diag_cmd_reg_t *reg_item = NULL; + struct diag_md_session_t *info = NULL; if (!buf) return -EIO; /* Check if the command is a supported mask command */ - mask_ret = diag_process_apps_masks(buf, len, info); + mask_ret = diag_process_apps_masks(buf, len, pid); if (mask_ret > 0) { - diag_send_rsp(driver->apps_rsp_buf, mask_ret, info); + diag_send_rsp(driver->apps_rsp_buf, mask_ret, pid); return 0; } @@ -951,7 +979,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len, driver->apps_rsp_buf, DIAG_MAX_RSP_SIZE); if (write_len > 0) - diag_send_rsp(driver->apps_rsp_buf, write_len, info); + diag_send_rsp(driver->apps_rsp_buf, write_len, pid); return 0; } @@ -960,14 +988,18 @@ int diag_process_apps_pkt(unsigned char *buf, int len, if (temp_entry) { reg_item = container_of(temp_entry, struct diag_cmd_reg_t, entry); + mutex_lock(&driver->md_session_lock); + 
info = diag_md_session_get_pid(pid); if (info) { - if (MD_PERIPHERAL_MASK(reg_item->proc) & - info->peripheral_mask) + p_mask = info->peripheral_mask; + mutex_unlock(&driver->md_session_lock); + if (MD_PERIPHERAL_MASK(reg_item->proc) & p_mask) write_len = diag_send_data(reg_item, buf, len); } else { + mutex_unlock(&driver->md_session_lock); if (MD_PERIPHERAL_MASK(reg_item->proc) & driver->logging_mask) - diag_send_error_rsp(buf, len, info); + diag_send_error_rsp(buf, len, pid); else write_len = diag_send_data(reg_item, buf, len); } @@ -983,13 +1015,13 @@ int diag_process_apps_pkt(unsigned char *buf, int len, for (i = 0; i < 4; i++) *(driver->apps_rsp_buf+i) = *(buf+i); *(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE; - diag_send_rsp(driver->apps_rsp_buf, 8, info); + diag_send_rsp(driver->apps_rsp_buf, 8, pid); return 0; } else if ((*buf == 0x4b) && (*(buf+1) == 0x12) && (*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) { len = diag_process_stm_cmd(buf, driver->apps_rsp_buf); if (len > 0) { - diag_send_rsp(driver->apps_rsp_buf, len, info); + diag_send_rsp(driver->apps_rsp_buf, len, pid); return 0; } return len; @@ -1002,7 +1034,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len, driver->apps_rsp_buf, DIAG_MAX_RSP_SIZE); if (write_len > 0) - diag_send_rsp(driver->apps_rsp_buf, write_len, info); + diag_send_rsp(driver->apps_rsp_buf, write_len, pid); return 0; } /* Check for time sync switch command */ @@ -1013,14 +1045,14 @@ int diag_process_apps_pkt(unsigned char *buf, int len, driver->apps_rsp_buf, DIAG_MAX_RSP_SIZE); if (write_len > 0) - diag_send_rsp(driver->apps_rsp_buf, write_len, info); + diag_send_rsp(driver->apps_rsp_buf, write_len, pid); return 0; } /* Check for download command */ else if ((chk_apps_master()) && (*buf == 0x3A)) { /* send response back */ driver->apps_rsp_buf[0] = *buf; - diag_send_rsp(driver->apps_rsp_buf, 1, info); + diag_send_rsp(driver->apps_rsp_buf, 1, pid); msleep(5000); /* call download API */ msm_set_restart_mode(RESTART_DLOAD); @@ -1040,7 +1072,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len, for (i = 0; i < 13; i++) driver->apps_rsp_buf[i+3] = 0; - diag_send_rsp(driver->apps_rsp_buf, 16, info); + diag_send_rsp(driver->apps_rsp_buf, 16, pid); return 0; } } @@ -1049,7 +1081,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len, (*(buf+2) == 0x04) && (*(buf+3) == 0x0)) { memcpy(driver->apps_rsp_buf, buf, 4); driver->apps_rsp_buf[4] = wrap_enabled; - diag_send_rsp(driver->apps_rsp_buf, 5, info); + diag_send_rsp(driver->apps_rsp_buf, 5, pid); return 0; } /* Wrap the Delayed Rsp ID */ @@ -1058,7 +1090,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len, wrap_enabled = true; memcpy(driver->apps_rsp_buf, buf, 4); driver->apps_rsp_buf[4] = wrap_count; - diag_send_rsp(driver->apps_rsp_buf, 6, info); + diag_send_rsp(driver->apps_rsp_buf, 6, pid); return 0; } /* Mobile ID Rsp */ @@ -1069,7 +1101,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len, driver->apps_rsp_buf, DIAG_MAX_RSP_SIZE); if (write_len > 0) { - diag_send_rsp(driver->apps_rsp_buf, write_len, info); + diag_send_rsp(driver->apps_rsp_buf, write_len, pid); return 0; } } @@ -1089,7 +1121,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len, for (i = 0; i < 55; i++) driver->apps_rsp_buf[i] = 0; - diag_send_rsp(driver->apps_rsp_buf, 55, info); + diag_send_rsp(driver->apps_rsp_buf, 55, pid); return 0; } /* respond to 0x7c command */ @@ -1102,14 +1134,14 @@ int diag_process_apps_pkt(unsigned char *buf, int len, chk_config_get_id(); *(unsigned char 
*)(driver->apps_rsp_buf + 12) = '\0'; *(unsigned char *)(driver->apps_rsp_buf + 13) = '\0'; - diag_send_rsp(driver->apps_rsp_buf, 14, info); + diag_send_rsp(driver->apps_rsp_buf, 14, pid); return 0; } } write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf, DIAG_MAX_RSP_SIZE); if (write_len > 0) { - diag_send_rsp(driver->apps_rsp_buf, write_len, info); + diag_send_rsp(driver->apps_rsp_buf, write_len, pid); return 0; } write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf, @@ -1121,7 +1153,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len, * before disabling HDLC encoding on Apps processor. */ mutex_lock(&driver->hdlc_disable_mutex); - diag_send_rsp(driver->apps_rsp_buf, write_len, info); + diag_send_rsp(driver->apps_rsp_buf, write_len, pid); /* * Set the value of hdlc_disabled after sending the response to * the tools. This is required since the tools is expecting a @@ -1129,10 +1161,13 @@ int diag_process_apps_pkt(unsigned char *buf, int len, */ pr_debug("diag: In %s, disabling HDLC encoding\n", __func__); + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); if (info) info->hdlc_disabled = 1; else driver->hdlc_disabled = 1; + mutex_unlock(&driver->md_session_lock); diag_update_md_clients(HDLC_SUPPORT_TYPE); mutex_unlock(&driver->hdlc_disable_mutex); return 0; @@ -1141,13 +1176,12 @@ int diag_process_apps_pkt(unsigned char *buf, int len, /* We have now come to the end of the function. */ if (chk_apps_only()) - diag_send_error_rsp(buf, len, info); + diag_send_error_rsp(buf, len, pid); return 0; } -void diag_process_hdlc_pkt(void *data, unsigned len, - struct diag_md_session_t *info) +void diag_process_hdlc_pkt(void *data, unsigned int len, int pid) { int err = 0; int ret = 0; @@ -1207,7 +1241,7 @@ void diag_process_hdlc_pkt(void *data, unsigned len, } err = diag_process_apps_pkt(driver->hdlc_buf, - driver->hdlc_buf_len, info); + driver->hdlc_buf_len, pid); if (err < 0) goto fail; } else { @@ -1224,7 +1258,7 @@ fail: * recovery algorithm. Send an error response if the * packet is not in expected format. 
*/ - diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len, info); + diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len, pid); driver->hdlc_buf_len = 0; end: mutex_unlock(&driver->diag_hdlc_mutex); @@ -1321,9 +1355,11 @@ static int diagfwd_mux_close(int id, int mode) static uint8_t hdlc_reset; -static void hdlc_reset_timer_start(struct diag_md_session_t *info) +static void hdlc_reset_timer_start(int pid) { + struct diag_md_session_t *info = NULL; mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); if (!hdlc_timer_in_progress) { hdlc_timer_in_progress = 1; if (info) @@ -1365,15 +1401,16 @@ void diag_md_hdlc_reset_timer_func(unsigned long pid) } static void diag_hdlc_start_recovery(unsigned char *buf, int len, - struct diag_md_session_t *info) + int pid) { int i; static uint32_t bad_byte_counter; unsigned char *start_ptr = NULL; struct diag_pkt_frame_t *actual_pkt = NULL; + struct diag_md_session_t *info = NULL; hdlc_reset = 1; - hdlc_reset_timer_start(info); + hdlc_reset_timer_start(pid); actual_pkt = (struct diag_pkt_frame_t *)buf; for (i = 0; i < len; i++) { @@ -1392,10 +1429,13 @@ static void diag_hdlc_start_recovery(unsigned char *buf, int len, pr_err("diag: In %s, re-enabling HDLC encoding\n", __func__); mutex_lock(&driver->hdlc_disable_mutex); + mutex_lock(&driver->md_session_lock); + info = diag_md_session_get_pid(pid); if (info) info->hdlc_disabled = 0; else driver->hdlc_disabled = 0; + mutex_unlock(&driver->md_session_lock); mutex_unlock(&driver->hdlc_disable_mutex); diag_update_md_clients(HDLC_SUPPORT_TYPE); @@ -1408,12 +1448,11 @@ static void diag_hdlc_start_recovery(unsigned char *buf, int len, mutex_lock(&driver->hdlc_recovery_mutex); driver->incoming_pkt.processing = 0; mutex_unlock(&driver->hdlc_recovery_mutex); - diag_process_non_hdlc_pkt(start_ptr, len - i, info); + diag_process_non_hdlc_pkt(start_ptr, len - i, pid); } } -void diag_process_non_hdlc_pkt(unsigned char *buf, int len, - struct diag_md_session_t *info) +void diag_process_non_hdlc_pkt(unsigned char *buf, int len, int pid) { int err = 0; uint16_t pkt_len = 0; @@ -1469,11 +1508,11 @@ void diag_process_non_hdlc_pkt(unsigned char *buf, int len, if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR) { mutex_unlock(&driver->hdlc_recovery_mutex); - diag_hdlc_start_recovery(buf, len, info); + diag_hdlc_start_recovery(buf, len, pid); mutex_lock(&driver->hdlc_recovery_mutex); } err = diag_process_apps_pkt(data_ptr, - actual_pkt->length, info); + actual_pkt->length, pid); if (err) { pr_err("diag: In %s, unable to process incoming data packet, err: %d\n", __func__, err); @@ -1495,8 +1534,8 @@ start: pkt_len = actual_pkt->length; if (actual_pkt->start != CONTROL_CHAR) { - diag_hdlc_start_recovery(buf, len, info); - diag_send_error_rsp(buf, len, info); + diag_hdlc_start_recovery(buf, len, pid); + diag_send_error_rsp(buf, len, pid); goto end; } mutex_lock(&driver->hdlc_recovery_mutex); @@ -1504,7 +1543,7 @@ start: pr_err("diag: In %s, incoming data is too large for the request buffer %d\n", __func__, pkt_len); mutex_unlock(&driver->hdlc_recovery_mutex); - diag_hdlc_start_recovery(buf, len, info); + diag_hdlc_start_recovery(buf, len, pid); break; } if ((pkt_len + header_len) > (len - read_bytes)) { @@ -1521,13 +1560,13 @@ start: if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR) { mutex_unlock(&driver->hdlc_recovery_mutex); - diag_hdlc_start_recovery(buf, len, info); + diag_hdlc_start_recovery(buf, len, pid); mutex_lock(&driver->hdlc_recovery_mutex); } else hdlc_reset 
= 0; err = diag_process_apps_pkt(data_ptr, - actual_pkt->length, info); + actual_pkt->length, pid); if (err) { mutex_unlock(&driver->hdlc_recovery_mutex); break; @@ -1546,9 +1585,9 @@ static int diagfwd_mux_read_done(unsigned char *buf, int len, int ctxt) return -EINVAL; if (!driver->hdlc_disabled) - diag_process_hdlc_pkt(buf, len, NULL); + diag_process_hdlc_pkt(buf, len, 0); else - diag_process_non_hdlc_pkt(buf, len, NULL); + diag_process_non_hdlc_pkt(buf, len, 0); diag_mux_queue_read(ctxt); return 0; @@ -1703,6 +1742,8 @@ int diagfwd_init(void) , GFP_KERNEL)) == NULL) goto err; kmemleak_not_leak(driver->data_ready); + for (i = 0; i < THRESHOLD_CLIENT_LIMIT; i++) + atomic_set(&driver->data_ready_notif[i], 0); if (driver->apps_req_buf == NULL) { driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL); if (!driver->apps_req_buf) diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h index 97ad3f60ba5ec9b96929603c356cd5686e064b01..8b097cfc4527584a000bd4c931532f073ce87668 100644 --- a/drivers/char/diag/diagfwd.h +++ b/drivers/char/diag/diagfwd.h @@ -30,10 +30,8 @@ int diagfwd_init(void); void diagfwd_exit(void); -void diag_process_hdlc_pkt(void *data, unsigned len, - struct diag_md_session_t *info); -void diag_process_non_hdlc_pkt(unsigned char *data, int len, - struct diag_md_session_t *info); +void diag_process_hdlc_pkt(void *data, unsigned int len, int pid); +void diag_process_non_hdlc_pkt(unsigned char *data, int len, int pid); int chk_config_get_id(void); int chk_apps_only(void); int chk_apps_master(void); @@ -45,10 +43,8 @@ int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len, int diag_check_common_cmd(struct diag_pkt_header_t *header); void diag_update_userspace_clients(unsigned int type); void diag_update_sleeping_process(int process_id, int data_type); -int diag_process_apps_pkt(unsigned char *buf, int len, - struct diag_md_session_t *info); -void diag_send_error_rsp(unsigned char *buf, int len, - struct diag_md_session_t *info); +int diag_process_apps_pkt(unsigned char *buf, int len, int pid); +void diag_send_error_rsp(unsigned char *buf, int len, int pid); void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type); int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf); void diag_md_hdlc_reset_timer_func(unsigned long pid); diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c index 7e428ce972a8963dea7d1738579553c505314620..a7abe3dafb69b1affb616bacf6feb513b5fee59b 100644 --- a/drivers/char/diag/diagfwd_peripheral.c +++ b/drivers/char/diag/diagfwd_peripheral.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -316,14 +316,13 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info, diag_ws_release(); return; } - - session_info = - diag_md_session_get_peripheral(peripheral); + mutex_lock(&driver->md_session_lock); + session_info = diag_md_session_get_peripheral(peripheral); if (session_info) hdlc_disabled = session_info->hdlc_disabled; else hdlc_disabled = driver->hdlc_disabled; - + mutex_unlock(&driver->md_session_lock); if (hdlc_disabled) { /* The data is raw and and on APPS side HDLC is disabled */ if (!buf) { @@ -638,12 +637,13 @@ static void diagfwd_data_read_done(struct diagfwd_info *fwd_info, mutex_lock(&driver->hdlc_disable_mutex); mutex_lock(&fwd_info->data_mutex); + mutex_lock(&driver->md_session_lock); session_info = diag_md_session_get_peripheral(fwd_info->peripheral); if (session_info) hdlc_disabled = session_info->hdlc_disabled; else hdlc_disabled = driver->hdlc_disabled; - + mutex_unlock(&driver->md_session_lock); if (!driver->feature[fwd_info->peripheral].encode_hdlc) { if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) { temp_buf = fwd_info->buf_1; diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c index eb72217b9b1caf4f0878445c08544c94d3b8feab..a1635bad3bb03660faa30b9b838c8b572df8ee47 100644 --- a/drivers/clk/msm/clock-osm.c +++ b/drivers/clk/msm/clock-osm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2868,7 +2868,7 @@ static ssize_t debugfs_trace_method_get(struct file *file, char __user *buf, else if (c->trace_method == XOR_PACKET) len = snprintf(debug_buf, sizeof(debug_buf), "xor\n"); - rc = simple_read_from_buffer((void __user *) buf, len, ppos, + rc = simple_read_from_buffer((void __user *) buf, count, ppos, (void *) debug_buf, len); mutex_unlock(&debug_buf_mutex); diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c index 510a9803bd820041d4787b51ef6de5de861f95e0..68f4afde0d0cbe59093e58856c16abc38c54717f 100644 --- a/drivers/clk/qcom/clk-cpu-osm.c +++ b/drivers/clk/qcom/clk-cpu-osm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2691,7 +2691,7 @@ static ssize_t debugfs_trace_method_get(struct file *file, char __user *buf, else if (c->trace_method == XOR_PACKET) len = snprintf(debug_buf, sizeof(debug_buf), "xor\n"); - rc = simple_read_from_buffer((void __user *) buf, len, ppos, + rc = simple_read_from_buffer((void __user *) buf, count, ppos, (void *) debug_buf, len); mutex_unlock(&debug_buf_mutex); diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index ff0c8327fabe1551fca65a9d5f3094b4136a5e9a..3c3cf8e04eea7650cf55bf69534d0631b77aa3dd 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2013, 2016-2018, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -210,9 +210,11 @@ static unsigned long clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); + const struct freq_tbl *f_curr; u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask; - if (rcg->enable_safe_config && !clk_hw_is_prepared(hw)) { + if (rcg->enable_safe_config && (!clk_hw_is_prepared(hw) + || !clk_hw_is_enabled(hw))) { if (!rcg->current_freq) rcg->current_freq = cxo_f.freq; return rcg->current_freq; @@ -232,9 +234,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) mode >>= CFG_MODE_SHIFT; } - mask = BIT(rcg->hid_width) - 1; - hid_div = cfg >> CFG_SRC_DIV_SHIFT; - hid_div &= mask; + if (rcg->enable_safe_config) { + f_curr = qcom_find_freq(rcg->freq_tbl, rcg->current_freq); + if (!f_curr) + return -EINVAL; + + hid_div = f_curr->pre_div; + } else { + mask = BIT(rcg->hid_width) - 1; + hid_div = cfg >> CFG_SRC_DIV_SHIFT; + hid_div &= mask; + } return calc_rate(parent_rate, m, n, mode, hid_div); } diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c index 542737e4d2043a1586ecc1e1d75406025b493892..05606f1b23dc134e4075e8750b9f13d6ec2add93 100644 --- a/drivers/clk/qcom/mmcc-sdm660.c +++ b/drivers/clk/qcom/mmcc-sdm660.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2419,7 +2419,7 @@ static struct clk_regmap_div mmss_mdss_byte0_intf_div_clk = { }, .num_parents = 1, .ops = &clk_regmap_div_ops, - .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, + .flags = CLK_GET_RATE_NOCACHE, }, }, }; @@ -2476,7 +2476,7 @@ static struct clk_regmap_div mmss_mdss_byte1_intf_div_clk = { }, .num_parents = 1, .ops = &clk_regmap_div_ops, - .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, + .flags = CLK_GET_RATE_NOCACHE, }, }, }; diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index dbc198b007925d66be5dc98b86417e0aa714fc32..cb3b25ddd0dad400c0e3eb71b7f659f38726bb1a 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -98,3 +98,13 @@ config DRM_SDE_HDMI default y help Choose this option if HDMI connector support is needed in SDE driver. + +config DRM_SDE_EVTLOG_DEBUG + bool "Enable event logging in MSM DRM" + depends on DRM_MSM + help + The SDE DRM debugging provides support to enable display debugging + features to: dump SDE registers during driver errors, panic + driver during fatal errors and enable some display-driver logging + into an internal buffer (this avoids logging overhead). 
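For reference, the "internal buffer" mentioned in this help text is conceptually a fixed-size in-memory ring that hot paths (IRQ handlers, commit paths) write into through macros such as SDE_EVT32()/SDE_EVT32_IRQ(), which show up later in this series. A minimal sketch of that pattern follows; the entry layout and the names evtlog_entry/evtlog_write are placeholders for the example, not the driver's actual implementation in sde_dbg.c/sde_dbg_evtlog.c:

	#include <linux/types.h>
	#include <linux/spinlock.h>
	#include <linux/ktime.h>

	#define EVTLOG_ENTRIES	1024	/* power of two so the index wraps cleanly */

	struct evtlog_entry {
		s64 ts_ns;		/* timestamp of the event */
		const char *name;	/* calling function or tag */
		u32 data[4];		/* caller-supplied values */
	};

	static struct evtlog_entry evtlog[EVTLOG_ENTRIES];
	static unsigned int evtlog_head;
	static DEFINE_SPINLOCK(evtlog_lock);

	/* Record one event; cheap enough to call from IRQ context. */
	static void evtlog_write(const char *name, u32 d0, u32 d1, u32 d2, u32 d3)
	{
		struct evtlog_entry *e;
		unsigned long flags;

		spin_lock_irqsave(&evtlog_lock, flags);
		e = &evtlog[evtlog_head++ % EVTLOG_ENTRIES];
		e->ts_ns = ktime_get_ns();
		e->name = name;
		e->data[0] = d0;
		e->data[1] = d1;
		e->data[2] = d2;
		e->data[3] = d3;
		spin_unlock_irqrestore(&evtlog_lock, flags);
	}

Writing to a buffer like this instead of the console is what keeps the steady-state logging overhead low; the register dump and optional panic described above only trigger when the driver hits an error.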
+ diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 4c082fff2fc5f73696fb5167d88bb82d5f0d615f..678b2178cb698dc17bb9477d4d883228216713f4 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -49,6 +49,7 @@ msm_drm-y := \ sde/sde_color_processing.o \ sde/sde_vbif.o \ sde/sde_splash.o \ + sde_dbg.o \ sde_dbg_evtlog.o \ sde_io_util.o \ dba_bridge.o \ diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c index 0f77e35ef287279513a95acc945912da773c7854..b6b07135fdd1d3084f0b57087714890921cde265 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -1215,6 +1215,9 @@ static int _sde_hdmi_gpio_config(struct hdmi *hdmi, bool on) gpio_free(config->hpd_gpio); + if (config->hpd5v_gpio != -1) + gpio_free(config->hpd5v_gpio); + if (config->mux_en_gpio != -1) { gpio_set_value_cansleep(config->mux_en_gpio, 0); gpio_free(config->mux_en_gpio); @@ -1336,19 +1339,26 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi) HDMI_HPD_CTRL_ENABLE | hpd_ctrl); spin_unlock_irqrestore(&hdmi->reg_lock, flags); + hdmi->hpd_off = false; + SDE_DEBUG("enabled hdmi hpd\n"); return 0; fail: return ret; } -static void _sde_hdmi_hdp_disable(struct sde_hdmi *sde_hdmi) +static void _sde_hdmi_hpd_disable(struct sde_hdmi *sde_hdmi) { struct hdmi *hdmi = sde_hdmi->ctrl.ctrl; const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; int i, ret = 0; + if (hdmi->hpd_off) { + pr_warn("hdmi display hpd was already disabled\n"); + return; + } + /* Disable HPD interrupt */ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0); @@ -1371,6 +1381,36 @@ static void _sde_hdmi_hdp_disable(struct sde_hdmi *sde_hdmi) pr_warn("failed to disable hpd regulator: %s (%d)\n", config->hpd_reg_names[i], ret); } + hdmi->hpd_off = true; + SDE_DEBUG("disabled hdmi hpd\n"); +} + +/** + * _sde_hdmi_update_hpd_state() - Update the HDMI HPD clock state + * + * @state: non-zero to disbale HPD clock, 0 to enable. + * return: 0 on success, non-zero in case of failure. 
+ * + */ +static int +_sde_hdmi_update_hpd_state(struct sde_hdmi *hdmi_display, u64 state) +{ + struct hdmi *hdmi = hdmi_display->ctrl.ctrl; + int rc = 0; + + if (hdmi_display->non_pluggable) + return 0; + + SDE_DEBUG("changing hdmi hpd state to %llu\n", state); + + if (state == SDE_MODE_HPD_ON) { + if (!hdmi->hpd_off) + pr_warn("hdmi display hpd was already enabled\n"); + rc = _sde_hdmi_hpd_enable(hdmi_display); + } else + _sde_hdmi_hpd_disable(hdmi_display); + + return rc; } static void _sde_hdmi_cec_update_phys_addr(struct sde_hdmi *display) @@ -2140,6 +2180,8 @@ int sde_hdmi_set_property(struct drm_connector *connector, rc = _sde_hdmi_enable_pll_update(display, value); else if (property_index == CONNECTOR_PROP_PLL_DELTA) rc = _sde_hdmi_update_pll_delta(display, value); + else if (property_index == CONNECTOR_PROP_HPD_OFF) + rc = _sde_hdmi_update_hpd_state(display, value); return rc; } @@ -2217,7 +2259,7 @@ int sde_hdmi_connector_pre_deinit(struct drm_connector *connector, return -EINVAL; } - _sde_hdmi_hdp_disable(sde_hdmi); + _sde_hdmi_hpd_disable(sde_hdmi); return 0; } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 8ca7b36ee0c8225c74682426a7b67b3a6fbde175..9a0733bf81ffb2393ec9af435289e9be7b754a0e 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -50,6 +50,9 @@ struct hdmi { const struct hdmi_platform_config *config; + /* hpd state: */ + bool hpd_off; + /* audio state: */ struct hdmi_audio audio; diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 0c119ec5d97cc1de2f55de4b847158d65de54e6b..5ebf50575dd6be5fdaf92beaeb93e62846e719e6 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * Copyright (C) 2014 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -562,6 +562,11 @@ int msm_atomic_commit(struct drm_device *dev, struct msm_commit *commit; int i, ret; + if (!priv || priv->shutdown_in_progress) { + DRM_ERROR("priv is null or shutdown is in-progress\n"); + return -EINVAL; + } + SDE_ATRACE_BEGIN("atomic_commit"); ret = drm_atomic_helper_prepare_planes(dev, state); if (ret) { diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index c8b11425a817b658ae4366ab0e9fe2444b26beb6..dc24b9d44abbba4d2f340c65cbb2388ffe8c7138 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -293,7 +293,7 @@ static int msm_unload(struct drm_device *dev) priv->vram.paddr, &attrs); } - sde_evtlog_destroy(); + sde_dbg_destroy(); sde_power_client_destroy(&priv->phandle, priv->pclient); sde_power_resource_deinit(pdev, &priv->phandle); @@ -423,12 +423,19 @@ static int msm_component_bind_all(struct device *dev, } #endif +static int msm_power_enable_wrapper(void *handle, void *client, bool enable) +{ + return sde_power_resource_enable(handle, client, enable); +} + static int msm_load(struct drm_device *dev, unsigned long flags) { struct platform_device *pdev = dev->platformdev; struct msm_drm_private *priv; struct msm_kms *kms; + struct sde_dbg_power_ctrl dbg_power_ctrl = { NULL }; int ret, i; + struct sched_param param; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { @@ -477,9 +484,13 @@ static int msm_load(struct drm_device *dev, unsigned long flags) if (ret) goto fail; - ret = sde_evtlog_init(dev->primary->debugfs_root); + dbg_power_ctrl.handle = &priv->phandle; + dbg_power_ctrl.client = priv->pclient; + dbg_power_ctrl.enable_fn = msm_power_enable_wrapper; + ret = sde_dbg_init(dev->primary->debugfs_root, &pdev->dev, + &dbg_power_ctrl); if (ret) { - dev_err(dev->dev, "failed to init evtlog: %d\n", ret); + dev_err(dev->dev, "failed to init sde dbg: %d\n", ret); goto fail; } @@ -518,7 +529,12 @@ static int msm_load(struct drm_device *dev, unsigned long flags) goto fail; } } - + /** + * this priority was found during empiric testing to have appropriate + * realtime scheduling to process display updates and interact with + * other real time and normal priority task + */ + param.sched_priority = 16; /* initialize commit thread structure */ for (i = 0; i < priv->num_crtcs; i++) { priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id; @@ -529,6 +545,11 @@ static int msm_load(struct drm_device *dev, unsigned long flags) &priv->disp_thread[i].worker, "crtc_commit:%d", priv->disp_thread[i].crtc_id); + ret = sched_setscheduler(priv->disp_thread[i].thread, + SCHED_FIFO, ¶m); + if (ret) + pr_warn("display thread priority update failed: %d\n", + ret); if (IS_ERR(priv->disp_thread[i].thread)) { dev_err(dev->dev, "failed to create kthread\n"); @@ -2181,6 +2202,28 @@ static const struct platform_device_id msm_id[] = { { } }; +static void msm_pdev_shutdown(struct platform_device *pdev) +{ + struct drm_device *ddev = platform_get_drvdata(pdev); + struct msm_drm_private *priv = NULL; + + if (!ddev) { + DRM_ERROR("invalid drm device node\n"); + return; + } + + priv = ddev->dev_private; + if (!priv) { + DRM_ERROR("invalid msm drm private node\n"); + return; + } + + msm_lastclose(ddev); + + /* set this after lastclose to allow kickoff from lastclose */ + priv->shutdown_in_progress = true; +} + static const struct of_device_id dt_match[] = { { .compatible = "qcom,mdp" }, /* mdp4 */ { .compatible = "qcom,sde-kms" }, /* sde */ @@ -2191,6 +2234,7 @@ MODULE_DEVICE_TABLE(of, dt_match); static struct platform_driver msm_platform_driver = { .probe = msm_pdev_probe, .remove = msm_pdev_remove, + .shutdown = msm_pdev_shutdown, .driver = { .name = "msm_drm", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 25dc5f9ef561cba4a02d00e5e2c2542a1dcfd2f6..e0ac0582e79195f4e09d495c877ebff37d17d3af 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -164,6 +164,7 @@ enum msm_mdp_conn_property { CONNECTOR_PROP_TOPOLOGY_NAME, CONNECTOR_PROP_TOPOLOGY_CONTROL, CONNECTOR_PROP_LP, + CONNECTOR_PROP_HPD_OFF, /* total # of properties */ CONNECTOR_PROP_COUNT @@ -374,6 +375,9 @@ struct msm_drm_private { /* list of clients waiting for events */ struct list_head client_event_list; + + /* update the flag when msm driver receives shutdown notification */ + bool shutdown_in_progress; }; struct msm_format { diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index a3f0392c2f88b17d883a5afb8a700dfb74ea286d..d222fdd69a57dee86473860e757fc679e3ae53c1 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -33,15 +33,31 @@ static int msm_framebuffer_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned int *handle) { - struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); + struct msm_framebuffer *msm_fb; + + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return -EINVAL; + } + + msm_fb = to_msm_framebuffer(fb); + return drm_gem_handle_create(file_priv, msm_fb->planes[0], handle); } static void msm_framebuffer_destroy(struct drm_framebuffer *fb) { - struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); - int i, n = drm_format_num_planes(fb->pixel_format); + struct msm_framebuffer *msm_fb; + int i, n; + + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return; + } + + msm_fb = to_msm_framebuffer(fb); + n = drm_format_num_planes(fb->pixel_format); DBG("destroy: FB ID: %d (%p)", fb->base.id, fb); @@ -72,9 +88,16 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { #ifdef CONFIG_DEBUG_FS void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) { - struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); - int i, n = drm_format_num_planes(fb->pixel_format); + struct msm_framebuffer *msm_fb; + int i, n; + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return; + } + + msm_fb = to_msm_framebuffer(fb); + n = drm_format_num_planes(fb->pixel_format); seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n", fb->width, fb->height, (char *)&fb->pixel_format, fb->refcount.refcount.counter, fb->base.id); @@ -95,10 +118,17 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) int msm_framebuffer_prepare(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace) { - struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); - int ret, i, n = drm_format_num_planes(fb->pixel_format); + struct msm_framebuffer *msm_fb; + int ret, i, n; uint64_t iova; + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return -EINVAL; + } + + msm_fb = to_msm_framebuffer(fb); + n = drm_format_num_planes(fb->pixel_format); for (i = 0; i < n; i++) { ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova); DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret); @@ -112,8 +142,16 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, void msm_framebuffer_cleanup(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace) { - struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); - int i, n = drm_format_num_planes(fb->pixel_format); + struct msm_framebuffer *msm_fb; + int i, n; + + if (fb == NULL) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return; + } + + msm_fb 
= to_msm_framebuffer(fb); + n = drm_format_num_planes(fb->pixel_format); for (i = 0; i < n; i++) msm_gem_put_iova(msm_fb->planes[i], aspace); @@ -123,9 +161,15 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb, uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace, int plane) { - struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); + struct msm_framebuffer *msm_fb; uint64_t iova; + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return -EINVAL; + } + + msm_fb = to_msm_framebuffer(fb); if (!msm_fb->planes[plane]) return 0; @@ -137,7 +181,14 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane) { - struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); + struct msm_framebuffer *msm_fb; + + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return ERR_PTR(-EINVAL); + } + + msm_fb = to_msm_framebuffer(fb); return msm_fb->planes[plane]; } diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c index 10f89de25831a8a34941da6f43cbd2b91ea74e53..02ed2b7a062f97a11439e78642bce149768b4cb5 100644 --- a/drivers/gpu/drm/msm/msm_prop.c +++ b/drivers/gpu/drm/msm/msm_prop.c @@ -340,9 +340,16 @@ void msm_property_install_enum(struct msm_property_info *info, info->property_data[property_idx].default_value = default_value; info->property_data[property_idx].force_dirty = false; + /* select first defined value for enums */ + if (!is_bitmask) + info->property_data[property_idx].default_value = + values->type; + /* always attach property, if created */ if (*prop) { - drm_object_attach_property(info->base, *prop, 0); + drm_object_attach_property(info->base, *prop, + info->property_data + [property_idx].default_value); ++info->install_count; } } diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 8d15fd2f9cf7c4918532c978f68e55f4f27a0df1..84b0108438faab6863c96bd9f637099234ac8154 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -45,6 +45,11 @@ static const struct drm_prop_enum_list e_power_mode[] = { {SDE_MODE_DPMS_OFF, "OFF"}, }; +static const struct drm_prop_enum_list hpd_clock_state[] = { + {SDE_MODE_HPD_ON, "ON"}, + {SDE_MODE_HPD_OFF, "OFF"}, +}; + int sde_connector_get_info(struct drm_connector *connector, struct msm_display_info *info) { @@ -474,6 +479,9 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector, _sde_connector_update_power_locked(c_conn); mutex_unlock(&c_conn->lock); break; + case CONNECTOR_PROP_HPD_OFF: + c_conn->hpd_mode = val; + break; default: break; } @@ -802,6 +810,7 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, c_conn->display = display; c_conn->dpms_mode = DRM_MODE_DPMS_ON; + c_conn->hpd_mode = SDE_MODE_HPD_ON; c_conn->lp_mode = 0; c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON; @@ -929,6 +938,11 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, ARRAY_SIZE(e_power_mode), CONNECTOR_PROP_LP, 0); + msm_property_install_enum(&c_conn->property_info, "HPD_OFF", + DRM_MODE_PROP_ATOMIC, 0, hpd_clock_state, + ARRAY_SIZE(hpd_clock_state), + CONNECTOR_PROP_HPD_OFF, 0); + rc = msm_property_install_get_status(&c_conn->property_info); if (rc) { SDE_ERROR("failed to create one or more properties\n"); diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h index f9b8c3966d74899fbe4d9000a27442a6033d7395..a0ddcf4445400f1c7023b731db447e5a8bdad81d 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.h +++ b/drivers/gpu/drm/msm/sde/sde_connector.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -22,6 +22,9 @@ #include "sde_kms.h" #include "sde_fence.h" +#define SDE_MODE_HPD_ON 0 +#define SDE_MODE_HPD_OFF 1 + #define SDE_CONNECTOR_NAME_SIZE 16 struct sde_connector; @@ -207,6 +210,7 @@ struct sde_connector { struct sde_fence retire_fence; struct sde_connector_ops ops; int dpms_mode; + u64 hpd_mode; int lp_mode; int last_panel_power_mode; diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c index dbfc2dd11a172efdee769bca7e5d8dddf28c092f..4f7e688650de05ebc0e7b58413ae70ab8806efda 100644 --- a/drivers/gpu/drm/msm/sde/sde_core_irq.c +++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c @@ -31,23 +31,35 @@ static void sde_core_irq_callback_handler(void *arg, int irq_idx) struct sde_irq *irq_obj = &sde_kms->irq_obj; struct sde_irq_callback *cb; unsigned long irq_flags; + bool cb_tbl_error = false; + int enable_counts = 0; - SDE_DEBUG("irq_idx=%d\n", irq_idx); + pr_debug("irq_idx=%d\n", irq_idx); - if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) - SDE_ERROR("irq_idx=%d has no registered callback\n", irq_idx); + spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags); + if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) { + /* print error outside lock */ + cb_tbl_error = true; + enable_counts = atomic_read( + &sde_kms->irq_obj.enable_counts[irq_idx]); + } atomic_inc(&irq_obj->irq_counts[irq_idx]); /* * Perform registered function callback */ - spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags); list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list) if (cb->func) cb->func(cb->arg, irq_idx); spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags); + if (cb_tbl_error) { + SDE_ERROR("irq has no registered callback, idx %d enables %d\n", + irq_idx, enable_counts); + SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR); + } + /* * Clear pending interrupt status in HW. 
* NOTE: sde_core_irq_callback_handler is protected by top-level diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 66b03522903f39310e3e43af904698847146cb61..118d9c1ab2324df4cf6eeb545348f8296b757afc 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -1341,8 +1341,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, struct drm_display_mode *mode; int cnt = 0, rc = 0, mixer_width, i, z_pos; - int left_crtc_zpos_cnt[SDE_STAGE_MAX] = {0}; - int right_crtc_zpos_cnt[SDE_STAGE_MAX] = {0}; + int left_zpos_cnt = 0, right_zpos_cnt = 0; if (!crtc) { SDE_ERROR("invalid crtc\n"); @@ -1396,11 +1395,12 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, } } + /* assign mixer stages based on sorted zpos property */ + sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); + if (!sde_is_custom_client()) { int stage_old = pstates[0].stage; - /* assign mixer stages based on sorted zpos property */ - sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); z_pos = 0; for (i = 0; i < cnt; i++) { if (stage_old != pstates[i].stage) @@ -1410,8 +1410,14 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, } } + z_pos = -1; for (i = 0; i < cnt; i++) { - z_pos = pstates[i].stage; + /* reset counts at every new blend stage */ + if (pstates[i].stage != z_pos) { + left_zpos_cnt = 0; + right_zpos_cnt = 0; + z_pos = pstates[i].stage; + } /* verify z_pos setting before using it */ if (z_pos >= SDE_STAGE_MAX - SDE_STAGE_0) { @@ -1420,22 +1426,24 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, rc = -EINVAL; goto end; } else if (pstates[i].drm_pstate->crtc_x < mixer_width) { - if (left_crtc_zpos_cnt[z_pos] == 2) { - SDE_ERROR("> 2 plane @ stage%d on left\n", + if (left_zpos_cnt == 2) { + SDE_ERROR("> 2 planes @ stage %d on left\n", z_pos); rc = -EINVAL; goto end; } - left_crtc_zpos_cnt[z_pos]++; + left_zpos_cnt++; + } else { - if (right_crtc_zpos_cnt[z_pos] == 2) { - SDE_ERROR("> 2 plane @ stage%d on right\n", + if (right_zpos_cnt == 2) { + SDE_ERROR("> 2 planes @ stage %d on right\n", z_pos); rc = -EINVAL; goto end; } - right_crtc_zpos_cnt[z_pos]++; + right_zpos_cnt++; } + pstates[i].sde_pstate->stage = z_pos + SDE_STAGE_0; SDE_DEBUG("%s: zpos %d", sde_crtc->name, z_pos); } @@ -1447,6 +1455,73 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, goto end; } + /* validate source split: + * use pstates sorted by stage to check planes on same stage + * we assume that all pipes are in source split so its valid to compare + * without taking into account left/right mixer placement + */ + for (i = 1; i < cnt; i++) { + struct plane_state *prv_pstate, *cur_pstate; + struct sde_rect left_rect, right_rect; + int32_t left_pid, right_pid; + int32_t stage; + + prv_pstate = &pstates[i - 1]; + cur_pstate = &pstates[i]; + if (prv_pstate->stage != cur_pstate->stage) + continue; + + stage = cur_pstate->stage; + + left_pid = prv_pstate->sde_pstate->base.plane->base.id; + POPULATE_RECT(&left_rect, prv_pstate->drm_pstate->crtc_x, + prv_pstate->drm_pstate->crtc_y, + prv_pstate->drm_pstate->crtc_w, + prv_pstate->drm_pstate->crtc_h, false); + + right_pid = cur_pstate->sde_pstate->base.plane->base.id; + POPULATE_RECT(&right_rect, cur_pstate->drm_pstate->crtc_x, + cur_pstate->drm_pstate->crtc_y, + cur_pstate->drm_pstate->crtc_w, + cur_pstate->drm_pstate->crtc_h, false); + + if (right_rect.x < left_rect.x) { + swap(left_pid, right_pid); + swap(left_rect, right_rect); + } + + /** + * - planes are enumerated in pipe-priority order 
such that + * planes with lower drm_id must be left-most in a shared + * blend-stage when using source split. + * - planes in source split must be contiguous in width + * - planes in source split must have same dest yoff and height + */ + if (right_pid < left_pid) { + SDE_ERROR( + "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n", + stage, left_pid, right_pid); + rc = -EINVAL; + goto end; + } else if (right_rect.x != (left_rect.x + left_rect.w)) { + SDE_ERROR( + "non-contiguous coordinates for src split. stage: %d left: %d - %d right: %d - %d\n", + stage, left_rect.x, left_rect.w, + right_rect.x, right_rect.w); + rc = -EINVAL; + goto end; + } else if ((left_rect.y != right_rect.y) || + (left_rect.h != right_rect.h)) { + SDE_ERROR( + "source split at stage: %d. invalid yoff/height: l_y: %d r_y: %d l_h: %d r_h: %d\n", + stage, left_rect.y, right_rect.y, + left_rect.h, right_rect.h); + rc = -EINVAL; + goto end; + } + } + + end: return rc; } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index cb8b349e72c784f63f3c92b05b2b13946c3b206a..7393de0b7b1fb17efbbe03a17d9b431a6ff3ef36 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -600,6 +600,12 @@ static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc, SDE_ATRACE_BEGIN("encoder_underrun_callback"); atomic_inc(&phy_enc->underrun_cnt); SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt)); + + trace_sde_encoder_underrun(DRMID(drm_enc), + atomic_read(&phy_enc->underrun_cnt)); + SDE_DBG_CTRL("stop_ftrace"); + SDE_DBG_CTRL("panic_underrun"); + SDE_ATRACE_END("encoder_underrun_callback"); } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index a22287421360af556771b2f7fba6206f470908d0..2f89c571fcfc4934f26980316e29180ef45ae473 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -281,23 +281,40 @@ static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) { struct sde_encoder_phys_vid *vid_enc = arg; struct sde_encoder_phys *phys_enc; + struct sde_hw_ctl *hw_ctl; unsigned long lock_flags; - int new_cnt; + u32 flush_register = 0; + int new_cnt = -1, old_cnt = -1; if (!vid_enc) return; phys_enc = &vid_enc->base; + hw_ctl = phys_enc->hw_ctl; + if (phys_enc->parent_ops.handle_vblank_virt) phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent, phys_enc); + old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt); + + /* + * only decrement the pending flush count if we've actually flushed + * hardware. 
due to sw irq latency, vblank may have already happened + * so we need to double-check with hw that it accepted the flush bits + */ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); - new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); - SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0, - new_cnt); + if (hw_ctl && hw_ctl->ops.get_flush_register) + flush_register = hw_ctl->ops.get_flush_register(hw_ctl); + + if (flush_register == 0) + new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, + -1, 0); spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0, + old_cnt, new_cnt, flush_register); + /* Signal any waiting atomic commit thread */ wake_up_all(&phys_enc->pending_kickoff_wq); } @@ -316,10 +333,24 @@ static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx) phys_enc); } +static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc) +{ + enum sde_rm_topology_name topology; + + if (!phys_enc) + return false; + + topology = sde_connector_get_topology_name(phys_enc->connector); + if (topology == SDE_RM_TOPOLOGY_PPSPLIT) + return true; + + return false; +} + static bool sde_encoder_phys_vid_needs_single_flush( struct sde_encoder_phys *phys_enc) { - return phys_enc && phys_enc->split_role != ENC_ROLE_SOLO; + return phys_enc && _sde_encoder_phys_is_ppsplit(phys_enc); } static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc, @@ -657,7 +688,7 @@ static int sde_encoder_phys_vid_wait_for_vblank( KICKOFF_TIMEOUT_MS); if (ret <= 0) { irq_status = sde_core_irq_read(phys_enc->sde_kms, - INTR_IDX_VSYNC, true); + vid_enc->irq_idx[INTR_IDX_VSYNC], true); if (irq_status) { SDE_EVT32(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0); diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c index 2187d221a352e9a341af3cf796f05c8aa5c164d7..340cba536367e7bcdfad28e3ebd87d14c46d58ac 100644 --- a/drivers/gpu/drm/msm/sde/sde_formats.c +++ b/drivers/gpu/drm/msm/sde/sde_formats.c @@ -22,6 +22,11 @@ #define SDE_UBWC_META_BLOCK_SIZE 256 #define SDE_UBWC_PLANE_SIZE_ALIGNMENT 4096 +#define SDE_TILE_HEIGHT_DEFAULT 1 +#define SDE_TILE_HEIGHT_TILED 4 +#define SDE_TILE_HEIGHT_UBWC 4 +#define SDE_TILE_HEIGHT_NV12 8 + #define SDE_MAX_IMG_WIDTH 0x3FFF #define SDE_MAX_IMG_HEIGHT 0x3FFF @@ -48,9 +53,30 @@ bp, flg, fm, np) \ .bpp = bp, \ .fetch_mode = fm, \ .flag = {(flg)}, \ - .num_planes = np \ + .num_planes = np, \ + .tile_height = SDE_TILE_HEIGHT_DEFAULT \ } +#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \ +alpha, bp, flg, fm, np, th) \ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = SDE_PLANE_INTERLEAVED, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), (e2), (e3) }, \ + .bits = { g, b, r, a }, \ + .chroma_sample = SDE_CHROMA_RGB, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = uc, \ + .bpp = bp, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ + .num_planes = np, \ + .tile_height = th \ +} + + #define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \ alpha, chroma, count, bp, flg, fm, np) \ { \ @@ -66,7 +92,8 @@ alpha, chroma, count, bp, flg, fm, np) \ .bpp = bp, \ .fetch_mode = fm, \ .flag = {(flg)}, \ - .num_planes = np \ + .num_planes = np, \ + .tile_height = SDE_TILE_HEIGHT_DEFAULT \ } #define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \ @@ -83,7 +110,27 @@ alpha, chroma, count, bp, flg, fm, np) \ .bpp = 2, \ 
.fetch_mode = fm, \ .flag = {(flg)}, \ - .num_planes = np \ + .num_planes = np, \ + .tile_height = SDE_TILE_HEIGHT_DEFAULT \ +} + +#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \ +flg, fm, np, th) \ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = SDE_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = false, \ + .element = { (e0), (e1), 0, 0 }, \ + .bits = { g, b, r, a }, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ + .num_planes = np, \ + .tile_height = th \ } #define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\ @@ -100,9 +147,30 @@ alpha, chroma, count, bp, flg, fm, np) \ .bpp = 2, \ .fetch_mode = fm, \ .flag = {(flg)}, \ - .num_planes = np \ + .num_planes = np, \ + .tile_height = SDE_TILE_HEIGHT_DEFAULT \ } +#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \ +flg, fm, np, th) \ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = SDE_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = false, \ + .element = { (e0), (e1), 0, 0 }, \ + .bits = { g, b, r, a }, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 1, \ + .unpack_tight = 0, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ + .num_planes = np, \ + .tile_height = th \ +} + + #define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \ flg, fm, np) \ { \ @@ -118,7 +186,8 @@ flg, fm, np) \ .bpp = bp, \ .fetch_mode = fm, \ .flag = {(flg)}, \ - .num_planes = np \ + .num_planes = np, \ + .tile_height = SDE_TILE_HEIGHT_DEFAULT \ } /* @@ -414,75 +483,99 @@ static const struct sde_format sde_format_map[] = { * These tables hold the A5x tile formats supported. */ static const struct sde_format sde_format_map_tile[] = { - INTERLEAVED_RGB_FMT(ARGB8888, + INTERLEAVED_RGB_FMT_TILED(BGR565, + 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, + false, 2, 0, + SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED), + + INTERLEAVED_RGB_FMT_TILED(ARGB8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, true, 4, 0, - SDE_FETCH_UBWC, 1), + SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED), - INTERLEAVED_RGB_FMT(ABGR8888, + INTERLEAVED_RGB_FMT_TILED(ABGR8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, true, 4, 0, - SDE_FETCH_UBWC, 1), + SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED), - INTERLEAVED_RGB_FMT(RGBA8888, + INTERLEAVED_RGB_FMT_TILED(XBGR8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 4, 0, + SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED), + + INTERLEAVED_RGB_FMT_TILED(RGBA8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, true, 4, 0, - SDE_FETCH_UBWC, 1), + SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED), - INTERLEAVED_RGB_FMT(BGRA8888, + INTERLEAVED_RGB_FMT_TILED(BGRA8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, true, 4, 0, - SDE_FETCH_UBWC, 1), + SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED), - INTERLEAVED_RGB_FMT(BGRX8888, + INTERLEAVED_RGB_FMT_TILED(BGRX8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, false, 4, 0, - SDE_FETCH_UBWC, 1), + SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED), - INTERLEAVED_RGB_FMT(XRGB8888, + INTERLEAVED_RGB_FMT_TILED(XRGB8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, false, 4, 0, - 
SDE_FETCH_UBWC, 1), + SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED), - INTERLEAVED_RGB_FMT(RGBX8888, + INTERLEAVED_RGB_FMT_TILED(RGBX8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, false, 4, 0, - SDE_FETCH_UBWC, 1), + SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED), - PSEUDO_YUV_FMT(NV12, + INTERLEAVED_RGB_FMT_TILED(ABGR2101010, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, SDE_FORMAT_FLAG_DX, + SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED), + + INTERLEAVED_RGB_FMT_TILED(XBGR2101010, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, SDE_FORMAT_FLAG_DX, + SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED), + + PSEUDO_YUV_FMT_TILED(NV12, 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C1_B_Cb, C2_R_Cr, SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV, - SDE_FETCH_UBWC, 2), + SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12), - PSEUDO_YUV_FMT(NV21, + PSEUDO_YUV_FMT_TILED(NV21, 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C1_B_Cb, SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV, - SDE_FETCH_UBWC, 2), + SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12), }; static const struct sde_format sde_format_map_p010_tile[] = { - PSEUDO_YUV_FMT_LOOSE(NV12, + PSEUDO_YUV_FMT_LOOSE_TILED(NV12, 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C1_B_Cb, C2_R_Cr, SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX), - SDE_FETCH_UBWC, 2), + SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12), }; static const struct sde_format sde_format_map_tp10_tile[] = { - PSEUDO_YUV_FMT(NV12, + PSEUDO_YUV_FMT_TILED(NV12, 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C1_B_Cb, C2_R_Cr, SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX), - SDE_FETCH_UBWC, 2), + SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12), }; /* @@ -492,42 +585,42 @@ static const struct sde_format sde_format_map_tp10_tile[] = { * the data will be passed by user-space. 
*/ static const struct sde_format sde_format_map_ubwc[] = { - INTERLEAVED_RGB_FMT(BGR565, + INTERLEAVED_RGB_FMT_TILED(BGR565, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, false, 2, SDE_FORMAT_FLAG_COMPRESSED, - SDE_FETCH_UBWC, 2), + SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC), - INTERLEAVED_RGB_FMT(ABGR8888, + INTERLEAVED_RGB_FMT_TILED(ABGR8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, true, 4, SDE_FORMAT_FLAG_COMPRESSED, - SDE_FETCH_UBWC, 2), + SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC), - INTERLEAVED_RGB_FMT(XBGR8888, + INTERLEAVED_RGB_FMT_TILED(XBGR8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, false, 4, SDE_FORMAT_FLAG_COMPRESSED, - SDE_FETCH_UBWC, 2), + SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC), - INTERLEAVED_RGB_FMT(ABGR2101010, + INTERLEAVED_RGB_FMT_TILED(ABGR2101010, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED, - SDE_FETCH_UBWC, 2), + SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC), - INTERLEAVED_RGB_FMT(XBGR2101010, + INTERLEAVED_RGB_FMT_TILED(XBGR2101010, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED, - SDE_FETCH_UBWC, 2), + SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC), - PSEUDO_YUV_FMT(NV12, + PSEUDO_YUV_FMT_TILED(NV12, 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C1_B_Cb, C2_R_Cr, SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_COMPRESSED, - SDE_FETCH_UBWC, 4), + SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12), }; static const struct sde_format sde_format_map_p010[] = { @@ -539,21 +632,21 @@ static const struct sde_format sde_format_map_p010[] = { }; static const struct sde_format sde_format_map_p010_ubwc[] = { - PSEUDO_YUV_FMT_LOOSE(NV12, + PSEUDO_YUV_FMT_LOOSE_TILED(NV12, 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C1_B_Cb, C2_R_Cr, SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED), - SDE_FETCH_UBWC, 4), + SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12), }; static const struct sde_format sde_format_map_tp10_ubwc[] = { - PSEUDO_YUV_FMT(NV12, + PSEUDO_YUV_FMT_TILED(NV12, 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C1_B_Cb, C2_R_Cr, SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED), - SDE_FETCH_UBWC, 4), + SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12), }; /* _sde_get_v_h_subsample_rate - Get subsample rates for all formats we support diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index 9c572ce26166027ee65e53496f26bfaa800694b2..ed9a6ea37397d4aec5aedd005dc67bf5f05205f0 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -719,7 +719,8 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, sblk->maxdwnscale = MAX_SSPP_DOWNSCALE; sblk->format_list = plane_formats_yuv; sspp->id = SSPP_VIG0 + *vig_count; - snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id); + snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", + sspp->id - SSPP_VIG0); sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count; sspp->type = SSPP_TYPE_VIG; set_bit(SDE_SSPP_QOS, &sspp->features); @@ -736,7 +737,7 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value, VIG_QSEED_LEN, 0); snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN, - "sspp_scaler%u", sspp->id); + "sspp_scaler%u", sspp->id - SSPP_VIG0); 
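	/*
	 * Note on the sspp naming hunks in this file: sspp->id holds an enum
	 * value, so printing it directly produced block names offset by the
	 * enum base. These hunks subtract SSPP_VIG0 (the first pipe in the
	 * enum) so the generated names are zero-based, e.g. (illustrative
	 * only, the enum itself is defined in the SDE headers):
	 *
	 *	snprintf(name, len, "sspp_%u", sspp->id - SSPP_VIG0);
	 *	// id == SSPP_VIG0 -> "sspp_0", id == SSPP_VIG1 -> "sspp_1", ...
	 */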
} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) { set_bit(SDE_SSPP_SCALER_QSEED3, &sspp->features); sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3; @@ -745,12 +746,12 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value, VIG_QSEED_LEN, 0); snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN, - "sspp_scaler%u", sspp->id); + "sspp_scaler%u", sspp->id - SSPP_VIG0); } sblk->csc_blk.id = SDE_SSPP_CSC; snprintf(sblk->csc_blk.name, SDE_HW_BLK_NAME_LEN, - "sspp_csc%u", sspp->id); + "sspp_csc%u", sspp->id - SSPP_VIG0); if (sde_cfg->csc_type == SDE_SSPP_CSC) { set_bit(SDE_SSPP_CSC, &sspp->features); sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value, @@ -763,7 +764,7 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, sblk->hsic_blk.id = SDE_SSPP_HSIC; snprintf(sblk->hsic_blk.name, SDE_HW_BLK_NAME_LEN, - "sspp_hsic%u", sspp->id); + "sspp_hsic%u", sspp->id - SSPP_VIG0); if (prop_exists[VIG_HSIC_PROP]) { sblk->hsic_blk.base = PROP_VALUE_ACCESS(prop_value, VIG_HSIC_PROP, 0); @@ -775,7 +776,7 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, sblk->memcolor_blk.id = SDE_SSPP_MEMCOLOR; snprintf(sblk->memcolor_blk.name, SDE_HW_BLK_NAME_LEN, - "sspp_memcolor%u", sspp->id); + "sspp_memcolor%u", sspp->id - SSPP_VIG0); if (prop_exists[VIG_MEMCOLOR_PROP]) { sblk->memcolor_blk.base = PROP_VALUE_ACCESS(prop_value, VIG_MEMCOLOR_PROP, 0); @@ -787,7 +788,7 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, sblk->pcc_blk.id = SDE_SSPP_PCC; snprintf(sblk->pcc_blk.name, SDE_HW_BLK_NAME_LEN, - "sspp_pcc%u", sspp->id); + "sspp_pcc%u", sspp->id - SSPP_VIG0); if (prop_exists[VIG_PCC_PROP]) { sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value, VIG_PCC_PROP, 0); @@ -807,7 +808,8 @@ static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg, sblk->maxdwnscale = MAX_SSPP_DOWNSCALE; sblk->format_list = plane_formats; sspp->id = SSPP_RGB0 + *rgb_count; - snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id); + snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", + sspp->id - SSPP_VIG0); sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count; sspp->type = SSPP_TYPE_RGB; set_bit(SDE_SSPP_QOS, &sspp->features); @@ -824,7 +826,7 @@ static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg, sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value, RGB_SCALER_LEN, 0); snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN, - "sspp_scaler%u", sspp->id); + "sspp_scaler%u", sspp->id - SSPP_VIG0); } else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) { set_bit(SDE_SSPP_SCALER_RGB, &sspp->features); sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3; @@ -833,7 +835,7 @@ static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg, sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value, SSPP_SCALE_SIZE, 0); snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN, - "sspp_scaler%u", sspp->id); + "sspp_scaler%u", sspp->id - SSPP_VIG0); } sblk->pcc_blk.id = SDE_SSPP_PCC; @@ -857,7 +859,8 @@ static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg, sblk->maxdwnscale = SSPP_UNITY_SCALE; sblk->format_list = cursor_formats; sspp->id = SSPP_CURSOR0 + *cursor_count; - snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id); + snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", + sspp->id - SSPP_VIG0); sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count; sspp->type = SSPP_TYPE_CURSOR; (*cursor_count)++; @@ -874,7 +877,8 @@ static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg, sspp->id = SSPP_DMA0 + *dma_count; sspp->clk_ctrl 
= SDE_CLK_CTRL_DMA0 + *dma_count; sspp->type = SSPP_TYPE_DMA; - snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id); + snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", + sspp->id - SSPP_VIG0); set_bit(SDE_SSPP_QOS, &sspp->features); (*dma_count)++; snprintf(sspp->name, sizeof(sspp->name), "dma%d", *dma_count-1); @@ -978,8 +982,6 @@ static int sde_sspp_parse_dt(struct device_node *np, set_bit(SDE_SSPP_SRC, &sspp->features); sblk->src_blk.id = SDE_SSPP_SRC; - snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u", - sblk->src_blk.id); of_property_read_string_index(np, sspp_prop[SSPP_TYPE].prop_name, i, &type); @@ -1104,7 +1106,8 @@ static int sde_ctl_parse_dt(struct device_node *np, ctl->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i); ctl->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0); ctl->id = CTL_0 + i; - snprintf(ctl->name, SDE_HW_BLK_NAME_LEN, "ctl_%u", ctl->id); + snprintf(ctl->name, SDE_HW_BLK_NAME_LEN, "ctl_%u", + ctl->id - CTL_0); if (i < MAX_SPLIT_DISPLAY_CTL) set_bit(SDE_CTL_SPLIT_DISPLAY, &ctl->features); @@ -1196,7 +1199,8 @@ static int sde_mixer_parse_dt(struct device_node *np, mixer->base = PROP_VALUE_ACCESS(prop_value, MIXER_OFF, i); mixer->len = PROP_VALUE_ACCESS(prop_value, MIXER_LEN, 0); mixer->id = LM_0 + i; - snprintf(mixer->name, SDE_HW_BLK_NAME_LEN, "lm_%u", mixer->id); + snprintf(mixer->name, SDE_HW_BLK_NAME_LEN, "lm_%u", + mixer->id - LM_0); if (!prop_exists[MIXER_LEN]) mixer->len = DEFAULT_SDE_HW_BLOCK_LEN; @@ -1284,7 +1288,8 @@ static int sde_intf_parse_dt(struct device_node *np, intf->base = PROP_VALUE_ACCESS(prop_value, INTF_OFF, i); intf->len = PROP_VALUE_ACCESS(prop_value, INTF_LEN, 0); intf->id = INTF_0 + i; - snprintf(intf->name, SDE_HW_BLK_NAME_LEN, "intf_%u", intf->id); + snprintf(intf->name, SDE_HW_BLK_NAME_LEN, "intf_%u", + intf->id - INTF_0); if (!prop_exists[INTF_LEN]) intf->len = DEFAULT_SDE_HW_BLOCK_LEN; @@ -1365,7 +1370,8 @@ static int sde_wb_parse_dt(struct device_node *np, wb->base = PROP_VALUE_ACCESS(prop_value, WB_OFF, i); wb->id = WB_0 + PROP_VALUE_ACCESS(prop_value, WB_ID, i); - snprintf(wb->name, SDE_HW_BLK_NAME_LEN, "wb_%u", wb->id); + snprintf(wb->name, SDE_HW_BLK_NAME_LEN, "wb_%u", + wb->id - WB_0); wb->clk_ctrl = SDE_CLK_CTRL_WB0 + PROP_VALUE_ACCESS(prop_value, WB_ID, i); wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i); @@ -1601,7 +1607,8 @@ static int sde_dspp_parse_dt(struct device_node *np, dspp->base = PROP_VALUE_ACCESS(prop_value, DSPP_OFF, i); dspp->len = PROP_VALUE_ACCESS(prop_value, DSPP_SIZE, 0); dspp->id = DSPP_0 + i; - snprintf(dspp->name, SDE_HW_BLK_NAME_LEN, "dspp_%u", dspp->id); + snprintf(dspp->name, SDE_HW_BLK_NAME_LEN, "dspp_%u", + dspp->id - DSPP_0); sblk = kzalloc(sizeof(*sblk), GFP_KERNEL); if (!sblk) { @@ -1671,7 +1678,8 @@ static int sde_cdm_parse_dt(struct device_node *np, cdm = sde_cfg->cdm + i; cdm->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i); cdm->id = CDM_0 + i; - snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u", cdm->id); + snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u", + cdm->id - CDM_0); cdm->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0); /* intf3 and wb2 for cdm block */ @@ -1737,6 +1745,8 @@ static int sde_vbif_parse_dt(struct device_node *np, vbif->base = PROP_VALUE_ACCESS(prop_value, VBIF_OFF, i); vbif->len = vbif_len; vbif->id = VBIF_0 + PROP_VALUE_ACCESS(prop_value, VBIF_ID, i); + snprintf(vbif->name, SDE_HW_BLK_NAME_LEN, "vbif_%u", + vbif->id - VBIF_0); SDE_DEBUG("vbif:%d\n", vbif->id - VBIF_0); @@ -1864,19 +1874,21 @@ static int sde_pp_parse_dt(struct 
device_node *np, pp->base = PROP_VALUE_ACCESS(prop_value, PP_OFF, i); pp->id = PINGPONG_0 + i; - snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u", pp->id); + snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u", + pp->id - PINGPONG_0); pp->len = PROP_VALUE_ACCESS(prop_value, PP_LEN, 0); sblk->te.base = PROP_VALUE_ACCESS(prop_value, TE_OFF, i); sblk->te.id = SDE_PINGPONG_TE; - snprintf(sblk->te.name, SDE_HW_BLK_NAME_LEN, "te_%u", pp->id); + snprintf(sblk->te.name, SDE_HW_BLK_NAME_LEN, "te_%u", + pp->id - PINGPONG_0); set_bit(SDE_PINGPONG_TE, &pp->features); sblk->te2.base = PROP_VALUE_ACCESS(prop_value, TE2_OFF, i); if (sblk->te2.base) { sblk->te2.id = SDE_PINGPONG_TE2; snprintf(sblk->te2.name, SDE_HW_BLK_NAME_LEN, "te2_%u", - pp->id); + pp->id - PINGPONG_0); set_bit(SDE_PINGPONG_TE2, &pp->features); set_bit(SDE_PINGPONG_SPLIT, &pp->features); } @@ -1888,7 +1900,7 @@ static int sde_pp_parse_dt(struct device_node *np, if (sblk->dsc.base) { sblk->dsc.id = SDE_PINGPONG_DSC; snprintf(sblk->dsc.name, SDE_HW_BLK_NAME_LEN, "dsc_%u", - pp->id); + sblk->dsc.id - PINGPONG_0); set_bit(SDE_PINGPONG_DSC, &pp->features); } } @@ -2020,12 +2032,12 @@ static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg) cfg->mdss[0].base = MDSS_BASE_OFFSET; cfg->mdss[0].id = MDP_TOP; snprintf(cfg->mdss[0].name, SDE_HW_BLK_NAME_LEN, "mdss_%u", - cfg->mdss[0].id); + cfg->mdss[0].id - MDP_TOP); cfg->mdp_count = 1; cfg->mdp[0].id = MDP_TOP; snprintf(cfg->mdp[0].name, SDE_HW_BLK_NAME_LEN, "top_%u", - cfg->mdp[0].id); + cfg->mdp[0].id - MDP_TOP); cfg->mdp[0].base = PROP_VALUE_ACCESS(prop_value, SDE_OFF, 0); cfg->mdp[0].len = PROP_VALUE_ACCESS(prop_value, SDE_LEN, 0); if (!prop_exists[SDE_LEN]) diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index 9d9a188849388cf25e981ffdc68ce1b56b39cfe8..81e6bfe6defea5bc71ef0e37e1b70119dc870a49 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -44,7 +44,7 @@ #define SDE_HW_VER_172 SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */ #define SDE_HW_VER_300 SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */ #define SDE_HW_VER_301 SDE_HW_VER(3, 0, 1) /* 8998 v1.1 */ -#define SDE_HW_VER_400 SDE_HW_VER(4, 0, 0) /* msmskunk v1.0 */ +#define SDE_HW_VER_400 SDE_HW_VER(4, 0, 0) /* sdm845 v1.0 */ #define IS_MSMSKUNK_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400) diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c index 5f0a8340aabe3916448138ecef855acb1c087774..f386840bfaf5b8bf0d59821d6bc3bcf340678e21 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ #include "sde_hwio.h" #include "sde_hw_catalog.h" #include "sde_hw_cdm.h" +#include "sde_dbg.h" #define CDM_CSC_10_OPMODE 0x000 #define CDM_CSC_10_BASE 0x004 @@ -296,6 +297,9 @@ struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx, */ sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg); + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c index e9dd2d484bc0fc8791c66fca4350bc98ec786987..81a62f06d608eecbe80d7772e29c9403917b7111 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,6 +13,7 @@ #include <linux/delay.h> #include "sde_hwio.h" #include "sde_hw_ctl.h" +#include "sde_dbg.h" #define CTL_LAYER(lm) \ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004)) @@ -96,6 +97,12 @@ static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx) SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask); } +static inline u32 sde_hw_ctl_get_flush_register(struct sde_hw_ctl *ctx) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + + return SDE_REG_READ(c, CTL_FLUSH); +} static inline uint32_t sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx, enum sde_sspp sspp) @@ -458,6 +465,7 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops, ops->update_pending_flush = sde_hw_ctl_update_pending_flush; ops->get_pending_flush = sde_hw_ctl_get_pending_flush; ops->trigger_flush = sde_hw_ctl_trigger_flush; + ops->get_flush_register = sde_hw_ctl_get_flush_register; ops->trigger_start = sde_hw_ctl_trigger_start; ops->setup_intf_cfg = sde_hw_ctl_intf_cfg; ops->reset = sde_hw_ctl_reset_control; @@ -496,6 +504,9 @@ struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx, c->mixer_count = m->mixer_count; c->mixer_hw_caps = m->mixer; + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h index 2b54912460aea77f7b44d888d76d320f21952b6d..74dbde92639a7720246359655be56db580b4e34a 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -93,6 +93,13 @@ struct sde_hw_ctl_ops { */ void (*trigger_flush)(struct sde_hw_ctl *ctx); + /** + * Read the value of the flush register + * @ctx : ctl path ctx pointer + * @Return: value of the ctl flush register. 
+ */ + u32 (*get_flush_register)(struct sde_hw_ctl *ctx); + /** * Setup ctl_path interface config * @ctx diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c index bcf19a0036b599a7f08b9c7f1c645db1516cf06e..3250e5a75905d7fa163d15dfa37d3cd132be9d97 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,6 +15,7 @@ #include "sde_hw_catalog.h" #include "sde_hw_dspp.h" #include "sde_hw_color_processing.h" +#include "sde_dbg.h" static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp, struct sde_mdss_cfg *m, @@ -112,6 +113,9 @@ struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx, c->cap = cfg; _setup_dspp_ops(c, c->cap->features); + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c index bfa5f3e2a682b7067ac3470452107621105fb333..9d868297c057095925320bfe82f3295b2279e33a 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,6 +13,7 @@ #include "sde_hwio.h" #include "sde_hw_catalog.h" #include "sde_hw_intf.h" +#include "sde_dbg.h" #define INTF_TIMING_ENGINE_EN 0x000 #define INTF_CONFIG 0x004 @@ -325,9 +326,9 @@ struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx, c->mdss = m; _setup_intf_ops(&c->ops, c->cap->features); - /* - * Perform any default initialization for the intf - */ + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c index 0badd5fe1eae8a289521c11261b28335e19ee809..40f877dd081108ead851158c1992708f4f93ed25 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ #include "sde_hwio.h" #include "sde_hw_lm.h" #include "sde_hw_mdss.h" +#include "sde_dbg.h" #define LM_OP_MODE 0x00 #define LM_OUT_SIZE 0x04 @@ -196,9 +197,9 @@ struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx, c->cap = cfg; _setup_mixer_ops(m, &c->ops, c->cap->features); - /* - * Perform any default initialization for the sspp blocks - */ + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h index 92dd829eee3e5611b79bd1b17542cc8e9172b627..05c876e3b685accc19c435435b8f9ae511155967 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -18,6 +18,8 @@ #include "msm_drv.h" +#define SDE_DBG_NAME "sde" + #define SDE_NONE 0 #ifndef SDE_CSC_MATRIX_COEFF_SIZE diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c index 7f5f2c3d0ae0b7e1777d54c5cc1158a5b5fe249c..67dccedd05a2b4a63d54053fe3e77e15fba3bb16 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ #include "sde_hwio.h" #include "sde_hw_catalog.h" #include "sde_hw_pingpong.h" +#include "sde_dbg.h" #define PP_TEAR_CHECK_EN 0x000 #define PP_SYNC_CONFIG_VSYNC 0x004 @@ -160,6 +161,9 @@ struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx, c->pingpong_hw_cap = cfg; _setup_pingpong_ops(&c->ops, c->pingpong_hw_cap->features); + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c index 7bc624eaf80ecf6ea2fd79ecce9bbef418f8b0e0..72235725d5a293d32ca2fd139e0f03e2f19a72a2 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,6 +15,7 @@ #include "sde_hw_lm.h" #include "sde_hw_sspp.h" #include "sde_hw_color_processing.h" +#include "sde_dbg.h" #define SDE_FETCH_CONFIG_RESET_VALUE 0x00000087 @@ -594,10 +595,8 @@ static void _sde_hw_sspp_setup_scaler3(struct sde_hw_pipe *ctx, || !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk) return; - if (!scaler3_cfg->enable) { - SDE_REG_WRITE(&ctx->hw, QSEED3_OP_MODE + idx, 0x0); - return; - } + if (!scaler3_cfg->enable) + goto end; op_mode |= BIT(0); op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16; @@ -607,9 +606,6 @@ static void _sde_hw_sspp_setup_scaler3(struct sde_hw_pipe *ctx, op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24; } - if (!SDE_FORMAT_IS_DX(sspp->layout.format)) - op_mode |= BIT(14); - op_mode |= (scaler3_cfg->blend_cfg & 1) << 31; op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0; @@ -637,10 +633,6 @@ static void _sde_hw_sspp_setup_scaler3(struct sde_hw_pipe *ctx, _sde_hw_sspp_setup_scaler3_lut(ctx, scaler3_cfg); if (ctx->cap->sblk->scaler_blk.version == 0x1002) { - if (sspp->layout.format->alpha_enable) { - op_mode |= BIT(10); - op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30; - } phase_init = ((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) | ((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) | @@ -648,10 +640,6 @@ static void _sde_hw_sspp_setup_scaler3(struct sde_hw_pipe *ctx, ((scaler3_cfg->init_phase_y[1] & 0x3F) << 24); SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT + idx, phase_init); } else { - if (sspp->layout.format->alpha_enable) { - op_mode |= BIT(10); - op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29; - } SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_H + idx, scaler3_cfg->init_phase_x[0] & 0x1FFFFF); SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_V + idx, @@ -682,6 +670,17 @@ static void _sde_hw_sspp_setup_scaler3(struct sde_hw_pipe *ctx, SDE_REG_WRITE(&ctx->hw, QSEED3_DST_SIZE + idx, dst); +end: + if (!SDE_FORMAT_IS_DX(sspp->layout.format)) + op_mode |= BIT(14); + + if (sspp->layout.format->alpha_enable) { + op_mode |= BIT(10); + if (ctx->cap->sblk->scaler_blk.version == 0x1002) + op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30; + else + op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29; + } SDE_REG_WRITE(&ctx->hw, QSEED3_OP_MODE + idx, op_mode); } @@ -937,6 +936,19 @@ struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx, _setup_layer_ops(hw_pipe, hw_pipe->cap->features); hw_pipe->highest_bank_bit = catalog->mdp[0].highest_bank_bit; + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, + hw_pipe->hw.blk_off, + hw_pipe->hw.blk_off + hw_pipe->hw.length, + hw_pipe->hw.xin_id); + + if (cfg->sblk->scaler_blk.len) + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, + cfg->sblk->scaler_blk.name, + hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base, + hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base + + cfg->sblk->scaler_blk.len, + hw_pipe->hw.xin_id); + return hw_pipe; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c index c9759496c4d598e3561eec86e2e36de9789a6feb..0cc4cc6752cf37225bbffe1e399c372fe53d40a7 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_top.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,6 +13,7 @@ #include "sde_hwio.h" #include "sde_hw_catalog.h" #include "sde_hw_top.h" +#include "sde_dbg.h" #define SSPP_SPARE 0x28 @@ -259,9 +260,10 @@ struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx, mdp->cap = cfg; _setup_mdp_ops(&mdp->ops, mdp->cap->features); - /* - * Perform any default initialization for the intf - */ + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, + mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length, + mdp->hw.xin_id); + sde_dbg_set_sde_top_offset(mdp->hw.blk_off); return mdp; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c index c548b21d7d66f47023507bb9f4b0c0163a724fbc..55e78c31471f8744948d6840d9d3131c47d111dc 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,6 +13,7 @@ #include "sde_hwio.h" #include "sde_hw_catalog.h" #include "sde_hw_vbif.h" +#include "sde_dbg.h" #define VBIF_VERSION 0x0000 #define VBIF_CLK_FORCE_CTRL0 0x0008 @@ -157,6 +158,8 @@ struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx, c->cap = cfg; _setup_vbif_ops(&c->ops, c->cap->features); + /* no need to register sub-range in sde dbg, dump entire vbif io base */ + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c index afca7152db0899ebe2b7e55c25308dea8477078a..9a44f7215a5f78b5d2c8625f68bc3c7d5a8fb5b9 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,6 +15,7 @@ #include "sde_hw_catalog.h" #include "sde_hw_wb.h" #include "sde_formats.h" +#include "sde_dbg.h" #define WB_DST_FORMAT 0x000 #define WB_DST_OP_MODE 0x004 @@ -216,6 +217,9 @@ struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx, c->highest_bank_bit = m->mdp[0].highest_bank_bit; c->hw_mdp = hw_mdp; + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c index eeb7a0002eab6cd14449b5b6dab49125172eb7e1..7864b9fef87b35927cddf8c739bf04121c51f7d9 100644 --- a/drivers/gpu/drm/msm/sde/sde_irq.c +++ b/drivers/gpu/drm/msm/sde/sde_irq.c @@ -19,6 +19,8 @@ #include "sde_irq.h" #include "sde_core_irq.h" +static uint32_t g_sde_irq_status; + irqreturn_t sde_irq(struct msm_kms *kms) { struct sde_kms *sde_kms = to_sde_kms(kms); @@ -27,6 +29,9 @@ irqreturn_t sde_irq(struct msm_kms *kms) sde_kms->hw_intr->ops.get_interrupt_sources(sde_kms->hw_intr, &interrupts); + /* store irq status in case of irq-storm debugging */ + g_sde_irq_status = interrupts; + /* * Taking care of MDP interrupt */ @@ -40,13 +45,30 @@ irqreturn_t sde_irq(struct msm_kms *kms) */ while (interrupts) { irq_hw_number_t hwirq = fls(interrupts) - 1; + unsigned int mapping; + int rc; + + mapping = irq_find_mapping(sde_kms->irq_controller.domain, + hwirq); + if (mapping == 0) { + SDE_EVT32(hwirq, SDE_EVTLOG_ERROR); + goto error; + } + + rc = generic_handle_irq(mapping); + if (rc < 0) { + SDE_EVT32(hwirq, mapping, rc, SDE_EVTLOG_ERROR); + goto error; + } - generic_handle_irq(irq_find_mapping( - sde_kms->irq_controller.domain, hwirq)); interrupts &= ~(1 << hwirq); } return IRQ_HANDLED; + +error: + /* bad situation, inform irq system, it may disable overall MDSS irq */ + return IRQ_NONE; } void sde_irq_preinstall(struct msm_kms *kms) diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index cdf67c0aa8644a0619e806debbecda4c9f5cac2c..b2227a191673f9f870e259ba8f7b14343dc4dfcd 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -1167,6 +1167,44 @@ fail: return ret; } +static void __iomem *_sde_kms_ioremap(struct platform_device *pdev, + const char *name, unsigned long *out_size) +{ + struct resource *res; + unsigned long size; + void __iomem *ptr; + + if (out_size) + *out_size = 0; + + if (name) + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + else + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!res) { + /* availability depends on platform */ + SDE_DEBUG("failed to get memory resource: %s\n", name); + return NULL; + } + + size = resource_size(res); + + ptr = devm_ioremap_nocache(&pdev->dev, res->start, size); + if (!ptr) { + SDE_ERROR("failed to ioremap: %s\n", name); + return NULL; + } + + SDE_DEBUG("IO:region %s %pK %08lx\n", name, ptr, size); + + if (out_size) + *out_size = size; + + return ptr; +} + + static int sde_kms_hw_init(struct msm_kms *kms) { struct sde_kms *sde_kms; @@ -1193,29 +1231,42 @@ static int sde_kms_hw_init(struct msm_kms *kms) goto end; } - sde_kms->mmio = msm_ioremap(dev->platformdev, "mdp_phys", "SDE"); - if (IS_ERR(sde_kms->mmio)) { - rc = PTR_ERR(sde_kms->mmio); - SDE_ERROR("mdp register memory map failed: %d\n", rc); - sde_kms->mmio = NULL; + sde_kms->mmio = _sde_kms_ioremap(dev->platformdev, "mdp_phys", + &sde_kms->mmio_len); + if (!sde_kms->mmio) { + SDE_ERROR("mdp register memory map failed\n"); goto error; } DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio); - sde_kms->vbif[VBIF_RT] = msm_ioremap(dev->platformdev, - "vbif_phys", "VBIF"); - if (IS_ERR(sde_kms->vbif[VBIF_RT])) { - rc = PTR_ERR(sde_kms->vbif[VBIF_RT]); - SDE_ERROR("vbif register memory map failed: %d\n", rc); - sde_kms->vbif[VBIF_RT] = NULL; + rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio, + sde_kms->mmio_len); + if (rc) + SDE_ERROR("dbg base register kms failed: %d\n", rc); + + sde_kms->vbif[VBIF_RT] = _sde_kms_ioremap(dev->platformdev, "vbif_phys", + &sde_kms->vbif_len[VBIF_RT]); + if (!sde_kms->vbif[VBIF_RT]) { + SDE_ERROR("vbif register memory map failed\n"); goto error; } - sde_kms->vbif[VBIF_NRT] = msm_ioremap(dev->platformdev, - "vbif_nrt_phys", "VBIF_NRT"); - if (IS_ERR(sde_kms->vbif[VBIF_NRT])) { - sde_kms->vbif[VBIF_NRT] = NULL; + rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT], + sde_kms->vbif_len[VBIF_RT]); + if (rc) + SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc); + + sde_kms->vbif[VBIF_NRT] = _sde_kms_ioremap(dev->platformdev, + "vbif_nrt_phys", &sde_kms->vbif_len[VBIF_NRT]); + if (!sde_kms->vbif[VBIF_NRT]) { SDE_DEBUG("VBIF NRT is not defined"); + } else { + rc = sde_dbg_reg_register_base("vbif_nrt", + sde_kms->vbif[VBIF_NRT], + sde_kms->vbif_len[VBIF_NRT]); + if (rc) + SDE_ERROR("dbg base register vbif_nrt failed: %d\n", + rc); } sde_kms->core_client = sde_power_client_create(&priv->phandle, "core"); @@ -1245,6 +1296,8 @@ static int sde_kms_hw_init(struct msm_kms *kms) goto power_error; } + sde_dbg_init_dbg_buses(sde_kms->core_rev); + rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio, sde_kms->dev); if (rc) { diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index fa0288b89b91c4ca15c4d258d947f4c330a5b349..961875d409b21a02abe3b911e5fe630dbab0b3fe 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -134,6 +134,7 @@ struct sde_kms { /* io/register spaces: */ void __iomem *mmio, *vbif[VBIF_MAX]; + unsigned long mmio_len, vbif_len[VBIF_MAX]; struct regulator *vdd; struct regulator *mmagic; @@ -369,6 +370,49 @@ void sde_kms_info_append_format(struct sde_kms_info *info, */ void sde_kms_info_stop(struct sde_kms_info *info); +/** + * sde_kms_rect_intersect - intersect two rectangles + * @r1: first rectangle + * @r2: scissor rectangle + * @result: result rectangle, all 0's on no intersection found + */ +void sde_kms_rect_intersect(const struct sde_rect *r1, + const struct sde_rect *r2, + struct sde_rect *result); + +/** + * sde_kms_rect_is_equal - compares two rects + * @r1: rect value to compare + * @r2: rect value to compare + * + * Returns 1 if the rects are same, 0 otherwise. + */ +static inline bool sde_kms_rect_is_equal(struct sde_rect *r1, + struct sde_rect *r2) +{ + if ((!r1 && r2) || (r1 && !r2)) + return false; + + if (!r1 && !r2) + return true; + + return r1->x == r2->x && r1->y == r2->y && r1->w == r2->w && + r1->h == r2->h; +} + +/** + * sde_kms_rect_is_null - returns true if the width or height of a rect is 0 + * @rect: rectangle to check for zero size + * @Return: True if width or height of rectangle is 0 + */ +static inline bool sde_kms_rect_is_null(const struct sde_rect *r) +{ + if (!r) + return true; + + return (!r->w || !r->h); +} + /** * Vblank enable/disable functions */ diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c index 6e29c09deb403b134cd1221ad6c33bcd77550e67..30e12c96953818a7b03d38b6f41d3eff2f019241 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms_utils.c +++ b/drivers/gpu/drm/msm/sde/sde_kms_utils.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -151,3 +151,27 @@ void sde_kms_info_stop(struct sde_kms_info *info) info->len = info->staged_len + len; } } + +void sde_kms_rect_intersect(const struct sde_rect *r1, + const struct sde_rect *r2, + struct sde_rect *result) +{ + int l, t, r, b; + + if (!r1 || !r2 || !result) + return; + + l = max(r1->x, r2->x); + t = max(r1->y, r2->y); + r = min((r1->x + r1->w), (r2->x + r2->w)); + b = min((r1->y + r1->h), (r2->y + r2->h)); + + if (r < l || b < t) { + memset(result, 0, sizeof(*result)); + } else { + result->x = l; + result->y = t; + result->w = r - l; + result->h = b - t; + } +} diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h index 2a4e6b59a08c3c58ef73aeb60b830515cacff3f9..d28562eabccbb2cfcfdeaf42b699871c27e34c04 100644 --- a/drivers/gpu/drm/msm/sde/sde_trace.h +++ b/drivers/gpu/drm/msm/sde/sde_trace.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -125,6 +125,22 @@ TRACE_EVENT(sde_cmd_release_bw, TP_printk("crtc:%d", __entry->crtc_id) ); +TRACE_EVENT(sde_encoder_underrun, + TP_PROTO(u32 enc_id, u32 underrun_cnt), + TP_ARGS(enc_id, underrun_cnt), + TP_STRUCT__entry( + __field(u32, enc_id) + __field(u32, underrun_cnt) + ), + TP_fast_assign( + __entry->enc_id = enc_id; + __entry->underrun_cnt = underrun_cnt; + + ), + TP_printk("enc:%d underrun_cnt:%d", __entry->enc_id, + __entry->underrun_cnt) +); + TRACE_EVENT(sde_mark_write, TP_PROTO(int pid, const char *name, bool trace_begin), TP_ARGS(pid, name, trace_begin), diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c new file mode 100644 index 0000000000000000000000000000000000000000..a2cfaf176e0b9cfe3ef9cf9263e1da30e4d88a07 --- /dev/null +++ b/drivers/gpu/drm/msm/sde_dbg.c @@ -0,0 +1,2283 @@ +/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ + +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/ktime.h> +#include <linux/debugfs.h> +#include <linux/uaccess.h> +#include <linux/dma-buf.h> +#include <linux/slab.h> +#include <linux/list_sort.h> + +#include "sde_dbg.h" +#include "sde/sde_hw_catalog.h" + +#define SDE_DBG_BASE_MAX 10 + +#define DEFAULT_PANIC 1 +#define DEFAULT_REGDUMP SDE_DBG_DUMP_IN_MEM +#define DEFAULT_DBGBUS_SDE SDE_DBG_DUMP_IN_MEM +#define DEFAULT_DBGBUS_VBIFRT SDE_DBG_DUMP_IN_MEM +#define DEFAULT_BASE_REG_CNT 0x100 +#define GROUP_BYTES 4 +#define ROW_BYTES 16 +#define RANGE_NAME_LEN 40 +#define REG_BASE_NAME_LEN 80 + +#define DBGBUS_FLAGS_DSPP BIT(0) +#define DBGBUS_DSPP_STATUS 0x34C + +#define DBGBUS_NAME_SDE "sde" +#define DBGBUS_NAME_VBIF_RT "vbif_rt" + +/* offsets from sde top address for the debug buses */ +#define DBGBUS_SSPP0 0x188 +#define DBGBUS_SSPP1 0x298 +#define DBGBUS_DSPP 0x348 +#define DBGBUS_PERIPH 0x418 + +#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0)) + +/* following offsets are with respect to MDP VBIF base for DBG BUS access */ +#define MMSS_VBIF_CLKON 0x4 +#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210 +#define MMSS_VBIF_TEST_BUS_OUT 0x230 + +/* print debug ranges in groups of 4 u32s */ +#define REG_DUMP_ALIGN 16 +#define DBG_CTRL_STOP_FTRACE BIT(0) +#define DBG_CTRL_PANIC_UNDERRUN BIT(1) +#define DBG_CTRL_MAX BIT(2) + +/** + * struct sde_dbg_reg_offset - tracking for start and end of region + * @start: start offset + * @end: end offset + */ +struct sde_dbg_reg_offset { + u32 start; + u32 end; +}; + +/** + * struct sde_dbg_reg_range - register dumping named sub-range + * @head: head of this node + * @reg_dump: address for the mem dump + * @range_name: name of this range + * @offset: offsets for range to dump + * @xin_id: client xin id + */ +struct sde_dbg_reg_range { + struct list_head head; + u32 *reg_dump; + char range_name[RANGE_NAME_LEN]; + struct sde_dbg_reg_offset offset; + uint32_t xin_id; +}; + +/** + *
struct sde_dbg_reg_base - register region base. + * It may have sub-ranges: sub-ranges are used for dumping, + * or it may not have sub-ranges: then dumping is base -> max_offset + * @reg_base_head: head of this node + * @sub_range_list: head to the list with dump ranges + * @name: register base name + * @base: base pointer + * @off: cached offset of region for manual register dumping + * @cnt: cached range of region for manual register dumping + * @max_offset: length of region + * @buf: buffer used for manual register dumping + * @buf_len: buffer length used for manual register dumping + * @reg_dump: address for the mem dump if no ranges used + */ +struct sde_dbg_reg_base { + struct list_head reg_base_head; + struct list_head sub_range_list; + char name[REG_BASE_NAME_LEN]; + void __iomem *base; + size_t off; + size_t cnt; + size_t max_offset; + char *buf; + size_t buf_len; + u32 *reg_dump; +}; + +struct sde_debug_bus_entry { + u32 wr_addr; + u32 block_id; + u32 test_id; +}; + +struct vbif_debug_bus_entry { + u32 disable_bus_addr; + u32 block_bus_addr; + u32 bit_offset; + u32 block_cnt; + u32 test_pnt_start; + u32 test_pnt_cnt; +}; + +struct sde_dbg_debug_bus_common { + char *name; + u32 enable_mask; + bool include_in_deferred_work; + u32 flags; + u32 entries_size; + u32 *dumped_content; +}; + +struct sde_dbg_sde_debug_bus { + struct sde_dbg_debug_bus_common cmn; + struct sde_debug_bus_entry *entries; + u32 top_blk_off; +}; + +struct sde_dbg_vbif_debug_bus { + struct sde_dbg_debug_bus_common cmn; + struct vbif_debug_bus_entry *entries; +}; + +/** + * struct sde_dbg_base - global sde debug base structure + * @evtlog: event log instance + * @reg_base_list: list of register dumping regions + * @root: base debugfs root + * @dev: device pointer + * @mutex: mutex to serialize access to dumps, debugfs access + * @power_ctrl: callback structure for enabling power for reading hw registers + * @req_dump_blks: list of blocks requested for dumping + * @panic_on_err: whether to kernel panic after triggering dump via debugfs + * @dump_work: work struct for deferring register dump work to separate thread + * @work_panic: panic after dump if internal user passed "panic" special region + * @enable_reg_dump: whether to dump registers into memory, kernel log, or both + * @dbgbus_sde: debug bus structure for the sde + * @dbgbus_vbif_rt: debug bus structure for the realtime vbif + * @dump_all: dump all entries in register dump + */ +static struct sde_dbg_base { + struct sde_dbg_evtlog *evtlog; + struct list_head reg_base_list; + struct dentry *root; + struct device *dev; + struct mutex mutex; + struct sde_dbg_power_ctrl power_ctrl; + + struct sde_dbg_reg_base *req_dump_blks[SDE_DBG_BASE_MAX]; + + u32 panic_on_err; + struct work_struct dump_work; + bool work_panic; + u32 enable_reg_dump; + + struct sde_dbg_sde_debug_bus dbgbus_sde; + struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt; + bool dump_all; + u32 debugfs_ctrl; +} sde_dbg_base; + +/* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */ +struct sde_dbg_evtlog *sde_dbg_base_evtlog; + +static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { + + /* Unpack 0 sspp 0*/ + { DBGBUS_SSPP0, 50, 2 }, + { DBGBUS_SSPP0, 60, 2 }, + { DBGBUS_SSPP0, 70, 2 }, + { DBGBUS_SSPP0, 85, 2 }, + + /* Unpack 0 sspp 1*/ + { DBGBUS_SSPP1, 50, 2 }, + { DBGBUS_SSPP1, 60, 2 }, + { DBGBUS_SSPP1, 70, 2 }, + { DBGBUS_SSPP1, 85, 2 }, + + /* scheduler */ + { DBGBUS_DSPP, 130, 0 }, + { DBGBUS_DSPP, 130, 1 }, + { DBGBUS_DSPP, 130, 2 }, + { DBGBUS_DSPP, 130, 3 }, + { DBGBUS_DSPP,
130, 4 }, + { DBGBUS_DSPP, 130, 5 }, + + /* qseed */ + { DBGBUS_SSPP0, 6, 0}, + { DBGBUS_SSPP0, 6, 1}, + { DBGBUS_SSPP0, 26, 0}, + { DBGBUS_SSPP0, 26, 1}, + { DBGBUS_SSPP1, 6, 0}, + { DBGBUS_SSPP1, 6, 1}, + { DBGBUS_SSPP1, 26, 0}, + { DBGBUS_SSPP1, 26, 1}, + + /* scale */ + { DBGBUS_SSPP0, 16, 0}, + { DBGBUS_SSPP0, 16, 1}, + { DBGBUS_SSPP0, 36, 0}, + { DBGBUS_SSPP0, 36, 1}, + { DBGBUS_SSPP1, 16, 0}, + { DBGBUS_SSPP1, 16, 1}, + { DBGBUS_SSPP1, 36, 0}, + { DBGBUS_SSPP1, 36, 1}, + + /* fetch sspp0 */ + + /* vig 0 */ + { DBGBUS_SSPP0, 0, 0 }, + { DBGBUS_SSPP0, 0, 1 }, + { DBGBUS_SSPP0, 0, 2 }, + { DBGBUS_SSPP0, 0, 3 }, + { DBGBUS_SSPP0, 0, 4 }, + { DBGBUS_SSPP0, 0, 5 }, + { DBGBUS_SSPP0, 0, 6 }, + { DBGBUS_SSPP0, 0, 7 }, + + { DBGBUS_SSPP0, 1, 0 }, + { DBGBUS_SSPP0, 1, 1 }, + { DBGBUS_SSPP0, 1, 2 }, + { DBGBUS_SSPP0, 1, 3 }, + { DBGBUS_SSPP0, 1, 4 }, + { DBGBUS_SSPP0, 1, 5 }, + { DBGBUS_SSPP0, 1, 6 }, + { DBGBUS_SSPP0, 1, 7 }, + + { DBGBUS_SSPP0, 2, 0 }, + { DBGBUS_SSPP0, 2, 1 }, + { DBGBUS_SSPP0, 2, 2 }, + { DBGBUS_SSPP0, 2, 3 }, + { DBGBUS_SSPP0, 2, 4 }, + { DBGBUS_SSPP0, 2, 5 }, + { DBGBUS_SSPP0, 2, 6 }, + { DBGBUS_SSPP0, 2, 7 }, + + { DBGBUS_SSPP0, 4, 0 }, + { DBGBUS_SSPP0, 4, 1 }, + { DBGBUS_SSPP0, 4, 2 }, + { DBGBUS_SSPP0, 4, 3 }, + { DBGBUS_SSPP0, 4, 4 }, + { DBGBUS_SSPP0, 4, 5 }, + { DBGBUS_SSPP0, 4, 6 }, + { DBGBUS_SSPP0, 4, 7 }, + + { DBGBUS_SSPP0, 5, 0 }, + { DBGBUS_SSPP0, 5, 1 }, + { DBGBUS_SSPP0, 5, 2 }, + { DBGBUS_SSPP0, 5, 3 }, + { DBGBUS_SSPP0, 5, 4 }, + { DBGBUS_SSPP0, 5, 5 }, + { DBGBUS_SSPP0, 5, 6 }, + { DBGBUS_SSPP0, 5, 7 }, + + /* vig 2 */ + { DBGBUS_SSPP0, 20, 0 }, + { DBGBUS_SSPP0, 20, 1 }, + { DBGBUS_SSPP0, 20, 2 }, + { DBGBUS_SSPP0, 20, 3 }, + { DBGBUS_SSPP0, 20, 4 }, + { DBGBUS_SSPP0, 20, 5 }, + { DBGBUS_SSPP0, 20, 6 }, + { DBGBUS_SSPP0, 20, 7 }, + + { DBGBUS_SSPP0, 21, 0 }, + { DBGBUS_SSPP0, 21, 1 }, + { DBGBUS_SSPP0, 21, 2 }, + { DBGBUS_SSPP0, 21, 3 }, + { DBGBUS_SSPP0, 21, 4 }, + { DBGBUS_SSPP0, 21, 5 }, + { DBGBUS_SSPP0, 21, 6 }, + { DBGBUS_SSPP0, 21, 7 }, + + { DBGBUS_SSPP0, 22, 0 }, + { DBGBUS_SSPP0, 22, 1 }, + { DBGBUS_SSPP0, 22, 2 }, + { DBGBUS_SSPP0, 22, 3 }, + { DBGBUS_SSPP0, 22, 4 }, + { DBGBUS_SSPP0, 22, 5 }, + { DBGBUS_SSPP0, 22, 6 }, + { DBGBUS_SSPP0, 22, 7 }, + + { DBGBUS_SSPP0, 24, 0 }, + { DBGBUS_SSPP0, 24, 1 }, + { DBGBUS_SSPP0, 24, 2 }, + { DBGBUS_SSPP0, 24, 3 }, + { DBGBUS_SSPP0, 24, 4 }, + { DBGBUS_SSPP0, 24, 5 }, + { DBGBUS_SSPP0, 24, 6 }, + { DBGBUS_SSPP0, 24, 7 }, + + { DBGBUS_SSPP0, 25, 0 }, + { DBGBUS_SSPP0, 25, 1 }, + { DBGBUS_SSPP0, 25, 2 }, + { DBGBUS_SSPP0, 25, 3 }, + { DBGBUS_SSPP0, 25, 4 }, + { DBGBUS_SSPP0, 25, 5 }, + { DBGBUS_SSPP0, 25, 6 }, + { DBGBUS_SSPP0, 25, 7 }, + + /* dma 2 */ + { DBGBUS_SSPP0, 30, 0 }, + { DBGBUS_SSPP0, 30, 1 }, + { DBGBUS_SSPP0, 30, 2 }, + { DBGBUS_SSPP0, 30, 3 }, + { DBGBUS_SSPP0, 30, 4 }, + { DBGBUS_SSPP0, 30, 5 }, + { DBGBUS_SSPP0, 30, 6 }, + { DBGBUS_SSPP0, 30, 7 }, + + { DBGBUS_SSPP0, 31, 0 }, + { DBGBUS_SSPP0, 31, 1 }, + { DBGBUS_SSPP0, 31, 2 }, + { DBGBUS_SSPP0, 31, 3 }, + { DBGBUS_SSPP0, 31, 4 }, + { DBGBUS_SSPP0, 31, 5 }, + { DBGBUS_SSPP0, 31, 6 }, + { DBGBUS_SSPP0, 31, 7 }, + + { DBGBUS_SSPP0, 32, 0 }, + { DBGBUS_SSPP0, 32, 1 }, + { DBGBUS_SSPP0, 32, 2 }, + { DBGBUS_SSPP0, 32, 3 }, + { DBGBUS_SSPP0, 32, 4 }, + { DBGBUS_SSPP0, 32, 5 }, + { DBGBUS_SSPP0, 32, 6 }, + { DBGBUS_SSPP0, 32, 7 }, + + { DBGBUS_SSPP0, 33, 0 }, + { DBGBUS_SSPP0, 33, 1 }, + { DBGBUS_SSPP0, 33, 2 }, + { DBGBUS_SSPP0, 33, 3 }, + { DBGBUS_SSPP0, 33, 4 }, + { DBGBUS_SSPP0, 33, 5 }, + { DBGBUS_SSPP0, 33, 6 }, + { DBGBUS_SSPP0, 33, 7 
}, + + { DBGBUS_SSPP0, 34, 0 }, + { DBGBUS_SSPP0, 34, 1 }, + { DBGBUS_SSPP0, 34, 2 }, + { DBGBUS_SSPP0, 34, 3 }, + { DBGBUS_SSPP0, 34, 4 }, + { DBGBUS_SSPP0, 34, 5 }, + { DBGBUS_SSPP0, 34, 6 }, + { DBGBUS_SSPP0, 34, 7 }, + + { DBGBUS_SSPP0, 35, 0 }, + { DBGBUS_SSPP0, 35, 1 }, + { DBGBUS_SSPP0, 35, 2 }, + { DBGBUS_SSPP0, 35, 3 }, + + /* dma 0 */ + { DBGBUS_SSPP0, 40, 0 }, + { DBGBUS_SSPP0, 40, 1 }, + { DBGBUS_SSPP0, 40, 2 }, + { DBGBUS_SSPP0, 40, 3 }, + { DBGBUS_SSPP0, 40, 4 }, + { DBGBUS_SSPP0, 40, 5 }, + { DBGBUS_SSPP0, 40, 6 }, + { DBGBUS_SSPP0, 40, 7 }, + + { DBGBUS_SSPP0, 41, 0 }, + { DBGBUS_SSPP0, 41, 1 }, + { DBGBUS_SSPP0, 41, 2 }, + { DBGBUS_SSPP0, 41, 3 }, + { DBGBUS_SSPP0, 41, 4 }, + { DBGBUS_SSPP0, 41, 5 }, + { DBGBUS_SSPP0, 41, 6 }, + { DBGBUS_SSPP0, 41, 7 }, + + { DBGBUS_SSPP0, 42, 0 }, + { DBGBUS_SSPP0, 42, 1 }, + { DBGBUS_SSPP0, 42, 2 }, + { DBGBUS_SSPP0, 42, 3 }, + { DBGBUS_SSPP0, 42, 4 }, + { DBGBUS_SSPP0, 42, 5 }, + { DBGBUS_SSPP0, 42, 6 }, + { DBGBUS_SSPP0, 42, 7 }, + + { DBGBUS_SSPP0, 44, 0 }, + { DBGBUS_SSPP0, 44, 1 }, + { DBGBUS_SSPP0, 44, 2 }, + { DBGBUS_SSPP0, 44, 3 }, + { DBGBUS_SSPP0, 44, 4 }, + { DBGBUS_SSPP0, 44, 5 }, + { DBGBUS_SSPP0, 44, 6 }, + { DBGBUS_SSPP0, 44, 7 }, + + { DBGBUS_SSPP0, 45, 0 }, + { DBGBUS_SSPP0, 45, 1 }, + { DBGBUS_SSPP0, 45, 2 }, + { DBGBUS_SSPP0, 45, 3 }, + { DBGBUS_SSPP0, 45, 4 }, + { DBGBUS_SSPP0, 45, 5 }, + { DBGBUS_SSPP0, 45, 6 }, + { DBGBUS_SSPP0, 45, 7 }, + + /* fetch sspp1 */ + /* vig 1 */ + { DBGBUS_SSPP1, 0, 0 }, + { DBGBUS_SSPP1, 0, 1 }, + { DBGBUS_SSPP1, 0, 2 }, + { DBGBUS_SSPP1, 0, 3 }, + { DBGBUS_SSPP1, 0, 4 }, + { DBGBUS_SSPP1, 0, 5 }, + { DBGBUS_SSPP1, 0, 6 }, + { DBGBUS_SSPP1, 0, 7 }, + + { DBGBUS_SSPP1, 1, 0 }, + { DBGBUS_SSPP1, 1, 1 }, + { DBGBUS_SSPP1, 1, 2 }, + { DBGBUS_SSPP1, 1, 3 }, + { DBGBUS_SSPP1, 1, 4 }, + { DBGBUS_SSPP1, 1, 5 }, + { DBGBUS_SSPP1, 1, 6 }, + { DBGBUS_SSPP1, 1, 7 }, + + { DBGBUS_SSPP1, 2, 0 }, + { DBGBUS_SSPP1, 2, 1 }, + { DBGBUS_SSPP1, 2, 2 }, + { DBGBUS_SSPP1, 2, 3 }, + { DBGBUS_SSPP1, 2, 4 }, + { DBGBUS_SSPP1, 2, 5 }, + { DBGBUS_SSPP1, 2, 6 }, + { DBGBUS_SSPP1, 2, 7 }, + + { DBGBUS_SSPP1, 4, 0 }, + { DBGBUS_SSPP1, 4, 1 }, + { DBGBUS_SSPP1, 4, 2 }, + { DBGBUS_SSPP1, 4, 3 }, + { DBGBUS_SSPP1, 4, 4 }, + { DBGBUS_SSPP1, 4, 5 }, + { DBGBUS_SSPP1, 4, 6 }, + { DBGBUS_SSPP1, 4, 7 }, + + { DBGBUS_SSPP1, 5, 0 }, + { DBGBUS_SSPP1, 5, 1 }, + { DBGBUS_SSPP1, 5, 2 }, + { DBGBUS_SSPP1, 5, 3 }, + { DBGBUS_SSPP1, 5, 4 }, + { DBGBUS_SSPP1, 5, 5 }, + { DBGBUS_SSPP1, 5, 6 }, + { DBGBUS_SSPP1, 5, 7 }, + + /* vig 3 */ + { DBGBUS_SSPP1, 20, 0 }, + { DBGBUS_SSPP1, 20, 1 }, + { DBGBUS_SSPP1, 20, 2 }, + { DBGBUS_SSPP1, 20, 3 }, + { DBGBUS_SSPP1, 20, 4 }, + { DBGBUS_SSPP1, 20, 5 }, + { DBGBUS_SSPP1, 20, 6 }, + { DBGBUS_SSPP1, 20, 7 }, + + { DBGBUS_SSPP1, 21, 0 }, + { DBGBUS_SSPP1, 21, 1 }, + { DBGBUS_SSPP1, 21, 2 }, + { DBGBUS_SSPP1, 21, 3 }, + { DBGBUS_SSPP1, 21, 4 }, + { DBGBUS_SSPP1, 21, 5 }, + { DBGBUS_SSPP1, 21, 6 }, + { DBGBUS_SSPP1, 21, 7 }, + + { DBGBUS_SSPP1, 22, 0 }, + { DBGBUS_SSPP1, 22, 1 }, + { DBGBUS_SSPP1, 22, 2 }, + { DBGBUS_SSPP1, 22, 3 }, + { DBGBUS_SSPP1, 22, 4 }, + { DBGBUS_SSPP1, 22, 5 }, + { DBGBUS_SSPP1, 22, 6 }, + { DBGBUS_SSPP1, 22, 7 }, + + { DBGBUS_SSPP1, 24, 0 }, + { DBGBUS_SSPP1, 24, 1 }, + { DBGBUS_SSPP1, 24, 2 }, + { DBGBUS_SSPP1, 24, 3 }, + { DBGBUS_SSPP1, 24, 4 }, + { DBGBUS_SSPP1, 24, 5 }, + { DBGBUS_SSPP1, 24, 6 }, + { DBGBUS_SSPP1, 24, 7 }, + + { DBGBUS_SSPP1, 25, 0 }, + { DBGBUS_SSPP1, 25, 1 }, + { DBGBUS_SSPP1, 25, 2 }, + { DBGBUS_SSPP1, 25, 3 }, + { DBGBUS_SSPP1, 25, 4 }, + { 
DBGBUS_SSPP1, 25, 5 }, + { DBGBUS_SSPP1, 25, 6 }, + { DBGBUS_SSPP1, 25, 7 }, + + /* dma 3 */ + { DBGBUS_SSPP1, 30, 0 }, + { DBGBUS_SSPP1, 30, 1 }, + { DBGBUS_SSPP1, 30, 2 }, + { DBGBUS_SSPP1, 30, 3 }, + { DBGBUS_SSPP1, 30, 4 }, + { DBGBUS_SSPP1, 30, 5 }, + { DBGBUS_SSPP1, 30, 6 }, + { DBGBUS_SSPP1, 30, 7 }, + + { DBGBUS_SSPP1, 31, 0 }, + { DBGBUS_SSPP1, 31, 1 }, + { DBGBUS_SSPP1, 31, 2 }, + { DBGBUS_SSPP1, 31, 3 }, + { DBGBUS_SSPP1, 31, 4 }, + { DBGBUS_SSPP1, 31, 5 }, + { DBGBUS_SSPP1, 31, 6 }, + { DBGBUS_SSPP1, 31, 7 }, + + { DBGBUS_SSPP1, 32, 0 }, + { DBGBUS_SSPP1, 32, 1 }, + { DBGBUS_SSPP1, 32, 2 }, + { DBGBUS_SSPP1, 32, 3 }, + { DBGBUS_SSPP1, 32, 4 }, + { DBGBUS_SSPP1, 32, 5 }, + { DBGBUS_SSPP1, 32, 6 }, + { DBGBUS_SSPP1, 32, 7 }, + + { DBGBUS_SSPP1, 33, 0 }, + { DBGBUS_SSPP1, 33, 1 }, + { DBGBUS_SSPP1, 33, 2 }, + { DBGBUS_SSPP1, 33, 3 }, + { DBGBUS_SSPP1, 33, 4 }, + { DBGBUS_SSPP1, 33, 5 }, + { DBGBUS_SSPP1, 33, 6 }, + { DBGBUS_SSPP1, 33, 7 }, + + { DBGBUS_SSPP1, 34, 0 }, + { DBGBUS_SSPP1, 34, 1 }, + { DBGBUS_SSPP1, 34, 2 }, + { DBGBUS_SSPP1, 34, 3 }, + { DBGBUS_SSPP1, 34, 4 }, + { DBGBUS_SSPP1, 34, 5 }, + { DBGBUS_SSPP1, 34, 6 }, + { DBGBUS_SSPP1, 34, 7 }, + + { DBGBUS_SSPP1, 35, 0 }, + { DBGBUS_SSPP1, 35, 1 }, + { DBGBUS_SSPP1, 35, 2 }, + + /* dma 1 */ + { DBGBUS_SSPP1, 40, 0 }, + { DBGBUS_SSPP1, 40, 1 }, + { DBGBUS_SSPP1, 40, 2 }, + { DBGBUS_SSPP1, 40, 3 }, + { DBGBUS_SSPP1, 40, 4 }, + { DBGBUS_SSPP1, 40, 5 }, + { DBGBUS_SSPP1, 40, 6 }, + { DBGBUS_SSPP1, 40, 7 }, + + { DBGBUS_SSPP1, 41, 0 }, + { DBGBUS_SSPP1, 41, 1 }, + { DBGBUS_SSPP1, 41, 2 }, + { DBGBUS_SSPP1, 41, 3 }, + { DBGBUS_SSPP1, 41, 4 }, + { DBGBUS_SSPP1, 41, 5 }, + { DBGBUS_SSPP1, 41, 6 }, + { DBGBUS_SSPP1, 41, 7 }, + + { DBGBUS_SSPP1, 42, 0 }, + { DBGBUS_SSPP1, 42, 1 }, + { DBGBUS_SSPP1, 42, 2 }, + { DBGBUS_SSPP1, 42, 3 }, + { DBGBUS_SSPP1, 42, 4 }, + { DBGBUS_SSPP1, 42, 5 }, + { DBGBUS_SSPP1, 42, 6 }, + { DBGBUS_SSPP1, 42, 7 }, + + { DBGBUS_SSPP1, 44, 0 }, + { DBGBUS_SSPP1, 44, 1 }, + { DBGBUS_SSPP1, 44, 2 }, + { DBGBUS_SSPP1, 44, 3 }, + { DBGBUS_SSPP1, 44, 4 }, + { DBGBUS_SSPP1, 44, 5 }, + { DBGBUS_SSPP1, 44, 6 }, + { DBGBUS_SSPP1, 44, 7 }, + + { DBGBUS_SSPP1, 45, 0 }, + { DBGBUS_SSPP1, 45, 1 }, + { DBGBUS_SSPP1, 45, 2 }, + { DBGBUS_SSPP1, 45, 3 }, + { DBGBUS_SSPP1, 45, 4 }, + { DBGBUS_SSPP1, 45, 5 }, + { DBGBUS_SSPP1, 45, 6 }, + { DBGBUS_SSPP1, 45, 7 }, + + /* cursor 1 */ + { DBGBUS_SSPP1, 80, 0 }, + { DBGBUS_SSPP1, 80, 1 }, + { DBGBUS_SSPP1, 80, 2 }, + { DBGBUS_SSPP1, 80, 3 }, + { DBGBUS_SSPP1, 80, 4 }, + { DBGBUS_SSPP1, 80, 5 }, + { DBGBUS_SSPP1, 80, 6 }, + { DBGBUS_SSPP1, 80, 7 }, + + { DBGBUS_SSPP1, 81, 0 }, + { DBGBUS_SSPP1, 81, 1 }, + { DBGBUS_SSPP1, 81, 2 }, + { DBGBUS_SSPP1, 81, 3 }, + { DBGBUS_SSPP1, 81, 4 }, + { DBGBUS_SSPP1, 81, 5 }, + { DBGBUS_SSPP1, 81, 6 }, + { DBGBUS_SSPP1, 81, 7 }, + + { DBGBUS_SSPP1, 82, 0 }, + { DBGBUS_SSPP1, 82, 1 }, + { DBGBUS_SSPP1, 82, 2 }, + { DBGBUS_SSPP1, 82, 3 }, + { DBGBUS_SSPP1, 82, 4 }, + { DBGBUS_SSPP1, 82, 5 }, + { DBGBUS_SSPP1, 82, 6 }, + { DBGBUS_SSPP1, 82, 7 }, + + { DBGBUS_SSPP1, 83, 0 }, + { DBGBUS_SSPP1, 83, 1 }, + { DBGBUS_SSPP1, 83, 2 }, + { DBGBUS_SSPP1, 83, 3 }, + { DBGBUS_SSPP1, 83, 4 }, + { DBGBUS_SSPP1, 83, 5 }, + { DBGBUS_SSPP1, 83, 6 }, + { DBGBUS_SSPP1, 83, 7 }, + + { DBGBUS_SSPP1, 84, 0 }, + { DBGBUS_SSPP1, 84, 1 }, + { DBGBUS_SSPP1, 84, 2 }, + { DBGBUS_SSPP1, 84, 3 }, + { DBGBUS_SSPP1, 84, 4 }, + { DBGBUS_SSPP1, 84, 5 }, + { DBGBUS_SSPP1, 84, 6 }, + { DBGBUS_SSPP1, 84, 7 }, + + /* dspp */ + { DBGBUS_DSPP, 13, 0 }, + { DBGBUS_DSPP, 19, 0 }, + { 
DBGBUS_DSPP, 14, 0 }, + { DBGBUS_DSPP, 14, 1 }, + { DBGBUS_DSPP, 14, 3 }, + { DBGBUS_DSPP, 20, 0 }, + { DBGBUS_DSPP, 20, 1 }, + { DBGBUS_DSPP, 20, 3 }, + + /* ppb_0 */ + { DBGBUS_DSPP, 31, 0 }, + { DBGBUS_DSPP, 33, 0 }, + { DBGBUS_DSPP, 35, 0 }, + { DBGBUS_DSPP, 42, 0 }, + + /* ppb_1 */ + { DBGBUS_DSPP, 32, 0 }, + { DBGBUS_DSPP, 34, 0 }, + { DBGBUS_DSPP, 36, 0 }, + { DBGBUS_DSPP, 43, 0 }, + + /* lm_lut */ + { DBGBUS_DSPP, 109, 0 }, + { DBGBUS_DSPP, 105, 0 }, + { DBGBUS_DSPP, 103, 0 }, + + /* tear-check */ + { DBGBUS_PERIPH, 63, 0 }, + { DBGBUS_PERIPH, 64, 0 }, + { DBGBUS_PERIPH, 65, 0 }, + { DBGBUS_PERIPH, 73, 0 }, + { DBGBUS_PERIPH, 74, 0 }, + + /* crossbar */ + { DBGBUS_DSPP, 0, 0}, + + /* rotator */ + { DBGBUS_DSPP, 9, 0}, + + /* blend */ + /* LM0 */ + { DBGBUS_DSPP, 63, 0}, + { DBGBUS_DSPP, 63, 1}, + { DBGBUS_DSPP, 63, 2}, + { DBGBUS_DSPP, 63, 3}, + { DBGBUS_DSPP, 63, 4}, + { DBGBUS_DSPP, 63, 5}, + { DBGBUS_DSPP, 63, 6}, + { DBGBUS_DSPP, 63, 7}, + + { DBGBUS_DSPP, 64, 0}, + { DBGBUS_DSPP, 64, 1}, + { DBGBUS_DSPP, 64, 2}, + { DBGBUS_DSPP, 64, 3}, + { DBGBUS_DSPP, 64, 4}, + { DBGBUS_DSPP, 64, 5}, + { DBGBUS_DSPP, 64, 6}, + { DBGBUS_DSPP, 64, 7}, + + { DBGBUS_DSPP, 65, 0}, + { DBGBUS_DSPP, 65, 1}, + { DBGBUS_DSPP, 65, 2}, + { DBGBUS_DSPP, 65, 3}, + { DBGBUS_DSPP, 65, 4}, + { DBGBUS_DSPP, 65, 5}, + { DBGBUS_DSPP, 65, 6}, + { DBGBUS_DSPP, 65, 7}, + + { DBGBUS_DSPP, 66, 0}, + { DBGBUS_DSPP, 66, 1}, + { DBGBUS_DSPP, 66, 2}, + { DBGBUS_DSPP, 66, 3}, + { DBGBUS_DSPP, 66, 4}, + { DBGBUS_DSPP, 66, 5}, + { DBGBUS_DSPP, 66, 6}, + { DBGBUS_DSPP, 66, 7}, + + { DBGBUS_DSPP, 67, 0}, + { DBGBUS_DSPP, 67, 1}, + { DBGBUS_DSPP, 67, 2}, + { DBGBUS_DSPP, 67, 3}, + { DBGBUS_DSPP, 67, 4}, + { DBGBUS_DSPP, 67, 5}, + { DBGBUS_DSPP, 67, 6}, + { DBGBUS_DSPP, 67, 7}, + + { DBGBUS_DSPP, 68, 0}, + { DBGBUS_DSPP, 68, 1}, + { DBGBUS_DSPP, 68, 2}, + { DBGBUS_DSPP, 68, 3}, + { DBGBUS_DSPP, 68, 4}, + { DBGBUS_DSPP, 68, 5}, + { DBGBUS_DSPP, 68, 6}, + { DBGBUS_DSPP, 68, 7}, + + { DBGBUS_DSPP, 69, 0}, + { DBGBUS_DSPP, 69, 1}, + { DBGBUS_DSPP, 69, 2}, + { DBGBUS_DSPP, 69, 3}, + { DBGBUS_DSPP, 69, 4}, + { DBGBUS_DSPP, 69, 5}, + { DBGBUS_DSPP, 69, 6}, + { DBGBUS_DSPP, 69, 7}, + + /* LM1 */ + { DBGBUS_DSPP, 70, 0}, + { DBGBUS_DSPP, 70, 1}, + { DBGBUS_DSPP, 70, 2}, + { DBGBUS_DSPP, 70, 3}, + { DBGBUS_DSPP, 70, 4}, + { DBGBUS_DSPP, 70, 5}, + { DBGBUS_DSPP, 70, 6}, + { DBGBUS_DSPP, 70, 7}, + + { DBGBUS_DSPP, 71, 0}, + { DBGBUS_DSPP, 71, 1}, + { DBGBUS_DSPP, 71, 2}, + { DBGBUS_DSPP, 71, 3}, + { DBGBUS_DSPP, 71, 4}, + { DBGBUS_DSPP, 71, 5}, + { DBGBUS_DSPP, 71, 6}, + { DBGBUS_DSPP, 71, 7}, + + { DBGBUS_DSPP, 72, 0}, + { DBGBUS_DSPP, 72, 1}, + { DBGBUS_DSPP, 72, 2}, + { DBGBUS_DSPP, 72, 3}, + { DBGBUS_DSPP, 72, 4}, + { DBGBUS_DSPP, 72, 5}, + { DBGBUS_DSPP, 72, 6}, + { DBGBUS_DSPP, 72, 7}, + + { DBGBUS_DSPP, 73, 0}, + { DBGBUS_DSPP, 73, 1}, + { DBGBUS_DSPP, 73, 2}, + { DBGBUS_DSPP, 73, 3}, + { DBGBUS_DSPP, 73, 4}, + { DBGBUS_DSPP, 73, 5}, + { DBGBUS_DSPP, 73, 6}, + { DBGBUS_DSPP, 73, 7}, + + { DBGBUS_DSPP, 74, 0}, + { DBGBUS_DSPP, 74, 1}, + { DBGBUS_DSPP, 74, 2}, + { DBGBUS_DSPP, 74, 3}, + { DBGBUS_DSPP, 74, 4}, + { DBGBUS_DSPP, 74, 5}, + { DBGBUS_DSPP, 74, 6}, + { DBGBUS_DSPP, 74, 7}, + + { DBGBUS_DSPP, 75, 0}, + { DBGBUS_DSPP, 75, 1}, + { DBGBUS_DSPP, 75, 2}, + { DBGBUS_DSPP, 75, 3}, + { DBGBUS_DSPP, 75, 4}, + { DBGBUS_DSPP, 75, 5}, + { DBGBUS_DSPP, 75, 6}, + { DBGBUS_DSPP, 75, 7}, + + { DBGBUS_DSPP, 76, 0}, + { DBGBUS_DSPP, 76, 1}, + { DBGBUS_DSPP, 76, 2}, + { DBGBUS_DSPP, 76, 3}, + { DBGBUS_DSPP, 76, 4}, + { DBGBUS_DSPP, 76, 5}, + 
{ DBGBUS_DSPP, 76, 6}, + { DBGBUS_DSPP, 76, 7}, + + /* LM2 */ + { DBGBUS_DSPP, 77, 0}, + { DBGBUS_DSPP, 77, 1}, + { DBGBUS_DSPP, 77, 2}, + { DBGBUS_DSPP, 77, 3}, + { DBGBUS_DSPP, 77, 4}, + { DBGBUS_DSPP, 77, 5}, + { DBGBUS_DSPP, 77, 6}, + { DBGBUS_DSPP, 77, 7}, + + { DBGBUS_DSPP, 78, 0}, + { DBGBUS_DSPP, 78, 1}, + { DBGBUS_DSPP, 78, 2}, + { DBGBUS_DSPP, 78, 3}, + { DBGBUS_DSPP, 78, 4}, + { DBGBUS_DSPP, 78, 5}, + { DBGBUS_DSPP, 78, 6}, + { DBGBUS_DSPP, 78, 7}, + + { DBGBUS_DSPP, 79, 0}, + { DBGBUS_DSPP, 79, 1}, + { DBGBUS_DSPP, 79, 2}, + { DBGBUS_DSPP, 79, 3}, + { DBGBUS_DSPP, 79, 4}, + { DBGBUS_DSPP, 79, 5}, + { DBGBUS_DSPP, 79, 6}, + { DBGBUS_DSPP, 79, 7}, + + { DBGBUS_DSPP, 80, 0}, + { DBGBUS_DSPP, 80, 1}, + { DBGBUS_DSPP, 80, 2}, + { DBGBUS_DSPP, 80, 3}, + { DBGBUS_DSPP, 80, 4}, + { DBGBUS_DSPP, 80, 5}, + { DBGBUS_DSPP, 80, 6}, + { DBGBUS_DSPP, 80, 7}, + + { DBGBUS_DSPP, 81, 0}, + { DBGBUS_DSPP, 81, 1}, + { DBGBUS_DSPP, 81, 2}, + { DBGBUS_DSPP, 81, 3}, + { DBGBUS_DSPP, 81, 4}, + { DBGBUS_DSPP, 81, 5}, + { DBGBUS_DSPP, 81, 6}, + { DBGBUS_DSPP, 81, 7}, + + { DBGBUS_DSPP, 82, 0}, + { DBGBUS_DSPP, 82, 1}, + { DBGBUS_DSPP, 82, 2}, + { DBGBUS_DSPP, 82, 3}, + { DBGBUS_DSPP, 82, 4}, + { DBGBUS_DSPP, 82, 5}, + { DBGBUS_DSPP, 82, 6}, + { DBGBUS_DSPP, 82, 7}, + + { DBGBUS_DSPP, 83, 0}, + { DBGBUS_DSPP, 83, 1}, + { DBGBUS_DSPP, 83, 2}, + { DBGBUS_DSPP, 83, 3}, + { DBGBUS_DSPP, 83, 4}, + { DBGBUS_DSPP, 83, 5}, + { DBGBUS_DSPP, 83, 6}, + { DBGBUS_DSPP, 83, 7}, + + /* csc */ + { DBGBUS_SSPP0, 7, 0}, + { DBGBUS_SSPP0, 7, 1}, + { DBGBUS_SSPP0, 27, 0}, + { DBGBUS_SSPP0, 27, 1}, + { DBGBUS_SSPP1, 7, 0}, + { DBGBUS_SSPP1, 7, 1}, + { DBGBUS_SSPP1, 27, 0}, + { DBGBUS_SSPP1, 27, 1}, + + /* pcc */ + { DBGBUS_SSPP0, 3, 3}, + { DBGBUS_SSPP0, 23, 3}, + { DBGBUS_SSPP0, 33, 3}, + { DBGBUS_SSPP0, 43, 3}, + { DBGBUS_SSPP1, 3, 3}, + { DBGBUS_SSPP1, 23, 3}, + { DBGBUS_SSPP1, 33, 3}, + { DBGBUS_SSPP1, 43, 3}, + + /* spa */ + { DBGBUS_SSPP0, 8, 0}, + { DBGBUS_SSPP0, 28, 0}, + { DBGBUS_SSPP1, 8, 0}, + { DBGBUS_SSPP1, 28, 0}, + { DBGBUS_DSPP, 13, 0}, + { DBGBUS_DSPP, 19, 0}, + + /* igc */ + { DBGBUS_SSPP0, 9, 0}, + { DBGBUS_SSPP0, 9, 1}, + { DBGBUS_SSPP0, 9, 3}, + { DBGBUS_SSPP0, 29, 0}, + { DBGBUS_SSPP0, 29, 1}, + { DBGBUS_SSPP0, 29, 3}, + { DBGBUS_SSPP0, 17, 0}, + { DBGBUS_SSPP0, 17, 1}, + { DBGBUS_SSPP0, 17, 3}, + { DBGBUS_SSPP0, 37, 0}, + { DBGBUS_SSPP0, 37, 1}, + { DBGBUS_SSPP0, 37, 3}, + { DBGBUS_SSPP0, 46, 0}, + { DBGBUS_SSPP0, 46, 1}, + { DBGBUS_SSPP0, 46, 3}, + + { DBGBUS_SSPP1, 9, 0}, + { DBGBUS_SSPP1, 9, 1}, + { DBGBUS_SSPP1, 9, 3}, + { DBGBUS_SSPP1, 29, 0}, + { DBGBUS_SSPP1, 29, 1}, + { DBGBUS_SSPP1, 29, 3}, + { DBGBUS_SSPP1, 17, 0}, + { DBGBUS_SSPP1, 17, 1}, + { DBGBUS_SSPP1, 17, 3}, + { DBGBUS_SSPP1, 37, 0}, + { DBGBUS_SSPP1, 37, 1}, + { DBGBUS_SSPP1, 37, 3}, + { DBGBUS_SSPP1, 46, 0}, + { DBGBUS_SSPP1, 46, 1}, + { DBGBUS_SSPP1, 46, 3}, + + { DBGBUS_DSPP, 14, 0}, + { DBGBUS_DSPP, 14, 1}, + { DBGBUS_DSPP, 14, 3}, + { DBGBUS_DSPP, 20, 0}, + { DBGBUS_DSPP, 20, 1}, + { DBGBUS_DSPP, 20, 3}, + + { DBGBUS_PERIPH, 60, 0}, +}; + +static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = { + {0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */ + {0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */ + {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */ + {0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */ + {0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */ + {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */ + {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */ +}; + +/** + 
* _sde_dbg_enable_power - use callback to turn power on for hw register access + * @enable: whether to turn power on or off + */ +static inline void _sde_dbg_enable_power(int enable) +{ + if (!sde_dbg_base.power_ctrl.enable_fn) + return; + sde_dbg_base.power_ctrl.enable_fn( + sde_dbg_base.power_ctrl.handle, + sde_dbg_base.power_ctrl.client, + enable); +} + +/** + * _sde_dump_reg - helper function for dumping rotator register set content + * @dump_name: register set name + * @reg_dump_flag: dumping flag controlling in-log/memory dump location + * @base_addr: starting address of io region for calculating offsets to print + * @addr: starting address offset for dumping + * @len_bytes: range of the register set + * @dump_mem: output buffer for memory dump location option + * @from_isr: whether being called from isr context + */ +static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag, + char __iomem *base_addr, char __iomem *addr, size_t len_bytes, + u32 **dump_mem, bool from_isr) +{ + u32 in_log, in_mem, len_align, len_padded; + u32 *dump_addr = NULL; + char __iomem *end_addr; + int i; + + if (!len_bytes) + return; + + in_log = (reg_dump_flag & SDE_DBG_DUMP_IN_LOG); + in_mem = (reg_dump_flag & SDE_DBG_DUMP_IN_MEM); + + pr_debug("%s: reg_dump_flag=%d in_log=%d in_mem=%d\n", + dump_name, reg_dump_flag, in_log, in_mem); + + if (!in_log && !in_mem) + return; + + if (in_log) + dev_info(sde_dbg_base.dev, "%s: start_offset 0x%lx len 0x%zx\n", + dump_name, addr - base_addr, len_bytes); + + len_align = (len_bytes + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN; + len_padded = len_align * REG_DUMP_ALIGN; + end_addr = addr + len_bytes; + + if (in_mem) { + if (dump_mem && !(*dump_mem)) { + phys_addr_t phys = 0; + *dump_mem = dma_alloc_coherent(sde_dbg_base.dev, + len_padded, &phys, GFP_KERNEL); + } + + if (dump_mem && *dump_mem) { + dump_addr = *dump_mem; + dev_info(sde_dbg_base.dev, + "%s: start_addr:0x%pK len:0x%x reg_offset=0x%lx\n", + dump_name, dump_addr, len_padded, + addr - base_addr); + } else { + in_mem = 0; + pr_err("dump_mem: kzalloc fails!\n"); + } + } + + if (!from_isr) + _sde_dbg_enable_power(true); + + for (i = 0; i < len_align; i++) { + u32 x0, x4, x8, xc; + + x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0; + x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0; + x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0; + xc = (addr + 0xc < end_addr) ? 
readl_relaxed(addr + 0xc) : 0; + + if (in_log) + dev_info(sde_dbg_base.dev, + "0x%lx : %08x %08x %08x %08x\n", + addr - base_addr, x0, x4, x8, xc); + + if (dump_addr) { + dump_addr[i * 4] = x0; + dump_addr[i * 4 + 1] = x4; + dump_addr[i * 4 + 2] = x8; + dump_addr[i * 4 + 3] = xc; + } + + addr += REG_DUMP_ALIGN; + } + + if (!from_isr) + _sde_dbg_enable_power(false); +} + +/** + * _sde_dbg_get_dump_range - helper to retrieve dump length for a range node + * @range_node: range node to dump + * @max_offset: max offset of the register base + * @Return: length + */ +static u32 _sde_dbg_get_dump_range(struct sde_dbg_reg_offset *range_node, + size_t max_offset) +{ + u32 length = 0; + + if ((range_node->start > range_node->end) || + (range_node->end > max_offset) || (range_node->start == 0 + && range_node->end == 0)) { + length = max_offset; + } else { + length = range_node->end - range_node->start; + } + + return length; +} + +static int _sde_dump_reg_range_cmp(void *priv, struct list_head *a, + struct list_head *b) +{ + struct sde_dbg_reg_range *ar, *br; + + if (!a || !b) + return 0; + + ar = container_of(a, struct sde_dbg_reg_range, head); + br = container_of(b, struct sde_dbg_reg_range, head); + + return ar->offset.start - br->offset.start; +} + +/** + * _sde_dump_reg_by_ranges - dump ranges or full range of the register blk base + * @dbg: register blk base structure + * @reg_dump_flag: dump target, memory, kernel log, or both + */ +static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg, + u32 reg_dump_flag) +{ + char __iomem *addr; + size_t len; + struct sde_dbg_reg_range *range_node; + + if (!dbg || !dbg->base) { + pr_err("dbg base is null!\n"); + return; + } + + dev_info(sde_dbg_base.dev, "%s:=========%s DUMP=========\n", __func__, + dbg->name); + + /* If there is a list to dump the registers by ranges, use the ranges */ + if (!list_empty(&dbg->sub_range_list)) { + /* sort the list by start address first */ + list_sort(NULL, &dbg->sub_range_list, _sde_dump_reg_range_cmp); + list_for_each_entry(range_node, &dbg->sub_range_list, head) { + len = _sde_dbg_get_dump_range(&range_node->offset, + dbg->max_offset); + addr = dbg->base + range_node->offset.start; + pr_debug("%s: range_base=0x%pK start=0x%x end=0x%x\n", + range_node->range_name, + addr, range_node->offset.start, + range_node->offset.end); + + _sde_dump_reg(range_node->range_name, reg_dump_flag, + dbg->base, addr, len, + &range_node->reg_dump, false); + } + } else { + /* If there is no list to dump ranges, dump all registers */ + dev_info(sde_dbg_base.dev, + "Ranges not found, will dump full registers\n"); + dev_info(sde_dbg_base.dev, "base:0x%pK len:0x%zx\n", dbg->base, + dbg->max_offset); + addr = dbg->base; + len = dbg->max_offset; + _sde_dump_reg(dbg->name, reg_dump_flag, dbg->base, addr, len, + &dbg->reg_dump, false); + } +} + +/** + * _sde_dump_reg_by_blk - dump a named register base region + * @blk_name: register blk name + */ +static void _sde_dump_reg_by_blk(const char *blk_name) +{ + struct sde_dbg_base *dbg_base = &sde_dbg_base; + struct sde_dbg_reg_base *blk_base; + + if (!dbg_base) + return; + + list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) { + if (strlen(blk_base->name) && + !strcmp(blk_base->name, blk_name)) { + _sde_dump_reg_by_ranges(blk_base, + dbg_base->enable_reg_dump); + break; + } + } +} + +/** + * _sde_dump_reg_all - dump all register regions + */ +static void _sde_dump_reg_all(void) +{ + struct sde_dbg_base *dbg_base = &sde_dbg_base; + struct sde_dbg_reg_base *blk_base; + + if 
(!dbg_base) + return; + + list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) + if (strlen(blk_base->name)) + _sde_dump_reg_by_blk(blk_base->name); +} + +/** + * _sde_dump_get_blk_addr - retrieve register block address by name + * @blk_name: register blk name + * @Return: register blk base, or NULL + */ +static struct sde_dbg_reg_base *_sde_dump_get_blk_addr(const char *blk_name) +{ + struct sde_dbg_base *dbg_base = &sde_dbg_base; + struct sde_dbg_reg_base *blk_base; + + list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) + if (strlen(blk_base->name) && !strcmp(blk_base->name, blk_name)) + return blk_base; + + return NULL; +} + +static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus) +{ + bool in_log, in_mem; + u32 **dump_mem = NULL; + u32 *dump_addr = NULL; + u32 status = 0; + struct sde_debug_bus_entry *head; + phys_addr_t phys = 0; + int list_size; + int i; + u32 offset; + void __iomem *mem_base = NULL; + struct sde_dbg_reg_base *reg_base; + + if (!bus || !bus->cmn.entries_size) + return; + + list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list, + reg_base_head) + if (strlen(reg_base->name) && + !strcmp(reg_base->name, bus->cmn.name)) + mem_base = reg_base->base + bus->top_blk_off; + + if (!mem_base) { + pr_err("unable to find mem_base for %s\n", bus->cmn.name); + return; + } + + dump_mem = &bus->cmn.dumped_content; + + /* will keep in memory 4 entries of 4 bytes each */ + list_size = (bus->cmn.entries_size * 4 * 4); + + in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG); + in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM); + + if (!in_log && !in_mem) + return; + + dev_info(sde_dbg_base.dev, "======== start %s dump =========\n", + bus->cmn.name); + + if (in_mem) { + if (!(*dump_mem)) + *dump_mem = dma_alloc_coherent(sde_dbg_base.dev, + list_size, &phys, GFP_KERNEL); + + if (*dump_mem) { + dump_addr = *dump_mem; + dev_info(sde_dbg_base.dev, + "%s: start_addr:0x%pK len:0x%x\n", + __func__, dump_addr, list_size); + } else { + in_mem = false; + pr_err("dump_mem: allocation fails\n"); + } + } + + _sde_dbg_enable_power(true); + for (i = 0; i < bus->cmn.entries_size; i++) { + head = bus->entries + i; + writel_relaxed(TEST_MASK(head->block_id, head->test_id), + mem_base + head->wr_addr); + wmb(); /* make sure test bits were written */ + + if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) + offset = DBGBUS_DSPP_STATUS; + else + offset = head->wr_addr + 0x4; + + status = readl_relaxed(mem_base + offset); + + if (in_log) + dev_info(sde_dbg_base.dev, + "waddr=0x%x blk=%d tst=%d val=0x%x\n", + head->wr_addr, head->block_id, + head->test_id, status); + + if (dump_addr && in_mem) { + dump_addr[i*4] = head->wr_addr; + dump_addr[i*4 + 1] = head->block_id; + dump_addr[i*4 + 2] = head->test_id; + dump_addr[i*4 + 3] = status; + } + + /* Disable debug bus once we are done */ + writel_relaxed(0, mem_base + head->wr_addr); + + } + _sde_dbg_enable_power(false); + + dev_info(sde_dbg_base.dev, "======== end %s dump =========\n", + bus->cmn.name); +} + +static void _sde_dbg_dump_vbif_debug_bus_entry( + struct vbif_debug_bus_entry *head, void __iomem *mem_base, + u32 *dump_addr, bool in_log) +{ + int i, j; + u32 val; + + if (!dump_addr && !in_log) + return; + + for (i = 0; i < head->block_cnt; i++) { + writel_relaxed(1 << (i + head->bit_offset), + mem_base + head->block_bus_addr); + /* make sure that current bus blcok enable */ + wmb(); + for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) { + writel_relaxed(j, mem_base + head->block_bus_addr + 4); + 
/* make sure that test point is enabled */ + wmb(); + val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT); + if (dump_addr) { + *dump_addr++ = head->block_bus_addr; + *dump_addr++ = i; + *dump_addr++ = j; + *dump_addr++ = val; + } + if (in_log) + dev_info(sde_dbg_base.dev, + "testpoint:%x arb/xin id=%d index=%d val=0x%x\n", + head->block_bus_addr, i, j, val); + } + } +} + +static void _sde_dbg_dump_vbif_dbg_bus(struct sde_dbg_vbif_debug_bus *bus) +{ + bool in_log, in_mem; + u32 **dump_mem = NULL; + u32 *dump_addr = NULL; + u32 value; + struct vbif_debug_bus_entry *head; + phys_addr_t phys = 0; + int i, list_size = 0; + void __iomem *mem_base = NULL; + struct vbif_debug_bus_entry *dbg_bus; + u32 bus_size; + struct sde_dbg_reg_base *reg_base; + + if (!bus || !bus->cmn.entries_size) + return; + + list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list, + reg_base_head) + if (strlen(reg_base->name) && + !strcmp(reg_base->name, bus->cmn.name)) + mem_base = reg_base->base; + + if (!mem_base) { + pr_err("unable to find mem_base for %s\n", bus->cmn.name); + return; + } + + dbg_bus = bus->entries; + bus_size = bus->cmn.entries_size; + list_size = bus->cmn.entries_size; + dump_mem = &bus->cmn.dumped_content; + + dev_info(sde_dbg_base.dev, "======== start %s dump =========\n", + bus->cmn.name); + + if (!dump_mem || !dbg_bus || !bus_size || !list_size) + return; + + /* allocate memory for each test point */ + for (i = 0; i < bus_size; i++) { + head = dbg_bus + i; + list_size += (head->block_cnt * head->test_pnt_cnt); + } + + /* 4 bytes * 4 entries for each test point*/ + list_size *= 16; + + in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG); + in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM); + + if (!in_log && !in_mem) + return; + + if (in_mem) { + if (!(*dump_mem)) + *dump_mem = dma_alloc_coherent(sde_dbg_base.dev, + list_size, &phys, GFP_KERNEL); + + if (*dump_mem) { + dump_addr = *dump_mem; + dev_info(sde_dbg_base.dev, + "%s: start_addr:0x%pK len:0x%x\n", + __func__, dump_addr, list_size); + } else { + in_mem = false; + pr_err("dump_mem: allocation fails\n"); + } + } + + _sde_dbg_enable_power(true); + + value = readl_relaxed(mem_base + MMSS_VBIF_CLKON); + writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON); + + /* make sure that vbif core is on */ + wmb(); + + for (i = 0; i < bus_size; i++) { + head = dbg_bus + i; + + writel_relaxed(0, mem_base + head->disable_bus_addr); + writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL); + /* make sure that other bus is off */ + wmb(); + + _sde_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr, + in_log); + if (dump_addr) + dump_addr += (head->block_cnt * head->test_pnt_cnt * 4); + } + + _sde_dbg_enable_power(false); + + dev_info(sde_dbg_base.dev, "======== end %s dump =========\n", + bus->cmn.name); +} + +/** + * _sde_dump_array - dump array of register bases + * @blk_arr: array of register base pointers + * @len: length of blk_arr + * @do_panic: whether to trigger a panic after dumping + * @name: string indicating origin of dump + * @dump_dbgbus_sde: whether to dump the sde debug bus + * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus + */ +static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[], + u32 len, bool do_panic, const char *name, bool dump_dbgbus_sde, + bool dump_dbgbus_vbif_rt, bool dump_all) +{ + int i; + + mutex_lock(&sde_dbg_base.mutex); + + for (i = 0; i < len; i++) { + if (blk_arr[i] != NULL) + _sde_dump_reg_by_ranges(blk_arr[i], + sde_dbg_base.enable_reg_dump); + } + + if (dump_all) + 
sde_evtlog_dump_all(sde_dbg_base.evtlog); + + if (dump_dbgbus_sde) + _sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde); + + if (dump_dbgbus_vbif_rt) + _sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt); + + if (do_panic && sde_dbg_base.panic_on_err) + panic(name); + + mutex_unlock(&sde_dbg_base.mutex); +} + +/** + * _sde_dump_work - deferred dump work function + * @work: work structure + */ +static void _sde_dump_work(struct work_struct *work) +{ + _sde_dump_array(sde_dbg_base.req_dump_blks, + ARRAY_SIZE(sde_dbg_base.req_dump_blks), + sde_dbg_base.work_panic, "evtlog_workitem", + sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work, + sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work, + sde_dbg_base.dump_all); +} + +void sde_dbg_dump(bool queue_work, const char *name, ...) +{ + int i, index = 0; + bool do_panic = false; + bool dump_dbgbus_sde = false; + bool dump_dbgbus_vbif_rt = false; + bool dump_all = false; + va_list args; + char *blk_name = NULL; + struct sde_dbg_reg_base *blk_base = NULL; + struct sde_dbg_reg_base **blk_arr; + u32 blk_len; + + if (!sde_evtlog_is_enabled(sde_dbg_base.evtlog, SDE_EVTLOG_DEFAULT)) + return; + + if (queue_work && work_pending(&sde_dbg_base.dump_work)) + return; + + blk_arr = &sde_dbg_base.req_dump_blks[0]; + blk_len = ARRAY_SIZE(sde_dbg_base.req_dump_blks); + + memset(sde_dbg_base.req_dump_blks, 0, + sizeof(sde_dbg_base.req_dump_blks)); + sde_dbg_base.dump_all = false; + + va_start(args, name); + i = 0; + while ((blk_name = va_arg(args, char*))) { + if (i++ >= SDE_EVTLOG_MAX_DATA) { + pr_err("could not parse all dump arguments\n"); + break; + } + if (IS_ERR_OR_NULL(blk_name)) + break; + + blk_base = _sde_dump_get_blk_addr(blk_name); + if (blk_base) { + if (index < blk_len) { + blk_arr[index] = blk_base; + index++; + } else { + pr_err("insufficient space to to dump %s\n", + blk_name); + } + } + if (!strcmp(blk_name, "all")) + dump_all = true; + + if (!strcmp(blk_name, "dbg_bus")) + dump_dbgbus_sde = true; + + if (!strcmp(blk_name, "vbif_dbg_bus")) + dump_dbgbus_vbif_rt = true; + + if (!strcmp(blk_name, "panic")) + do_panic = true; + } + va_end(args); + + if (queue_work) { + /* schedule work to dump later */ + sde_dbg_base.work_panic = do_panic; + sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work = + dump_dbgbus_sde; + sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work = + dump_dbgbus_vbif_rt; + sde_dbg_base.dump_all = dump_all; + schedule_work(&sde_dbg_base.dump_work); + } else { + _sde_dump_array(blk_arr, blk_len, do_panic, name, + dump_dbgbus_sde, dump_dbgbus_vbif_rt, dump_all); + } +} + +void sde_dbg_ctrl(const char *name, ...) 
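Callers reach sde_dbg_dump() through the SDE_DBG_DUMP()/SDE_DBG_DUMP_WQ() macros added to sde_dbg.h later in this patch, which supply __func__ and the NULL terminator. A minimal usage sketch; "sde" and "vbif_rt" stand in for whatever base names were registered earlier through sde_dbg_reg_register_base() and are placeholders here:

	/* immediate dump of two registered blocks plus both debug buses */
	SDE_DBG_DUMP("sde", "vbif_rt", "dbg_bus", "vbif_dbg_bus");

	/*
	 * Defer the dump to the work item (e.g. from an interrupt-adjacent
	 * path), dump the full event log, and panic once the work completes,
	 * provided the panic_on_err debugfs knob is set.
	 */
	SDE_DBG_DUMP_WQ("all", "dbg_bus", "vbif_dbg_bus", "panic");
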
+{ + int i = 0; + va_list args; + char *blk_name = NULL; + + + /* no debugfs controlled events are enabled, just return */ + if (!sde_dbg_base.debugfs_ctrl) + return; + + va_start(args, name); + + while ((blk_name = va_arg(args, char*))) { + if (i++ >= SDE_EVTLOG_MAX_DATA) { + pr_err("could not parse all dbg arguments\n"); + break; + } + + if (IS_ERR_OR_NULL(blk_name)) + break; + + if (!strcmp(blk_name, "stop_ftrace") && + sde_dbg_base.debugfs_ctrl & + DBG_CTRL_STOP_FTRACE) { + pr_debug("tracing off\n"); + tracing_off(); + } + + if (!strcmp(blk_name, "panic_underrun") && + sde_dbg_base.debugfs_ctrl & + DBG_CTRL_PANIC_UNDERRUN) { + pr_debug("panic underrun\n"); + panic("underrun"); + } + } + +} + +/* + * sde_dbg_debugfs_open - debugfs open handler for evtlog dump + * @inode: debugfs inode + * @file: file handle + */ +static int sde_dbg_debugfs_open(struct inode *inode, struct file *file) +{ + if (!inode || !file) + return -EINVAL; + + /* non-seekable */ + file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + file->private_data = inode->i_private; + return 0; +} + +/** + * sde_evtlog_dump_read - debugfs read handler for evtlog dump + * @file: file handler + * @buff: user buffer content for debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff, + size_t count, loff_t *ppos) +{ + ssize_t len = 0; + char evtlog_buf[SDE_EVTLOG_BUF_MAX]; + + if (!buff || !ppos) + return -EINVAL; + + len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog, evtlog_buf, + SDE_EVTLOG_BUF_MAX, true); + if (copy_to_user(buff, evtlog_buf, len)) + return -EFAULT; + *ppos += len; + + return len; +} + +/** + * sde_evtlog_dump_write - debugfs write handler for evtlog dump + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_evtlog_dump_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + _sde_dump_reg_all(); + + sde_evtlog_dump_all(sde_dbg_base.evtlog); + + _sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde); + _sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt); + + if (sde_dbg_base.panic_on_err) + panic("sde"); + + return count; +} + +static const struct file_operations sde_evtlog_fops = { + .open = sde_dbg_debugfs_open, + .read = sde_evtlog_dump_read, + .write = sde_evtlog_dump_write, +}; + +/** + * sde_dbg_ctrl_read - debugfs read handler for debug ctrl read + * @file: file handler + * @buff: user buffer content for debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_dbg_ctrl_read(struct file *file, char __user *buff, + size_t count, loff_t *ppos) +{ + ssize_t len = 0; + char buf[24] = {'\0'}; + + if (!buff || !ppos) + return -EINVAL; + + if (*ppos) + return 0; /* the end */ + + len = snprintf(buf, sizeof(buf), "0x%x\n", sde_dbg_base.debugfs_ctrl); + pr_debug("%s: ctrl:0x%x len:0x%zx\n", + __func__, sde_dbg_base.debugfs_ctrl, len); + + if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) { + pr_err("error copying the buffer! 
count:0x%zx\n", count); + return -EFAULT; + } + + *ppos += len; /* increase offset */ + return len; +} + +/** + * sde_dbg_ctrl_write - debugfs read handler for debug ctrl write + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_dbg_ctrl_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + u32 dbg_ctrl = 0; + char buf[24]; + + if (!file) { + pr_err("DbgDbg: %s: error no file --\n", __func__); + return -EINVAL; + } + + if (count >= sizeof(buf)) + return -EFAULT; + + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + if (kstrtouint(buf, 0, &dbg_ctrl)) { + pr_err("%s: error in the number of bytes\n", __func__); + return -EFAULT; + } + + pr_debug("dbg_ctrl_read:0x%x\n", dbg_ctrl); + sde_dbg_base.debugfs_ctrl = dbg_ctrl; + + return count; +} + +static const struct file_operations sde_dbg_ctrl_fops = { + .open = sde_dbg_debugfs_open, + .read = sde_dbg_ctrl_read, + .write = sde_dbg_ctrl_write, +}; + +void sde_dbg_init_dbg_buses(u32 hwversion) +{ + static struct sde_dbg_base *dbg = &sde_dbg_base; + char debug_name[80] = ""; + + memset(&dbg->dbgbus_sde, 0, sizeof(dbg->dbgbus_sde)); + memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt)); + + switch (hwversion) { + case SDE_HW_VER_300: + case SDE_HW_VER_301: + dbg->dbgbus_sde.entries = dbg_bus_sde_8998; + dbg->dbgbus_sde.cmn.entries_size = ARRAY_SIZE(dbg_bus_sde_8998); + dbg->dbgbus_sde.cmn.flags = DBGBUS_FLAGS_DSPP; + + dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998; + dbg->dbgbus_vbif_rt.cmn.entries_size = + ARRAY_SIZE(vbif_dbg_bus_msm8998); + break; + default: + pr_err("unsupported chipset id %u\n", hwversion); + break; + } + + if (dbg->dbgbus_sde.entries) { + dbg->dbgbus_sde.cmn.name = DBGBUS_NAME_SDE; + snprintf(debug_name, sizeof(debug_name), "%s_dbgbus", + dbg->dbgbus_sde.cmn.name); + dbg->dbgbus_sde.cmn.enable_mask = DEFAULT_DBGBUS_SDE; + debugfs_create_u32(debug_name, 0600, dbg->root, + &dbg->dbgbus_sde.cmn.enable_mask); + } + + if (dbg->dbgbus_vbif_rt.entries) { + dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT; + snprintf(debug_name, sizeof(debug_name), "%s_dbgbus", + dbg->dbgbus_vbif_rt.cmn.name); + dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT; + debugfs_create_u32(debug_name, 0600, dbg->root, + &dbg->dbgbus_vbif_rt.cmn.enable_mask); + } +} + +int sde_dbg_init(struct dentry *debugfs_root, struct device *dev, + struct sde_dbg_power_ctrl *power_ctrl) +{ + int i; + + mutex_init(&sde_dbg_base.mutex); + INIT_LIST_HEAD(&sde_dbg_base.reg_base_list); + sde_dbg_base.dev = dev; + sde_dbg_base.power_ctrl = *power_ctrl; + + + sde_dbg_base.evtlog = sde_evtlog_init(); + if (IS_ERR_OR_NULL(sde_dbg_base.evtlog)) + return PTR_ERR(sde_dbg_base.evtlog); + + sde_dbg_base_evtlog = sde_dbg_base.evtlog; + + sde_dbg_base.root = debugfs_create_dir("evt_dbg", debugfs_root); + if (IS_ERR_OR_NULL(sde_dbg_base.root)) { + pr_err("debugfs_create_dir fail, error %ld\n", + PTR_ERR(sde_dbg_base.root)); + sde_dbg_base.root = NULL; + return -ENODEV; + } + + INIT_WORK(&sde_dbg_base.dump_work, _sde_dump_work); + sde_dbg_base.work_panic = false; + + for (i = 0; i < SDE_EVTLOG_ENTRY; i++) + sde_dbg_base.evtlog->logs[i].counter = i; + + debugfs_create_file("dbg_ctrl", 0600, sde_dbg_base.root, NULL, + &sde_dbg_ctrl_fops); + debugfs_create_file("dump", 0600, sde_dbg_base.root, NULL, + &sde_evtlog_fops); + debugfs_create_u32("enable", 0600, 
sde_dbg_base.root, + &(sde_dbg_base.evtlog->enable)); + debugfs_create_u32("panic", 0600, sde_dbg_base.root, + &sde_dbg_base.panic_on_err); + debugfs_create_u32("reg_dump", 0600, sde_dbg_base.root, + &sde_dbg_base.enable_reg_dump); + + sde_dbg_base.panic_on_err = DEFAULT_PANIC; + sde_dbg_base.enable_reg_dump = DEFAULT_REGDUMP; + + pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n", + sde_dbg_base.evtlog->enable, sde_dbg_base.panic_on_err, + sde_dbg_base.enable_reg_dump); + + return 0; +} + +/** + * sde_dbg_destroy - destroy sde debug facilities + */ +void sde_dbg_destroy(void) +{ + debugfs_remove_recursive(sde_dbg_base.root); + sde_dbg_base.root = NULL; + + sde_dbg_base_evtlog = NULL; + sde_evtlog_destroy(sde_dbg_base.evtlog); + sde_dbg_base.evtlog = NULL; + mutex_destroy(&sde_dbg_base.mutex); +} + +/** + * sde_dbg_reg_base_release - release allocated reg dump file private data + * @inode: debugfs inode + * @file: file handle + * @Return: 0 on success + */ +static int sde_dbg_reg_base_release(struct inode *inode, struct file *file) +{ + struct sde_dbg_reg_base *dbg; + + if (!file) + return -EINVAL; + + dbg = file->private_data; + if (!dbg) + return -ENODEV; + + mutex_lock(&sde_dbg_base.mutex); + if (dbg && dbg->buf) { + kfree(dbg->buf); + dbg->buf_len = 0; + dbg->buf = NULL; + } + mutex_unlock(&sde_dbg_base.mutex); + + return 0; +} + + +/** + * sde_dbg_reg_base_offset_write - set new offset and len to debugfs reg base + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_dbg_reg_base_offset_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct sde_dbg_reg_base *dbg; + u32 off = 0; + u32 cnt = DEFAULT_BASE_REG_CNT; + char buf[24]; + ssize_t rc = count; + + if (!file) + return -EINVAL; + + dbg = file->private_data; + if (!dbg) + return -ENODEV; + + if (count >= sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + if (sscanf(buf, "%5x %x", &off, &cnt) != 2) + return -EFAULT; + + mutex_lock(&sde_dbg_base.mutex); + if (off > dbg->max_offset) { + rc = -EINVAL; + goto exit; + } + + if (off % sizeof(u32)) { + rc = -EINVAL; + goto exit; + } + + if (cnt > (dbg->max_offset - off)) + cnt = dbg->max_offset - off; + + if (cnt % sizeof(u32)) { + rc = -EINVAL; + goto exit; + } + + if (cnt == 0) + return -EINVAL; + + dbg->off = off; + dbg->cnt = cnt; + +exit: + mutex_unlock(&sde_dbg_base.mutex); + pr_debug("offset=%x cnt=%x\n", off, cnt); + + return rc; +} + +/** + * sde_dbg_reg_base_offset_read - read current offset and len of register base + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_dbg_reg_base_offset_read(struct file *file, + char __user *buff, size_t count, loff_t *ppos) +{ + struct sde_dbg_reg_base *dbg; + int len = 0; + char buf[24] = {'\0'}; + + if (!file) + return -EINVAL; + + dbg = file->private_data; + if (!dbg) + return -ENODEV; + + if (!ppos) + return -EINVAL; + + if (*ppos) + return 0; /* the end */ + + mutex_lock(&sde_dbg_base.mutex); + if (dbg->off % sizeof(u32)) { + mutex_unlock(&sde_dbg_base.mutex); + return -EFAULT; + } + + len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt); + if (len < 0 || len >= sizeof(buf)) { + mutex_unlock(&sde_dbg_base.mutex); + return 0; + } + + if ((count < sizeof(buf)) || 
copy_to_user(buff, buf, len)) { + mutex_unlock(&sde_dbg_base.mutex); + return -EFAULT; + } + + *ppos += len; /* increase offset */ + mutex_unlock(&sde_dbg_base.mutex); + + return len; +} + +/** + * sde_dbg_reg_base_reg_write - write to reg base hw at offset a given value + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_dbg_reg_base_reg_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct sde_dbg_reg_base *dbg; + size_t off; + u32 data, cnt; + char buf[24]; + + if (!file) + return -EINVAL; + + dbg = file->private_data; + if (!dbg) + return -ENODEV; + + if (count >= sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + cnt = sscanf(buf, "%zx %x", &off, &data); + + if (cnt < 2) + return -EFAULT; + + mutex_lock(&sde_dbg_base.mutex); + if (off >= dbg->max_offset) { + mutex_unlock(&sde_dbg_base.mutex); + return -EFAULT; + } + + _sde_dbg_enable_power(true); + + writel_relaxed(data, dbg->base + off); + + _sde_dbg_enable_power(false); + + mutex_unlock(&sde_dbg_base.mutex); + + pr_debug("addr=%zx data=%x\n", off, data); + + return count; +} + +/** + * sde_dbg_reg_base_reg_read - read len from reg base hw at current offset + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_dbg_reg_base_reg_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + struct sde_dbg_reg_base *dbg; + size_t len; + + if (!file) + return -EINVAL; + + dbg = file->private_data; + if (!dbg) { + pr_err("invalid handle\n"); + return -ENODEV; + } + + if (!ppos) + return -EINVAL; + + mutex_lock(&sde_dbg_base.mutex); + if (!dbg->buf) { + char *hwbuf, *hwbuf_cur; + char dump_buf[64]; + char __iomem *ioptr; + int cnt, tot; + + dbg->buf_len = sizeof(dump_buf) * + DIV_ROUND_UP(dbg->cnt, ROW_BYTES); + + if (dbg->buf_len % sizeof(u32)) + return -EINVAL; + + dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL); + + if (!dbg->buf) { + mutex_unlock(&sde_dbg_base.mutex); + return -ENOMEM; + } + + hwbuf = kzalloc(dbg->buf_len, GFP_KERNEL); + if (!hwbuf) { + kfree(dbg->buf); + mutex_unlock(&sde_dbg_base.mutex); + return -ENOMEM; + } + hwbuf_cur = hwbuf; + + ioptr = dbg->base + dbg->off; + tot = 0; + + _sde_dbg_enable_power(true); + + memcpy_fromio(hwbuf, ioptr, dbg->buf_len); + + _sde_dbg_enable_power(false); + + for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) { + hex_dump_to_buffer(hwbuf_cur, + min(cnt, ROW_BYTES), + ROW_BYTES, GROUP_BYTES, dump_buf, + sizeof(dump_buf), false); + len = scnprintf(dbg->buf + tot, dbg->buf_len - tot, + "0x%08x: %s\n", + ((int) (unsigned long) hwbuf_cur) - + ((int) (unsigned long) dbg->base), + dump_buf); + + hwbuf_cur += ROW_BYTES; + tot += len; + if (tot >= dbg->buf_len) + break; + } + + dbg->buf_len = tot; + kfree(hwbuf); + } + + if (*ppos >= dbg->buf_len) { + mutex_unlock(&sde_dbg_base.mutex); + return 0; /* done reading */ + } + + len = min(count, dbg->buf_len - (size_t) *ppos); + if (copy_to_user(user_buf, dbg->buf + *ppos, len)) { + mutex_unlock(&sde_dbg_base.mutex); + pr_err("failed to copy to user\n"); + return -EFAULT; + } + + *ppos += len; /* increase offset */ + mutex_unlock(&sde_dbg_base.mutex); + + return len; +} + +static const struct file_operations sde_off_fops = { + .open = sde_dbg_debugfs_open, + .release = 
sde_dbg_reg_base_release, + .read = sde_dbg_reg_base_offset_read, + .write = sde_dbg_reg_base_offset_write, +}; + +static const struct file_operations sde_reg_fops = { + .open = sde_dbg_debugfs_open, + .release = sde_dbg_reg_base_release, + .read = sde_dbg_reg_base_reg_read, + .write = sde_dbg_reg_base_reg_write, +}; + +int sde_dbg_reg_register_base(const char *name, void __iomem *base, + size_t max_offset) +{ + struct sde_dbg_base *dbg_base = &sde_dbg_base; + struct sde_dbg_reg_base *reg_base; + struct dentry *ent_off, *ent_reg; + char dn[80] = ""; + int prefix_len = 0; + + reg_base = kzalloc(sizeof(*reg_base), GFP_KERNEL); + if (!reg_base) + return -ENOMEM; + + if (name) + strlcpy(reg_base->name, name, sizeof(reg_base->name)); + reg_base->base = base; + reg_base->max_offset = max_offset; + reg_base->off = 0; + reg_base->cnt = DEFAULT_BASE_REG_CNT; + reg_base->reg_dump = NULL; + + if (name) + prefix_len = snprintf(dn, sizeof(dn), "%s_", name); + strlcpy(dn + prefix_len, "off", sizeof(dn) - prefix_len); + ent_off = debugfs_create_file(dn, 0600, dbg_base->root, reg_base, + &sde_off_fops); + if (IS_ERR_OR_NULL(ent_off)) { + pr_err("debugfs_create_file: offset fail\n"); + goto off_fail; + } + + strlcpy(dn + prefix_len, "reg", sizeof(dn) - prefix_len); + ent_reg = debugfs_create_file(dn, 0600, dbg_base->root, reg_base, + &sde_reg_fops); + if (IS_ERR_OR_NULL(ent_reg)) { + pr_err("debugfs_create_file: reg fail\n"); + goto reg_fail; + } + + /* Initialize list to make sure check for null list will be valid */ + INIT_LIST_HEAD(®_base->sub_range_list); + + pr_debug("%s base: %pK max_offset 0x%zX\n", reg_base->name, + reg_base->base, reg_base->max_offset); + + list_add(®_base->reg_base_head, &dbg_base->reg_base_list); + + return 0; +reg_fail: + debugfs_remove(ent_off); +off_fail: + kfree(reg_base); + return -ENODEV; +} + +void sde_dbg_reg_register_dump_range(const char *base_name, + const char *range_name, u32 offset_start, u32 offset_end, + uint32_t xin_id) +{ + struct sde_dbg_reg_base *reg_base; + struct sde_dbg_reg_range *range; + + reg_base = _sde_dump_get_blk_addr(base_name); + if (!reg_base) { + pr_err("error: for range %s unable to locate base %s\n", + range_name, base_name); + return; + } + + if (!range_name || strlen(range_name) == 0) { + pr_err("%pS: bad range name, base_name %s, offset_start 0x%X, end 0x%X\n", + __builtin_return_address(0), base_name, + offset_start, offset_end); + return; + } + + if (offset_end - offset_start < REG_DUMP_ALIGN || + offset_start > offset_end) { + pr_err("%pS: bad range, base_name %s, range_name %s, offset_start 0x%X, end 0x%X\n", + __builtin_return_address(0), base_name, + range_name, offset_start, offset_end); + return; + } + + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (!range) + return; + + strlcpy(range->range_name, range_name, sizeof(range->range_name)); + range->offset.start = offset_start; + range->offset.end = offset_end; + range->xin_id = xin_id; + list_add_tail(&range->head, ®_base->sub_range_list); + + pr_debug("base %s, range %s, start 0x%X, end 0x%X\n", + base_name, range->range_name, + range->offset.start, range->offset.end); +} + +void sde_dbg_set_sde_top_offset(u32 blk_off) +{ + sde_dbg_base.dbgbus_sde.top_blk_off = blk_off; +} diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h index 271c41f05ce5058d72db5ba6d00ae247ab6f43ef..ce36cba0803946e0cc0efc8ea91b7b0fd9874f6b 100644 --- a/drivers/gpu/drm/msm/sde_dbg.h +++ b/drivers/gpu/drm/msm/sde_dbg.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. 
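The registration helpers above are meant to be called from the driver's probe path: sde_dbg_init() has to run first, since sde_dbg_init_dbg_buses() and the per-base debugfs files hang off the root it creates, and dump ranges can only be attached to a base that is already registered. A hedged sketch of that wiring, with every name, offset and xin id a placeholder rather than a value from this patch:

static int example_sde_dbg_setup(struct dentry *debugfs_root,
		struct device *dev, struct sde_dbg_power_ctrl *power_ctrl,
		void __iomem *mmio, size_t mmio_len, u32 hwversion)
{
	int rc;

	rc = sde_dbg_init(debugfs_root, dev, power_ctrl);
	if (rc)
		return rc;

	/* picks the debug-bus tables matching the chipset revision */
	sde_dbg_init_dbg_buses(hwversion);

	/* register the full aperture, then name interesting sub-ranges */
	rc = sde_dbg_reg_register_base("sde", mmio, mmio_len);
	if (rc)
		return rc;
	sde_dbg_reg_register_dump_range("sde", "top", 0x0, 0x3ff, 0);
	sde_dbg_reg_register_dump_range("sde", "ctl_0", 0x1000, 0x11ff, 0);

	/* offset of the top block, used for the debug-bus test controls */
	sde_dbg_set_sde_top_offset(0x0);

	return 0;
}
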
All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,9 +17,10 @@ #include <linux/debugfs.h> #include <linux/list.h> -#define SDE_EVTLOG_DATA_LIMITER (-1) +#define SDE_EVTLOG_DATA_LIMITER (0xC0DEBEEF) #define SDE_EVTLOG_FUNC_ENTRY 0x1111 #define SDE_EVTLOG_FUNC_EXIT 0x2222 +#define SDE_EVTLOG_ERROR 0xebad #define SDE_DBG_DUMP_DATA_LIMITER (NULL) @@ -29,34 +30,312 @@ enum sde_dbg_evtlog_flag { SDE_EVTLOG_ALL = BIT(7) }; +enum sde_dbg_dump_flag { + SDE_DBG_DUMP_IN_LOG = BIT(0), + SDE_DBG_DUMP_IN_MEM = BIT(1), +}; + +#ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG +#define SDE_EVTLOG_DEFAULT_ENABLE 1 +#else +#define SDE_EVTLOG_DEFAULT_ENABLE 0 +#endif + +/* + * evtlog will print this number of entries when it is called through + * sysfs node or panic. This prevents kernel log from evtlog message + * flood. + */ +#define SDE_EVTLOG_PRINT_ENTRY 256 + +/* + * evtlog keeps this number of entries in memory for debug purpose. This + * number must be greater than print entry to prevent out of bound evtlog + * entry array access. + */ +#define SDE_EVTLOG_ENTRY (SDE_EVTLOG_PRINT_ENTRY * 8) +#define SDE_EVTLOG_MAX_DATA 15 +#define SDE_EVTLOG_BUF_MAX 512 +#define SDE_EVTLOG_BUF_ALIGN 32 + +struct sde_dbg_power_ctrl { + void *handle; + void *client; + int (*enable_fn)(void *handle, void *client, bool enable); +}; + +struct sde_dbg_evtlog_log { + u32 counter; + s64 time; + const char *name; + int line; + u32 data[SDE_EVTLOG_MAX_DATA]; + u32 data_cnt; + int pid; +}; + +struct sde_dbg_evtlog { + struct sde_dbg_evtlog_log logs[SDE_EVTLOG_ENTRY]; + u32 first; + u32 last; + u32 last_dump; + u32 curr; + u32 next; + u32 enable; + spinlock_t spin_lock; +}; + +extern struct sde_dbg_evtlog *sde_dbg_base_evtlog; + /** - * SDE_EVT32 - Write an list of 32bit values as an event into the event log + * SDE_EVT32 - Write a list of 32bit values to the event log, default area * ... - variable arguments */ -#define SDE_EVT32(...) sde_evtlog(__func__, __LINE__, SDE_EVTLOG_DEFAULT, \ - ##__VA_ARGS__, SDE_EVTLOG_DATA_LIMITER) -#define SDE_EVT32_IRQ(...) sde_evtlog(__func__, __LINE__, SDE_EVTLOG_IRQ, \ - ##__VA_ARGS__, SDE_EVTLOG_DATA_LIMITER) +#define SDE_EVT32(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \ + __LINE__, SDE_EVTLOG_DEFAULT, ##__VA_ARGS__, \ + SDE_EVTLOG_DATA_LIMITER) + +/** + * SDE_EVT32_IRQ - Write a list of 32bit values to the event log, IRQ area + * ... - variable arguments + */ +#define SDE_EVT32_IRQ(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \ + __LINE__, SDE_EVTLOG_IRQ, ##__VA_ARGS__, \ + SDE_EVTLOG_DATA_LIMITER) + +/** + * SDE_DBG_DUMP - trigger dumping of all sde_dbg facilities + * @va_args: list of named register dump ranges and regions to dump, as + * registered previously through sde_dbg_reg_register_base and + * sde_dbg_reg_register_dump_range. + * Including the special name "panic" will trigger a panic after + * the dumping work has completed. + */ +#define SDE_DBG_DUMP(...) sde_dbg_dump(false, __func__, ##__VA_ARGS__, \ + SDE_DBG_DUMP_DATA_LIMITER) -#define SDE_DBG_DUMP(...) \ - sde_dbg_dump(false, __func__, ##__VA_ARGS__, \ +/** + * SDE_DBG_DUMP_WQ - trigger dumping of all sde_dbg facilities, queuing the work + * @va_args: list of named register dump ranges and regions to dump, as + * registered previously through sde_dbg_reg_register_base and + * sde_dbg_reg_register_dump_range. 
+ * Including the special name "panic" will trigger a panic after + * the dumping work has completed. + */ +#define SDE_DBG_DUMP_WQ(...) sde_dbg_dump(true, __func__, ##__VA_ARGS__, \ SDE_DBG_DUMP_DATA_LIMITER) -#define SDE_DBG_DUMP_WQ(...) \ - sde_dbg_dump(true, __func__, ##__VA_ARGS__, \ +/** + * SDE_DBG_EVT_CTRL - trigger a different driver events + * event: event that trigger different behavior in the driver + */ +#define SDE_DBG_CTRL(...) sde_dbg_ctrl(__func__, ##__VA_ARGS__, \ SDE_DBG_DUMP_DATA_LIMITER) #if defined(CONFIG_DEBUG_FS) -int sde_evtlog_init(struct dentry *debugfs_root); -void sde_evtlog_destroy(void); -void sde_evtlog(const char *name, int line, int flag, ...); -void sde_dbg_dump(bool queue, const char *name, ...); +/** + * sde_evtlog_init - allocate a new event log object + * Returns: evtlog or -ERROR + */ +struct sde_dbg_evtlog *sde_evtlog_init(void); + +/** + * sde_evtlog_destroy - destroy previously allocated event log + * @evtlog: pointer to evtlog + * Returns: none + */ +void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog); + +/** + * sde_evtlog_log - log an entry into the event log. + * log collection may be enabled/disabled entirely via debugfs + * log area collection may be filtered by user provided flags via debugfs. + * @evtlog: pointer to evtlog + * @name: function name of call site + * @line: line number of call site + * @flag: log area filter flag checked against user's debugfs request + * Returns: none + */ +void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line, + int flag, ...); + +/** + * sde_evtlog_dump_all - print all entries in event log to kernel log + * @evtlog: pointer to evtlog + * Returns: none + */ +void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog); + +/** + * sde_evtlog_is_enabled - check whether log collection is enabled for given + * event log and log area flag + * @evtlog: pointer to evtlog + * @flag: log area filter flag + * Returns: none + */ +bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag); + +/** + * sde_evtlog_dump_to_buffer - print content of event log to the given buffer + * @evtlog: pointer to evtlog + * @evtlog_buf: target buffer to print into + * @evtlog_buf_size: size of target buffer + * @update_last_entry:» whether or not to stop at most recent entry + * Returns: number of bytes written to buffer + */ +ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog, + char *evtlog_buf, ssize_t evtlog_buf_size, + bool update_last_entry); + +/** + * sde_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset + * @hwversion: Chipset revision + */ +void sde_dbg_init_dbg_buses(u32 hwversion); + +/** + * sde_dbg_init - initialize global sde debug facilities: evtlog, regdump + * @debugfs_root: debugfs root in which to create sde debug entries + * @dev: device handle + * @power_ctrl: power control callback structure for enabling clocks + * during register dumping + * Returns: 0 or -ERROR + */ +int sde_dbg_init(struct dentry *debugfs_root, struct device *dev, + struct sde_dbg_power_ctrl *power_ctrl); + +/** + * sde_dbg_destroy - destroy the global sde debug facilities + * Returns: none + */ +void sde_dbg_destroy(void); + +/** + * sde_dbg_dump - trigger dumping of all sde_dbg facilities + * @queue_work: whether to queue the dumping work to the work_struct + * @name: string indicating origin of dump + * @va_args: list of named register dump ranges and regions to dump, as + * registered previously through sde_dbg_reg_register_base and + * sde_dbg_reg_register_dump_range. 
+ * Including the special name "panic" will trigger a panic after + * the dumping work has completed. + * Returns: none + */ +void sde_dbg_dump(bool queue_work, const char *name, ...); + +/** + * sde_dbg_ctrl - trigger specific actions for the driver with debugging + * purposes. Those actions need to be enabled by the debugfs entry + * so the driver executes those actions in the corresponding calls. + * @va_args: list of actions to trigger + * Returns: none + */ +void sde_dbg_ctrl(const char *name, ...); + +/** + * sde_dbg_reg_register_base - register a hw register address section for later + * dumping. call this before calling sde_dbg_reg_register_dump_range + * to be able to specify sub-ranges within the base hw range. + * @name: name of base region + * @base: base pointer of region + * @max_offset: length of region + * Returns: 0 or -ERROR + */ +int sde_dbg_reg_register_base(const char *name, void __iomem *base, + size_t max_offset); + +/** + * sde_dbg_reg_register_dump_range - register a hw register sub-region for + * later register dumping associated with base specified by + * sde_dbg_reg_register_base + * @base_name: name of base region + * @range_name: name of sub-range within base region + * @offset_start: sub-range's start offset from base's base pointer + * @offset_end: sub-range's end offset from base's base pointer + * @xin_id: xin id + * Returns: none + */ +void sde_dbg_reg_register_dump_range(const char *base_name, + const char *range_name, u32 offset_start, u32 offset_end, + uint32_t xin_id); + +/** + * sde_dbg_set_sde_top_offset - set the target specific offset from mdss base + * address of the top registers. Used for accessing debug bus controls. + * @blk_off: offset from mdss base of the top block + */ +void sde_dbg_set_sde_top_offset(u32 blk_off); #else -static inline int sde_evtlog_init(struct dentry *debugfs_root) { return 0; } -static inline void sde_evtlog(const char *name, int line, flag, ...) {} -static inline void sde_evtlog_destroy(void) { } -static inline void sde_dbg_dump(bool queue, const char *name, ...) {} -#endif +static inline struct sde_dbg_evtlog *sde_evtlog_init(void) +{ + return NULL; +} + +static inline void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog) +{ +} + +static inline void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, + const char *name, int line, int flag, ...) +{ +} + +static inline void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog) +{ +} + +static inline bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, + u32 flag) +{ + return false; +} + +static inline ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog, + char *evtlog_buf, ssize_t evtlog_buf_size, + bool update_last_entry) +{ + return 0; +} + +void sde_dbg_init_dbg_buses(u32 hwversion) +{ +} + +static inline int sde_dbg_init(struct dentry *debugfs_root, struct device *dev, + struct sde_dbg_power_ctrl *power_ctrl) +{ + return 0; +} + +static inline void sde_dbg_destroy(void) +{ +} + +static inline void sde_dbg_dump(bool queue_work, const char *name, ...) +{ +} + +static inline void sde_dbg_ctrl(const char *name, ...) 
+{ +} + +static inline int sde_dbg_reg_register_base(const char *name, + void __iomem *base, size_t max_offset) +{ + return 0; +} + +static inline void sde_dbg_reg_register_dump_range(const char *base_name, + const char *range_name, u32 offset_start, u32 offset_end, + uint32_t xin_id) +{ +} + +void sde_dbg_set_sde_top_offset(u32 blk_off) +{ +} +#endif /* defined(CONFIG_DEBUG_FS) */ + #endif /* SDE_DBG_H_ */ diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c index 72832776659d479faed3d85a3c047372d0f5e9c7..70ba127ceb082df64c190d1ededed5a912d56083 100644 --- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c +++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -10,7 +10,7 @@ * GNU General Public License for more details. */ -#define pr_fmt(fmt) "sde_evtlog:[%s] " fmt, __func__ +#define pr_fmt(fmt) "sde_dbg:[%s] " fmt, __func__ #include <linux/delay.h> #include <linux/spinlock.h> @@ -18,77 +18,36 @@ #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/dma-buf.h> +#include <linux/slab.h> #include "sde_dbg.h" #include "sde_trace.h" -#ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG -#define SDE_EVTLOG_DEFAULT_ENABLE 1 -#else -#define SDE_EVTLOG_DEFAULT_ENABLE 0 -#endif - -#define SDE_DBG_DEFAULT_PANIC 1 - -/* - * evtlog will print this number of entries when it is called through - * sysfs node or panic. This prevents kernel log from evtlog message - * flood. - */ -#define SDE_EVTLOG_PRINT_ENTRY 256 - -/* - * evtlog keeps this number of entries in memory for debug purpose. This - * number must be greater than print entry to prevent out of bound evtlog - * entry array access. - */ -#define SDE_EVTLOG_ENTRY (SDE_EVTLOG_PRINT_ENTRY * 4) -#define SDE_EVTLOG_MAX_DATA 15 -#define SDE_EVTLOG_BUF_MAX 512 -#define SDE_EVTLOG_BUF_ALIGN 32 - -DEFINE_SPINLOCK(sde_evtloglock); - -struct tlog { - u32 counter; - s64 time; - const char *name; - int line; - u32 data[SDE_EVTLOG_MAX_DATA]; - u32 data_cnt; - int pid; -}; - -static struct sde_dbg_evtlog { - struct tlog logs[SDE_EVTLOG_ENTRY]; - u32 first; - u32 last; - u32 curr; - struct dentry *evtlog; - u32 evtlog_enable; - u32 panic_on_err; - struct work_struct evtlog_dump_work; - bool work_panic; -} sde_dbg_evtlog; - -static inline bool sde_evtlog_is_enabled(u32 flag) +bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag) { - return (flag & sde_dbg_evtlog.evtlog_enable) || - (flag == SDE_EVTLOG_ALL && sde_dbg_evtlog.evtlog_enable); + if (!evtlog) + return false; + + return (flag & evtlog->enable) || + (flag == SDE_EVTLOG_ALL && evtlog->enable); } -void sde_evtlog(const char *name, int line, int flag, ...) +void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line, + int flag, ...) 
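Drivers log through the SDE_EVT32()/SDE_EVT32_IRQ() wrappers added to sde_dbg.h, which pass the global evtlog plus __func__/__LINE__ and append SDE_EVTLOG_DATA_LIMITER as an end marker for the vararg walk; moving that marker from -1 to 0xC0DEBEEF presumably keeps a legitimately logged 0xFFFFFFFF value from ending a record early. A usage sketch with placeholder values:

	/* up to SDE_EVTLOG_MAX_DATA (15) 32-bit values per event */
	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, crtc_id, enable);

	/* IRQ-area entries are gated separately by the evtlog enable mask */
	SDE_EVT32_IRQ(irq_idx, status, SDE_EVTLOG_ERROR);
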
{ unsigned long flags; int i, val = 0; va_list args; - struct tlog *log; + struct sde_dbg_evtlog_log *log; + + if (!evtlog) + return; - if (!sde_evtlog_is_enabled(flag)) + if (!sde_evtlog_is_enabled(evtlog, flag)) return; - spin_lock_irqsave(&sde_evtloglock, flags); - log = &sde_dbg_evtlog.logs[sde_dbg_evtlog.curr]; + spin_lock_irqsave(&evtlog->spin_lock, flags); + log = &evtlog->logs[evtlog->curr]; log->time = ktime_to_us(ktime_get()); log->name = name; log->line = line; @@ -106,64 +65,79 @@ void sde_evtlog(const char *name, int line, int flag, ...) } va_end(args); log->data_cnt = i; - sde_dbg_evtlog.curr = (sde_dbg_evtlog.curr + 1) % SDE_EVTLOG_ENTRY; - sde_dbg_evtlog.last++; + evtlog->curr = (evtlog->curr + 1) % SDE_EVTLOG_ENTRY; + evtlog->last++; trace_sde_evtlog(name, line, i > 0 ? log->data[0] : 0, i > 1 ? log->data[1] : 0); - spin_unlock_irqrestore(&sde_evtloglock, flags); + spin_unlock_irqrestore(&evtlog->spin_lock, flags); } /* always dump the last entries which are not dumped yet */ -static bool _sde_evtlog_dump_calc_range(void) +static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog, + bool update_last_entry) { - static u32 next; bool need_dump = true; unsigned long flags; - struct sde_dbg_evtlog *evtlog = &sde_dbg_evtlog; - spin_lock_irqsave(&sde_evtloglock, flags); + if (!evtlog) + return false; + + spin_lock_irqsave(&evtlog->spin_lock, flags); - evtlog->first = next; + evtlog->first = evtlog->next; - if (evtlog->last == evtlog->first) { + if (update_last_entry) + evtlog->last_dump = evtlog->last; + + if (evtlog->last_dump == evtlog->first) { need_dump = false; goto dump_exit; } - if (evtlog->last < evtlog->first) { + if (evtlog->last_dump < evtlog->first) { evtlog->first %= SDE_EVTLOG_ENTRY; - if (evtlog->last < evtlog->first) - evtlog->last += SDE_EVTLOG_ENTRY; + if (evtlog->last_dump < evtlog->first) + evtlog->last_dump += SDE_EVTLOG_ENTRY; } - if ((evtlog->last - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) { - pr_warn("evtlog buffer overflow before dump: %d\n", - evtlog->last - evtlog->first); - evtlog->first = evtlog->last - SDE_EVTLOG_PRINT_ENTRY; + if ((evtlog->last_dump - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) { + pr_info("evtlog skipping %d entries, last=%d\n", + evtlog->last_dump - evtlog->first - + SDE_EVTLOG_PRINT_ENTRY, + evtlog->last_dump - 1); + evtlog->first = evtlog->last_dump - SDE_EVTLOG_PRINT_ENTRY; } - next = evtlog->first + 1; + evtlog->next = evtlog->first + 1; dump_exit: - spin_unlock_irqrestore(&sde_evtloglock, flags); + spin_unlock_irqrestore(&evtlog->spin_lock, flags); return need_dump; } -static ssize_t sde_evtlog_dump_entry(char *evtlog_buf, ssize_t evtlog_buf_size) +ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog, + char *evtlog_buf, ssize_t evtlog_buf_size, + bool update_last_entry) { int i; ssize_t off = 0; - struct tlog *log, *prev_log; + struct sde_dbg_evtlog_log *log, *prev_log; unsigned long flags; - spin_lock_irqsave(&sde_evtloglock, flags); + if (!evtlog || !evtlog_buf) + return 0; - log = &sde_dbg_evtlog.logs[sde_dbg_evtlog.first % - SDE_EVTLOG_ENTRY]; + /* update markers, exit if nothing to print */ + if (!_sde_evtlog_dump_calc_range(evtlog, update_last_entry)) + return 0; - prev_log = &sde_dbg_evtlog.logs[(sde_dbg_evtlog.first - 1) % + spin_lock_irqsave(&evtlog->spin_lock, flags); + + log = &evtlog->logs[evtlog->first % SDE_EVTLOG_ENTRY]; + + prev_log = &evtlog->logs[(evtlog->first - 1) % SDE_EVTLOG_ENTRY]; off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d", @@ -175,7 +149,7 @@ static 
ssize_t sde_evtlog_dump_entry(char *evtlog_buf, ssize_t evtlog_buf_size) } off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), - "=>[%-8d:%-11llu:%9llu][%-4d]:", sde_dbg_evtlog.first, + "=>[%-8d:%-11llu:%9llu][%-4d]:", evtlog->first, log->time, (log->time - prev_log->time), log->pid); for (i = 0; i < log->data_cnt; i++) @@ -184,143 +158,41 @@ static ssize_t sde_evtlog_dump_entry(char *evtlog_buf, ssize_t evtlog_buf_size) off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n"); - spin_unlock_irqrestore(&sde_evtloglock, flags); + spin_unlock_irqrestore(&evtlog->spin_lock, flags); return off; } -static void _sde_evtlog_dump_all(void) -{ - char evtlog_buf[SDE_EVTLOG_BUF_MAX]; - - while (_sde_evtlog_dump_calc_range()) { - sde_evtlog_dump_entry(evtlog_buf, SDE_EVTLOG_BUF_MAX); - pr_info("%s", evtlog_buf); - } -} - -static void _sde_dump_array(bool dead, const char *name) -{ - _sde_evtlog_dump_all(); - - if (dead && sde_dbg_evtlog.panic_on_err) - panic(name); -} - -static void _sde_dump_work(struct work_struct *work) +void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog) { - _sde_dump_array(sde_dbg_evtlog.work_panic, "evtlog_workitem"); -} + char buf[SDE_EVTLOG_BUF_MAX]; + bool update_last_entry = true; -void sde_dbg_dump(bool queue, const char *name, ...) -{ - int i; - bool dead = false; - va_list args; - char *blk_name = NULL; - - if (!sde_evtlog_is_enabled(SDE_EVTLOG_DEFAULT)) + if (!evtlog) return; - if (queue && work_pending(&sde_dbg_evtlog.evtlog_dump_work)) - return; - - va_start(args, name); - for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) { - blk_name = va_arg(args, char*); - if (IS_ERR_OR_NULL(blk_name)) - break; - - if (!strcmp(blk_name, "panic")) - dead = true; - } - va_end(args); - - if (queue) { - /* schedule work to dump later */ - sde_dbg_evtlog.work_panic = dead; - schedule_work(&sde_dbg_evtlog.evtlog_dump_work); - } else { - _sde_dump_array(dead, name); + while (sde_evtlog_dump_to_buffer(evtlog, buf, sizeof(buf), + update_last_entry)) { + pr_info("%s", buf); + update_last_entry = false; } } -static int sde_evtlog_dump_open(struct inode *inode, struct file *file) +struct sde_dbg_evtlog *sde_evtlog_init(void) { - /* non-seekable */ - file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); - file->private_data = inode->i_private; - return 0; -} - -static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff, - size_t count, loff_t *ppos) -{ - ssize_t len = 0; - char evtlog_buf[SDE_EVTLOG_BUF_MAX]; - - if (_sde_evtlog_dump_calc_range()) { - len = sde_evtlog_dump_entry(evtlog_buf, SDE_EVTLOG_BUF_MAX); - if (copy_to_user(buff, evtlog_buf, len)) - return -EFAULT; - *ppos += len; - } - - return len; -} - -static ssize_t sde_evtlog_dump_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - _sde_evtlog_dump_all(); - - if (sde_dbg_evtlog.panic_on_err) - panic("sde"); - - return count; -} - -static const struct file_operations sde_evtlog_fops = { - .open = sde_evtlog_dump_open, - .read = sde_evtlog_dump_read, - .write = sde_evtlog_dump_write, -}; - -int sde_evtlog_init(struct dentry *debugfs_root) -{ - int i; - - sde_dbg_evtlog.evtlog = debugfs_create_dir("evt_dbg", debugfs_root); - if (IS_ERR_OR_NULL(sde_dbg_evtlog.evtlog)) { - pr_err("debugfs_create_dir fail, error %ld\n", - PTR_ERR(sde_dbg_evtlog.evtlog)); - sde_dbg_evtlog.evtlog = NULL; - return -ENODEV; - } - - INIT_WORK(&sde_dbg_evtlog.evtlog_dump_work, _sde_dump_work); - sde_dbg_evtlog.work_panic = false; - - for (i = 0; i < SDE_EVTLOG_ENTRY; i++) - 
sde_dbg_evtlog.logs[i].counter = i; - - debugfs_create_file("dump", 0644, sde_dbg_evtlog.evtlog, NULL, - &sde_evtlog_fops); - debugfs_create_u32("enable", 0644, sde_dbg_evtlog.evtlog, - &sde_dbg_evtlog.evtlog_enable); - debugfs_create_u32("panic", 0644, sde_dbg_evtlog.evtlog, - &sde_dbg_evtlog.panic_on_err); + struct sde_dbg_evtlog *evtlog; - sde_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE; - sde_dbg_evtlog.panic_on_err = SDE_DBG_DEFAULT_PANIC; + evtlog = kzalloc(sizeof(*evtlog), GFP_KERNEL); + if (!evtlog) + return ERR_PTR(-ENOMEM); - pr_info("evtlog_status: enable:%d, panic:%d\n", - sde_dbg_evtlog.evtlog_enable, sde_dbg_evtlog.panic_on_err); + spin_lock_init(&evtlog->spin_lock); + evtlog->enable = SDE_EVTLOG_DEFAULT_ENABLE; - return 0; + return evtlog; } -void sde_evtlog_destroy(void) +void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog) { - debugfs_remove(sde_dbg_evtlog.evtlog); + kfree(evtlog); } diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index 4ec04001ae7e222e8787d45cdd02524d8b0214c3..bc3f794555a54d76dccae4fd5bad699c31de7101 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -299,6 +299,22 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .num_protected_regs = 0x20, .busy_mask = 0xFFFFFFFE, }, + { + .gpurev = ADRENO_REV_A509, + .core = 5, + .major = 0, + .minor = 9, + .patchid = ANY_ID, + .features = ADRENO_PREEMPTION | ADRENO_64BIT | + ADRENO_CONTENT_PROTECTION | ADRENO_CPZ_RETENTION, + .pm4fw_name = "a530_pm4.fw", + .pfpfw_name = "a530_pfp.fw", + .zap_name = "a512_zap", + .gpudev = &adreno_a5xx_gpudev, + .gmem_size = (SZ_256K + SZ_16K), + .num_protected_regs = 0x20, + .busy_mask = 0xFFFFFFFE, + }, { .gpurev = ADRENO_REV_A508, .core = 5, diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 7cab049771de30c4fa97acac88bc4fc4aa149a05..45d433e77b0e0e200bee79d4ff12ff02a0639ea8 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1290,7 +1290,7 @@ static void _set_secvid(struct kgsl_device *device) adreno_writereg64(adreno_dev, ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE, ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI, - KGSL_IOMMU_SECURE_BASE); + KGSL_IOMMU_SECURE_BASE(&device->mmu)); adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE, KGSL_IOMMU_SECURE_SIZE); @@ -1693,7 +1693,7 @@ static int adreno_getproperty(struct kgsl_device *device, * anything to mmap(). 
*/ shadowprop.gpuaddr = - (unsigned int) device->memstore.gpuaddr; + (unsigned long)device->memstore.gpuaddr; shadowprop.size = device->memstore.size; /* GSL needs this to be set, even if it appears to be meaningless */ diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 4a0acdcf8844d7cdf3e6a06c275e346f74582763..2a0940bc3c371df72afe42c06f5d353f4bd9fead 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -170,6 +170,7 @@ enum adreno_gpurev { ADRENO_REV_A505 = 505, ADRENO_REV_A506 = 506, ADRENO_REV_A508 = 508, + ADRENO_REV_A509 = 509, ADRENO_REV_A510 = 510, ADRENO_REV_A512 = 512, ADRENO_REV_A530 = 530, @@ -1006,6 +1007,7 @@ static inline int adreno_is_a5xx(struct adreno_device *adreno_dev) ADRENO_TARGET(a505, ADRENO_REV_A505) ADRENO_TARGET(a506, ADRENO_REV_A506) ADRENO_TARGET(a508, ADRENO_REV_A508) +ADRENO_TARGET(a509, ADRENO_REV_A509) ADRENO_TARGET(a510, ADRENO_REV_A510) ADRENO_TARGET(a512, ADRENO_REV_A512) ADRENO_TARGET(a530, ADRENO_REV_A530) diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 78f74b8838777c31e1e8edb9be9a599ba5b5600c..4daf1fad6ee1e6c2c9533b3db4cec38d8248ee72 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -59,6 +59,7 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = { { adreno_is_a530, a530_vbif }, { adreno_is_a512, a540_vbif }, { adreno_is_a510, a530_vbif }, + { adreno_is_a509, a540_vbif }, { adreno_is_a508, a530_vbif }, { adreno_is_a505, a530_vbif }, { adreno_is_a506, a530_vbif }, @@ -161,6 +162,7 @@ static const struct { { adreno_is_a530, a530_efuse_speed_bin }, { adreno_is_a505, a530_efuse_speed_bin }, { adreno_is_a512, a530_efuse_speed_bin }, + { adreno_is_a509, a530_efuse_speed_bin }, { adreno_is_a508, a530_efuse_speed_bin }, }; @@ -201,7 +203,8 @@ static void a5xx_platform_setup(struct adreno_device *adreno_dev) gpudev->vbif_xin_halt_ctrl0_mask = A510_VBIF_XIN_HALT_CTRL0_MASK; } else if (adreno_is_a540(adreno_dev) || - adreno_is_a512(adreno_dev)) { + adreno_is_a512(adreno_dev) || + adreno_is_a509(adreno_dev)) { gpudev->snapshot_data->sect_sizes->cp_merciu = 1024; } @@ -539,7 +542,8 @@ static void a5xx_regulator_disable(struct adreno_device *adreno_dev) unsigned int reg; struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - if (adreno_is_a512(adreno_dev) || adreno_is_a508(adreno_dev)) + if (adreno_is_a512(adreno_dev) || adreno_is_a509(adreno_dev) || + adreno_is_a508(adreno_dev)) return; /* If feature is not supported or not enabled */ @@ -1199,6 +1203,7 @@ static const struct { { adreno_is_a540, a540_hwcg_regs, ARRAY_SIZE(a540_hwcg_regs) }, { adreno_is_a530, a530_hwcg_regs, ARRAY_SIZE(a530_hwcg_regs) }, { adreno_is_a512, a512_hwcg_regs, ARRAY_SIZE(a512_hwcg_regs) }, + { adreno_is_a509, a512_hwcg_regs, ARRAY_SIZE(a512_hwcg_regs) }, { adreno_is_a510, a510_hwcg_regs, ARRAY_SIZE(a510_hwcg_regs) }, { adreno_is_a505, a50x_hwcg_regs, ARRAY_SIZE(a50x_hwcg_regs) 
}, { adreno_is_a506, a50x_hwcg_regs, ARRAY_SIZE(a50x_hwcg_regs) }, @@ -1376,31 +1381,27 @@ static int _execute_reg_sequence(struct adreno_device *adreno_dev, /* todo double check the reg writes */ while ((cur - opcode) < length) { - switch (cur[0]) { - /* Write a 32 bit value to a 64 bit reg */ - case 1: + if (cur[0] == 1 && (length - (cur - opcode) >= 4)) { + /* Write a 32 bit value to a 64 bit reg */ reg = cur[2]; reg = (reg << 32) | cur[1]; kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, cur[3]); cur += 4; - break; - /* Write a 64 bit value to a 64 bit reg */ - case 2: + } else if (cur[0] == 2 && (length - (cur - opcode) >= 5)) { + /* Write a 64 bit value to a 64 bit reg */ reg = cur[2]; reg = (reg << 32) | cur[1]; val = cur[4]; val = (val << 32) | cur[3]; kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, val); cur += 5; - break; - /* Delay for X usec */ - case 3: + } else if (cur[0] == 3 && (length - (cur - opcode) >= 2)) { + /* Delay for X usec */ udelay(cur[1]); cur += 2; - break; - default: + } else return -EINVAL; - } } + } return 0; } @@ -1655,7 +1656,7 @@ static void a5xx_clk_set_options(struct adreno_device *adreno_dev, { if (!adreno_is_a540(adreno_dev) && !adreno_is_a512(adreno_dev) && - !adreno_is_a508(adreno_dev)) + !adreno_is_a508(adreno_dev) && !adreno_is_a509(adreno_dev)) return; /* Handle clock settings for GFX PSCBCs */ @@ -1961,7 +1962,8 @@ static void a5xx_start(struct adreno_device *adreno_dev) kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x20); kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030); kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A); - } else if (adreno_is_a540(adreno_dev) || adreno_is_a512(adreno_dev)) { + } else if (adreno_is_a540(adreno_dev) || adreno_is_a512(adreno_dev) || + adreno_is_a509(adreno_dev)) { kgsl_regwrite(device, A5XX_CP_MEQ_THRESHOLDS, 0x40); kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x400); kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); @@ -1980,7 +1982,8 @@ static void a5xx_start(struct adreno_device *adreno_dev) if (adreno_is_a505_or_a506(adreno_dev) || adreno_is_a508(adreno_dev)) kgsl_regwrite(device, A5XX_PC_DBG_ECO_CNTL, (0x100 << 11 | 0x100 << 22)); - else if (adreno_is_a510(adreno_dev) || adreno_is_a512(adreno_dev)) + else if (adreno_is_a510(adreno_dev) || adreno_is_a512(adreno_dev) || + adreno_is_a509(adreno_dev)) kgsl_regwrite(device, A5XX_PC_DBG_ECO_CNTL, (0x200 << 11 | 0x200 << 22)); else @@ -2073,7 +2076,8 @@ static void a5xx_start(struct adreno_device *adreno_dev) kgsl_regwrite(device, A5XX_TPL1_MODE_CNTL, bit << 7); kgsl_regwrite(device, A5XX_RB_MODE_CNTL, bit << 1); if (adreno_is_a540(adreno_dev) || - adreno_is_a512(adreno_dev)) + adreno_is_a512(adreno_dev) || + adreno_is_a509(adreno_dev)) kgsl_regwrite(device, A5XX_UCHE_DBG_ECO_CNTL_2, bit); } @@ -2489,8 +2493,8 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev, adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL, A5XX_CP_RB_CNTL_DEFAULT); - adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE, - rb->buffer_desc.gpuaddr); + adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE, + ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr); ret = a5xx_microcode_load(adreno_dev); if (ret) diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 57d99c4519528d722fe62d15e2883fd5dce8be1d..91465e4e251be4f52ca5342afecb20e9653680ec 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. 
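The _execute_reg_sequence() hunk above replaces the switch with an if/else chain so that each opcode verifies the remaining length of the sequence before it touches its operands. Below is a minimal standalone sketch of that bounds-checked walk; the function name and the printf stand-ins for kgsl_regwrite()/udelay() are illustrative only, not the driver's API. The point is that an opcode with too few trailing words fails the length test and the whole sequence is rejected instead of being read past its end.

#include <stdint.h>
#include <stdio.h>

static int run_sequence(const uint32_t *opcode, uint32_t length)
{
	const uint32_t *cur = opcode;

	while ((uint32_t)(cur - opcode) < length) {
		uint32_t remaining = length - (uint32_t)(cur - opcode);

		if (cur[0] == 1 && remaining >= 4) {
			/* 32-bit value to a 64-bit reg: [1, reg_lo, reg_hi, val] */
			uint64_t reg = ((uint64_t)cur[2] << 32) | cur[1];

			printf("write32 reg=%#llx val=%#x\n",
			       (unsigned long long)reg, (unsigned)cur[3]);
			cur += 4;
		} else if (cur[0] == 2 && remaining >= 5) {
			/* 64-bit value to a 64-bit reg: [2, reg_lo, reg_hi, val_lo, val_hi] */
			uint64_t reg = ((uint64_t)cur[2] << 32) | cur[1];
			uint64_t val = ((uint64_t)cur[4] << 32) | cur[3];

			printf("write64 reg=%#llx val=%#llx\n",
			       (unsigned long long)reg, (unsigned long long)val);
			cur += 5;
		} else if (cur[0] == 3 && remaining >= 2) {
			/* delay for cur[1] usec */
			printf("delay %u usec\n", (unsigned)cur[1]);
			cur += 2;
		} else {
			/* unknown opcode, or operands truncated */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	/* one write32 followed by one delay; 6 words total */
	const uint32_t seq[] = { 1, 0x100, 0x0, 0xdead, 3, 10 };

	return run_sequence(seq, 6);
}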
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -38,9 +38,10 @@ #define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu)) -#define ADDR_IN_GLOBAL(_a) \ - (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \ - ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE))) +#define ADDR_IN_GLOBAL(_mmu, _a) \ + (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)) && \ + ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) + \ + KGSL_IOMMU_GLOBAL_MEM_SIZE))) static struct kgsl_mmu_pt_ops iommu_pt_ops; static bool need_iommu_sync; @@ -200,7 +201,7 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu, BUG_ON(global_pt_count >= GLOBAL_PT_ENTRIES); BUG_ON((global_pt_alloc + memdesc->size) >= KGSL_IOMMU_GLOBAL_MEM_SIZE); - memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE + global_pt_alloc; + memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + global_pt_alloc; memdesc->priv |= KGSL_MEMDESC_GLOBAL; global_pt_alloc += memdesc->size; @@ -213,7 +214,7 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu, void kgsl_add_global_secure_entry(struct kgsl_device *device, struct kgsl_memdesc *memdesc) { - memdesc->gpuaddr = KGSL_IOMMU_SECURE_BASE; + memdesc->gpuaddr = KGSL_IOMMU_SECURE_BASE(&device->mmu); kgsl_global_secure_pt_entry = memdesc; } @@ -686,7 +687,7 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr, /* Set the maximum possible size as an initial value */ nextentry->gpuaddr = (uint64_t) -1; - if (ADDR_IN_GLOBAL(faultaddr)) { + if (ADDR_IN_GLOBAL(mmu, faultaddr)) { _get_global_entries(faultaddr, preventry, nextentry); } else if (context) { private = context->proc_priv; @@ -1056,14 +1057,14 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu, unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ? 
kgsl_global_secure_pt_entry->size : 0; if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) { - pt->compat_va_start = KGSL_IOMMU_SECURE_BASE + + pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu) + secure_global_size; - pt->compat_va_end = KGSL_IOMMU_SECURE_END; - pt->va_start = KGSL_IOMMU_SECURE_BASE + secure_global_size; - pt->va_end = KGSL_IOMMU_SECURE_END; + pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu); + pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu) + secure_global_size; + pt->va_end = KGSL_IOMMU_SECURE_END(mmu); } else { pt->compat_va_start = KGSL_IOMMU_SVM_BASE32; - pt->compat_va_end = KGSL_IOMMU_SVM_END32; + pt->compat_va_end = KGSL_IOMMU_SECURE_BASE(mmu); pt->va_start = KGSL_IOMMU_VA_BASE64; pt->va_end = KGSL_IOMMU_VA_END64; } @@ -1072,7 +1073,7 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu, pagetable->name != KGSL_MMU_SECURE_PT) { if ((BITS_PER_LONG == 32) || is_compat_task()) { pt->svm_start = KGSL_IOMMU_SVM_BASE32; - pt->svm_end = KGSL_IOMMU_SVM_END32; + pt->svm_end = KGSL_IOMMU_SECURE_BASE(mmu); } else { pt->svm_start = KGSL_IOMMU_SVM_BASE64; pt->svm_end = KGSL_IOMMU_SVM_END64; @@ -1088,22 +1089,22 @@ static void setup_32bit_pagetable(struct kgsl_mmu *mmu, kgsl_global_secure_pt_entry->size : 0; if (mmu->secured) { if (pagetable->name == KGSL_MMU_SECURE_PT) { - pt->compat_va_start = KGSL_IOMMU_SECURE_BASE + + pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu) + secure_global_size; - pt->compat_va_end = KGSL_IOMMU_SECURE_END; - pt->va_start = KGSL_IOMMU_SECURE_BASE + + pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu); + pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu) + secure_global_size; - pt->va_end = KGSL_IOMMU_SECURE_END; + pt->va_end = KGSL_IOMMU_SECURE_END(mmu); } else { pt->va_start = KGSL_IOMMU_SVM_BASE32; - pt->va_end = KGSL_IOMMU_SECURE_BASE + + pt->va_end = KGSL_IOMMU_SECURE_BASE(mmu) + secure_global_size; pt->compat_va_start = pt->va_start; pt->compat_va_end = pt->va_end; } } else { pt->va_start = KGSL_IOMMU_SVM_BASE32; - pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE; + pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu); pt->compat_va_start = pt->va_start; pt->compat_va_end = pt->va_end; } @@ -2354,7 +2355,8 @@ static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable, struct rb_node *node; /* Make sure the requested address doesn't fall in the global range */ - if (ADDR_IN_GLOBAL(gpuaddr) || ADDR_IN_GLOBAL(gpuaddr + size)) + if (ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr) || + ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr + size)) return -ENOMEM; spin_lock(&pagetable->lock); diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h index 06f6d65effad482b4a76a3a08b819505e33c283a..a21e74f92d7c049940d0af7671d97e2b239ccc83 100644 --- a/drivers/gpu/msm/kgsl_iommu.h +++ b/drivers/gpu/msm/kgsl_iommu.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2016,2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -24,12 +24,17 @@ * are mapped into all pagetables. */ #define KGSL_IOMMU_GLOBAL_MEM_SIZE SZ_8M -#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xf8000000 +#define KGSL_IOMMU_GLOBAL_MEM_BASE32 0xf8000000 +#define KGSL_IOMMU_GLOBAL_MEM_BASE64 0xfc000000 + +#define KGSL_IOMMU_GLOBAL_MEM_BASE(__mmu) \ + (MMU_FEATURE(__mmu, KGSL_MMU_64BIT) ? 
\ + KGSL_IOMMU_GLOBAL_MEM_BASE64 : KGSL_IOMMU_GLOBAL_MEM_BASE32) #define KGSL_IOMMU_SECURE_SIZE SZ_256M -#define KGSL_IOMMU_SECURE_END KGSL_IOMMU_GLOBAL_MEM_BASE -#define KGSL_IOMMU_SECURE_BASE \ - (KGSL_IOMMU_GLOBAL_MEM_BASE - KGSL_IOMMU_SECURE_SIZE) +#define KGSL_IOMMU_SECURE_END(_mmu) KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) +#define KGSL_IOMMU_SECURE_BASE(_mmu) \ + (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) - KGSL_IOMMU_SECURE_SIZE) #define KGSL_IOMMU_SVM_BASE32 0x300000 #define KGSL_IOMMU_SVM_END32 (0xC0000000 - SZ_16M) diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 0df32fe0e3459a0db81661a9241bdd3f12d801d2..b0eeb5090c91ef9de283b283626e497b1a228468 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -971,6 +971,8 @@ static int usbhid_parse(struct hid_device *hid) unsigned int rsize = 0; char *rdesc; int ret, n; + int num_descriptors; + size_t offset = offsetof(struct hid_descriptor, desc); quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); @@ -993,10 +995,18 @@ static int usbhid_parse(struct hid_device *hid) return -ENODEV; } + if (hdesc->bLength < sizeof(struct hid_descriptor)) { + dbg_hid("hid descriptor is too short\n"); + return -EINVAL; + } + hid->version = le16_to_cpu(hdesc->bcdHID); hid->country = hdesc->bCountryCode; - for (n = 0; n < hdesc->bNumDescriptors; n++) + num_descriptors = min_t(int, hdesc->bNumDescriptors, + (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor)); + + for (n = 0; n < num_descriptors; n++) if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c index 15c931bbbf65ccdef26024362c332768488521f3..7fe4b1879738b48f1af7f83280a7db13ce87e52b 100644 --- a/drivers/leds/leds-qpnp-flash-v2.c +++ b/drivers/leds/leds-qpnp-flash-v2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
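The usbhid_parse() change above stops trusting bNumDescriptors on its own: it first requires the fixed part of the HID descriptor to be present, then caps the count at however many class descriptors actually fit inside bLength. A sketch of that clamp follows; the packed structs are simplified stand-ins that mirror the kernel's field names, not the real headers.

#include <stddef.h>
#include <stdint.h>

struct hid_class_descriptor {
	uint8_t  bDescriptorType;
	uint16_t wDescriptorLength;
} __attribute__((packed));

struct hid_descriptor {
	uint8_t  bLength;
	uint8_t  bDescriptorType;
	uint16_t bcdHID;
	uint8_t  bCountryCode;
	uint8_t  bNumDescriptors;
	struct hid_class_descriptor desc[1];
} __attribute__((packed));

static int usable_descriptors(const struct hid_descriptor *hdesc)
{
	size_t offset = offsetof(struct hid_descriptor, desc);
	int num;

	/* the fixed part must be present before desc[] can be trusted */
	if (hdesc->bLength < sizeof(struct hid_descriptor))
		return -1;

	/* never walk past what the device actually claims to have sent */
	num = (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor);
	if (num > hdesc->bNumDescriptors)
		num = hdesc->bNumDescriptors;
	return num;
}

int main(void)
{
	struct hid_descriptor h = {
		.bLength = sizeof(struct hid_descriptor),
		.bNumDescriptors = 4,	/* claims 4, but only one fits */
	};

	return usable_descriptors(&h) == 1 ? 0 : 1;
}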
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -386,7 +386,7 @@ led_brightness qpnp_flash_led_brightness_get(struct led_classdev *led_cdev) static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) { int rc, i, addr_offset; - u8 val = 0, mask; + u8 val = 0, mask, strobe_mask = 0; for (i = 0; i < led->num_fnodes; i++) { addr_offset = led->fnode[i].id; @@ -397,6 +397,31 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) return rc; val |= 0x1 << led->fnode[i].id; + + if (led->fnode[i].strobe_sel == HW_STROBE) { + if (led->fnode[i].id == LED3) + strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT; + else + strobe_mask |= LED1N2_FLASH_ONCE_ONLY_BIT; + } + + if (led->fnode[i].id == LED3 && + led->fnode[i].strobe_sel == LPG_STROBE) + strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT; + } + + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_MULTI_STROBE_CTRL(led->base), + strobe_mask, 0); + if (rc < 0) + return rc; + + if (led->fnode[LED3].strobe_sel == LPG_STROBE) { + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_LPG_INPUT_CTRL(led->base), + LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT); + if (rc < 0) + return rc; } rc = qpnp_flash_led_write(led, @@ -590,19 +615,6 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) return rc; } - if (led->fnode[LED3].strobe_sel == LPG_STROBE) { - rc = qpnp_flash_led_masked_write(led, - FLASH_LED_REG_MULTI_STROBE_CTRL(led->base), - LED3_FLASH_ONCE_ONLY_BIT, 0); - if (rc < 0) - return rc; - - rc = qpnp_flash_led_masked_write(led, - FLASH_LED_REG_LPG_INPUT_CTRL(led->base), - LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT); - if (rc < 0) - return rc; - } return 0; } diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.c b/drivers/media/platform/msm/ais/isp/msm_isp47.c index 1e3f38fd3859b8a9c7d08112180717f9ae0bbf68..d8cc1b75d0b522bc00c97d912a85c8b1248d62b5 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp47.c +++ b/drivers/media/platform/msm/ais/isp/msm_isp47.c @@ -1032,16 +1032,18 @@ int msm_vfe47_start_fetch_engine(struct vfe_device *vfe_dev, vfe_dev->buf_mgr, fe_cfg->session_id, fe_cfg->stream_id); vfe_dev->fetch_engine_info.bufq_handle = bufq_handle; - + mutex_lock(&vfe_dev->buf_mgr->lock); rc = vfe_dev->buf_mgr->ops->get_buf_by_index( vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf); if (rc < 0 || !buf) { pr_err("%s: No fetch buffer rc= %d buf= %pK\n", __func__, rc, buf); + mutex_unlock(&vfe_dev->buf_mgr->lock); return -EINVAL; } mapped_info = buf->mapped_info[0]; buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED; + mutex_unlock(&vfe_dev->buf_mgr->lock); } else { rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr, &mapped_info, fe_cfg->fd); @@ -1094,14 +1096,15 @@ int msm_vfe47_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev, mutex_lock(&vfe_dev->buf_mgr->lock); rc = vfe_dev->buf_mgr->ops->get_buf_by_index( vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf); - mutex_unlock(&vfe_dev->buf_mgr->lock); if (rc < 0 || !buf) { pr_err("%s: No fetch buffer rc= %d buf= %pK\n", __func__, rc, buf); + mutex_unlock(&vfe_dev->buf_mgr->lock); return -EINVAL; } mapped_info = buf->mapped_info[0]; buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED; + mutex_unlock(&vfe_dev->buf_mgr->lock); } else { rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr, &mapped_info, fe_cfg->fd); diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c index 
a85ee30769c47213fa9e503deb3538793e5259d2..8c401f8b5e215fbfa313b5ab0b1727eb91241cd1 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -3821,10 +3821,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) &update_cmd->update_info[i]; stream_info = &axi_data->stream_info[HANDLE_TO_IDX( update_info->stream_handle)]; + mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_request_frame(vfe_dev, stream_info, update_info->user_stream_id, update_info->frame_id, MSM_ISP_INVALID_BUF_INDEX); + mutex_unlock(&vfe_dev->buf_mgr->lock); if (rc) pr_err("%s failed to request frame!\n", __func__); @@ -3897,10 +3899,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) stream_info = &axi_data->stream_info[HANDLE_TO_IDX( req_frm->stream_handle)]; + mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_request_frame(vfe_dev, stream_info, req_frm->user_stream_id, req_frm->frame_id, req_frm->buf_index); + mutex_unlock(&vfe_dev->buf_mgr->lock); if (rc) pr_err("%s failed to request frame!\n", __func__); diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_util.c index 9e5317eb29201f2aded6a51ac4e1d7ba65e6065e..ec8224b837edf0ae039dae45a1ee4a362c6cf969 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp_util.c +++ b/drivers/media/platform/msm/ais/isp/msm_isp_util.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
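The ISP hunks above take vfe_dev->buf_mgr->lock around msm_isp_request_frame() and the fetch-engine buffer lookups; the recurring fix in this patch is that every early return inside the locked region now drops the lock first. A compressed sketch of that shape, with pthread_mutex_t standing in for the kernel mutex and purely illustrative buffer types:

#include <pthread.h>
#include <stddef.h>

#define BUF_STATE_DISPATCHED 1

struct isp_buf {
	int state;	/* 0 = queued, BUF_STATE_DISPATCHED once handed out */
};

struct buf_mgr {
	pthread_mutex_t lock;
	struct isp_buf *bufs;
	unsigned int num_bufs;
};

static int fetch_buf_by_index(struct buf_mgr *mgr, unsigned int idx,
			      struct isp_buf **out)
{
	pthread_mutex_lock(&mgr->lock);

	if (!mgr->bufs || idx >= mgr->num_bufs) {
		/* every early return drops the lock before leaving */
		pthread_mutex_unlock(&mgr->lock);
		return -1;
	}

	*out = &mgr->bufs[idx];
	(*out)->state = BUF_STATE_DISPATCHED;

	pthread_mutex_unlock(&mgr->lock);
	return 0;
}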
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -392,9 +392,10 @@ static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev, vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 1); msm_isp_reset_framedrop(vfe_dev, stream_info); - + mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info, VFE_PING_FLAG, fe_cfg->output_buf_idx); + mutex_unlock(&vfe_dev->buf_mgr->lock); if (rc < 0) { pr_err("%s: Fetch engine config failed\n", __func__); return -EINVAL; @@ -917,7 +918,9 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd, break; case VIDIOC_MSM_ISP_CFG_STREAM: mutex_lock(&vfe_dev->core_mutex); + mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_cfg_axi_stream(vfe_dev, arg); + mutex_unlock(&vfe_dev->buf_mgr->lock); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_CFG_HW_STATE: @@ -947,6 +950,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd, break; case VIDIOC_MSM_ISP_AXI_RESTART: mutex_lock(&vfe_dev->core_mutex); + mutex_lock(&vfe_dev->buf_mgr->lock); if (atomic_read(&vfe_dev->error_info.overflow_state) != HALT_ENFORCED) { rc = msm_isp_stats_restart(vfe_dev); @@ -957,6 +961,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd, pr_err_ratelimited("%s: no AXI restart, halt enforced.\n", __func__); } + mutex_unlock(&vfe_dev->buf_mgr->lock); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_INPUT_CFG: @@ -1016,7 +1021,9 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd, break; case VIDIOC_MSM_ISP_CFG_STATS_STREAM: mutex_lock(&vfe_dev->core_mutex); + mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_cfg_stats_stream(vfe_dev, arg); + mutex_unlock(&vfe_dev->buf_mgr->lock); mutex_unlock(&vfe_dev->core_mutex); break; case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM: diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h index d336e1ef1bd742a12c2a7ee5184ff2b6d2172316..acf0a90ed93dcf60fe0aa00ffe9f92be723fea8e 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -155,9 +155,11 @@ struct msm_vfe_irq_ops { struct msm_isp_timestamp *ts); void (*process_axi_irq)(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1, + uint32_t pingpong_status, struct msm_isp_timestamp *ts); void (*process_stats_irq)(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1, + uint32_t pingpong_status, struct msm_isp_timestamp *ts); void (*config_irq)(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1, @@ -596,6 +598,7 @@ struct msm_vfe_tasklet_queue_cmd { struct list_head list; uint32_t vfeInterruptStatus0; uint32_t vfeInterruptStatus1; + uint32_t vfe_pingpong_status; struct msm_isp_timestamp ts; uint8_t cmd_used; struct vfe_device *vfe_dev; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c index f4d68b69f94976e4295ba403ea1be476669dc857..2eab3dd5f812f276e12151659033540bd253d2a2 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c @@ -1051,15 +1051,18 @@ static int msm_vfe40_start_fetch_engine(struct vfe_device *vfe_dev, fe_cfg->stream_id); vfe_dev->fetch_engine_info.bufq_handle = bufq_handle; + mutex_lock(&vfe_dev->buf_mgr->lock); rc = vfe_dev->buf_mgr->ops->get_buf_by_index( vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf); if (rc < 0 || !buf) { pr_err("%s: No fetch buffer rc= %d buf= %pK\n", __func__, rc, buf); + mutex_unlock(&vfe_dev->buf_mgr->lock); return -EINVAL; } mapped_info = buf->mapped_info[0]; buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED; + mutex_unlock(&vfe_dev->buf_mgr->lock); } else { rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr, &mapped_info, fe_cfg->fd); @@ -1112,14 +1115,15 @@ static int msm_vfe40_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev, mutex_lock(&vfe_dev->buf_mgr->lock); rc = vfe_dev->buf_mgr->ops->get_buf_by_index( vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf); - mutex_unlock(&vfe_dev->buf_mgr->lock); if (rc < 0 || !buf) { pr_err("%s: No fetch buffer rc= %d buf= %pK\n", __func__, rc, buf); + mutex_unlock(&vfe_dev->buf_mgr->lock); return -EINVAL; } mapped_info = buf->mapped_info[0]; buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED; + mutex_unlock(&vfe_dev->buf_mgr->lock); } else { rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr, &mapped_info, fe_cfg->fd); diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c index 1d50354681346b8fb50cc2e85c9e25df63b693d6..3b8de1a13c88a3b3c41931f26ddef6c82de2ed32 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c @@ -895,13 +895,14 @@ static int msm_vfe44_fetch_engine_start(struct vfe_device *vfe_dev, mutex_lock(&vfe_dev->buf_mgr->lock); rc = vfe_dev->buf_mgr->ops->get_buf_by_index( vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf); - mutex_unlock(&vfe_dev->buf_mgr->lock); if (rc < 0) { pr_err("%s: No fetch buffer\n", __func__); + mutex_unlock(&vfe_dev->buf_mgr->lock); return -EINVAL; } mapped_info = buf->mapped_info[0]; buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED; + mutex_unlock(&vfe_dev->buf_mgr->lock); } else { rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr, &mapped_info, fe_cfg->fd); diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c 
b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c index 42787c6c47dbeaa3b32ede4590d28cf0904602c5..f8866b01e617f47c0f5e545078e48a7efa3d6675 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c @@ -836,14 +836,15 @@ static int msm_vfe46_start_fetch_engine(struct vfe_device *vfe_dev, mutex_lock(&vfe_dev->buf_mgr->lock); rc = vfe_dev->buf_mgr->ops->get_buf_by_index( vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf); - mutex_unlock(&vfe_dev->buf_mgr->lock); if (rc < 0 || !buf) { pr_err("%s: No fetch buffer rc= %d buf= %pK\n", __func__, rc, buf); + mutex_unlock(&vfe_dev->buf_mgr->lock); return -EINVAL; } mapped_info = buf->mapped_info[0]; buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED; + mutex_unlock(&vfe_dev->buf_mgr->lock); } else { rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr, &mapped_info, fe_cfg->fd); diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c index b362476162038bf97394123085e721ac01ffc1c3..0e091f67681f1c6ed592af28acc272ae5c71c05b 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c @@ -570,6 +570,7 @@ void msm_vfe47_process_error_status(struct vfe_device *vfe_dev) void msm_vfe47_read_and_clear_irq_status(struct vfe_device *vfe_dev, uint32_t *irq_status0, uint32_t *irq_status1) { + uint32_t count = 0; *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x6C); *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x70); /* Mask off bits that are not enabled */ @@ -578,6 +579,14 @@ void msm_vfe47_read_and_clear_irq_status(struct vfe_device *vfe_dev, msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58); *irq_status0 &= vfe_dev->irq0_mask; *irq_status1 &= vfe_dev->irq1_mask; + /* check if status register is cleared if not clear again*/ + while (*irq_status0 && + (*irq_status0 & msm_camera_io_r(vfe_dev->vfe_base + 0x6C)) && + (count < MAX_RECOVERY_THRESHOLD)) { + msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x64); + msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58); + count++; + } if (*irq_status1 & (1 << 0)) { vfe_dev->error_info.camif_status = @@ -1095,15 +1104,18 @@ int msm_vfe47_start_fetch_engine(struct vfe_device *vfe_dev, fe_cfg->stream_id); vfe_dev->fetch_engine_info.bufq_handle = bufq_handle; + mutex_lock(&vfe_dev->buf_mgr->lock); rc = vfe_dev->buf_mgr->ops->get_buf_by_index( vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf); if (rc < 0 || !buf) { pr_err("%s: No fetch buffer rc= %d buf= %pK\n", __func__, rc, buf); + mutex_unlock(&vfe_dev->buf_mgr->lock); return -EINVAL; } mapped_info = buf->mapped_info[0]; buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED; + mutex_unlock(&vfe_dev->buf_mgr->lock); } else { rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr, &mapped_info, fe_cfg->fd); @@ -1156,14 +1168,15 @@ int msm_vfe47_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev, mutex_lock(&vfe_dev->buf_mgr->lock); rc = vfe_dev->buf_mgr->ops->get_buf_by_index( vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf); - mutex_unlock(&vfe_dev->buf_mgr->lock); if (rc < 0 || !buf) { pr_err("%s: No fetch buffer rc= %d buf= %pK\n", __func__, rc, buf); + mutex_unlock(&vfe_dev->buf_mgr->lock); return -EINVAL; } mapped_info = buf->mapped_info[0]; buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED; + mutex_unlock(&vfe_dev->buf_mgr->lock); } else { rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr, &mapped_info, fe_cfg->fd); diff --git 
a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index 18613954088f90164c4c0a6dcbecd27cabe14729..39bb84529f5b013806c77265cbcb4d09551fb302 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -3148,12 +3148,18 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, return -EINVAL; msm_isp_get_timestamp(×tamp, vfe_dev_ioctl); - + mutex_lock(&vfe_dev_ioctl->buf_mgr->lock); for (i = 0; i < stream_cfg_cmd->num_streams; i++) { if (stream_cfg_cmd->stream_handle[i] == 0) continue; stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl, HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])); + + if (!stream_info) { + pr_err("%s: stream_info is NULL", __func__); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); + return -EINVAL; + } if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX) src_state = axi_data->src_info[ SRC_TO_INTF(stream_info->stream_src)].active; @@ -3161,6 +3167,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, else { ISP_DBG("%s: invalid src info index\n", __func__); rc = -EINVAL; + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } spin_lock_irqsave(&stream_info->lock, flags); @@ -3172,6 +3179,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, } if (rc) { spin_unlock_irqrestore(&stream_info->lock, flags); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } @@ -3194,6 +3202,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, HANDLE_TO_IDX( stream_cfg_cmd->stream_handle[i])); spin_unlock_irqrestore(&stream_info->lock, flags); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } for (k = 0; k < stream_info->num_isp; k++) { @@ -3252,6 +3261,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, spin_unlock_irqrestore(&stream_info->lock, flags); streams[num_streams++] = stream_info; } + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); for (i = 0; i < MAX_VFE; i++) { vfe_dev = update_vfes[i]; @@ -4004,10 +4014,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) &update_cmd->update_info[i]; stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(update_info->stream_handle)); + mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_request_frame(vfe_dev, stream_info, update_info->user_stream_id, update_info->frame_id, MSM_ISP_INVALID_BUF_INDEX); + mutex_unlock(&vfe_dev->buf_mgr->lock); if (rc) pr_err("%s failed to request frame!\n", __func__); @@ -4053,10 +4065,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) rc = -EINVAL; break; } + mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_request_frame(vfe_dev, stream_info, req_frm->user_stream_id, req_frm->frame_id, req_frm->buf_index); + mutex_unlock(&vfe_dev->buf_mgr->lock); if (rc) pr_err("%s failed to request frame!\n", __func__); @@ -4255,11 +4269,11 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev, void msm_isp_process_axi_irq(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1, - struct msm_isp_timestamp *ts) + uint32_t pingpong_status, struct msm_isp_timestamp 
*ts) { int i, rc = 0; uint32_t comp_mask = 0, wm_mask = 0; - uint32_t pingpong_status, stream_idx; + uint32_t stream_idx; struct msm_vfe_axi_stream *stream_info; struct msm_vfe_axi_composite_info *comp_info; struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data; @@ -4273,8 +4287,6 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev, return; ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0); - pingpong_status = - vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev); for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) { rc = 0; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h index 0f029c0d517849a323a71c98ff3b1612ca45d995..9794db5a1b9ca4f4740b9da4c01f7c444992a270 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -54,7 +54,7 @@ void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type, void msm_isp_process_axi_irq(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1, - struct msm_isp_timestamp *ts); + uint32_t pingpong_status, struct msm_isp_timestamp *ts); void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev); diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c index f0831e64f250a13af036c9e79e9a1e001bf482d7..3e8220005f774c5b22d214b8382e9786f998a429 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
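Earlier in this patch, msm_vfe47_read_and_clear_irq_status() gains a bounded retry: if the status register still shows the same bits after the first write-1-to-clear, the clear is repeated, capped by MAX_RECOVERY_THRESHOLD, so a re-latching bit cannot spin the IRQ handler forever. A hedged sketch of that workaround; reg_read()/reg_write(), the offsets, and MAX_RECOVERY are stand-ins for the mmio helpers and the driver's threshold, not real symbols.

#include <stdint.h>

#define MAX_RECOVERY 5	/* stand-in for MAX_RECOVERY_THRESHOLD */

/* stand-ins for msm_camera_io_r()/msm_camera_io_w() */
extern uint32_t reg_read(unsigned int offset);
extern void reg_write(uint32_t val, unsigned int offset);

static uint32_t read_and_clear_status(unsigned int status_off,
				      unsigned int clear_off,
				      unsigned int commit_off)
{
	uint32_t status = reg_read(status_off);
	uint32_t count = 0;

	/* first write-1-to-clear pass */
	reg_write(status, clear_off);
	reg_write(1, commit_off);

	/* some bits may latch again before the clear lands; retry, bounded */
	while (status && (status & reg_read(status_off)) &&
	       count < MAX_RECOVERY) {
		reg_write(status, clear_off);
		reg_write(1, commit_off);
		count++;
	}

	return status;
}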
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -256,13 +256,12 @@ static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev, static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev, uint32_t stats_irq_mask, struct msm_isp_timestamp *ts, - bool is_composite) + uint32_t pingpong_status, bool is_composite) { int i, rc = 0; struct msm_isp_event_data buf_event; struct msm_isp_stats_event *stats_event = &buf_event.u.stats; struct msm_vfe_stats_stream *stream_info = NULL; - uint32_t pingpong_status; uint32_t comp_stats_type_mask = 0; int result = 0; @@ -271,8 +270,6 @@ static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev, buf_event.mono_timestamp = ts->buf_time; buf_event.frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id; - pingpong_status = vfe_dev->hw_info-> - vfe_ops.stats_ops.get_pingpong_status(vfe_dev); for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) { if (!(stats_irq_mask & (1 << i))) @@ -309,7 +306,7 @@ static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev, void msm_isp_process_stats_irq(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1, - struct msm_isp_timestamp *ts) + uint32_t pingpong_status, struct msm_isp_timestamp *ts) { int j, rc; uint32_t atomic_stats_mask = 0; @@ -337,7 +334,7 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev, /* Process non-composite irq */ if (stats_irq_mask) { rc = msm_isp_stats_configure(vfe_dev, stats_irq_mask, ts, - comp_flag); + pingpong_status, comp_flag); } /* Process composite irq */ @@ -350,7 +347,7 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev, &vfe_dev->stats_data.stats_comp_mask[j]); rc = msm_isp_stats_configure(vfe_dev, atomic_stats_mask, - ts, !comp_flag); + ts, pingpong_status, !comp_flag); } } } @@ -1105,6 +1102,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, struct vfe_device *vfe_dev; msm_isp_get_timestamp(×tamp, vfe_dev_ioctl); + mutex_lock(&vfe_dev_ioctl->buf_mgr->lock); num_stats_comp_mask = vfe_dev_ioctl->hw_info->stats_hw_info->num_stats_comp_mask; @@ -1123,6 +1121,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, } if (rc) { spin_unlock_irqrestore(&stream_info->lock, flags); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } rc = msm_isp_init_stats_ping_pong_reg( @@ -1130,6 +1129,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, if (rc < 0) { spin_unlock_irqrestore(&stream_info->lock, flags); pr_err("%s: No buffer for stream%d\n", __func__, idx); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); return rc; } init_completion(&stream_info->active_comp); @@ -1164,6 +1164,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, stats_data->num_active_stream); streams[num_stream++] = stream_info; } + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); for (k = 0; k < MAX_VFE; k++) { if (!update_vfes[k] || num_active_streams[k]) diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h index 2e3a24dd1f0d10a3ed481581893fdcd65517238f..3efd5b57a0299645883185ee6bbe1e1d6eefa7b6 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
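A theme running through the ISP changes in this patch is that the ping-pong status is no longer re-read in the tasklet: it is sampled once in the hard IRQ, stored in the queued tasklet command next to the two IRQ status words, and then passed down to the AXI and stats handlers. A minimal sketch of that latch-and-forward flow, with illustrative names rather than the driver's:

#include <stdint.h>

struct tasklet_cmd {
	uint32_t irq_status0;
	uint32_t irq_status1;
	uint32_t pingpong_status;	/* sampled in the hard IRQ, never re-read */
};

/* stand-ins for the register reads and the tasklet queue */
extern uint32_t hw_read_irq_status0(void);
extern uint32_t hw_read_irq_status1(void);
extern uint32_t hw_read_pingpong_status(void);
extern void queue_for_tasklet(const struct tasklet_cmd *cmd);
extern void process_axi(uint32_t s0, uint32_t s1, uint32_t pingpong);

/* hard IRQ context: snapshot everything the bottom half will need */
static void isp_hard_irq(void)
{
	struct tasklet_cmd cmd = {
		.irq_status0     = hw_read_irq_status0(),
		.irq_status1     = hw_read_irq_status1(),
		.pingpong_status = hw_read_pingpong_status(),
	};

	queue_for_tasklet(&cmd);
}

/* bottom half: work from the snapshot instead of re-reading live hardware */
static void isp_tasklet(const struct tasklet_cmd *cmd)
{
	process_axi(cmd->irq_status0, cmd->irq_status1, cmd->pingpong_status);
}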
+/* Copyright (c) 2013-2016, 2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,7 +17,7 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1, - struct msm_isp_timestamp *ts); + uint32_t pingpong_status, struct msm_isp_timestamp *ts); void msm_isp_stats_stream_update(struct vfe_device *vfe_dev); int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg); int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg); diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c index 9cb8d440add41259b48e7ae7ee83b43f55fa9ad5..521bac5580c9bb48465eeb24b8c875ffbbe9cf4c 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c @@ -404,8 +404,10 @@ static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev, vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info); msm_isp_reset_framedrop(vfe_dev, stream_info); + mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info, VFE_PING_FLAG, fe_cfg->output_buf_idx); + mutex_unlock(&vfe_dev->buf_mgr->lock); if (rc < 0) { pr_err("%s: Fetch engine config failed\n", __func__); return -EINVAL; @@ -923,6 +925,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd, case VIDIOC_MSM_ISP_AXI_RESTART: mutex_lock(&vfe_dev->core_mutex); MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev); + mutex_lock(&vfe_dev->buf_mgr->lock); if (atomic_read(&vfe_dev->error_info.overflow_state) != HALT_ENFORCED) { rc = msm_isp_stats_restart(vfe_dev); @@ -933,6 +936,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd, pr_err_ratelimited("%s: no AXI restart, halt enforced.\n", __func__); } + mutex_unlock(&vfe_dev->buf_mgr->lock); MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev); mutex_unlock(&vfe_dev->core_mutex); break; @@ -2062,7 +2066,8 @@ void msm_isp_prepare_tasklet_debug_info(struct vfe_device *vfe_dev, } static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev, - uint32_t irq_status0, uint32_t irq_status1) + uint32_t irq_status0, uint32_t irq_status1, + uint32_t ping_pong_status) { unsigned long flags; struct msm_vfe_tasklet_queue_cmd *queue_cmd = NULL; @@ -2085,8 +2090,8 @@ static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev, } queue_cmd->vfeInterruptStatus0 = irq_status0; queue_cmd->vfeInterruptStatus1 = irq_status1; + queue_cmd->vfe_pingpong_status = ping_pong_status; msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev); - queue_cmd->cmd_used = 1; queue_cmd->vfe_dev = vfe_dev; @@ -2100,7 +2105,7 @@ static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev, irqreturn_t msm_isp_process_irq(int irq_num, void *data) { struct vfe_device *vfe_dev = (struct vfe_device *) data; - uint32_t irq_status0, irq_status1; + uint32_t irq_status0, irq_status1, ping_pong_status; uint32_t error_mask0, error_mask1; vfe_dev->hw_info->vfe_ops.irq_ops. @@ -2111,6 +2116,8 @@ irqreturn_t msm_isp_process_irq(int irq_num, void *data) __func__, vfe_dev->pdev->id); return IRQ_HANDLED; } + ping_pong_status = vfe_dev->hw_info->vfe_ops.axi_ops. 
+ get_pingpong_status(vfe_dev); if (vfe_dev->hw_info->vfe_ops.irq_ops.preprocess_camif_irq) { vfe_dev->hw_info->vfe_ops.irq_ops.preprocess_camif_irq( vfe_dev, irq_status0); @@ -2138,7 +2145,8 @@ irqreturn_t msm_isp_process_irq(int irq_num, void *data) return IRQ_HANDLED; } msm_isp_prepare_irq_debug_info(vfe_dev, irq_status0, irq_status1); - msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1); + msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1, + ping_pong_status); return IRQ_HANDLED; } @@ -2151,7 +2159,7 @@ void msm_isp_do_tasklet(unsigned long data) struct msm_vfe_irq_ops *irq_ops; struct msm_vfe_tasklet_queue_cmd *queue_cmd; struct msm_isp_timestamp ts; - uint32_t irq_status0, irq_status1; + uint32_t irq_status0, irq_status1, pingpong_status; while (1) { spin_lock_irqsave(&tasklet->tasklet_lock, flags); @@ -2167,6 +2175,7 @@ void msm_isp_do_tasklet(unsigned long data) queue_cmd->vfe_dev = NULL; irq_status0 = queue_cmd->vfeInterruptStatus0; irq_status1 = queue_cmd->vfeInterruptStatus1; + pingpong_status = queue_cmd->vfe_pingpong_status; ts = queue_cmd->ts; spin_unlock_irqrestore(&tasklet->tasklet_lock, flags); if (vfe_dev->vfe_open_cnt == 0) { @@ -2191,9 +2200,11 @@ void msm_isp_do_tasklet(unsigned long data) } msm_isp_process_error_info(vfe_dev); irq_ops->process_stats_irq(vfe_dev, - irq_status0, irq_status1, &ts); + irq_status0, irq_status1, + pingpong_status, &ts); irq_ops->process_axi_irq(vfe_dev, - irq_status0, irq_status1, &ts); + irq_status0, irq_status1, + pingpong_status, &ts); irq_ops->process_camif_irq(vfe_dev, irq_status0, irq_status1, &ts); irq_ops->process_reg_update(vfe_dev, diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c index 8d091320cbca459d6e1076bf544530c7ed3bbfda..57bc2cfcb6bf338401bee0daeb251b0a0904ce6a 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c +++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -52,6 +52,7 @@ #define MAX_DPHY_DATA_LN 4 #define CLOCK_OFFSET 0x700 #define CSIPHY_SOF_DEBUG_COUNT 2 +#define GBPS 1000000000 #undef CDBG #define CDBG(fmt, args...) pr_debug(fmt, ##args) @@ -134,8 +135,10 @@ static int msm_csiphy_3phase_lane_config( uint8_t i = 0; uint16_t lane_mask = 0, lane_enable = 0, temp; void __iomem *csiphybase; + uint64_t two_gbps = 0; csiphybase = csiphy_dev->base; + two_gbps = 2 * (uint64_t)csiphy_params->lane_cnt * GBPS; lane_mask = csiphy_params->lane_mask & 0x7; while (lane_mask != 0) { temp = (i << 1)+1; @@ -281,11 +284,20 @@ static int msm_csiphy_3phase_lane_config( csiphy_3ph_reg.mipi_csiphy_3ph_lnn_ctrl51.addr + 0x200*i); } - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_3ph_lnn_ctrl25.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i); + if ((csiphy_dev->hw_version == CSIPHY_VERSION_V35) && + (csiphy_params->data_rate > two_gbps)) { + msm_camera_io_w(0x40, + csiphybase + + csiphy_dev->ctrl_reg->csiphy_3ph_reg. + mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i); + } else { + msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. 
+ mipi_csiphy_3ph_lnn_ctrl25.data, + csiphybase + + csiphy_dev->ctrl_reg->csiphy_3ph_reg. + mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i); + } lane_mask >>= 1; i++; } @@ -782,10 +794,10 @@ static int msm_csiphy_lane_config(struct csiphy_device *csiphy_dev, ratio = csiphy_dev->csiphy_max_clk/clk_rate; csiphy_params->settle_cnt = csiphy_params->settle_cnt/ratio; } - CDBG("%s csiphy_params, mask = 0x%x cnt = %d\n", + CDBG("%s csiphy_params, mask = 0x%x cnt = %d, data rate = %llu\n", __func__, csiphy_params->lane_mask, - csiphy_params->lane_cnt); + csiphy_params->lane_cnt, csiphy_params->data_rate); CDBG("%s csiphy_params, settle cnt = 0x%x csid %d\n", __func__, csiphy_params->settle_cnt, csiphy_params->csid_core); diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c index 1966fa9805c0d14e347f4132253929e2b9189c29..a2381557070d13931dfafcae2afe6cd305ad3e6b 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -482,6 +482,11 @@ static ssize_t sde_rot_evtlog_dump_read(struct file *file, char __user *buff, if (__sde_rot_evtlog_dump_calc_range()) { len = sde_rot_evtlog_dump_entry(evtlog_buf, SDE_ROT_EVTLOG_BUF_MAX); + if (len < 0 || len > count) { + pr_err("len is more than the user buffer size\n"); + return 0; + } + if (copy_to_user(buff, evtlog_buf, len)) return -EFAULT; *ppos += len; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 7ae3a1ea5ebad5752c10ed552968fcab6df0b817..3af6e53b21e752278ef654629bb4d80ce3b19950 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -3116,7 +3116,7 @@ static int set_output_buffers(struct msm_vidc_inst *inst, { int rc = 0; struct msm_smem *handle; - struct internal_buf *binfo; + struct internal_buf *binfo = NULL; u32 smem_flags = 0, buffer_size; struct hal_buffer_requirements *output_buf, *extradata_buf; int i; @@ -3222,10 +3222,10 @@ static int set_output_buffers(struct msm_vidc_inst *inst, } return rc; fail_set_buffers: - kfree(binfo); -fail_kzalloc: msm_comm_smem_free(inst, handle); err_no_mem: + kfree(binfo); +fail_kzalloc: return rc; } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index c2ea4e5666fb965fd089f27cfbdee5715cd66538..c0bcfa9792535e49f729cb0585aca66d185bf418 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -42,7 +42,6 @@ #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> -#include <linux/usb/cdc.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/pm_runtime.h> @@ -1964,143 +1963,6 @@ out: return err; } -int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, - struct usb_interface *intf, - u8 *buffer, - int buflen) -{ - /* duplicates are ignored */ - struct usb_cdc_union_desc *union_header = NULL; - - /* duplicates are not tolerated */ - struct usb_cdc_header_desc *header = NULL; - struct usb_cdc_ether_desc *ether = NULL; - struct usb_cdc_mdlm_detail_desc *detail = NULL; - struct usb_cdc_mdlm_desc *desc = NULL; - - unsigned int elength; - int cnt = 0; - - memset(hdr, 
0x00, sizeof(struct usb_cdc_parsed_header)); - hdr->phonet_magic_present = false; - while (buflen > 0) { - elength = buffer[0]; - if (!elength) { - dev_err(&intf->dev, "skipping garbage byte\n"); - elength = 1; - goto next_desc; - } - if (buffer[1] != USB_DT_CS_INTERFACE) { - dev_err(&intf->dev, "skipping garbage\n"); - goto next_desc; - } - - switch (buffer[2]) { - case USB_CDC_UNION_TYPE: /* we've found it */ - if (elength < sizeof(struct usb_cdc_union_desc)) - goto next_desc; - if (union_header) { - dev_err(&intf->dev, "More than one union descriptor, skipping ...\n"); - goto next_desc; - } - union_header = (struct usb_cdc_union_desc *)buffer; - break; - case USB_CDC_COUNTRY_TYPE: - if (elength < sizeof(struct usb_cdc_country_functional_desc)) - goto next_desc; - hdr->usb_cdc_country_functional_desc = - (struct usb_cdc_country_functional_desc *)buffer; - break; - case USB_CDC_HEADER_TYPE: - if (elength != sizeof(struct usb_cdc_header_desc)) - goto next_desc; - if (header) - return -EINVAL; - header = (struct usb_cdc_header_desc *)buffer; - break; - case USB_CDC_ACM_TYPE: - if (elength < sizeof(struct usb_cdc_acm_descriptor)) - goto next_desc; - hdr->usb_cdc_acm_descriptor = - (struct usb_cdc_acm_descriptor *)buffer; - break; - case USB_CDC_ETHERNET_TYPE: - if (elength != sizeof(struct usb_cdc_ether_desc)) - goto next_desc; - if (ether) - return -EINVAL; - ether = (struct usb_cdc_ether_desc *)buffer; - break; - case USB_CDC_CALL_MANAGEMENT_TYPE: - if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor)) - goto next_desc; - hdr->usb_cdc_call_mgmt_descriptor = - (struct usb_cdc_call_mgmt_descriptor *)buffer; - break; - case USB_CDC_DMM_TYPE: - if (elength < sizeof(struct usb_cdc_dmm_desc)) - goto next_desc; - hdr->usb_cdc_dmm_desc = - (struct usb_cdc_dmm_desc *)buffer; - break; - case USB_CDC_MDLM_TYPE: - if (elength < sizeof(struct usb_cdc_mdlm_desc *)) - goto next_desc; - if (desc) - return -EINVAL; - desc = (struct usb_cdc_mdlm_desc *)buffer; - break; - case USB_CDC_MDLM_DETAIL_TYPE: - if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *)) - goto next_desc; - if (detail) - return -EINVAL; - detail = (struct usb_cdc_mdlm_detail_desc *)buffer; - break; - case USB_CDC_NCM_TYPE: - if (elength < sizeof(struct usb_cdc_ncm_desc)) - goto next_desc; - hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer; - break; - case USB_CDC_MBIM_TYPE: - if (elength < sizeof(struct usb_cdc_mbim_desc)) - goto next_desc; - - hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer; - break; - case USB_CDC_MBIM_EXTENDED_TYPE: - if (elength < sizeof(struct usb_cdc_mbim_extended_desc)) - break; - hdr->usb_cdc_mbim_extended_desc = - (struct usb_cdc_mbim_extended_desc *)buffer; - break; - case CDC_PHONET_MAGIC_NUMBER: - hdr->phonet_magic_present = true; - break; - default: - /* - * there are LOTS more CDC descriptors that - * could legitimately be found here. - */ - dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n", - buffer[2], elength); - goto next_desc; - } - cnt++; -next_desc: - buflen -= elength; - buffer += elength; - } - hdr->usb_cdc_union_desc = union_header; - hdr->usb_cdc_header_desc = header; - hdr->usb_cdc_mdlm_detail_desc = detail; - hdr->usb_cdc_mdlm_desc = desc; - hdr->usb_cdc_ether_desc = ether; - return cnt; -} - -EXPORT_SYMBOL(cdc_parse_cdc_header); - /* * The function can't be called inside suspend/resume callback, * otherwise deadlock will be caused. 
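The usbnet hunk above deletes the driver-local copy of cdc_parse_cdc_header(), presumably in favour of the common helper exported elsewhere in the tree. The defensive pattern it embodies is still the interesting part: each class-specific descriptor advertises its own element length, and the buffer is only cast to a typed descriptor after that length has been checked against both the element size and the remaining buffer. A standalone sketch with simplified types (only the CDC header case is shown; the constants and struct are stand-ins, not the kernel's headers):

#include <stdint.h>
#include <stdio.h>

#define DT_CS_INTERFACE 0x24
#define CDC_HEADER_TYPE 0x00

struct cdc_header_desc {
	uint8_t  bFunctionLength;
	uint8_t  bDescriptorType;
	uint8_t  bDescriptorSubType;
	uint16_t bcdCDC;
} __attribute__((packed));

static int walk_cdc_descriptors(const uint8_t *buf, int buflen)
{
	int cnt = 0;

	while (buflen > 0) {
		unsigned int elength = buf[0];

		if (!elength) {
			/* never advance by zero; skip a single garbage byte */
			elength = 1;
		} else if (elength > (unsigned int)buflen) {
			/* descriptor claims more bytes than remain */
			break;
		} else if (elength >= sizeof(struct cdc_header_desc) &&
			   buf[1] == DT_CS_INTERFACE &&
			   buf[2] == CDC_HEADER_TYPE) {
			/* cast only once the element is known to be big enough */
			const struct cdc_header_desc *h =
				(const struct cdc_header_desc *)buf;

			printf("CDC header, bcdCDC=%#x\n", (unsigned)h->bcdCDC);
			cnt++;
		}

		buf += elength;
		buflen -= elength;
	}
	return cnt;
}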
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 73e2df1b0926a7707a73fe64723890454cdddd6b..00781772a175c2ce96b505e3a2335954a19e157d 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1300,7 +1300,7 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) int ret; struct ath10k_fw_file *fw_file; - if (!ar->is_bmi && QCA_REV_WCN3990(ar)) { + if (!ar->is_bmi) { fw_file = &ar->normal_mode_fw.fw_file; fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_TLV; fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 68c1ae4a01a2f509ea6f50b9dbf5f6519e183661..d445233fff8a36522c20b95443b410591a2e2d03 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -5641,6 +5641,22 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, arvif->vdev_id, ret); } +static void ath10k_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + + mutex_lock(&ar->conf_mutex); + memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN); + memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN); + arvif->gtk_rekey_data.replay_ctr = + be64_to_cpup((__be64 *)data->replay_ctr); + arvif->gtk_rekey_data.valid = true; + mutex_unlock(&ar->conf_mutex); +} + static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) @@ -7568,6 +7584,7 @@ static const struct ieee80211_ops ath10k_ops = { .bss_info_changed = ath10k_bss_info_changed, .hw_scan = ath10k_hw_scan, .cancel_hw_scan = ath10k_cancel_hw_scan, + .set_rekey_data = ath10k_set_rekey_data, .set_key = ath10k_set_key, .set_default_unicast_key = ath10k_set_default_unicast_key, .sta_state = ath10k_sta_state, @@ -7603,7 +7620,6 @@ static const struct ieee80211_ops ath10k_ops = { .suspend = ath10k_wow_op_suspend, .resume = ath10k_wow_op_resume, .set_wakeup = ath10k_wow_op_set_wakeup, - .set_rekey_data = ath10k_wow_op_set_rekey_data, #endif #ifdef CONFIG_MAC80211_DEBUGFS .sta_add_debugfs = ath10k_sta_add_debugfs, @@ -8277,6 +8293,7 @@ err_free: void ath10k_mac_unregister(struct ath10k *ar) { + ath10k_wow_deinit(ar); ieee80211_unregister_hw(ar->hw); if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index de7b50714480a0af60b592d8eb0454a81010a243..1317d184e1e839284b208c93faf72155027ff2f1 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1,6 +1,6 @@ /* Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. - * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1063,6 +1063,7 @@ static int ath10k_snoc_get_soc_info(struct ath10k *ar) static int ath10k_snoc_wlan_enable(struct ath10k *ar) { struct ath10k_wlan_enable_cfg cfg; + enum ath10k_driver_mode mode; int pipe_num; struct ath10k_ce_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX]; @@ -1093,8 +1094,9 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar) cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *) &target_shadow_reg_cfg_map; - return ath10k_snoc_qmi_wlan_enable(ar, &cfg, - ATH10K_MISSION, "5.1.0.26N"); + mode = ar->testmode.utf_monitor ? ATH10K_FTM : ATH10K_MISSION; + return ath10k_snoc_qmi_wlan_enable(ar, &cfg, mode, + "5.1.0.26N"); } static int ath10k_snoc_bus_configure(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index ed85f938e3c0799795ccf117e572330ea2635c78..1a067a4ece4d5558be9cca10914035d3f8a70cf3 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -137,6 +137,13 @@ static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[]) return ret; } + ret = nla_put_u32(skb, ATH10K_TM_ATTR_WMI_OP_VERSION, + ar->normal_mode_fw.fw_file.wmi_op_version); + if (ret) { + kfree_skb(skb); + return ret; + } + return cfg80211_testmode_reply(skb); } @@ -174,8 +181,15 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar, static int ath10k_tm_fetch_firmware(struct ath10k *ar) { struct ath10k_fw_components *utf_mode_fw; + struct ath10k_fw_file *fw_file; int ret; + if (!ar->is_bmi) { + fw_file = &ar->testmode.utf_mode_fw.fw_file; + fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_TLV; + fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV; + return 0; + } ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE, &ar->testmode.utf_mode_fw.fw_file); if (ret == 0) { diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h index ba81bf66ce85aade4b60fe7ac90ac8c3b96dea40..191a8f34c8ea0831ca6105e87379a73b7303fdbc 100644 --- a/drivers/net/wireless/ath/ath10k/testmode_i.h +++ b/drivers/net/wireless/ath/ath10k/testmode_i.h @@ -33,6 +33,7 @@ enum ath10k_tm_attr { ATH10K_TM_ATTR_WMI_CMDID = 3, ATH10K_TM_ATTR_VERSION_MAJOR = 4, ATH10K_TM_ATTR_VERSION_MINOR = 5, + ATH10K_TM_ATTR_WMI_OP_VERSION = 6, /* keep last */ __ATH10K_TM_ATTR_AFTER_LAST, diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 8952d9162ace065a51ae70a5f7a9b6054d41862a..b4ef2542bb9dec1729c480fc0ca6ba4c1a3e4de9 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3117,13 +3117,14 @@ ath10k_wmi_tlv_op_gen_set_arp_ns_offload(struct ath10k *ar, void *ptr; int i; struct wmi_ns_arp_offload_req *arp = &arvif->arp_offload; + struct wmi_ns_arp_offload_req *ns = &arvif->ns_offload; struct wmi_ns_offload *ns_tuple; struct wmi_arp_offload *arp_tuple; len = sizeof(*cmd) + sizeof(*tlv) + - sizeof(*tlv) + WMI_MAX_NS_OFFLOADS * + sizeof(*tlv) + WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_ns_offload) + sizeof(*tlv)) + - sizeof(*tlv) + WMI_MAX_ARP_OFFLOADS * + sizeof(*tlv) + WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_arp_offload) + sizeof(*tlv)); skb = ath10k_wmi_alloc_skb(ar, len); @@ -3141,33 +3142,49 @@ ath10k_wmi_tlv_op_gen_set_arp_ns_offload(struct ath10k *ar, ptr += (sizeof(*tlv) + sizeof(*cmd)); tlv = ptr; tlv->tag = 
__cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); - tlv->len = __cpu_to_le16(WMI_MAX_NS_OFFLOADS * + tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_ns_offload) + sizeof(*tlv))); ptr += sizeof(*tlv); tlv = ptr; - for (i = 0; i < WMI_MAX_NS_OFFLOADS; i++) { + for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) { tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE); tlv->len = __cpu_to_le16(sizeof(struct wmi_ns_offload)); ns_tuple = (struct wmi_ns_offload *)tlv->value; - ns_tuple->flags |= __cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE); + if (ns->enable_offload) { + ns_tuple->flags |= + __cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID); + if (ns->info.target_addr_valid.s6_addr[i]) { + memcpy(&ns_tuple->target_ipaddr[0], + &ns->info.target_addr[i], + sizeof(struct in6_addr)); + } + memcpy(&ns_tuple->solicitation_ipaddr, + &ns->info.self_addr[i], sizeof(struct in6_addr)); + if (ns->info.target_ipv6_ac.s6_addr[i] == IPV6_ADDR_ANY) + ns_tuple->flags |= + __cpu_to_le32(WMI_NSOFF_IPV6_ANYCAST); + } else { + ns_tuple->flags |= + __cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE); + } ptr += (sizeof(*tlv) + sizeof(struct wmi_ns_offload)); tlv = ptr; } tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); - tlv->len = __cpu_to_le16(WMI_MAX_ARP_OFFLOADS * + tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_arp_offload) + sizeof(*tlv))); ptr += sizeof(*tlv); tlv = ptr; - for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { + for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) { tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE); tlv->len = __cpu_to_le16(sizeof(struct wmi_arp_offload)); arp_tuple = (struct wmi_arp_offload *)tlv->value; if (arp->enable_offload && (i == 0)) { arp_tuple->flags |= - __cpu_to_le32(WMI_ARPOFF_FLAGS_VALID); + __cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID); memcpy(&arp_tuple->target_ipaddr, &arp->params.ipv4_addr, sizeof(arp_tuple->target_ipaddr)); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 57b81b8bae824870a6063dbc1d0e8338e597fe7a..291969db777d7015015414c7664184f9445ff8d4 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -21,6 +21,7 @@ #include <linux/types.h> #include <net/mac80211.h> #include <linux/ipv6.h> +#include <net/ipv6.h> #include <linux/in.h> /* @@ -2887,13 +2888,12 @@ struct wmi_start_scan_common { } __packed; /* ARP-NS offload data structure */ -#define WMI_NSOFF_MAX_TARGET_IPS 2 -#define WMI_MAX_NS_OFFLOADS 2 -#define WMI_MAX_ARP_OFFLOADS 2 -#define WMI_ARPOFF_FLAGS_VALID BIT(0) +#define WMI_NS_ARP_OFFLOAD 2 +#define WMI_ARP_NS_OFF_FLAGS_VALID BIT(0) #define WMI_IPV4_ARP_REPLY_OFFLOAD 0 #define WMI_ARP_NS_OFFLOAD_DISABLE 0 #define WMI_ARP_NS_OFFLOAD_ENABLE 1 +#define WMI_NSOFF_IPV6_ANYCAST BIT(3) struct wmi_ns_offload_info { struct in6_addr src_addr; @@ -2902,7 +2902,7 @@ struct wmi_ns_offload_info { struct wmi_mac_addr self_macaddr; u8 src_ipv6_addr_valid; struct in6_addr target_addr_valid; - struct in6_addr target_addr_ac_type; + struct in6_addr target_ipv6_ac; u8 slot_idx; } __packed; @@ -2914,13 +2914,13 @@ struct wmi_ns_arp_offload_req { struct in_addr ipv4_addr; struct in6_addr ipv6_addr; } params; - struct wmi_ns_offload_info offload_info; + struct wmi_ns_offload_info info; struct wmi_mac_addr bssid; } __packed; struct wmi_ns_offload { __le32 flags; - struct in6_addr target_ipaddr[WMI_NSOFF_MAX_TARGET_IPS]; + struct in6_addr target_ipaddr[WMI_NS_ARP_OFFLOAD]; struct in6_addr solicitation_ipaddr; struct in6_addr remote_ipaddr; struct wmi_mac_addr target_mac; diff --git 
a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 262a1a19196e68e363359f04ec4eca448539af5e..2280f47dc22721492cf028ff279a98e3bdd23d53 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -17,6 +17,7 @@ #include "mac.h" #include <net/mac80211.h> +#include <net/addrconf.h> #include "hif.h" #include "core.h" #include "debug.h" @@ -231,6 +232,116 @@ static int ath10k_wow_wakeup(struct ath10k *ar) return 0; } +static int +ath10k_wow_fill_vdev_ns_offload_struct(struct ath10k_vif *arvif, + bool enable_offload) +{ + struct in6_addr addr[TARGET_NUM_STATIONS]; + struct wmi_ns_arp_offload_req *ns; + struct wireless_dev *wdev; + struct inet6_dev *in6_dev; + struct in6_addr addr_type; + struct inet6_ifaddr *ifa; + struct ifacaddr6 *ifaca; + struct list_head *addr_list; + u32 scope, count = 0; + int i; + + ns = &arvif->ns_offload; + if (!enable_offload) { + ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD); + ns->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_DISABLE); + return 0; + } + + wdev = ieee80211_vif_to_wdev(arvif->vif); + if (!wdev) + return -ENODEV; + + in6_dev = __in6_dev_get(wdev->netdev); + if (!in6_dev) + return -ENODEV; + + memset(&addr, 0, TARGET_NUM_STATIONS * sizeof(struct in6_addr)); + memset(&addr_type, 0, sizeof(struct in6_addr)); + + /* Unicast Addresses */ + read_lock_bh(&in6_dev->lock); + list_for_each(addr_list, &in6_dev->addr_list) { + if (count >= TARGET_NUM_STATIONS) { + read_unlock_bh(&in6_dev->lock); + return -EINVAL; + } + + ifa = list_entry(addr_list, struct inet6_ifaddr, if_list); + if (ifa->flags & IFA_F_DADFAILED) + continue; + scope = ipv6_addr_src_scope(&ifa->addr); + switch (scope) { + case IPV6_ADDR_SCOPE_GLOBAL: + case IPV6_ADDR_SCOPE_LINKLOCAL: + memcpy(&addr[count], &ifa->addr.s6_addr, + sizeof(ifa->addr.s6_addr)); + addr_type.s6_addr[count] = IPV6_ADDR_UNICAST; + count += 1; + break; + } + } + + /* Anycast Addresses */ + for (ifaca = in6_dev->ac_list; ifaca; ifaca = ifaca->aca_next) { + if (count >= TARGET_NUM_STATIONS) { + read_unlock_bh(&in6_dev->lock); + return -EINVAL; + } + + scope = ipv6_addr_src_scope(&ifaca->aca_addr); + switch (scope) { + case IPV6_ADDR_SCOPE_GLOBAL: + case IPV6_ADDR_SCOPE_LINKLOCAL: + memcpy(&addr[count], &ifaca->aca_addr, + sizeof(ifaca->aca_addr)); + addr_type.s6_addr[count] = IPV6_ADDR_ANY; + count += 1; + break; + } + } + read_unlock_bh(&in6_dev->lock); + + /* Filling up the request structure + * Filling the self_addr with solicited address + * A Solicited-Node multicast address is created by + * taking the last 24 bits of a unicast or anycast + * address and appending them to the prefix + * + * FF02:0000:0000:0000:0000:0001:FFXX:XXXX + * + * here XX is the unicast/anycast bits + */ + for (i = 0; i < count; i++) { + ns->info.self_addr[i].s6_addr[0] = 0xFF; + ns->info.self_addr[i].s6_addr[1] = 0x02; + ns->info.self_addr[i].s6_addr[11] = 0x01; + ns->info.self_addr[i].s6_addr[12] = 0xFF; + ns->info.self_addr[i].s6_addr[13] = addr[i].s6_addr[13]; + ns->info.self_addr[i].s6_addr[14] = addr[i].s6_addr[14]; + ns->info.self_addr[i].s6_addr[15] = addr[i].s6_addr[15]; + ns->info.slot_idx = i; + memcpy(&ns->info.target_addr[i], &addr[i], + sizeof(struct in6_addr)); + ns->info.target_addr_valid.s6_addr[i] = 1; + ns->info.target_ipv6_ac.s6_addr[i] = addr_type.s6_addr[i]; + memcpy(&ns->params.ipv6_addr, &ns->info.target_addr[i], + sizeof(struct in6_addr)); + } + + ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD); + ns->enable_offload = 
__cpu_to_le16(WMI_ARP_NS_OFFLOAD_ENABLE); + ns->num_ns_offload_count = __cpu_to_le16(count); + + return 0; +} + static int ath10k_wow_fill_vdev_arp_offload_struct(struct ath10k_vif *arvif, bool enable_offload) @@ -291,6 +402,13 @@ static int ath10k_wow_enable_ns_arp_offload(struct ath10k *ar, bool offload) return ret; } + ret = ath10k_wow_fill_vdev_ns_offload_struct(arvif, offload); + if (ret) { + ath10k_err(ar, "NS-offload config failed, vdev: %d\n", + arvif->vdev_id); + return ret; + } + ret = ath10k_wmi_set_arp_ns_offload(ar, arvif); if (ret) { ath10k_err(ar, "failed to send offload cmd, vdev: %d\n", @@ -327,22 +445,6 @@ static int ath10k_config_wow_listen_interval(struct ath10k *ar) return 0; } -void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data) -{ - struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - - mutex_lock(&ar->conf_mutex); - memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN); - memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN); - arvif->gtk_rekey_data.replay_ctr = - cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr)); - arvif->gtk_rekey_data.valid = true; - mutex_unlock(&ar->conf_mutex); -} - static int ath10k_wow_config_gtk_offload(struct ath10k *ar, bool gtk_offload) { struct ath10k_vif *arvif; @@ -391,6 +493,13 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw, goto exit; } + ret = ath10k_wow_cleanup(ar); + if (ret) { + ath10k_warn(ar, "failed to clear wow wakeup events: %d\n", + ret); + goto exit; + } + ret = ath10k_wow_config_gtk_offload(ar, true); if (ret) { ath10k_warn(ar, "failed to enable GTK offload: %d\n", ret); @@ -403,18 +512,11 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw, goto disable_gtk_offload; } - ret = ath10k_wow_cleanup(ar); - if (ret) { - ath10k_warn(ar, "failed to clear wow wakeup events: %d\n", - ret); - goto disable_ns_arp_offload; - } - ret = ath10k_wow_set_wakeups(ar, wowlan); if (ret) { ath10k_warn(ar, "failed to set wow wakeup events: %d\n", ret); - goto cleanup; + goto disable_ns_arp_offload; } ret = ath10k_config_wow_listen_interval(ar); @@ -577,8 +679,15 @@ int ath10k_wow_init(struct ath10k *ar) ar->wow.wowlan_support = ath10k_wowlan_support; ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; ar->hw->wiphy->wowlan = &ar->wow.wowlan_support; - - device_set_wakeup_capable(ar->dev, true); + device_init_wakeup(ar->dev, true); return 0; } + +void ath10k_wow_deinit(struct ath10k *ar) +{ + if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features) && + test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)) + device_init_wakeup(ar->dev, false); +} diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h index b53211584052e8f5f4dab481f6e0b6dae1653efd..2ca4ba4848c9361cd39f768fa5982a26014914f9 100644 --- a/drivers/net/wireless/ath/ath10k/wow.h +++ b/drivers/net/wireless/ath/ath10k/wow.h @@ -27,13 +27,11 @@ struct ath10k_wow { #ifdef CONFIG_PM int ath10k_wow_init(struct ath10k *ar); +void ath10k_wow_deinit(struct ath10k *ar); int ath10k_wow_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int ath10k_wow_op_resume(struct ieee80211_hw *hw); void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled); -void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data); #else static inline int ath10k_wow_init(struct ath10k *ar) @@ -41,5 +39,8 @@ static inline int 
ath10k_wow_init(struct ath10k *ar) return 0; } +void ath10k_wow_deinit(struct ath10k *ar) +{ +} #endif /* CONFIG_PM */ #endif /* _WOW_H_ */ diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index f8b2b5987ea9cf95bb35d1dbcc73255dafbc527a..ec91cd17bf3491ae39b01d7c3184f14252b6349a 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -522,7 +522,7 @@ static ssize_t driver_override_store(struct device *dev, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); - char *driver_override, *old = pdev->driver_override, *cp; + char *driver_override, *old, *cp; /* We need to keep extra room for a newline */ if (count >= (PAGE_SIZE - 1)) @@ -536,12 +536,15 @@ static ssize_t driver_override_store(struct device *dev, if (cp) *cp = '\0'; + device_lock(dev); + old = pdev->driver_override; if (strlen(driver_override)) { pdev->driver_override = driver_override; } else { kfree(driver_override); pdev->driver_override = NULL; } + device_unlock(dev); kfree(old); @@ -552,8 +555,12 @@ static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); + ssize_t len; - return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + device_lock(dev); + len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + device_unlock(dev); + return len; } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/soc/qcom/qdsp6v2/voice_svc.c b/drivers/soc/qcom/qdsp6v2/voice_svc.c index 40204e104031b294eb73220c8bf819809f3086b5..f01ab2499a75a8fe83e6f164de22bddb52d9d6cd 100644 --- a/drivers/soc/qcom/qdsp6v2/voice_svc.c +++ b/drivers/soc/qcom/qdsp6v2/voice_svc.c @@ -773,7 +773,7 @@ static int voice_svc_probe(struct platform_device *pdev) if (ret) { pr_err("%s: Failed to alloc chrdev\n", __func__); ret = -ENODEV; - goto chrdev_err; + goto done; } voice_svc_dev->major = MAJOR(device_num); @@ -820,8 +820,6 @@ dev_err: class_destroy(voice_svc_class); class_err: unregister_chrdev_region(0, MINOR_NUMBER); -chrdev_err: - kfree(voice_svc_dev); done: return ret; } @@ -835,7 +833,6 @@ static int voice_svc_remove(struct platform_device *pdev) device_destroy(voice_svc_class, device_num); class_destroy(voice_svc_class); unregister_chrdev_region(0, MINOR_NUMBER); - kfree(voice_svc_dev); return 0; } diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c index 90ae7eb65b65992bc234f4c85a4783f9fba3265f..6102b1765182c797bf338dbf9c3a7b553c342252 100644 --- a/drivers/staging/android/ion/ion_cma_secure_heap.c +++ b/drivers/staging/android/ion/ion_cma_secure_heap.c @@ -3,7 +3,7 @@ * * Copyright (C) Linaro 2012 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. - * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. 
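For reference, the driver_override hunk above moves both the pointer swap in the store path and the read in the show path under device_lock(), and frees the old string only after the lock is dropped. Below is a minimal userspace analogue of that pattern, assuming a pthread mutex in place of the device lock; all names are invented for illustration and this is not the kernel code.

```c
/*
 * Userspace analogue of the locking applied to pdev->driver_override:
 * the writer swaps the pointer under the lock and frees the old buffer
 * after unlocking; the reader formats the string under the same lock so
 * it can never dereference a just-freed pointer.  Hypothetical names.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static char *override;                 /* stands in for pdev->driver_override */

static int override_store(const char *buf)
{
	char *new = strdup(buf), *old;

	if (!new)
		return -1;

	pthread_mutex_lock(&dev_lock);
	old = override;
	if (strlen(new)) {
		override = new;
	} else {
		free(new);
		override = NULL;
	}
	pthread_mutex_unlock(&dev_lock);

	free(old);                     /* freed outside the lock, as in the patch */
	return 0;
}

static int override_show(char *buf, size_t len)
{
	int n;

	pthread_mutex_lock(&dev_lock);
	n = snprintf(buf, len, "%s\n", override ? override : "");
	pthread_mutex_unlock(&dev_lock);
	return n;
}

int main(void)
{
	char buf[64];

	override_store("vfio-pci");
	override_show(buf, sizeof(buf));
	fputs(buf, stdout);
	override_store("");            /* clearing frees the stored string */
	return 0;
}
```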
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -344,7 +344,8 @@ static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap, } -void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr) +static unsigned long +__ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr) { struct list_head *entry, *_n; unsigned long drained_size = 0, skipped_size = 0; @@ -368,6 +369,7 @@ void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr) } trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size); + return drained_size; } int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused) @@ -385,6 +387,7 @@ int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused) static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker, struct shrink_control *sc) { + unsigned long freed; struct ion_cma_secure_heap *sheap = container_of(shrinker, struct ion_cma_secure_heap, shrinker); int nr_to_scan = sc->nr_to_scan; @@ -397,11 +400,11 @@ static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker, if (!mutex_trylock(&sheap->chunk_lock)) return -1; - __ion_secure_cma_shrink_pool(sheap, nr_to_scan); + freed = __ion_secure_cma_shrink_pool(sheap, nr_to_scan); mutex_unlock(&sheap->chunk_lock); - return atomic_read(&sheap->total_pool_size); + return freed; } static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker, diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index 513d015a5ace112217e2772a91e28ca4ef3179df..c19b87d10df0be7791352d67ce12b3f9f03147d2 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -183,7 +183,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, freed += (1 << pool->order); } - return ion_page_pool_total(pool, high); + return freed; } struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask, diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c index a1dd5ccc810941bebf68075ad28c5a0bc8196cac..9b38b73a6c0f7fcc2ecb744918c357dd7cd8cd0b 100644 --- a/drivers/staging/android/ion/msm/msm_ion.c +++ b/drivers/staging/android/ion/msm/msm_ion.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. 
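The two ion hunks above change the shrinker scan paths to return the amount actually reclaimed in the current call instead of the remaining pool size. A short sketch of that convention follows, assuming a simple free-list pool; the names and list layout are hypothetical and this is not the ion implementation.

```c
/*
 * Scan-callback convention the ion shrinker changes adopt: report how
 * much this call freed, not how much is still pooled.  Hypothetical
 * userspace illustration.
 */
#include <stdio.h>
#include <stdlib.h>

struct pool_chunk {
	struct pool_chunk *next;
	unsigned long nr_pages;
};

static struct pool_chunk *pool_head;
static unsigned long pool_total;

static void pool_add(unsigned long nr_pages)
{
	struct pool_chunk *chunk = malloc(sizeof(*chunk));

	if (!chunk)
		return;
	chunk->nr_pages = nr_pages;
	chunk->next = pool_head;
	pool_head = chunk;
	pool_total += nr_pages;
}

/* Drain up to nr_to_scan pages; return the number freed, not pool_total. */
static unsigned long pool_shrink_scan(unsigned long nr_to_scan)
{
	unsigned long freed = 0;

	while (pool_head && freed < nr_to_scan) {
		struct pool_chunk *chunk = pool_head;

		pool_head = chunk->next;
		pool_total -= chunk->nr_pages;
		freed += chunk->nr_pages;
		free(chunk);
	}
	return freed;
}

int main(void)
{
	unsigned long freed;

	pool_add(16);
	pool_add(16);
	freed = pool_shrink_scan(16);
	printf("freed %lu pages, %lu left in pool\n", freed, pool_total);
	return 0;
}
```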
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -742,7 +742,7 @@ long msm_ion_custom_ioctl(struct ion_client *client, data.flush_data.offset; end = start + data.flush_data.length; - if (check_vaddr_bounds(start, end)) { + if (start && check_vaddr_bounds(start, end)) { pr_err("%s: virtual address %pK is out of bounds\n", __func__, data.flush_data.vaddr); ret = -EINVAL; diff --git a/drivers/staging/fw-api/fw/htt.h b/drivers/staging/fw-api/fw/htt.h index 23f026624d2db1fed54d3c858fdf0896289ac574..e56ca9d38ff45559dffd8e32cf825a97cb16e9ad 100644 --- a/drivers/staging/fw-api/fw/htt.h +++ b/drivers/staging/fw-api/fw/htt.h @@ -5079,23 +5079,23 @@ PREPACK struct htt_rx_ring_selection_cfg_t { #define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0111_S 23 /* Block Ack Request */ -#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1000_M 0x01000001 +#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1000_M 0x01000000 #define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1000_S 24 -#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1000_M 0x02000001 +#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1000_M 0x02000000 #define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1000_S 25 -#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1000_M 0x00000001 +#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1000_M 0x04000000 #define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1000_S 26 /* Block Ack*/ -#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1001_M 0x00000001 +#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1001_M 0x08000000 #define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1001_S 27 -#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1001_M 0x00000001 +#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1001_M 0x10000000 #define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1001_S 28 -#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1001_M 0x00000001 +#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1001_M 0x20000000 #define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1001_S 29 /* PS-POLL */ diff --git a/drivers/staging/fw-api/fw/htt_ppdu_stats.h b/drivers/staging/fw-api/fw/htt_ppdu_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..7499827dd978ac3dda5606b591a06f665aa39403 --- /dev/null +++ b/drivers/staging/fw-api/fw/htt_ppdu_stats.h @@ -0,0 +1,1595 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
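The new htt_ppdu_stats.h header introduced here describes every packed word through mask/shift pairs (FIELD_M, FIELD_S) with matching _GET/_SET helpers. The sketch below shows that accessor pattern in isolation, using an invented two-field word and a token-pasting variant of the macros purely for compactness; it is not taken from the header itself.

```c
/*
 * Generic mask/shift accessor pattern, as used throughout the PPDU
 * stats TLV definitions that follow.  The field layout is invented for
 * illustration only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_FIELD_TX_RATE_M 0x000000ff   /* bits [7:0]   */
#define DEMO_FIELD_TX_RATE_S 0
#define DEMO_FIELD_PEER_ID_M 0xffff0000   /* bits [31:16] */
#define DEMO_FIELD_PEER_ID_S 16

#define DEMO_FIELD_GET(word, name) \
	(((word) & DEMO_FIELD_##name##_M) >> DEMO_FIELD_##name##_S)

/* Like the header's _SET macros, this ORs the value in; the caller is
 * expected to start from a zeroed word (or clear the field first). */
#define DEMO_FIELD_SET(word, name, val) \
	((word) |= ((uint32_t)(val) << DEMO_FIELD_##name##_S))

int main(void)
{
	uint32_t word = 0;

	DEMO_FIELD_SET(word, TX_RATE, 0x2c);
	DEMO_FIELD_SET(word, PEER_ID, 0x1234);

	assert(DEMO_FIELD_GET(word, TX_RATE) == 0x2c);
	assert(DEMO_FIELD_GET(word, PEER_ID) == 0x1234);
	printf("packed word = 0x%08x\n", word);
	return 0;
}
```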
+ */ +/** + * @file htt_ppdu_stats.h + * + * @details the public header file of HTT STATS + */ +#ifndef __HTT_PPDU_STATS_H__ +#define __HTT_PPDU_STATS_H__ + +#include <htt.h> +#include <htt_stats.h> + +#define HTT_BA_64_BIT_MAP_SIZE_DWORDS 2 +#define HTT_BA_256_BIT_MAP_SIZE_DWORDS 8 +enum htt_ppdu_stats_tlv_tag { + HTT_PPDU_STATS_COMMON_TLV, + HTT_PPDU_STATS_USR_COMMON_TLV, + HTT_PPDU_STATS_USR_RATE_TLV, + HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV, + HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV, + HTT_PPDU_STATS_SCH_CMD_STATUS_TLV, + HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV, + HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV, + HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV, + HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV, + HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV, + HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV, + HTT_PPDU_STATS_INFO_TLV, + HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV, + + /* New TLV's are added above to this line */ + HTT_PPDU_STATS_MAX_TAG, +}; +typedef enum htt_ppdu_stats_tlv_tag htt_ppdu_stats_tlv_tag_t; + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_M 0x000000ff +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_S 0 + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_GET(_var) \ + (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_M) >> \ + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_S) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_S)); \ + } while (0) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_M 0x00000100 +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_S 8 + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_GET(_var) \ + (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_M) >> \ + HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_S) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_S)); \ + } while (0) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_M 0x00000600 +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_S 9 + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_GET(_var) \ + (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_M) >> \ + HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_S) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_S)); \ + } while (0) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_M 0x00003800 +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_S 11 + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_GET(_var) \ + (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_M) >> \ + HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_S) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_S)); \ + } while (0) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_M 0x00004000 +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_S 14 + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_GET(_var) \ + (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_M) >> \ + HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_S) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_S)); \ + } while (0) + +#define 
HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_M 0xffff0000 +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_S 16 + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_M) >> \ + HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_S) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_S)); \ + } while (0) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_M 0x0000ffff +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_S 0 + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(_var) \ + (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_M) >> \ + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_S) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_S)); \ + } while (0) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_M 0xffff0000 +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_S 16 + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(_var) \ + (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_M) >> \ + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_S) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_S)); \ + } while (0) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_M 0x0000ffff +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_S 0 + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(_var) \ + (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_M) >> \ + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_S) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_S)); \ + } while (0) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_M 0xffff0000 +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_S 16 + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_GET(_var) \ + (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_M) >> \ + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_S) + +#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_S)); \ + } while (0) + +PREPACK struct htt_tx_ppdu_stats_info { + htt_tlv_hdr_t tlv_hdr; + A_UINT32 tx_success_bytes; + A_UINT32 tx_retry_bytes; + A_UINT32 tx_failed_bytes; + A_UINT32 tx_ratecode: 8, + is_ampdu: 1, + ba_ack_failed: 2, + /* 0: 20 MHz + 1: 40 MHz + 2: 80 MHz + 3: 160 MHz or 80+80 MHz */ + bw: 3, + sgi: 1, + reserved0: 1, + peer_id: 16; + A_UINT32 tx_success_msdus: 16, + tx_retry_msdus: 16; + A_UINT32 tx_failed_msdus: 16, + /* united in us */ + tx_duration: 16; +} POSTPACK; + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + A_UINT32 number_of_ppdu_stats; + /* + * tx_ppdu_stats_info is filled by multiple struct htt_tx_ppdu_stats_info + * elements. 
+ * tx_ppdu_stats_info is variable length, with length = + * number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info) + */ + A_UINT32 tx_ppdu_stats_info[1/*number_of_ppdu_stats*/]; +} htt_ppdu_stats_usr_common_array_tlv_v; + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + /* Refer bmi_msg.h */ + A_UINT32 target_type; + A_UINT32 hw[1]; /* Variable length, refer to struct scheduler_cmd_status */ +} htt_ppdu_stats_sch_cmd_tlv_v; + +#define HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_M 0x0000ffff +#define HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_S 0 + +#define HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_M) >> \ + HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_S) + +#define HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_S)); \ + } while (0) + +#define HTT_PPDU_STATS_COMMON_TLV_RING_ID_M 0x00ff0000 +#define HTT_PPDU_STATS_COMMON_TLV_RING_ID_S 16 + +#define HTT_PPDU_STATS_COMMON_TLV_RING_ID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_COMMON_TLV_RING_ID_M) >> \ + HTT_PPDU_STATS_COMMON_TLV_RING_ID_S) + +#define HTT_PPDU_STATS_COMMON_TLV_RING_ID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_RING_ID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_RING_ID_S)); \ + } while (0) + +#define HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_M 0xff000000 +#define HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_S 24 + +#define HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(_var) \ + (((_var) & HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_M) >> \ + HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_S) + +#define HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_NUM_USERS, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_S)); \ + } while (0) + +/* HW queue type */ +enum HTT_TX_QUEUE_TYPE { + HTT_TX_QUEUE_INACTIVE, + HTT_TX_QUEUE_DATA, + HTT_TX_QUEUE_BEACON, + HTT_TX_QUEUE_PSPOLL, + HTT_TX_QUEUE_CAB, + HTT_TX_QUEUE_HALPHY, + HTT_TX_QUEUE_QBOOST_RESP, + HTT_TX_QUEUE_NAN_BEACON, + HTT_TX_QUEUE_NAN_MGMT, + HTT_TX_QUEUE_UL_DATA, + HTT_TX_QUEUE_UL_BSR_RESP, + HTT_TX_QUEUE_MGMT, + HTT_TX_QUEUE_MAX, +}; +typedef enum HTT_TX_QUEUE_TYPE HTT_TX_QUEUE_TYPE; + +/* frame_type */ +enum HTT_STATS_FTYPE { + HTT_STATS_FTYPE_SGEN_NDPA = 0, + HTT_STATS_FTYPE_SGEN_NDP, + HTT_STATS_FTYPE_SGEN_BRP, + HTT_STATS_FTYPE_SGEN_BAR, + HTT_STATS_FTYPE_SGEN_RTS, + HTT_STATS_FTYPE_SGEN_CTS, + HTT_STATS_FTYPE_SGEN_CFEND, + HTT_STATS_FTYPE_SGEN_AX_NDPA, + HTT_STATS_FTYPE_SGEN_AX_NDP, + HTT_STATS_FTYPE_SGEN_MU_TRIG, + HTT_STATS_FTYPE_SGEN_MU_BAR, + HTT_STATS_FTYPE_SGEN_MU_BRP, + HTT_STATS_FTYPE_SGEN_MU_RTS, + HTT_STATS_FTYPE_SGEN_MU_BSR, + HTT_STATS_FTYPE_SGEN_UL_BSR, + HTT_STATS_FTYPE_TIDQ_DATA_SU, + HTT_STATS_FTYPE_TIDQ_DATA_MU, + HTT_STATS_FTYPE_MAX, +}; +typedef enum HTT_STATS_FTYPE HTT_STATS_FTYPE; + +/* FRM_TYPE defined in HTT_STATS_FTYPE */ +#define HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_M 0x000000ff +#define HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_S 0 + +#define HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(_var) \ + (((_var) & HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_M) >> \ + HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_S) + +#define HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_S)); \ + } while (0) + +/* QTYPE defined in HTT_TX_QUEUE_TYPE */ +#define HTT_PPDU_STATS_COMMON_TLV_QTYPE_M 0x0000ff00 
+#define HTT_PPDU_STATS_COMMON_TLV_QTYPE_S 8 + +#define HTT_PPDU_STATS_COMMON_TLV_QTYPE_GET(_var) \ + (((_var) & HTT_PPDU_STATS_COMMON_TLV_QTYPE_M) >> \ + HTT_PPDU_STATS_COMMON_TLV_QTYPE_S) + +#define HTT_PPDU_STATS_COMMON_TLV_QTYPE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_QTYPE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_QTYPE_S)); \ + } while (0) + + +enum HTT_PPDU_STATS_BW { + HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0, + HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1, + HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2, + HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3, + HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4, + HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */ + HTT_PPDU_STATS_BANDWIDTH_DYN = 6, +}; +typedef enum HTT_PPDU_STATS_BW HTT_PPDU_STATS_BW; + +#define HTT_PPDU_STATS_COMMON_TLV_BW_M 0x000f0000 +#define HTT_PPDU_STATS_COMMON_TLV_BW_S 16 + +#define HTT_PPDU_STATS_COMMON_TLV_BW_GET(_var) \ + (((_var) & HTT_PPDU_STATS_COMMON_TLV_BW_M) >> \ + HTT_PPDU_STATS_COMMON_TLV_BW_S) + +#define HTT_PPDU_STATS_COMMON_TLV_BW_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_BW, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_BW_S)); \ + } while (0) + +#define HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_M 0x0000ffff +#define HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_S 0 + +#define HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(_var) \ + (((_var) & HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_M) >> \ + HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_S) + +#define HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_PHY_MODE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_S)); \ + } while (0) + +#define HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_M 0xffff0000 +#define HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_S 16 + +#define HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(_var) \ + (((_var) & HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_M) >> \ + HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_S) + +#define HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_S)); \ + } while (0) + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + A_UINT32 ppdu_id; + /* BIT [ 15 : 0] :- sched_cmdid + * BIT [ 23 : 16] :- ring_id + * BIT [ 31 : 24] :- num_users + */ + union { + A_UINT32 ring_id__sched_cmdid; + struct { + A_UINT32 sched_cmdid: 16, + ring_id: 8, + num_users: 8; + }; + }; + /* BIT [ 7 : 0] :- frame_type - HTT_STATS_FTYPE + * BIT [ 15: 8] :- queue_type - HTT_TX_QUEUE_TYPE + * BIT [ 19: 16] :- bw - HTT_PPDU_STATS_BW + * BIT [ 31: 20] :- reserved + */ + union { + A_UINT32 bw__queue_type__frame_type; + struct { + A_UINT32 frame_type: 8, + queue_type: 8, + bw: 4, + reserved0: 12; + }; + }; + A_UINT32 chain_mask; + A_UINT32 fes_duration_us; /* frame exchange sequence */ + A_UINT32 ppdu_sch_eval_start_tstmp_us; + A_UINT32 ppdu_sch_end_tstmp_us; + A_UINT32 ppdu_start_tstmp_us; + /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted + * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted + */ + union { + A_UINT32 chan_mhz__phy_mode; + struct { + A_UINT32 phy_mode: 16, + chan_mhz: 16; + }; + }; +} htt_ppdu_stats_common_tlv; + +#define HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_M 0x000000ff +#define HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_S 0 + +#define HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_M) >> \ + HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_S) + +#define 
HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_M 0x0000ff00 +#define HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_S 8 + +#define HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_M) >> \ + HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_S) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_M 0xffff0000 +#define HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_S 16 + +#define HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_M) >> \ + HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_S) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_M 0x00000001 +#define HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_S 0 + +#define HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_M) >> \ + HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_S) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_MCAST, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_M 0x000003fe +#define HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_S 1 + +#define HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_M) >> \ + HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_S) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_BW_M 0x00003c00 +#define HTT_PPDU_STATS_USER_COMMON_TLV_BW_S 10 + +#define HTT_PPDU_STATS_USER_COMMON_TLV_BW_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_BW_M) >> \ + HTT_PPDU_STATS_USER_COMMON_TLV_BW_S) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_BW_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_BW, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_BW_S)); \ + } while (0) + + +#define HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_M 0x0000ffff +#define HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_S 0 + +#define HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_M) >> \ + HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_S) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_M 0xffff0000 +#define HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_S 16 + +#define HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_M) >> \ + 
HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_S) + +#define HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_S)); \ + } while (0) + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + /* BIT [ 7 : 0] :- tid_num + * BIT [ 15: 8] :- vap_id + * BIT [ 31: 16] :- sw_peer_id + */ + union { + A_UINT32 sw_peer_id__vapid__tid_num; + struct { + A_UINT32 tid_num: 8, + vap_id: 8, + sw_peer_id: 16; + }; + }; + + /* BIT [ 0 : 0] :- mcast + * BIT [ 9 : 1] :- mpdus_tried + * BIT [ 13: 10] :- bw - HTT_PPDU_STATS_BW + * BIT [ 31: 14] :- rsvd + */ + union { + A_UINT32 bw__mpdus_tried__mcast; + struct { + A_UINT32 mcast: 1, + mpdus_tried: 9, + bw: 4, + reserved0: 18; + }; + }; + + /* BIT [ 15: 0] :- frame_ctrl + * BIT [ 31: 16] :- qos_ctrl + */ + union { + A_UINT32 qos_ctrl_frame_ctrl; + struct { + A_UINT32 frame_ctrl: 16, + qos_ctrl: 16; + }; + }; + +} htt_ppdu_stats_user_common_tlv; + +#define HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_M 0x000000ff +#define HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_S 0 + +#define HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_RESERVED_M 0x0000ff00 +#define HTT_PPDU_STATS_USER_RATE_TLV_RESERVED_S 8 + + +#define HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_M 0xffff0000 +#define HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_S 16 + +#define HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_M 0x0000000f +#define HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_S 0 + +#define HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_USER_POS, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_M 0x00000ff0 +#define HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_S 4 + +#define HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_RU_END_M 0x0000ffff +#define HTT_PPDU_STATS_USER_RATE_TLV_RU_END_S 0 + +#define HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_RU_END_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_RU_END_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_RU_END_SET(_var, _val) \ + do { \ + 
HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_RU_END, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_RU_END_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_RU_START_M 0xffff0000 +#define HTT_PPDU_STATS_USER_RATE_TLV_RU_START_S 16 + +#define HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_RU_START_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_RU_START_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_RU_START_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_RU_START, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_RU_START_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_M 0x00000001 +#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_S 0 + +#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_S)); \ + } while (0) + +enum HTT_PPDU_STATS_PPDU_TYPE { + HTT_PPDU_STATS_PPDU_TYPE_SU, + HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO, + HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA, + HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA, + HTT_PPDU_STATS_PPDU_TYPE_UL_TRIG, + HTT_PPDU_STATS_PPDU_TYPE_BURST_BCN, + HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_RESP, + HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_TRIG, + HTT_PPDU_STATS_PPDU_TYPE_UL_RESP, + + HTT_PPDU_STATS_PPDU_TYPE_UNKNOWN = 0x1F, +}; +typedef enum HTT_PPDU_STATS_PPDU_TYPE HTT_PPDU_STATS_PPDU_TYPE; + +#define HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_M 0x0000003E +#define HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_S 1 + +#define HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_S)); \ + } while (0) + +enum HTT_PPDU_STATS_TXBF_TYPE { + HTT_PPDU_STATS_TXBF_OPEN_LOOP, + HTT_PPDU_STATS_TXBF_IMPLICIT, + HTT_PPDU_STATS_TXBF_EXPLICIT, + HTT_PPDU_STATS_TXBF_MAX, +}; + +#define HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_M 0x00000003 +#define HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_S 0 + +#define HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_STBC_M 0x00000004 +#define HTT_PPDU_STATS_USER_RATE_TLV_STBC_S 2 + +#define HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_STBC_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_STBC_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_STBC_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_STBC, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_STBC_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_M 0x00000008 +#define HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_S 3 + +#define HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(_var) \ + (((_var) & 
HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_HE_RE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_TXBF_M 0x000000f0 +#define HTT_PPDU_STATS_USER_RATE_TLV_TXBF_S 4 + +#define HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_TXBF_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_TXBF_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_TXBF_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_TXBF, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_TXBF_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_BW_M 0x00000f00 +#define HTT_PPDU_STATS_USER_RATE_TLV_BW_S 8 + +#define HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_BW_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_BW_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_BW_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_BW, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_BW_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_NSS_M 0x0000f000 +#define HTT_PPDU_STATS_USER_RATE_TLV_NSS_S 12 + +#define HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_NSS_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_NSS_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_NSS_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_NSS, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_NSS_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_MCS_M 0x000f0000 +#define HTT_PPDU_STATS_USER_RATE_TLV_MCS_S 16 + +#define HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_MCS_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_MCS_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_MCS_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_MCS, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_MCS_S)); \ + } while (0) + +/* Refer HTT_STATS_PREAM_TYPE */ +#define HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_M 0x00f00000 +#define HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_S 20 + +#define HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_S)); \ + } while (0) + +/* Guard Intervals */ +enum HTT_PPDU_STATS_GI { + HTT_PPDU_STATS_GI_800, + HTT_PPDU_STATS_GI_400, + HTT_PPDU_STATS_GI_1600, + HTT_PPDU_STATS_GI_3200, + HTT_PPDU_STATS_GI_CNT, +}; +typedef enum HTT_PPDU_STATS_GI HTT_PPDU_STATS_GI; + +/* Refer HTT_PPDU_STATS_GI */ +#define HTT_PPDU_STATS_USER_RATE_TLV_GI_M 0x0f000000 +#define HTT_PPDU_STATS_USER_RATE_TLV_GI_S 24 + +#define HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_GI_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_GI_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_GI_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_GI, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_GI_S)); \ + } while (0) + + +#define HTT_PPDU_STATS_USER_RATE_TLV_DCM_M 0x10000000 +#define HTT_PPDU_STATS_USER_RATE_TLV_DCM_S 28 + +#define 
HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_DCM_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_DCM_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_DCM_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_DCM, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_DCM_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_RATE_TLV_LDPC_M 0x20000000 +#define HTT_PPDU_STATS_USER_RATE_TLV_LDPC_S 29 + +#define HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_LDPC_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_LDPC_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_LDPC_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_LDPC, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_LDPC_S)); \ + } while (0) + +enum HTT_PPDU_STATS_RESP_PPDU_TYPE { + HTT_PPDU_STATS_RESP_PPDU_TYPE_MU_MIMO_UL, + HTT_PPDU_STATS_RESP_PPDU_TYPE_MU_OFDMA_UL, +}; +typedef enum HTT_PPDU_STATS_RESP_PPDU_TYPE HTT_PPDU_STATS_RESP_PPDU_TYPE; + +#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_M 0xC0000000 +#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_S 30 + +#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_M) >> \ + HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_S) + +#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_S)); \ + } while (0) + + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + /* BIT [ 7 : 0] :- tid_num + * BIT [ 15: 8] :- reserved0 + * BIT [ 31: 16] :- sw_peer_id + */ + union { + A_UINT32 sw_peer_id__tid_num; + struct { + A_UINT32 tid_num: 8, + reserved0: 8, + sw_peer_id: 16; + }; + }; + + /* BIT [ 3 : 0] :- user_pos + * BIT [ 11: 4] :- mu_group_id + * BIT [ 31: 12] :- reserved1 + */ + union { + A_UINT32 mu_group_id__user_pos; + struct { + A_UINT32 user_pos: 4, + mu_group_id: 8, + reserved1: 20; + }; + }; + + /* BIT [ 15 : 0] :- ru_end + * BIT [ 31 : 16] :- ru_start + */ + union { + A_UINT32 ru_start__ru_end; + struct { + A_UINT32 ru_end: 16, + ru_start: 16; + }; + }; + + /* BIT [ 15 : 0] :- ru_end + * BIT [ 31 : 16] :- ru_start + */ + union { + A_UINT32 resp_ru_start__ru_end; + struct { + A_UINT32 resp_ru_end: 16, + resp_ru_start: 16; + }; + }; + + /* BIT [ 0 : 0 ] :- resp_type_valid + * BIT [ 5 : 1 ] :- ppdu_type - HTT_PPDU_STAT_PPDU_TYPE + * BIT [ 31: 6 ] :- reserved2 + */ + union { + A_UINT32 resp_type_vld_ppdu_type; + struct { + A_UINT32 resp_type_vld: 1, + ppdu_type: 5, + reserved2: 26; + }; + }; + + /* BIT [ 1 : 0 ] :- ltf_size + * BIT [ 2 : 2 ] :- stbc + * BIT [ 3 : 3 ] :- he_re (range extension) + * BIT [ 7 : 4 ] :- txbf + * BIT [ 11: 8 ] :- bw + * BIT [ 15: 12] :- nss NSS 1,2, ...8 + * BIT [ 19: 16] :- mcs + * BIT [ 23: 20] :- preamble + * BIT [ 27: 24] :- gi - HTT_PPDU_STATS_GI + * BIT [ 28: 28] :- dcm + * BIT [ 29: 29] :- ldpc + * BIT [ 31: 30] :- reserved4 + */ + union { + A_UINT32 rate_info; + struct { + A_UINT32 ltf_size: 2, + stbc: 1, + he_re: 1, + txbf: 4, + bw: 4, + nss: 4, + mcs: 4, + preamble: 4, + gi: 4, + dcm: 1, + ldpc: 1, + reserved4: 2; + }; + }; + + /* Note: resp_rate_info is only valid for if resp_type is UL + * BIT [ 1 : 0 ] :- ltf_size + * BIT [ 2 : 2 ] :- stbc + * BIT [ 3 : 3 ] :- he_re (range extension) + * BIT [ 7 : 4 ] :- reserved3 + * BIT [ 11: 8 ] :- bw + * BIT [ 15: 12] :- nss NSS 1,2, ...8 + * BIT [ 19: 16] :- mcs + * BIT [ 
23: 20] :- preamble + * BIT [ 27: 24] :- gi + * BIT [ 28: 28] :- dcm + * BIT [ 29: 29] :- ldpc + * BIT [ 31: 30] :- resp_ppdu_type - HTT_PPDU_STATS_RESP_PPDU_TYPE + */ + union { + A_UINT32 resp_rate_info; + struct { + A_UINT32 resp_ltf_size: 2, + resp_stbc: 1, + resp_he_re: 1, + reserved3: 4, + resp_bw: 4, + resp_nss: 4, + resp_mcs: 4, + resp_preamble: 4, + resp_gi: 4, + resp_dcm: 1, + resp_ldpc: 1, + resp_ppdu_type: 2; + }; + }; +} htt_ppdu_stats_user_rate_tlv; + +#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_M 0x000000ff +#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_S 0 + +#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_GET(_var) \ + (((_var) & HTT_PPDU_STATS_EMQ_MPDU_BITMAP_TLV_TID_NUM_M) >> \ + HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_S) + +#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_S)); \ + } while (0) + +#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_RESERVED_M 0x0000ff00 +#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_RESERVED_S 8 + + +#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_M 0xffff0000 +#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_S 16 + +#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_M) >> \ + HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_S) + +#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_S)); \ + } while (0) + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + /* BIT [ 7 : 0] :- tid_num + * BIT [ 15: 8] :- reserved0 + * BIT [ 31: 16] :- sw_peer_id + */ + union { + A_UINT32 sw_peer_id__tid_num; + struct { + A_UINT32 tid_num: 8, + reserved0: 8, + sw_peer_id: 16; + }; + }; + A_UINT32 start_seq; + A_UINT32 enq_bitmap[HTT_BA_64_BIT_MAP_SIZE_DWORDS]; +} htt_ppdu_stats_enq_mpdu_bitmap_64_tlv; + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + /* BIT [ 7 : 0] :- tid_num + * BIT [ 15: 8] :- reserved0 + * BIT [ 31: 16] :- sw_peer_id + */ + union { + A_UINT32 sw_peer_id__tid_num; + struct { + A_UINT32 tid_num: 8, + reserved0: 8, + sw_peer_id: 16; + }; + }; + A_UINT32 start_seq; + A_UINT32 enq_bitmap[HTT_BA_256_BIT_MAP_SIZE_DWORDS]; +} htt_ppdu_stats_enq_mpdu_bitmap_256_tlv; + +/* COMPLETION_STATUS defined in HTT_PPDU_STATS_USER_COMPLETION_STATUS */ +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_M 0x000000ff +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_S 0 + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_M 0x0000ff00 +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_S 8 + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_S) + +#define 
HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_M 0xffff0000 +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_S 16 + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_M 0x0000ffff +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_S 0 + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_M 0xffff0000 +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_S 16 + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_M 0x0000000f +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_S 0 + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_M 0x000000f0 +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_S 4 + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_M 0x00000100 +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_S 8 + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_S) + +#define 
HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_S)); \ + } while (0) + +enum HTT_PPDU_STATS_RESP_TYPE { + HTT_PPDU_STATS_NO_RESPONSE_EXPECTED_E = 0, + HTT_PPDU_STATS_ACK_EXPECTED_E = 1, + HTT_PPDU_STATS_BA_BITMAP_EXPECTED_E = 2, + HTT_PPDU_STATS_UL_MU_BA_EXPECTED_E = 3, + HTT_PPDU_STATS_UL_MU_BA_AND_DATA_EXPECTED_E = 4, + HTT_PPDU_STATS_CTS_EXPECTED_E = 5, + HTT_PPDU_STATS_MU_CBF_EXPECTED_E = 6, +}; +typedef enum HTT_PPDU_STATS_RESP_TYPE HTT_PPDU_STATS_RESP_TYPE; + +/* Refer HTT_PPDU_STATS_RESP_TYPE */ +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_M 0x00001e00 +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_S 9 + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_S)); \ + } while (0) + +enum HTT_PPDU_STATS_USER_COMPLETION_STATUS { + HTT_PPDU_STATS_USER_STATUS_OK, + HTT_PPDU_STATS_USER_STATUS_FILTERED, + HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT, + HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH, + HTT_PPDU_STATS_USER_STATUS_ABORT, +}; +typedef enum HTT_PPDU_STATS_USER_COMPLETION_STATUS HTT_PPDU_STATS_USER_COMPLETION_STATUS; + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + /* BIT [ 7 : 0] :- completion_status + * BIT [ 15: 8] :- tid_num + * BIT [ 31: 16] :- sw_peer_id + */ + union { + A_UINT32 sw_peer_id__tid_num__completion_status; + struct { + A_UINT32 completion_status: 8, + tid_num: 8, + sw_peer_id: 16; + }; + }; + + /* RSSI value of last ack packet (units = dB above noise floor) */ + A_UINT32 ack_rssi; + + /* BIT [ 15 : 0] :- mpdu_tried + * BIT [ 31 : 16] :- mpdu_success + */ + union { + A_UINT32 mpdu_tried__mpdu_success; + struct { + A_UINT32 mpdu_tried: 16, + mpdu_success: 16; + }; + }; + + /* BIT [ 3 : 0] :- long_retries + * BIT [ 7 : 4] :- short_retries + * BIT [ 8 : 8] :- is_ampdu + * BIT [ 12: 9] :- resp_type + * BIT [ 31: 13] :- reserved0 + */ + union { + A_UINT32 resp_type_is_ampdu__short_retry__long_retry; + struct { + A_UINT32 long_retries: 4, + short_retries: 4, + is_ampdu: 1, + resp_type: 4, + reserved0: 19; + }; + }; +} htt_ppdu_stats_user_cmpltn_common_tlv; + +#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_M 0x000000ff +#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_S 0 + +#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_GET(_var) \ + (((_var) & HTT_PPDU_STATS_EMQ_MPDU_BITMAP_TLV_TID_NUM_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_RESERVED_M 0x0000ff00 +#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_RESERVED_S 8 + + +#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_M 0xffff0000 +#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_S 16 + +#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(_var) \ + (((_var) & 
HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_S)); \ + } while (0) + + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + /* BIT [ 7 : 0] :- tid_num + * BIT [ 15: 8] :- reserved0 + * BIT [ 31: 16] :- sw_peer_id + */ + union { + A_UINT32 sw_peer_id__tid_num; + struct { + A_UINT32 tid_num: 8, + reserved0: 8, + sw_peer_id: 16; + }; + }; + A_UINT32 ba_seq_no; + A_UINT32 ba_bitmap[HTT_BA_64_BIT_MAP_SIZE_DWORDS]; +} htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv; + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + /* BIT [ 7 : 0] :- tid_num + * BIT [ 15: 8] :- reserved0 + * BIT [ 31: 16] :- sw_peer_id + */ + union { + A_UINT32 sw_peer_id__tid_num; + struct { + A_UINT32 tid_num: 8, + reserved0: 8, + sw_peer_id: 16; + }; + }; + A_UINT32 ba_seq_no; + A_UINT32 ba_bitmap[HTT_BA_256_BIT_MAP_SIZE_DWORDS]; +} htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv; + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_M 0x0000ffff +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_S 0 + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_M 0x000001ff +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_S 0 + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_M 0x01fffe00 +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_S 9 + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_M 0xfe000000 +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_S 25 + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_SET(_var, _val) \ + do { \ + 
HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_M 0x0000ffff +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_S 0 + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_S)); \ + } while (0) + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_M 0xffff0000 +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_S 16 + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(_var) \ + (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_M) >> \ + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_S) + +#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_S)); \ + } while (0) + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + A_UINT32 ppdu_id; + + /* BIT [ 15 : 0] :- sw_peer_id + * BIT [ 31 : 16] :- reserved0 + */ + union { + A_UINT32 rsvd_sw_peer_id; + struct { + A_UINT32 sw_peer_id: 16, + reserved0: 16; + }; + }; + + /* BIT [ 8 : 0] :- num_mpdu + * BIT [ 24 : 9] :- num_msdu + * BIT [ 31 : 25] :- tid_num + */ + union { + A_UINT32 tid_num__num_msdu__num_mpdu; + struct { + A_UINT32 num_mpdu: 9, + num_msdu: 16, + tid_num: 7; + }; + }; + + /* BIT [ 15 : 0] :- current_seq + * BIT [ 31 : 16] :- start_seq + */ + union { + A_UINT32 start_seq__current_seq; + struct { + A_UINT32 current_seq: 16, + start_seq: 16; + }; + }; + + A_UINT32 success_bytes; +} htt_ppdu_stats_user_compltn_ack_ba_status_tlv; + +/* FLOW_TYPE defined in HTT_TX_FLOW_TYPE */ +#define HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_M 0x000000ff +#define HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_S 0 + +#define HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(_var) \ + (((_var) & HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_M) >> \ + HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_S) + +#define HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_S)); \ + } while (0) + +#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_M 0x0001ff00 +#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_S 8 + +#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(_var) \ + (((_var) & HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_M) >> \ + HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_S) + +#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_S)); \ + } while (0) + +#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_M 0x01fe0000 +#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_S 17 + +#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(_var) \ + (((_var) & HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_M) >> \ + HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_S) + +#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU, _val); \ 
+ ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_S)); \ + } while (0) + +#define HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_M 0x000000ff +#define HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_S 0 + +#define HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(_var) \ + (((_var) & HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_M) >> \ + HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_S) + +#define HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_TID_NUM, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_S)); \ + } while (0) + +#define HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_M 0x0000ff00 +#define HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_S 8 + +#define HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(_var) \ + (((_var) & HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_M) >> \ + HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_S) + +#define HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_S)); \ + } while (0) + +#define HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_M 0xffff0000 +#define HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_S 16 + +#define HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(_var) \ + (((_var) & HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_M) >> \ + HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_S) + +#define HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID, _val); \ + ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_S)); \ + } while (0) + +enum HTT_TX_FLOW_TYPE { + HTT_TX_TID_FRAMEQ, + HTT_TX_TQM_MSDUQ, + HTT_TQM_MPDUQ, +}; + +enum HTT_FLUSH_STATUS_DROP_REASON { + HTT_FLUSH_PEER_DELETE, + HTT_FLUSH_TID_DELETE, + HTT_FLUSH_TTL_EXCEEDED, + HTT_FLUSH_EXCESS_RETRIES, + HTT_FLUSH_REINJECT, +}; + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + A_UINT32 drop_reason; + /* BIT [ 7 : 0] :- flow_type + * BIT [ 16: 8] :- num_mpdu + * BIT [ 30: 17] :- num_msdu + * BIT [ 31: 31] :- reserved0 + */ + union { + A_UINT32 num_msdu__num_mpdu__flow_type; + struct { + A_UINT32 flow_type: 8, + num_mpdu: 9, + num_msdu: 14, + reserved0: 1; + }; + }; + + /* BIT [ 7 : 0] :- tid_num + * BIT [ 15 : 8] :- queue_type + * BIT [ 31 : 16] :- sw_peer_id + */ + union { + A_UINT32 sw_peer_id__queue_type__tid_num; + struct { + A_UINT32 tid_num: 8, + queue_type: 8, + sw_peer_id: 16; + }; + }; +} htt_ppdu_stats_flush_tlv; + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + /* Future purpose */ + A_UINT32 reserved1; /* set to 0x0 */ + A_UINT32 reserved2; /* set to 0x0 */ + A_UINT32 reserved3; /* set to 0x0 */ + + /* mgmt/ctrl frame payload + * The size of payload (in bytes) can be derived from the length in + * tlv parametes, minus the 12 bytes of the above fields. + */ + A_UINT32 payload[1]; +} htt_ppdu_stats_tx_mgmtctrl_payload_tlv; + + +#endif //__HTT_PPDU_STATS_H__ diff --git a/drivers/staging/fw-api/fw/htt_stats.h b/drivers/staging/fw-api/fw/htt_stats.h index a68df97ad134968f240e8c11adfd51482c42210c..d343aa0242f667762e7828afed6aa537005c8965 100644 --- a/drivers/staging/fw-api/fw/htt_stats.h +++ b/drivers/staging/fw-api/fw/htt_stats.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -97,7 +97,9 @@ enum htt_dbg_ext_stats_type { /* HTT_DBG_EXT_STATS_TQM_CMDQ * PARAMS: - * - No Params + * - config_param0: + * [Bit15: Bit0 ] cmdq id :if 0xFFFF print all cmdq's + * [Bit31: Bit16] reserved * RESP MSG: * - htt_tx_tqm_cmdq_stats_t */ @@ -139,7 +141,7 @@ enum htt_dbg_ext_stats_type { * 1 bit htt_peer_details_tlv * 2 bit htt_tx_peer_rate_stats_tlv * 3 bit htt_rx_peer_rate_stats_tlv - * 4 bit htt_tx_tid_stats_tlv + * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv * 5 bit htt_rx_tid_stats_tlv * 6 bit htt_msdu_flow_stats_tlv * - config_param2: [Bit31 : Bit0] mac_addr31to0 @@ -168,7 +170,9 @@ enum htt_dbg_ext_stats_type { /* HTT_DBG_EXT_STATS_RING_IF_INFO * PARAMS: - * - No Params + * - config_param0: + * [Bit15: Bit0 ] ring id :if 0xFFFF print all rings + * [Bit31: Bit16] reserved * RESP MSG: * - htt_ring_if_stats_t */ @@ -176,6 +180,9 @@ enum htt_dbg_ext_stats_type { /* HTT_DBG_EXT_STATS_SRNG_INFO * PARAMS: + * - config_param0: + * [Bit15: Bit0 ] ring id :if 0xFFFF print all rings + * [Bit31: Bit16] reserved * - No Params * RESP MSG: * - htt_sring_stats_t @@ -209,6 +216,50 @@ enum htt_dbg_ext_stats_type { */ HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST = 18, + /* HTT_DBG_EXT_STATS_PDEV_CCA_STATS + * PARAMS: + * - config_param0: + * [Bit0] - 1 sec interval histogram + * [Bit1] - 100ms interval histogram + * [Bit3] - Cumulative CCA stats + * RESP MSG: + * - htt_pdev_cca_stats_t + */ + HTT_DBG_EXT_STATS_PDEV_CCA_STATS = 19, + + /* HTT_DBG_EXT_STATS_TWT_SESSIONS + * PARAMS: + * - config_param0: + * No params + * RESP MSG: + * - htt_pdev_twt_sessions_stats_t + */ + HTT_DBG_EXT_STATS_TWT_SESSIONS = 20, + + /* HTT_DBG_EXT_STATS_REO_CNTS + * PARAMS: + * - config_param0: + * No params + * RESP MSG: + * - htt_soc_reo_resource_stats_t + */ + HTT_DBG_EXT_STATS_REO_RESOURCE_STATS = 21, + + /* HTT_DBG_EXT_STATS_TX_SOUNDING_INFO + * PARAMS: + * - config_param0: + * [Bit0] vdev_id_set:1 + * set to 1 if vdev_id is set and vdev stats are requested + * [Bit8 : Bit1] vdev_id:8 + * note:0xFF to get all active vdevs based on pdev_mask. 
+ * [Bit31 : Bit9] rsvd:22 + + * RESP MSG: + * - htt_tx_sounding_stats_t + */ + HTT_DBG_EXT_STATS_TX_SOUNDING_INFO = 22, + + /* keep this last */ HTT_DBG_NUM_EXT_STATS = 256, }; @@ -282,6 +333,19 @@ typedef enum { HTT_STATS_TX_DE_COMPL_STATS_TAG = 65, /* htt_tx_de_compl_stats_tlv */ HTT_STATS_WHAL_TX_TAG = 66, /* htt_hw_stats_whal_tx_tlv */ HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67, /* htt_tx_pdev_stats_sifs_hist_tlv_v */ + HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG = 68, /* htt_rx_pdev_fw_stats_phy_err_tlv */ + HTT_STATS_TX_TID_DETAILS_V1_TAG = 69, /* htt_tx_tid_stats_v1_tlv */ + HTT_STATS_PDEV_CCA_1SEC_HIST_TAG = 70, /* htt_pdev_cca_stats_hist_tlv (for 1 sec interval stats) */ + HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG = 71, /* htt_pdev_cca_stats_hist_tlv (for 100 msec interval stats) */ + HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72, /* htt_pdev_stats_cca_stats_tlv */ + HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73, /* htt_pdev_stats_cca_counters_tlv */ + HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74, /* htt_tx_pdev_mpdu_stats_tlv */ + HTT_STATS_PDEV_TWT_SESSIONS_TAG = 75, /* htt_pdev_stats_twt_sessions_tlv */ + HTT_STATS_PDEV_TWT_SESSION_TAG = 76, /* htt_pdev_stats_twt_session_tlv */ + HTT_STATS_RX_REFILL_RXDMA_ERR_TAG = 77, /* htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v */ + HTT_STATS_RX_REFILL_REO_ERR_TAG = 78, /* htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v */ + HTT_STATS_RX_REO_RESOURCE_STATS_TAG = 79, /* htt_rx_reo_debug_stats_tlv_v */ + HTT_STATS_TX_SOUNDING_STATS_TAG = 80, /* htt_tx_sounding_stats_tlv */ HTT_STATS_MAX_TAG, } htt_tlv_tag_t; @@ -333,10 +397,11 @@ typedef enum { HTT_TX_PDEV_MAX_URRN_STATS = 3, } htt_tx_pdev_underrun_enum; -#define HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 71 -#define HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9 -#define HTT_TX_PDEV_MAX_PHY_ERR_STATS 18 -#define HTT_TX_PDEV_SCHED_TX_MODE_MAX 4 +#define HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 71 +#define HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9 +#define HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10 +#define HTT_TX_PDEV_MAX_PHY_ERR_STATS 18 +#define HTT_TX_PDEV_SCHED_TX_MODE_MAX 4 #define HTT_RX_STATS_REFILL_MAX_RING 4 #define HTT_RX_STATS_RXDMA_MAX_ERR 16 @@ -454,6 +519,22 @@ typedef struct { A_UINT32 num_total_ppdus_tried_ota; /* Number of data PPDUs tried over the air (OTA) */ A_UINT32 num_data_ppdus_tried_ota; + /* Num Local control/mgmt frames (MSDUs) queued */ + A_UINT32 local_ctrl_mgmt_enqued; + /* local_ctrl_mgmt_freed: + * Num Local control/mgmt frames (MSDUs) done + * It includes all local ctrl/mgmt completions + * (acked, no ack, flush, TTL, etc) + */ + A_UINT32 local_ctrl_mgmt_freed; + /* Num Local data frames (MSDUs) queued */ + A_UINT32 local_data_enqued; + /* local_data_freed: + * Num Local data frames (MSDUs) done + * It includes all local data completions + * (acked, no ack, flush, TTL, etc) + */ + A_UINT32 local_data_freed; } htt_tx_pdev_stats_cmn_tlv; #define HTT_TX_PDEV_STATS_URRN_TLV_SZ(_num_elems) (sizeof(A_UINT32) * (_num_elems)) @@ -511,6 +592,7 @@ typedef struct _htt_tx_pdev_stats { htt_tx_pdev_stats_sifs_tlv_v sifs_tlv; htt_tx_pdev_stats_flush_tlv_v flush_tlv; htt_tx_pdev_stats_phy_err_tlv_v phy_err_tlv; + htt_tx_pdev_stats_sifs_hist_tlv_v sifs_hist_tlv; } htt_tx_pdev_stats_t; /* == SOC ERROR STATS == */ @@ -658,6 +740,13 @@ typedef struct _htt_msdu_flow_stats_tlv { * BIT [31 : 21] :- reserved */ A_UINT32 tx_flow_no__tid_num__drop_rule; + A_UINT32 last_cycle_enqueue_count; + A_UINT32 last_cycle_dequeue_count; + A_UINT32 last_cycle_drop_count; + /* BIT [15 : 0] :- current_drop_th + * BIT [31 : 16] :- reserved + */ + A_UINT32 
current_drop_th; } htt_msdu_flow_stats_tlv; #define MAX_HTT_TID_NAME 8 @@ -749,6 +838,43 @@ typedef struct _htt_tx_tid_stats_tlv { A_UINT32 tid_tx_airtime; } htt_tx_tid_stats_tlv; +/* Tidq stats */ +typedef struct _htt_tx_tid_stats_v1_tlv { + htt_tlv_hdr_t tlv_hdr; + /* Stored as little endian */ + A_UINT8 tid_name[MAX_HTT_TID_NAME]; + /* BIT [15 : 0] :- sw_peer_id + * BIT [31 : 16] :- tid_num + */ + A_UINT32 sw_peer_id__tid_num; + /* BIT [ 7 : 0] :- num_sched_pending + * BIT [15 : 8] :- num_ppdu_in_hwq + * BIT [31 : 16] :- reserved + */ + A_UINT32 num_sched_pending__num_ppdu_in_hwq; + A_UINT32 tid_flags; + /* Max qdepth in bytes reached by this tid*/ + A_UINT32 max_qdepth_bytes; + /* number of msdus qdepth reached max */ + A_UINT32 max_qdepth_n_msdus; + /* Made reserved this field */ + A_UINT32 rsvd; + + A_UINT32 qdepth_bytes; + A_UINT32 qdepth_num_msdu; + A_UINT32 qdepth_num_mpdu; + A_UINT32 last_scheduled_tsmp; + A_UINT32 pause_module_id; + A_UINT32 block_module_id; + /* tid tx airtime in sec */ + A_UINT32 tid_tx_airtime; + A_UINT32 allow_n_flags; + /* BIT [15 : 0] :- sendn_frms_allowed + * BIT [31 : 16] :- reserved + */ + A_UINT32 sendn_frms_allowed; +} htt_tx_tid_stats_v1_tlv; + #define HTT_RX_TID_STATS_SW_PEER_ID_M 0x0000ffff #define HTT_RX_TID_STATS_SW_PEER_ID_S 0 #define HTT_RX_TID_STATS_TID_NUM_M 0xffff0000 @@ -823,6 +949,16 @@ typedef struct { A_UINT32 peer_rx_airtime; /* Peer current rssi in dBm */ A_INT32 rssi; + /* Total enqueued, dequeued and dropped msdu's for peer */ + A_UINT32 peer_enqueued_count_low; + A_UINT32 peer_enqueued_count_high; + A_UINT32 peer_dequeued_count_low; + A_UINT32 peer_dequeued_count_high; + A_UINT32 peer_dropped_count_low; + A_UINT32 peer_dropped_count_high; + /* Total ppdu transmitted bytes for peer: includes MAC header overhead */ + A_UINT32 ppdu_transmitted_bytes_low; + A_UINT32 ppdu_transmitted_bytes_high; } htt_peer_stats_cmn_tlv; typedef struct { @@ -962,7 +1098,7 @@ typedef enum { #define HTT_DBG_EXT_STATS_PEER_REQ_MODE_SET(_var, _val) \ do { \ - ((_var) |= ((_val) << HTT_DBG_EXT_STATS_PEER_REQ_MODE_M)); \ + ((_var) |= ((_val) << HTT_DBG_EXT_STATS_PEER_REQ_MODE_S)); \ } while (0) #define HTT_DBG_EXT_STATS_PEER_INFO_SW_PEER_ID_GET(_var) \ @@ -980,9 +1116,10 @@ typedef enum { * - HTT_STATS_PEER_DETAILS_TAG * - HTT_STATS_PEER_TX_RATE_STATS_TAG * - HTT_STATS_PEER_RX_RATE_STATS_TAG - * - HTT_STATS_TX_TID_DETAILS_TAG (multiple) + * - HTT_STATS_TX_TID_DETAILS_TAG (multiple) (deprecated, so 0 elements in updated systems) * - HTT_STATS_RX_TID_DETAILS_TAG (multiple) * - HTT_STATS_PEER_MSDU_FLOWQ_TAG (multiple) + * - HTT_STATS_TX_TID_DETAILS_V1_TAG (multiple) */ /* NOTE: * This structure is for documentation, and cannot be safely used directly. @@ -997,6 +1134,7 @@ typedef struct _htt_peer_stats { htt_tx_tid_stats_tlv tx_tid_stats[1]; htt_rx_tid_stats_tlv rx_tid_stats[1]; htt_msdu_flow_stats_tlv msdu_flowq[1]; + htt_tx_tid_stats_v1_tlv tx_tid_stats_v1[1]; } htt_peer_stats_t; /* =========== ACTIVE PEER LIST ========== */ @@ -1327,6 +1465,10 @@ typedef struct { /* == TX MU STATS == */ +#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS 4 +#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS 8 +#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS 74 + typedef struct { htt_tlv_hdr_t tlv_hdr; /* mu-mimo sw sched cmd stats */ @@ -1334,11 +1476,24 @@ typedef struct { A_UINT32 mu_mimo_sch_failed; /* MU PPDU stats per hwQ */ A_UINT32 mu_mimo_ppdu_posted; + /* + * Counts the number of users in each transmission of + * the given TX mode. 
+ * + * Index is the number of users - 1. + */ + A_UINT32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS]; + A_UINT32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS]; + A_UINT32 ax_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; } htt_tx_pdev_mu_mimo_sch_stats_tlv; typedef struct { htt_tlv_hdr_t tlv_hdr; /* mu-mimo mpdu level stats */ + /* + * This first block of stats is limited to 11ac + * MU-MIMO transmission. + */ A_UINT32 mu_mimo_mpdus_queued_usr; A_UINT32 mu_mimo_mpdus_tried_usr; A_UINT32 mu_mimo_mpdus_failed_usr; @@ -1346,12 +1501,46 @@ typedef struct { A_UINT32 mu_mimo_err_no_ba_usr; A_UINT32 mu_mimo_mpdu_underrun_usr; A_UINT32 mu_mimo_ampdu_underrun_usr; + + A_UINT32 ax_mu_mimo_mpdus_queued_usr; + A_UINT32 ax_mu_mimo_mpdus_tried_usr; + A_UINT32 ax_mu_mimo_mpdus_failed_usr; + A_UINT32 ax_mu_mimo_mpdus_requeued_usr; + A_UINT32 ax_mu_mimo_err_no_ba_usr; + A_UINT32 ax_mu_mimo_mpdu_underrun_usr; + A_UINT32 ax_mu_mimo_ampdu_underrun_usr; + + A_UINT32 ax_ofdma_mpdus_queued_usr; + A_UINT32 ax_ofdma_mpdus_tried_usr; + A_UINT32 ax_ofdma_mpdus_failed_usr; + A_UINT32 ax_ofdma_mpdus_requeued_usr; + A_UINT32 ax_ofdma_err_no_ba_usr; + A_UINT32 ax_ofdma_mpdu_underrun_usr; + A_UINT32 ax_ofdma_ampdu_underrun_usr; } htt_tx_pdev_mu_mimo_mpdu_stats_tlv; +#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC 1 /* SCHED_TX_MODE_MU_MIMO_AC */ +#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX 2 /* SCHED_TX_MODE_MU_MIMO_AX */ +#define HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX 3 /* SCHED_TX_MODE_MU_OFDMA_AX */ + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + /* mpdu level stats */ + A_UINT32 mpdus_queued_usr; + A_UINT32 mpdus_tried_usr; + A_UINT32 mpdus_failed_usr; + A_UINT32 mpdus_requeued_usr; + A_UINT32 err_no_ba_usr; + A_UINT32 mpdu_underrun_usr; + A_UINT32 ampdu_underrun_usr; + A_UINT32 user_index; + A_UINT32 tx_sched_mode; /* HTT_STATS_TX_SCHED_MODE_xxx */ +} htt_tx_pdev_mpdu_stats_tlv; + /* STATS_TYPE : HTT_DBG_EXT_STATS_PDEV_TX_MU * TLV_TAGS: * - HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG (multiple) - * - HTT_STATS_TX_PDEV_MUMIMO_MPDU_STATS_TAG (multiple) + * - HTT_STATS_TX_PDEV_MPDU_STATS_TAG (multiple) */ /* NOTE: * This structure is for documentation, and cannot be safely used directly. @@ -1359,7 +1548,11 @@ typedef struct { */ typedef struct { htt_tx_pdev_mu_mimo_sch_stats_tlv mu_mimo_sch_stats_tlv[1]; /* WAL_TX_STATS_MAX_GROUP_SIZE */ - htt_tx_pdev_mu_mimo_mpdu_stats_tlv mu_mimo_mpdu_stats_tlv[1]; /* WAL_TX_STATS_MAX_NUM_USERS */ + /* + * Note that though mu_mimo_mpdu_stats_tlv is named MU-MIMO, + * it can also hold MU-OFDMA stats. 
+ */ + htt_tx_pdev_mpdu_stats_tlv mu_mimo_mpdu_stats_tlv[1]; /* WAL_TX_STATS_MAX_NUM_USERS */ } htt_tx_pdev_mu_mimo_stats_t; /* == TX SCHED STATS == */ @@ -2337,6 +2530,12 @@ typedef struct { #define HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4 #define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8 #define HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT +#define HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4 +#define HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8 +#define HTT_TX_PDEV_STATS_NUM_LTF 4 +#define HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \ + (HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \ + HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) #define HTT_TX_PDEV_RATE_STATS_MAC_ID_M 0x000000ff #define HTT_TX_PDEV_RATE_STATS_MAC_ID_S 0 @@ -2366,8 +2565,11 @@ typedef struct { A_UINT32 ack_rssi; A_UINT32 tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + + /* tx_xx_mcs: currently unused */ A_UINT32 tx_su_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; A_UINT32 tx_mu_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + A_UINT32 tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; /* element 0,1, ...7 -> NSS 1,2, ...8 */ A_UINT32 tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */ A_UINT32 tx_stbc[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; @@ -2378,9 +2580,50 @@ typedef struct { /* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */ A_UINT32 tx_dcm[HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS]; - /* Number of CTS-acknowledged RTS packets */ A_UINT32 rts_success; + + /* + * Counters for legacy 11a and 11b transmissions. + * + * The index corresponds to: + * + * CCK: 0: 1 Mbps, 1: 2 Mbps, 2: 5.5 Mbps, 3: 11 Mbps + * + * OFDM: 0: 6 Mbps, 1: 9 Mbps, 2: 12 Mbps, 3: 18 Mbps, + * 4: 24 Mbps, 5: 36 Mbps, 6: 48 Mbps, 7: 54 Mbps + */ + A_UINT32 tx_legacy_cck_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS]; + A_UINT32 tx_legacy_ofdm_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS]; + + A_UINT32 ac_mu_mimo_tx_ldpc; + A_UINT32 ax_mu_mimo_tx_ldpc; + A_UINT32 ofdma_tx_ldpc; + + /* + * Counters for 11ax HE LTF selection during TX. 
+ * + * The index corresponds to: + * + * 0: unused, 1: 1x LTF, 2: 2x LTF, 3: 4x LTF + */ + A_UINT32 tx_he_ltf[HTT_TX_PDEV_STATS_NUM_LTF]; + + A_UINT32 ac_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + A_UINT32 ax_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + A_UINT32 ofdma_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + + A_UINT32 ac_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + A_UINT32 ax_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + A_UINT32 ofdma_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + + A_UINT32 ac_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; + A_UINT32 ax_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; + A_UINT32 ofdma_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; + + A_UINT32 ac_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + A_UINT32 ax_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + A_UINT32 ofdma_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; } htt_tx_pdev_rate_stats_tlv; /* STATS_TYPE : HTT_DBG_EXT_STATS_PDEV_TX_RATE @@ -2445,6 +2688,7 @@ typedef struct { /* Counters to track number of rx packets in each GI in each mcs (0-11) */ A_UINT32 rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS][HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; + A_INT32 rssi_in_dbm; /* rx Signal Strength value in dBm unit */ } htt_rx_pdev_rate_stats_tlv; @@ -2475,6 +2719,21 @@ typedef struct { A_UINT32 ofld_remote_data_buf_recycle_cnt; /* Num remote free buf given to offload */ A_UINT32 ofld_remote_free_buf_indication_cnt; + + /* Num unicast packets from local path indicated to host */ + A_UINT32 ofld_buf_to_host_data_msdu_uc; + /* Num unicast packets from REO indicated to host */ + A_UINT32 reo_fw_ring_to_host_data_msdu_uc; + + /* Num Packets received from WBM SW1 ring */ + A_UINT32 wbm_sw_ring_reap; + /* Num packets from WBM forwarded from fw to host via WBM */ + A_UINT32 wbm_forward_to_host_cnt; + /* Num packets from WBM recycled to target refill ring */ + A_UINT32 wbm_target_recycle_cnt; + + /* Total Num of recycled to refill ring, including packets from WBM and REO */ + A_UINT32 target_refill_ring_recycle_cnt; } htt_rx_soc_fw_stats_tlv; #define HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_SZ(_num_elems) (sizeof(A_UINT32) * (_num_elems)) @@ -2496,6 +2755,89 @@ typedef struct { A_UINT32 refill_ring_num_refill[1]; /* HTT_RX_STATS_REFILL_MAX_RING */ } htt_rx_soc_fw_refill_ring_num_refill_tlv_v; +/* RXDMA error code from WBM released packets */ +typedef enum { + HTT_RX_RXDMA_OVERFLOW_ERR = 0, + HTT_RX_RXDMA_MPDU_LENGTH_ERR = 1, + HTT_RX_RXDMA_FCS_ERR = 2, + HTT_RX_RXDMA_DECRYPT_ERR = 3, + HTT_RX_RXDMA_TKIP_MIC_ERR = 4, + HTT_RX_RXDMA_UNECRYPTED_ERR = 5, + HTT_RX_RXDMA_MSDU_LEN_ERR = 6, + HTT_RX_RXDMA_MSDU_LIMIT_ERR = 7, + HTT_RX_RXDMA_WIFI_PARSE_ERR = 8, + HTT_RX_RXDMA_AMSDU_PARSE_ERR = 9, + HTT_RX_RXDMA_SA_TIMEOUT_ERR = 10, + HTT_RX_RXDMA_DA_TIMEOUT_ERR = 11, + HTT_RX_RXDMA_FLOW_TIMEOUT_ERR = 12, + HTT_RX_RXDMA_FLUSH_REQUEST = 13, + HTT_RX_RXDMA_ERR_CODE_RVSD0 = 14, + HTT_RX_RXDMA_ERR_CODE_RVSD1 = 15, + + /* + * This MAX_ERR_CODE should not be used in any host/target messages, + * so that even though it is defined within a host/target interface + * definition header file, it isn't actually part of the host/target + * interface, and thus can be modified. 
+ */ + HTT_RX_RXDMA_MAX_ERR_CODE +} htt_rx_rxdma_error_code_enum; + +/* NOTE: Variable length TLV, use length spec to infer array size */ +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + /* NOTE: + * The mapping of RXDMA error types to rxdma_err array elements is HW dependent. + * It is expected but not required that the target will provide a rxdma_err element + * for each of the htt_rx_rxdma_error_code_enum values, up to but not including + * MAX_ERR_CODE. The host should ignore any array elements whose + * indices are >= the MAX_ERR_CODE value the host was compiled with. + */ + A_UINT32 rxdma_err[1]; /* HTT_RX_RXDMA_MAX_ERR_CODE */ +} htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v; + +/* REO error code from WBM released packets */ +typedef enum { + HTT_RX_REO_QUEUE_DESC_ADDR_ZERO = 0, + HTT_RX_REO_QUEUE_DESC_NOT_VALID = 1, + HTT_RX_AMPDU_IN_NON_BA = 2, + HTT_RX_NON_BA_DUPLICATE = 3, + HTT_RX_BA_DUPLICATE = 4, + HTT_RX_REGULAR_FRAME_2K_JUMP = 5, + HTT_RX_BAR_FRAME_2K_JUMP = 6, + HTT_RX_REGULAR_FRAME_OOR = 7, + HTT_RX_BAR_FRAME_OOR = 8, + HTT_RX_BAR_FRAME_NO_BA_SESSION = 9, + HTT_RX_BAR_FRAME_SN_EQUALS_SSN = 10, + HTT_RX_PN_CHECK_FAILED = 11, + HTT_RX_2K_ERROR_HANDLING_FLAG_SET = 12, + HTT_RX_PN_ERROR_HANDLING_FLAG_SET = 13, + HTT_RX_QUEUE_DESCRIPTOR_BLOCKED_SET = 14, + HTT_RX_REO_ERR_CODE_RVSD = 15, + + /* + * This MAX_ERR_CODE should not be used in any host/target messages, + * so that even though it is defined within a host/target interface + * definition header file, it isn't actually part of the host/target + * interface, and thus can be modified. + */ + HTT_RX_REO_MAX_ERR_CODE +} htt_rx_reo_error_code_enum; + +/* NOTE: Variable length TLV, use length spec to infer array size */ +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + /* NOTE: + * The mapping of REO error types to reo_err array elements is HW dependent. + * It is expected but not required that the target will provide a rxdma_err element + * for each of the htt_rx_reo_error_code_enum values, up to but not including + * MAX_ERR_CODE. The host should ignore any array elements whose + * indices are >= the MAX_ERR_CODE value the host was compiled with. + */ + A_UINT32 reo_err[1]; /* HTT_RX_REO_MAX_ERR_CODE */ +} htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v; /* NOTE: * This structure is for documentation, and cannot be safely used directly. @@ -2505,6 +2847,8 @@ typedef struct { htt_rx_soc_fw_stats_tlv fw_tlv; htt_rx_soc_fw_refill_ring_empty_tlv_v fw_refill_ring_empty_tlv; htt_rx_soc_fw_refill_ring_num_refill_tlv_v fw_refill_ring_num_refill_tlv; + htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v fw_refill_ring_num_rxdma_err_tlv; + htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v fw_refill_ring_num_reo_err_tlv; } htt_rx_soc_stats_t; /* == RX PDEV STATS == */ @@ -2622,8 +2966,72 @@ typedef struct { A_UINT32 rx_ring_restore_cnt; /* Num rx flush issued */ A_UINT32 rx_flush_cnt; + /* Num rx recovery */ + A_UINT32 rx_recovery_reset_cnt; } htt_rx_pdev_fw_stats_tlv; +#define HTT_STATS_PHY_ERR_MAX 43 + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + /* BIT [ 7 : 0] :- mac_id + * BIT [31 : 8] :- reserved + */ + A_UINT32 mac_id__word; + /* Num of phy err */ + A_UINT32 total_phy_err_cnt; + /* Counts of different types of phy errs + * The mapping of PHY error types to phy_err array elements is HW dependent. 
+ * The only currently-supported mapping is shown below: + * + * 0 phyrx_err_phy_off Reception aborted due to receiving a PHY_OFF TLV + * 1 phyrx_err_synth_off + * 2 phyrx_err_ofdma_timing + * 3 phyrx_err_ofdma_signal_parity + * 4 phyrx_err_ofdma_rate_illegal + * 5 phyrx_err_ofdma_length_illegal + * 6 phyrx_err_ofdma_restart + * 7 phyrx_err_ofdma_service + * 8 phyrx_err_ppdu_ofdma_power_drop + * 9 phyrx_err_cck_blokker + * 10 phyrx_err_cck_timing + * 11 phyrx_err_cck_header_crc + * 12 phyrx_err_cck_rate_illegal + * 13 phyrx_err_cck_length_illegal + * 14 phyrx_err_cck_restart + * 15 phyrx_err_cck_service + * 16 phyrx_err_cck_power_drop + * 17 phyrx_err_ht_crc_err + * 18 phyrx_err_ht_length_illegal + * 19 phyrx_err_ht_rate_illegal + * 20 phyrx_err_ht_zlf + * 21 phyrx_err_false_radar_ext + * 22 phyrx_err_green_field + * 23 phyrx_err_bw_gt_dyn_bw + * 24 phyrx_err_leg_ht_mismatch + * 25 phyrx_err_vht_crc_error + * 26 phyrx_err_vht_siga_unsupported + * 27 phyrx_err_vht_lsig_len_invalid + * 28 phyrx_err_vht_ndp_or_zlf + * 29 phyrx_err_vht_nsym_lt_zero + * 30 phyrx_err_vht_rx_extra_symbol_mismatch + * 31 phyrx_err_vht_rx_skip_group_id0 + * 32 phyrx_err_vht_rx_skip_group_id1to62 + * 33 phyrx_err_vht_rx_skip_group_id63 + * 34 phyrx_err_ofdm_ldpc_decoder_disabled + * 35 phyrx_err_defer_nap + * 36 phyrx_err_fdomain_timeout + * 37 phyrx_err_lsig_rel_check + * 38 phyrx_err_bt_collision + * 39 phyrx_err_unsupported_mu_feedback + * 40 phyrx_err_ppdu_tx_interrupt_rx + * 41 phyrx_err_unsupported_cbf + * 42 phyrx_err_other + */ + A_UINT32 phy_err[HTT_STATS_PHY_ERR_MAX]; +} htt_rx_pdev_fw_stats_phy_err_tlv; + #define HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_SZ(_num_elems) (sizeof(A_UINT32) * (_num_elems)) /* NOTE: Variable length TLV, use length spec to infer array size */ @@ -2660,6 +3068,323 @@ typedef struct { htt_rx_pdev_fw_stats_tlv fw_stats_tlv; htt_rx_pdev_fw_ring_mpdu_err_tlv_v fw_ring_mpdu_err_tlv; htt_rx_pdev_fw_mpdu_drop_tlv_v fw_ring_mpdu_drop; + htt_rx_pdev_fw_stats_phy_err_tlv fw_stats_phy_err_tlv; } htt_rx_pdev_stats_t; +#define HTT_PDEV_CCA_STATS_TX_FRAME_INFO_PRESENT (0x1) +#define HTT_PDEV_CCA_STATS_RX_FRAME_INFO_PRESENT (0x2) +#define HTT_PDEV_CCA_STATS_RX_CLEAR_INFO_PRESENT (0x4) +#define HTT_PDEV_CCA_STATS_MY_RX_FRAME_INFO_PRESENT (0x8) +#define HTT_PDEV_CCA_STATS_USEC_CNT_INFO_PRESENT (0x10) +#define HTT_PDEV_CCA_STATS_MED_RX_IDLE_INFO_PRESENT (0x20) +#define HTT_PDEV_CCA_STATS_MED_TX_IDLE_GLOBAL_INFO_PRESENT (0x40) +#define HTT_PDEV_CCA_STATS_CCA_OBBS_USEC_INFO_PRESENT (0x80) + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + /* Below values are obtained from the HW Cycles counter registers */ + A_UINT32 tx_frame_usec; + A_UINT32 rx_frame_usec; + A_UINT32 rx_clear_usec; + A_UINT32 my_rx_frame_usec; + A_UINT32 usec_cnt; + A_UINT32 med_rx_idle_usec; + A_UINT32 med_tx_idle_global_usec; + A_UINT32 cca_obss_usec; +} htt_pdev_stats_cca_counters_tlv; + +/* NOTE: THIS htt_pdev_cca_stats_hist_tlv STRUCTURE IS DEPRECATED, + * due to lack of support in some host stats infrastructures for + * TLVs nested within TLVs. 
+ */ +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + /* The channel number on which these stats were collected */ + A_UINT32 chan_num; + + /* num of CCA records (Num of htt_pdev_stats_cca_counters_tlv)*/ + A_UINT32 num_records; + + /* + * Bit map of valid CCA counters + * Bit0 - tx_frame_usec + * Bit1 - rx_frame_usec + * Bit2 - rx_clear_usec + * Bit3 - my_rx_frame_usec + * bit4 - usec_cnt + * Bit5 - med_rx_idle_usec + * Bit6 - med_tx_idle_global_usec + * Bit7 - cca_obss_usec + * + * See HTT_PDEV_CCA_STATS_xxx_INFO_PRESENT defs + */ + A_UINT32 valid_cca_counters_bitmap; + + /* Indicates the stats collection interval + * Valid Values: + * 100 - For the 100ms interval CCA stats histogram + * 1000 - For 1sec interval CCA histogram + * 0xFFFFFFFF - For Cumulative CCA Stats + */ + A_UINT32 collection_interval; + + /** + * This will be followed by an array which contains the CCA stats + * collected in the last N intervals, + * if the indication is for last N intervals CCA stats. + * Then the pdev_cca_stats[0] element contains the oldest CCA stats + * and pdev_cca_stats[N-1] will have the most recent CCA stats. + */ + htt_pdev_stats_cca_counters_tlv cca_hist_tlv[1]; +} htt_pdev_cca_stats_hist_tlv; + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + /* The channel number on which these stats were collected */ + A_UINT32 chan_num; + + /* num of CCA records (Num of htt_pdev_stats_cca_counters_tlv)*/ + A_UINT32 num_records; + + /* + * Bit map of valid CCA counters + * Bit0 - tx_frame_usec + * Bit1 - rx_frame_usec + * Bit2 - rx_clear_usec + * Bit3 - my_rx_frame_usec + * bit4 - usec_cnt + * Bit5 - med_rx_idle_usec + * Bit6 - med_tx_idle_global_usec + * Bit7 - cca_obss_usec + * + * See HTT_PDEV_CCA_STATS_xxx_INFO_PRESENT defs + */ + A_UINT32 valid_cca_counters_bitmap; + + /* Indicates the stats collection interval + * Valid Values: + * 100 - For the 100ms interval CCA stats histogram + * 1000 - For 1sec interval CCA histogram + * 0xFFFFFFFF - For Cumulative CCA Stats + */ + A_UINT32 collection_interval; + + /** + * This will be followed by an array which contains the CCA stats + * collected in the last N intervals, + * if the indication is for last N intervals CCA stats. + * Then the pdev_cca_stats[0] element contains the oldest CCA stats + * and pdev_cca_stats[N-1] will have the most recent CCA stats. 
+ * htt_pdev_stats_cca_counters_tlv cca_hist_tlv[1]; + */ +} htt_pdev_cca_stats_hist_v1_tlv; + +#define HTT_TWT_SESSION_FLAG_FLOW_ID_M 0x0000ffff +#define HTT_TWT_SESSION_FLAG_FLOW_ID_S 0 + +#define HTT_TWT_SESSION_FLAG_BCAST_TWT_M 0x00010000 +#define HTT_TWT_SESSION_FLAG_BCAST_TWT_S 16 + +#define HTT_TWT_SESSION_FLAG_TRIGGER_TWT_M 0x00020000 +#define HTT_TWT_SESSION_FLAG_TRIGGER_TWT_S 17 + +#define HTT_TWT_SESSION_FLAG_ANNOUN_TWT_M 0x00040000 +#define HTT_TWT_SESSION_FLAG_ANNOUN_TWT_S 18 + +#define HTT_TWT_SESSION_FLAG_FLOW_ID_GET(_var) \ + (((_var) & HTT_TWT_SESSION_FLAG_FLOW_ID_M) >> \ + HTT_TWT_SESSION_FLAG_FLOW_ID_S) + +#define HTT_TWT_SESSION_FLAG_FLOW_ID_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_TWT_SESSION_FLAG_FLOW_ID, _val); \ + ((_var) |= ((_val) << HTT_TWT_SESSION_FLAG_FLOW_ID_S)); \ + } while (0) + +#define HTT_TWT_SESSION_FLAG_BCAST_TWT_GET(_var) \ + (((_var) & HTT_TWT_SESSION_FLAG_BCAST_TWT_M) >> \ + HTT_TWT_SESSION_FLAG_BCAST_TWT_S) + +#define HTT_TWT_SESSION_FLAG_BCAST_TWT_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_TWT_SESSION_FLAG_BCAST_TWT, _val); \ + ((_var) |= ((_val) << HTT_TWT_SESSION_FLAG_BCAST_TWT_S)); \ + } while (0) + +#define HTT_TWT_SESSION_FLAG_TRIGGER_TWT_GET(_var) \ + (((_var) & HTT_TWT_SESSION_FLAG_TRIGGER_TWT_M) >> \ + HTT_TWT_SESSION_FLAG_TRIGGER_TWT_S) + +#define HTT_TWT_SESSION_FLAG_TRIGGER_TWT_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_TWT_SESSION_FLAG_TRIGGER_TWT, _val); \ + ((_var) |= ((_val) << HTT_TWT_SESSION_FLAG_TRIGGER_TWT_S)); \ + } while (0) + +#define HTT_TWT_SESSION_FLAG_ANNOUN_TWT_GET(_var) \ + (((_var) & HTT_TWT_SESSION_FLAG_ANNOUN_TWT_M) >> \ + HTT_TWT_SESSION_FLAG_ANNOUN_TWT_S) + +#define HTT_TWT_SESSION_FLAG_ANNOUN_TWT_SET(_var, _val) \ + do { \ + HTT_CHECK_SET_VAL(HTT_TWT_SESSION_FLAG_ANNOUN_TWT, _val); \ + ((_var) |= ((_val) << HTT_TWT_SESSION_FLAG_ANNOUN_TWT_S)); \ + } while (0) + +#define TWT_DIALOG_ID_UNAVAILABLE 0xFFFFFFFF + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + A_UINT32 vdev_id; + htt_mac_addr peer_mac; + A_UINT32 flow_id_flags; + A_UINT32 dialog_id; /* TWT_DIALOG_ID_UNAVAILABLE is used when TWT session is not initiated by host */ + A_UINT32 wake_dura_us; + A_UINT32 wake_intvl_us; + A_UINT32 sp_offset_us; +} htt_pdev_stats_twt_session_tlv; + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + + A_UINT32 pdev_id; + A_UINT32 num_sessions; + + htt_pdev_stats_twt_session_tlv twt_session[1]; +} htt_pdev_stats_twt_sessions_tlv; + +/* STATS_TYPE: HTT_DBG_EXT_STATS_TWT_SESSIONS + * TLV_TAGS: + * - HTT_STATS_PDEV_TWT_SESSIONS_TAG + * - HTT_STATS_PDEV_TWT_SESSION_TAG + */ +/* NOTE: + * This structure is for documentation, and cannot be safely used directly. + * Instead, use the constituent TLV structures to fill/parse. 
+ */ +typedef struct { + htt_pdev_stats_twt_sessions_tlv twt_sessions[1]; +} htt_pdev_twt_sessions_stats_t; + +typedef enum { + /* Global link descriptor queued in REO */ + HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_0 = 0, + HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_1 = 1, + HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_2 = 2, + /*Number of queue descriptors of this aging group */ + HTT_RX_REO_RESOURCE_BUFFERS_USED_AC0 = 3, + HTT_RX_REO_RESOURCE_BUFFERS_USED_AC1 = 4, + HTT_RX_REO_RESOURCE_BUFFERS_USED_AC2 = 5, + HTT_RX_REO_RESOURCE_BUFFERS_USED_AC3 = 6, + /* Total number of MSDUs buffered in AC */ + HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC0 = 7, + HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC1 = 8, + HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC2 = 9, + HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC3 = 10, + + HTT_RX_REO_RESOURCE_STATS_MAX = 16 +} htt_rx_reo_resource_sample_id_enum; + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + /* Variable based on the Number of records. HTT_RX_REO_RESOURCE_STATS_MAX */ + /* htt_rx_reo_debug_sample_id_enum */ + A_UINT32 sample_id; + /* Max value of all samples */ + A_UINT32 total_max; + /* Average value of total samples */ + A_UINT32 total_avg; + /* Num of samples including both zeros and non zeros ones*/ + A_UINT32 total_sample; + /* Average value of all non zeros samples */ + A_UINT32 non_zeros_avg; + /* Num of non zeros samples */ + A_UINT32 non_zeros_sample; + /* Max value of last N non zero samples (N = last_non_zeros_sample) */ + A_UINT32 last_non_zeros_max; + /* Min value of last N non zero samples (N = last_non_zeros_sample) */ + A_UINT32 last_non_zeros_min; + /* Average value of last N non zero samples (N = last_non_zeros_sample) */ + A_UINT32 last_non_zeros_avg; + /* Num of last non zero samples */ + A_UINT32 last_non_zeros_sample; +} htt_rx_reo_resource_stats_tlv_v; + +/* STATS_TYPE: HTT_DBG_EXT_STATS_REO_RESOURCE_STATS + * TLV_TAGS: + * - HTT_STATS_RX_REO_RESOURCE_STATS_TAG + */ +/* NOTE: + * This structure is for documentation, and cannot be safely used directly. + * Instead, use the constituent TLV structures to fill/parse. 
+ */ +typedef struct { + htt_rx_reo_resource_stats_tlv_v reo_resource_stats; +} htt_soc_reo_resource_stats_t; + +/* == TX SOUNDING STATS == */ + +/* config_param0 */ + +#define HTT_DBG_EXT_STATS_SET_VDEV_MASK(_var) ((_var << 1) | 0x1) +#define HTT_DBG_EXT_STATS_GET_VDEV_ID_FROM_VDEV_MASK(_var) ((_var >> 1) & 0xFF) +#define HTT_DBG_EXT_STATS_IS_VDEV_ID_SET(_var) ((_var) & 0x1) + +typedef enum { + /* Implicit beamforming stats */ + HTT_IMPLICIT_TXBF_STEER_STATS = 0, + /* Single user short inter frame sequence steer stats */ + HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS = 1, + /* Single user random back off steer stats */ + HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS = 2, + /* Multi user short inter frame sequence steer stats */ + HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS = 3, + /* Multi user random back off steer stats */ + HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS = 4, + /* For backward compatability new modes cannot be added */ + HTT_TXBF_MAX_NUM_OF_MODES = 5 +} htt_txbf_sound_steer_modes; + +typedef enum { + HTT_TX_AC_SOUNDING_MODE = 0, + HTT_TX_AX_SOUNDING_MODE = 1, +} htt_stats_sounding_tx_mode; + +typedef struct { + htt_tlv_hdr_t tlv_hdr; + A_UINT32 tx_sounding_mode; /* HTT_TX_XX_SOUNDING_MODE */ + /* Counts number of soundings for all steering modes in each bw */ + A_UINT32 cbf_20[HTT_TXBF_MAX_NUM_OF_MODES]; + A_UINT32 cbf_40[HTT_TXBF_MAX_NUM_OF_MODES]; + A_UINT32 cbf_80[HTT_TXBF_MAX_NUM_OF_MODES]; + A_UINT32 cbf_160[HTT_TXBF_MAX_NUM_OF_MODES]; + /* + * The sounding array is a 2-D array stored as an 1-D array of + * A_UINT32. The stats for a particular user/bw combination is + * referenced with the following: + * + * sounding[(user* max_bw) + bw] + * + * ... where max_bw == 4 for 160mhz + */ + A_UINT32 sounding[HTT_TX_NUM_OF_SOUNDING_STATS_WORDS]; +} htt_tx_sounding_stats_tlv; + +/* STATS_TYPE : HTT_DBG_EXT_STATS_TX_SOUNDING_INFO + * TLV_TAGS: + * - HTT_STATS_TX_SOUNDING_STATS_TAG + */ +/* NOTE: + * This structure is for documentation, and cannot be safely used directly. + * Instead, use the constituent TLV structures to fill/parse. + */ +typedef struct { + htt_tx_sounding_stats_tlv sounding_tlv; +} htt_tx_sounding_stats_t; + + #endif /* __HTT_STATS_H__ */ diff --git a/drivers/staging/fw-api/fw/wmi_services.h b/drivers/staging/fw-api/fw/wmi_services.h index 3f3792102d8cb269d380b15cb35ecadbd49d73f0..70b6e9daf6795c42c10258233b4c13ec80cb2169 100755 --- a/drivers/staging/fw-api/fw/wmi_services.h +++ b/drivers/staging/fw-api/fw/wmi_services.h @@ -249,6 +249,11 @@ typedef enum { WMI_SERVICE_AP_TWT=153, /* support for TWT (Target Wake Time) on AP */ WMI_SERVICE_GMAC_OFFLOAD_SUPPORT=154, /* Support for GMAC */ WMI_SERVICE_SPOOF_MAC_SUPPORT=155, /* support for SERVICE_SPOOF_MAC */ + WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT=156, /* Support TID specific configurations per peer (ack,aggr,retry,rate) */ + WMI_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT=157, /* Support vdev software retries configuration per AC (non aggr retry/aggr retry) */ + WMI_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT=158, /* Support dual beacon on same channel on single MAC */ + WMI_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT=159, /* Support dual beacon on different channel on single MAC */ + WMI_SERVICE_MOTION_DET=160, /* support for motion detection config */ /******* ADD NEW SERVICES HERE *******/ @@ -309,8 +314,8 @@ typedef enum { (svc_id) < WMI_MAX_SERVICE ? 
\ WMI_SERVICE_IS_ENABLED(pwmi_svc_bmap, svc_id) : \ /* If service ID is in the extended range, check ext_bmap */ \ - (pwmi_svc_ext_bmap)[((svc_id) - WMI_MAX_SERVICE) / 32] >> \ - ((svc_id) & 0x1f)) + (((pwmi_svc_ext_bmap)[((svc_id) - WMI_MAX_SERVICE) / 32] >> \ + ((svc_id) & 0x1f)) & 0x1)) #ifdef __cplusplus diff --git a/drivers/staging/fw-api/fw/wmi_tlv_defs.h b/drivers/staging/fw-api/fw/wmi_tlv_defs.h index ada1cdc001b4174fc379807edbb5d3ebac5c9648..fb8fa29a713f89dc30f020e7a32eae93e14ee0e3 100755 --- a/drivers/staging/fw-api/fw/wmi_tlv_defs.h +++ b/drivers/staging/fw-api/fw/wmi_tlv_defs.h @@ -909,6 +909,19 @@ typedef enum { WMITLV_TAG_STRUC_wmi_twt_del_dialog_complete_event_fixed_param, WMITLV_TAG_STRUC_wmi_twt_pause_dialog_complete_event_fixed_param, WMITLV_TAG_STRUC_wmi_twt_resume_dialog_complete_event_fixed_param, + WMITLV_TAG_STRUC_wmi_request_roam_scan_stats_cmd_fixed_param, + WMITLV_TAG_STRUC_wmi_roam_scan_stats_event_fixed_param, + WMITLV_TAG_STRUC_wmi_peer_tid_configurations_cmd_fixed_param, + WMITLV_TAG_STRUC_wmi_vdev_set_custom_sw_retry_th_cmd_fixed_param, + WMITLV_TAG_STRUC_wmi_get_tpc_power_cmd_fixed_param, + WMITLV_TAG_STRUC_wmi_get_tpc_power_evt_fixed_param, + WMITLV_TAG_STRUC_wmi_dma_buf_release_spectral_meta_data, + WMITLV_TAG_STRUC_wmi_motion_det_config_params_cmd_fixed_param, + WMITLV_TAG_STRUC_wmi_motion_det_base_line_config_params_cmd_fixed_param, + WMITLV_TAG_STRUC_wmi_motion_det_start_stop_cmd_fixed_param, + WMITLV_TAG_STRUC_wmi_motion_det_base_line_start_stop_cmd_fixed_param, + WMITLV_TAG_STRUC_wmi_motion_det_event, + WMITLV_TAG_STRUC_wmi_motion_det_base_line_event, } WMITLV_TAG_ID; /* @@ -1278,6 +1291,14 @@ typedef enum { OP(WMI_TWT_DEL_DIALOG_CMDID) \ OP(WMI_TWT_PAUSE_DIALOG_CMDID) \ OP(WMI_TWT_RESUME_DIALOG_CMDID) \ + OP(WMI_REQUEST_ROAM_SCAN_STATS_CMDID) \ + OP(WMI_PEER_TID_CONFIGURATIONS_CMDID) \ + OP(WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID) \ + OP(WMI_GET_TPC_POWER_CMDID) \ + OP(WMI_MOTION_DET_CONFIG_PARAM_CMDID) \ + OP(WMI_MOTION_DET_BASE_LINE_CONFIG_PARAM_CMDID) \ + OP(WMI_MOTION_DET_START_STOP_CMDID) \ + OP(WMI_MOTION_DET_BASE_LINE_START_STOP_CMDID) \ /* add new CMD_LIST elements above this line */ @@ -1483,6 +1504,10 @@ typedef enum { OP(WMI_TWT_DEL_DIALOG_COMPLETE_EVENTID) \ OP(WMI_TWT_PAUSE_DIALOG_COMPLETE_EVENTID) \ OP(WMI_TWT_RESUME_DIALOG_COMPLETE_EVENTID) \ + OP(WMI_ROAM_SCAN_STATS_EVENTID) \ + OP(WMI_GET_TPC_POWER_EVENTID) \ + OP(WMI_MOTION_DET_HOST_EVENTID) \ + OP(WMI_MOTION_DET_BASE_LINE_HOST_EVENTID) \ /* add new EVT_LIST elements above this line */ @@ -2383,6 +2408,12 @@ WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_GET_TX_POWER_CMDID); WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_LIMIT_OFFCHAN_CMDID); +/* vdev per-AC SW retry configuration cmd */ +#define WMITLV_TABLE_WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_set_custom_sw_retry_th_cmd_fixed_param, wmi_vdev_set_custom_sw_retry_th_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) + +WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID); + /* PDEV Set Base Mac Address Cmd */ #define WMITLV_TABLE_WMI_PDEV_SET_BASE_MACADDR_CMDID(id,op,buf,len) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_set_base_macaddr_cmd_fixed_param, wmi_pdev_set_base_macaddr_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) @@ -2858,6 +2889,12 @@ WMITLV_CREATE_PARAM_STRUC(WMI_LPI_START_SCAN_CMDID); WMITLV_CREATE_PARAM_STRUC(WMI_LPI_STOP_SCAN_CMDID); +/* Request for roam stats Cmd */ +#define WMITLV_TABLE_WMI_REQUEST_ROAM_SCAN_STATS_CMDID(id,op,buf,len) \ + 
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_request_roam_scan_stats_cmd_fixed_param, wmi_request_roam_scan_stats_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) + +WMITLV_CREATE_PARAM_STRUC(WMI_REQUEST_ROAM_SCAN_STATS_CMDID); + #define WMITLV_TABLE_WMI_LPI_RESULT_EVENTID(id,op,buf,len) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_lpi_result_event_fixed_param, wmi_lpi_result_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, data, WMITLV_SIZE_VAR) @@ -3624,6 +3661,23 @@ WMITLV_CREATE_PARAM_STRUC(WMI_ROAM_BTM_CONFIG_CMDID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_wlm_config_cmd_fixed_param, wmi_wlm_config_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) WMITLV_CREATE_PARAM_STRUC(WMI_WLM_CONFIG_CMDID); +/* Motion detection cmd */ +#define WMITLV_TABLE_WMI_MOTION_DET_CONFIG_PARAM_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_motion_det_config_params_cmd_fixed_param, wmi_motion_det_config_params_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) +WMITLV_CREATE_PARAM_STRUC(WMI_MOTION_DET_CONFIG_PARAM_CMDID); + +#define WMITLV_TABLE_WMI_MOTION_DET_BASE_LINE_CONFIG_PARAM_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_motion_det_base_line_config_params_cmd_fixed_param, wmi_motion_det_base_line_config_params_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) +WMITLV_CREATE_PARAM_STRUC(WMI_MOTION_DET_BASE_LINE_CONFIG_PARAM_CMDID); + +#define WMITLV_TABLE_WMI_MOTION_DET_START_STOP_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_motion_det_start_stop_cmd_fixed_param, wmi_motion_det_start_stop_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) +WMITLV_CREATE_PARAM_STRUC(WMI_MOTION_DET_START_STOP_CMDID); + +#define WMITLV_TABLE_WMI_MOTION_DET_BASE_LINE_START_STOP_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_motion_det_base_line_start_stop_cmd_fixed_param, wmi_motion_det_base_line_start_stop_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) +WMITLV_CREATE_PARAM_STRUC(WMI_MOTION_DET_BASE_LINE_START_STOP_CMDID); + /* Pdev Set AC TX Queue Optimized Cmd */ #define WMITLV_TABLE_WMI_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMDID(id,op,buf,len) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_set_ac_tx_queue_optimized_cmd_fixed_param, wmi_pdev_set_ac_tx_queue_optimized_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) @@ -3646,6 +3700,12 @@ WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_SET_RX_FILTER_PROMISCUOUS_CMDID); WMITLV_CREATE_PARAM_STRUC(WMI_RUNTIME_DPD_RECAL_CMDID); +/** Get TX power Cmd */ +#define WMITLV_TABLE_WMI_GET_TPC_POWER_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_get_tpc_power_cmd_fixed_param, wmi_get_tpc_power_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) + +WMITLV_CREATE_PARAM_STRUC(WMI_GET_TPC_POWER_CMDID); + /* TWT enable cmd */ #define WMITLV_TABLE_WMI_TWT_ENABLE_CMDID(id,op,buf,len) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_enable_cmd_fixed_param, wmi_twt_enable_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) @@ -3676,6 +3736,11 @@ WMITLV_CREATE_PARAM_STRUC(WMI_TWT_PAUSE_DIALOG_CMDID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_resume_dialog_cmd_fixed_param, wmi_twt_resume_dialog_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) WMITLV_CREATE_PARAM_STRUC(WMI_TWT_RESUME_DIALOG_CMDID); +/* Set peer tid configurations Cmd */ +#define WMITLV_TABLE_WMI_PEER_TID_CONFIGURATIONS_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_peer_tid_configurations_cmd_fixed_param, 
wmi_peer_tid_configurations_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) +WMITLV_CREATE_PARAM_STRUC(WMI_PEER_TID_CONFIGURATIONS_CMDID); + /************************** TLV definitions of WMI events *******************************/ @@ -3840,7 +3905,8 @@ WMITLV_CREATE_PARAM_STRUC(WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID); #define WMITLV_TABLE_WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID(id,op,buf,len) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_mgmt_tx_compl_bundle_event_fixed_param, wmi_mgmt_tx_compl_bundle_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, desc_ids, WMITLV_SIZE_VAR) \ - WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, status, WMITLV_SIZE_VAR) + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, status, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, ppdu_id, WMITLV_SIZE_VAR) WMITLV_CREATE_PARAM_STRUC(WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID); /* VDEV Start response Event */ @@ -3945,7 +4011,9 @@ WMITLV_CREATE_PARAM_STRUC(WMI_ROAM_SYNCH_FRAME_EVENTID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_hb_ind_event_fixed_param, hb_indevt, WMITLV_SIZE_VAR) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param, wow_gtkigtk, WMITLV_SIZE_VAR) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_oic_ping_handoff_event, wow_oic_ping_handoff, WMITLV_SIZE_VAR) \ - WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_dhcp_lease_renew_event, wow_dhcp_lease_renew, WMITLV_SIZE_VAR) + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_dhcp_lease_renew_event, wow_dhcp_lease_renew, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_motion_det_event, md_indevt, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_motion_det_base_line_event, bl_indevt, WMITLV_SIZE_VAR) WMITLV_CREATE_PARAM_STRUC(WMI_WOW_WAKEUP_HOST_EVENTID); #define WMITLV_TABLE_WMI_WOW_INITIAL_WAKEUP_EVENTID(id,op,buf,len) \ @@ -3973,6 +4041,11 @@ WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_FTM_INTG_EVENTID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_get_keepalive_event_fixed_param, wmi_vdev_get_keepalive_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_GET_KEEPALIVE_EVENTID); +/** Get TX power Event */ +#define WMITLV_TABLE_WMI_GET_TPC_POWER_EVENTID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_get_tpc_power_evt_fixed_param, wmi_get_tpc_power_evt_fixed_param, fixed_param, WMITLV_SIZE_FIX) +WMITLV_CREATE_PARAM_STRUC(WMI_GET_TPC_POWER_EVENTID); + /* GPIO Input Event */ #define WMITLV_TABLE_WMI_GPIO_INPUT_EVENTID(id,op,buf,len) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_gpio_input_event_fixed_param, wmi_gpio_input_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) @@ -4092,6 +4165,16 @@ WMITLV_CREATE_PARAM_STRUC(WMI_OEM_DMA_BUF_RELEASE_EVENTID); WMITLV_CREATE_PARAM_STRUC(WMI_HOST_SWBA_EVENTID); +/* HOST SWBA Event v2 */ +#define WMITLV_TABLE_WMI_HOST_SWBA_V2_EVENTID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_host_swba_event_fixed_param, wmi_host_swba_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_tim_info_v2, tim_info, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, data, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_p2p_noa_info, p2p_noa_info, WMITLV_SIZE_VAR) + 
+WMITLV_CREATE_PARAM_STRUC(WMI_HOST_SWBA_V2_EVENTID); + + /* HOST SWFDA Event requesting host to queue a FILS Discovery frame for transmission */ #define WMITLV_TABLE_WMI_HOST_SWFDA_EVENTID(id,op,buf,len) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_host_swfda_event_fixed_param, wmi_host_swfda_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ @@ -4884,7 +4967,8 @@ WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_DMA_RING_CFG_RSP_EVENTID); /* dma buffer release event */ #define WMITLV_TABLE_WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID(id,op,buf,len) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_dma_buf_release_fixed_param, wmi_dma_buf_release_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ - WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_dma_buf_release_entry, entries, WMITLV_SIZE_VAR) + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_dma_buf_release_entry, entries, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_dma_buf_release_spectral_meta_data, meta_data, WMITLV_SIZE_VAR) WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID); @@ -4925,6 +5009,32 @@ WMITLV_CREATE_PARAM_STRUC(WMI_TWT_PAUSE_DIALOG_COMPLETE_EVENTID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_resume_dialog_complete_event_fixed_param, wmi_twt_resume_dialog_complete_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) WMITLV_CREATE_PARAM_STRUC(WMI_TWT_RESUME_DIALOG_COMPLETE_EVENTID); +/* Event to send roam scan stats */ +#define WMITLV_TABLE_WMI_ROAM_SCAN_STATS_EVENTID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_roam_scan_stats_event_fixed_param, wmi_roam_scan_stats_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, client_id, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_roaming_timestamp, timestamp, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, num_channels, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, chan_info, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_mac_addr, old_bssid, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, is_roaming_success, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_mac_addr, new_bssid, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, num_roam_candidates, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_roam_scan_trigger_reason, roam_reason, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_mac_addr, bssid, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, score, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, channel, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, rssi, WMITLV_SIZE_VAR) +WMITLV_CREATE_PARAM_STRUC(WMI_ROAM_SCAN_STATS_EVENTID); + +/* Motion detection events */ +#define WMITLV_TABLE_WMI_MOTION_DET_HOST_EVENTID(id,op,buf,len) \ +WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_motion_det_event, wmi_motion_det_event, fixed_param, WMITLV_SIZE_FIX) +WMITLV_CREATE_PARAM_STRUC(WMI_MOTION_DET_HOST_EVENTID); + +#define WMITLV_TABLE_WMI_MOTION_DET_BASE_LINE_HOST_EVENTID(id,op,buf,len) \ +WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_motion_det_base_line_event, wmi_motion_det_base_line_event, fixed_param, WMITLV_SIZE_FIX) 
+WMITLV_CREATE_PARAM_STRUC(WMI_MOTION_DET_BASE_LINE_HOST_EVENTID); #ifdef __cplusplus } diff --git a/drivers/staging/fw-api/fw/wmi_unified.h b/drivers/staging/fw-api/fw/wmi_unified.h index d744bdf803186c8964845556c7486248c214fc72..ea97d6e01ae07cb637ac90c85a9b398710060eb5 100755 --- a/drivers/staging/fw-api/fw/wmi_unified.h +++ b/drivers/staging/fw-api/fw/wmi_unified.h @@ -243,6 +243,7 @@ typedef enum { WMI_GRP_WLM, /* 0x3c WLAN Latency Manager */ WMI_GRP_11K_OFFLOAD, /* 0x3d */ WMI_GRP_TWT, /* 0x3e TWT (Target Wake Time) for STA and AP */ + WMI_GRP_MOTION_DET, /* 0x3f */ } WMI_GRP_ID; #define WMI_CMD_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1) @@ -450,6 +451,8 @@ typedef enum { WMI_VDEV_GET_TX_POWER_CMDID, /* limit STA offchannel activity */ WMI_VDEV_LIMIT_OFFCHAN_CMDID, + /** To set custom software retries per-AC for vdev */ + WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID, /* peer specific commands */ @@ -517,6 +520,10 @@ typedef enum { WMI_PEER_RESERVED0_CMDID, /** Peer/Tid/Msduq threshold update */ WMI_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMDID, + /** TID specific configurations per peer of type + * wmi_peer_tid_configurations_cmd_fixed_param + */ + WMI_PEER_TID_CONFIGURATIONS_CMDID, /* beacon/management specific commands */ @@ -624,6 +631,8 @@ typedef enum { WMI_ROAM_BTM_CONFIG_CMDID, /** Enable or Disable Fast Initial Link Setup (FILS) feature */ WMI_ENABLE_FILS_CMDID, + /** Request for roam scan stats */ + WMI_REQUEST_ROAM_SCAN_STATS_CMDID, /** offload scan specific commands */ /** set offload scan AP profile */ @@ -893,6 +902,8 @@ typedef enum { WMI_THERM_THROT_SET_CONF_CMDID, /* set runtime dpd recalibration params */ WMI_RUNTIME_DPD_RECAL_CMDID, + /* get TX power for input HALPHY parameters */ + WMI_GET_TPC_POWER_CMDID, /* Offload 11k related requests */ WMI_11K_OFFLOAD_REPORT_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_11K_OFFLOAD), @@ -1122,6 +1133,12 @@ typedef enum { WMI_TWT_DEL_DIALOG_CMDID, WMI_TWT_PAUSE_DIALOG_CMDID, WMI_TWT_RESUME_DIALOG_CMDID, + + /** WMI commands related to motion detection **/ + WMI_MOTION_DET_CONFIG_PARAM_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_MOTION_DET), + WMI_MOTION_DET_BASE_LINE_CONFIG_PARAM_CMDID, + WMI_MOTION_DET_START_STOP_CMDID, + WMI_MOTION_DET_BASE_LINE_START_STOP_CMDID, } WMI_CMD_ID; typedef enum { @@ -1339,6 +1356,10 @@ typedef enum { /** software FILS Discovery Frame alert event to Host, requesting host to Queue an FD frame for transmission */ WMI_HOST_SWFDA_EVENTID, + /** software beacon alert event to Host requesting host to Queue a beacon for transmission. + * Used only in host beacon mode. 
*/ + WMI_HOST_SWBA_V2_EVENTID, + /* ADDBA Related WMI Events*/ /** Indication the completion of the prior WMI_PEER_TID_DELBA_CMDID(initiator) */ @@ -1362,6 +1383,8 @@ typedef enum { WMI_ROAM_SYNCH_EVENTID, /** roam synch frame event */ WMI_ROAM_SYNCH_FRAME_EVENTID, + /** various roam scan stats */ + WMI_ROAM_SCAN_STATS_EVENTID, /** P2P disc found */ WMI_P2P_DISC_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_P2P), @@ -1553,6 +1576,9 @@ typedef enum { /** event to report result of host configure SAR2 */ WMI_SAR2_RESULT_EVENTID, + /** event to get TX power per input HALPHY parameters */ + WMI_GET_TPC_POWER_EVENTID, + /* GPIO Event */ WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO), /** upload H_CV info WMI event @@ -1670,6 +1696,10 @@ typedef enum { WMI_NDP_END_INDICATION_EVENTID, WMI_WLAN_COEX_BT_ACTIVITY_EVENTID, WMI_NDL_SCHEDULE_UPDATE_EVENTID, + + /** WMI events related to motion detection */ + WMI_MOTION_DET_HOST_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MOTION_DET), + WMI_MOTION_DET_BASE_LINE_HOST_EVENTID, } WMI_EVT_ID; /* defines for OEM message sub-types */ @@ -2114,6 +2144,34 @@ typedef struct _wmi_abi_version { #define WMI_PDEV_ID_2ND 2 /* second pdev (pdev 1) */ #define WMI_PDEV_ID_3RD 3 /* third pdev (pdev 2) */ +/* + * Enum regarding which BDF elements are provided in which elements of the + * wmi_service_ready_event_fixed_param.hw_bd_info[] array + */ +typedef enum { + BDF_VERSION = 0, + REF_DESIGN_ID = 1, + CUSTOMER_ID = 2, + PROJECT_ID = 3, + BOARD_DATA_REV = 4, +} wmi_hw_bd_info_e; + +/* + * Macros to get/set BDF details within the + * wmi_service_ready_event_fixed_param.hw_bd_info[] array + */ +#define WMI_GET_BDF_VERSION(hw_bd_info) ((hw_bd_info)[BDF_VERSION]) +#define WMI_GET_REF_DESIGN(hw_bd_info) ((hw_bd_info)[REF_DESIGN_ID]) +#define WMI_GET_CUSTOMER_ID(hw_bd_info) ((hw_bd_info)[CUSTOMER_ID]) +#define WMI_GET_PROJECT_ID(hw_bd_info) ((hw_bd_info)[PROJECT_ID]) +#define WMI_GET_BOARD_DATA_REV(hw_bd_info) ((hw_bd_info)[BOARD_DATA_REV]) + +#define WMI_SET_BDF_VERSION(hw_bd_info, val) ((hw_bd_info)[BDF_VERSION] = (val)) +#define WMI_SET_REF_DESIGN(hw_bd_info, val) ((hw_bd_info)[REF_DESIGN_ID] = (val)) +#define WMI_SET_CUSTOMER_ID(hw_bd_info, val) ((hw_bd_info)[CUSTOMER_ID] = (val)) +#define WMI_SET_PROJECT_ID(hw_bd_info, val) ((hw_bd_info)[PROJECT_ID] = (val)) +#define WMI_SET_BOARD_DATA_REV(hw_bd_info, val) ((hw_bd_info)[BOARD_DATA_REV] = (val)) + /** * The following struct holds optional payload for * wmi_service_ready_event_fixed_param,e.g., 11ac pass some of the @@ -2696,6 +2754,9 @@ typedef struct { #define WMI_RSRC_CFG_FLAG_TCL_CCE_DISABLE_S 12 #define WMI_RSRC_CFG_FLAG_TCL_CCE_DISABLE_M 0x1000 + #define WMI_RSRC_CFG_FLAG_TIM_V2_SUPPORT_ENABLE_S 13 + #define WMI_RSRC_CFG_FLAG_TIM_V2_SUPPORT_ENABLE_M 0x2000 + A_UINT32 flag1; /** @brief smart_ant_cap - Smart Antenna capabilities information @@ -2729,8 +2790,11 @@ typedef struct { A_UINT32 num_ns_ext_tuples_cfg; /** - * size (in bytes) of the buffer the FW shall allocate to store - * packet filtering instructions + * size (in bytes) of the buffer the FW shall allocate per vdev + * firmware can dynamic allocate memory (or disable) + * packet filtering feature. + * 0 - fw chooses its default value + * -1 (0XFFFFFFFF) - disable APF */ A_UINT32 bpf_instruction_size; @@ -2793,6 +2857,26 @@ typedef struct { * configuration specification. 
*/ A_UINT32 max_nlo_ssids; + + /** + * num_packet_filters: the total number of packet filters that the host + * requests the fw to support; firmware can then dynamically allocate + * memory for (or disable) the pktfilter feature. + * + * 0 - fw chooses its default value. + * -1(0XFFFFFFFF)- disable pktfilter. + */ + A_UINT32 num_packet_filters; + + /** + * num_max_sta_vdevs: the max number of STA vdevs; + * fw will use it to configure the memory of offload features that + * are only for STA vdevs. + * P2P client vdevs should be included. + * + * 0 - fw chooses its default value: 'num_vdevs' of this structure. + */ + A_UINT32 num_max_sta_vdevs; } wmi_resource_config; #define WMI_RSRC_CFG_FLAG_SET(word32, flag, value) \ @@ -2865,6 +2949,11 @@ typedef struct { #define WMI_RSRC_CFG_FLAG_TCL_CCE_DISABLE_GET(word32) \ WMI_RSRC_CFG_FLAG_GET((word32), TCL_CCE_DISABLE) +#define WMI_RSRC_CFG_FLAG_TIM_V2_SUPPORT_ENABLE_SET(word32, value) \ + WMI_RSRC_CFG_FLAG_SET((word32), TIM_V2_SUPPORT_ENABLE, (value)) +#define WMI_RSRC_CFG_FLAG_TIM_V2_SUPPORT_ENABLE_GET(word32) \ + WMI_RSRC_CFG_FLAG_GET((word32), TIM_V2_SUPPORT_ENABLE) + typedef struct { A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_init_cmd_fixed_param */ @@ -4270,6 +4359,20 @@ typedef enum { WMI_GET_BITS(param, WMI_VDEV_CUSTOM_TX_AC_EN_BITPOS, \ WMI_VDEV_CUSTOM_TX_AC_EN_NUM_BITS) +typedef enum { + WMI_VDEV_CUSTOM_SW_RETRY_TYPE_NONAGGR = 0, + WMI_VDEV_CUSTOM_SW_RETRY_TYPE_AGGR = 1, + WMI_VDEV_CUSTOM_SW_RETRY_TYPE_MAX, +} wmi_vdev_custom_sw_retry_type_t; + +typedef struct { + A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_vdev_set_custom_sw_retry_th_cmd_fixed_param */ + A_UINT32 vdev_id; /* vdev id of the vdev to which the custom software retries will be applied */ + A_UINT32 ac_type; /* access category (VI, VO, BE, BK) enum wmi_traffic_ac */ + A_UINT32 sw_retry_type; /* 0 = non-aggr retry, 1 = aggr retry (wmi_vdev_custom_sw_retry_type_t enum) */ + A_UINT32 sw_retry_th; /* max retry count per AC, based on ac_type, for the vdev identified by vdev_id */ +} wmi_vdev_set_custom_sw_retry_th_cmd_fixed_param; + /* * Command to enable/disable Green AP Power Save. * This helps conserve power during AP operation. When the AP has no @@ -4372,6 +4475,8 @@ typedef struct { #define WMI_BEACON_CTRL_TX_DISABLE 0 #define WMI_BEACON_CTRL_TX_ENABLE 1 +#define WMI_BEACON_CTRL_SWBA_EVENT_DISABLE 2 +#define WMI_BEACON_CTRL_SWBA_EVENT_ENABLE 3 typedef struct { A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_bcn_offload_ctrl_cmd_fixed_param */ @@ -4467,6 +4572,27 @@ typedef enum { PKT_PWR_SAVE_FSM_ENABLE = 0x80000000, } WMI_PDEV_PKT_PWR_SAVE_LEVEL; +/** MACROs to get user setting for enabling/disabling Secondary Rate Feature set + * Bit-0 : Enable/Disable Control for "PPDU Secondary Retry Support" + * Bit-1 : Enable/Disable Control for "RTS Black/White-listing Support" + * Bit-2 : Enable/Disable Control for "Higher MCS retry restriction on XRETRY failures" + * Bit 3-5 : "Xretry threshold" to use + * Bit 6~31 : reserved for future use.
+ */ +#define WMI_PDEV_PARAM_SECONDARY_RATE_ENABLE_BIT_S 0 +#define WMI_PDEV_PARAM_SECONDARY_RATE_ENABLE_BIT 0x00000001 +#define WMI_PDEV_PARAM_RTS_BL_WL_ENABLE_BIT_S 1 +#define WMI_PDEV_PARAM_RTS_BL_WL_ENABLE_BIT 0x00000002 +#define WMI_PDEV_PARAM_HIGHER_MCS_XRETRY_RESTRICTION_S 2 +#define WMI_PDEV_PARAM_HIGHER_MCS_XRETRY_RESTRICTION 0x00000004 +#define WMI_PDEV_PARAM_XRETRY_THRESHOLD_S 3 +#define WMI_PDEV_PARAM_XRETRY_THRESHOLD 0x00000038 + +#define WMI_PDEV_PARAM_IS_SECONDARY_RATE_ENABLED(word32) WMI_F_MS(word32, WMI_PDEV_PARAM_SECONDARY_RATE_ENABLE_BIT) +#define WMI_PDEV_PARAM_IS_RTS_BL_WL_ENABLED(word32) WMI_F_MS(word32, WMI_PDEV_PARAM_RTS_BL_WL_ENABLE_BIT) +#define WMI_PDEV_PARAM_IS_HIGHER_MCS_XRETRY_RESTRICTION_SET(word32) WMI_F_MS(word32, WMI_PDEV_PARAM_HIGHER_MCS_XRETRY_RESTRICTION) +#define WMI_PDEV_PARAM_GET_XRETRY_THRESHOLD(word32) WMI_F_MS(word32, WMI_PDEV_PARAM_XRETRY_THRESHOLD) + typedef enum { /** TX chain mask */ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1, @@ -4910,6 +5036,22 @@ typedef enum { * rate will be used instead. */ WMI_PDEV_PARAM_CCK_TX_ENABLE, /* 0x9e */ + /* + * Set the user-specified antenna gain, but in 0.5 dB units. + * This is a finer-granularity version of WMI_PDEV_PARAM_ANTENNA_GAIN. + * E.g. to set a gain of 15.5 dB, a value of 31 could be provided as the + * value accompanying the PDEV_PARAM_ANTENNA_GAIN_HALF_DB parameter type. + */ + WMI_PDEV_PARAM_ANTENNA_GAIN_HALF_DB, /* 0x9f */ + /* + * Global Enable/Disable control for Secondary Retry Feature Set + * + * Bit-0 : Enable/Disable Control for "PPDU Secondary Retry Support" + * Bit-1 : Enable/Disable Control for "RTS Black/White-listing Support" + * Bit-2 : Enable/Disable Control for "Higher MCS retry restriction on XRETRY failures" + * Bit 3-5: "Xretry threshold" to use + */ + WMI_PDEV_PARAM_SECONDARY_RETRY_ENABLE, /* 0xA0 */ } WMI_PDEV_PARAM; typedef struct { @@ -5127,6 +5269,7 @@ typedef struct { /* tlv for completion * A_UINT32 desc_ids[num_reports]; <- from tx_send_cmd * A_UINT32 status[num_reports]; <- WMI_MGMT_TX_COMP_STATUS_TYPE + * A_UINT32 ppdu_id[num_reports]; <- list of PPDU IDs */ } wmi_mgmt_tx_compl_bundle_event_fixed_param; @@ -7002,8 +7145,14 @@ typedef struct { #define WMI_HEOPS_DEFPE_SET(he_ops, value) WMI_SET_BITS(he_ops, 6, 3, value) /* TWT required */ -#define WMI_HEOPS_TWT_GET(he_ops) WMI_GET_BITS(he_ops, 9, 1) -#define WMI_HEOPS_TWT_SET(he_ops, value) WMI_SET_BITS(he_ops, 9, 1, value) +#define WMI_HEOPS_TWT_REQUIRED_GET(he_ops) WMI_GET_BITS(he_ops, 9, 1) +#define WMI_HEOPS_TWT_REQUIRED_SET(he_ops, value) WMI_SET_BITS(he_ops, 9, 1, value) +/* DEPRECATED, use WMI_HEOPS_TWT_REQUIRED_GET instead */ +#define WMI_HEOPS_TWT_GET(he_ops) \ + WMI_HEOPS_TWT_REQUIRED_GET(he_ops) +/* DEPRECATED, use WMI_HEOPS_TWT_REQUIRED_SET instead */ +#define WMI_HEOPS_TWT_SET(he_ops, value) \ + WMI_HEOPS_TWT_REQUIRED_SET(he_ops, value) /* RTS threshold in units of 32 us,0 - always use RTS 1023 - this is disabled */ #define WMI_HEOPS_RTSTHLD_GET(he_ops) WMI_GET_BITS(he_ops, 10, 10) @@ -8263,9 +8412,10 @@ typedef enum { WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE, /* 0x7d */ /** Parameter to configure BA mode. - * Default: Auto mode. * Valid values: 0- Auto mode, * 1- Manual mode(addba req not sent). + * 2- buffer size 64 + * 3- buffer size 256 */ WMI_VDEV_PARAM_BA_MODE, /* 0x7e */ @@ -8278,6 +8428,54 @@ typedef enum { */ WMI_VDEV_PARAM_FORCED_MODDTIM_ENABLE, /* 0x7f */ + /** specify the setting that are valid for auto rate transmissions. 
+ * bits 7:0 (LTF): When bitmask is set, then corresponding LTF value is + * used for auto rate. + * BIT0 = 1 (WMI_HE_LTF_1X) + * BIT1 = 1 (WMI_HE_LTF_2X) + * BIT2 = 1 (WMI_HE_LTF_4X) + * BIT3-7 = Reserved bits. + * bits 15:8 (SGI): When bitmask is set, then corresponding SGI value is + * used for auto rate. + * BIT8 = 1 (400 NS) + * BIT9 = 1 (800 NS) + * BIT10 = 1 (1600 NS) + * BIT11 = 1 (3200 NS) + * BIT12-15 = Reserved bits. + * bits 31:16: Reserved bits; should be set to zero. + */ + WMI_VDEV_PARAM_AUTORATE_MISC_CFG, /* 0x80 */ + + /** VDEV parameter to enable or disable RTT initiator mac address + * randomization. + * Default: Disabled. + * valid values: 0-Disable random mac 1-Enable random mac + */ + WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_INITIATOR_RANDOM_MAC, /* 0x81 */ + + /** + * For each AC, configure how many tx retries to send without RTS + * before enabling RTS + * bits 0:7 :BE + * bits 8:15 :BK + * bits 16:23 :VI + * bits 24:31 :VO + * A value of 0 in specific AC means default configuration for that AC. + */ + WMI_VDEV_PARAM_TX_RETRIES_BEFORE_RTS_PER_AC, /* 0x82 */ + + /** + * Parameter to enable/disable AMSDU aggregation size auto-selection logic. + * We have logic where AMSDU aggregation size is dynamically decided + * based on MCS. That logic is enabled by default. + * For certain tests, we need a method to disable this optimization, + * and base AMSDU size only on the peer's capability rather than our logic. + * A value of 0 means disable internal optimization, + * 1 means enable internal optimization. + */ + WMI_VDEV_PARAM_AMSDU_AGGREGATION_SIZE_OPTIMIZATION, /* 0x83 */ + + /*=== ADD NEW VDEV PARAM TYPES ABOVE THIS LINE === * The below vdev param types are used for prototyping, and are * prone to change. @@ -9114,6 +9312,21 @@ typedef struct { A_UINT32 vdev_id; } wmi_tim_info; +typedef struct { + A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_tim_info */ + /** TIM Partial Virtual Bitmap */ + A_UINT32 tim_mcast; + A_UINT32 tim_changed; + A_UINT32 tim_num_ps_pending; + /** Use the vdev_id only if vdev_id_valid is set */ + A_UINT32 vdev_id_valid; + /** unique id identifying the VDEV */ + A_UINT32 vdev_id; + /** TIM bitmap len (in bytes) */ + A_UINT32 tim_len; + /* followed by WMITLV_TAG_ARRAY_BYTE holding the TIM bitmap */ +} wmi_tim_info_v2; + typedef struct { /** Flag to enable quiet period IE support */ A_UINT32 is_enabled; @@ -10138,6 +10351,19 @@ typedef struct { A_UINT32 roam_scan_mode; A_UINT32 vdev_id; A_UINT32 flags; /* see WMI_ROAM_SCAN_MODE_FLAG defs */ + /* + * Minimum duration allowed between two consecutive roam scans. + * Roam scan is not allowed if the duration between two consecutive + * roam scans is less than this time. + */ + A_UINT32 min_delay_btw_scans; /* In msec */ + /* + * Bitmask (with enum WMI_ROAM_TRIGGER_REASON_ID identifying the bit + * positions) showing for which roam_trigger_reasons the + * min_delay_btw_scans constraint should be applied. + * 0x0 means there are no time restrictions between successive roam scans. + */ + A_UINT32 min_delay_roam_trigger_reason_bitmask; } wmi_roam_scan_mode_fixed_param; #define WMI_ROAM_SCAN_MODE_NONE 0x0 @@ -10846,6 +11072,7 @@ typedef struct { A_UINT32 qos_caps; A_UINT32 wmm_caps; A_UINT32 mcsset[ROAM_OFFLOAD_NUM_MCS_SET>>2]; /* since this 4 byte aligned, we don't declare it as tlv array */ + A_UINT32 handoff_delay_for_rx; /* In msec.
Delay Hand-Off by this duration to receive pending Rx frames from current BSS */ } wmi_roam_offload_tlv_param; @@ -11600,6 +11827,7 @@ typedef enum event_type_e { WOW_11D_SCAN_EVENT, WOW_SAP_OBSS_DETECTION_EVENT, WOW_BSS_COLOR_COLLISION_DETECT_EVENT, + WOW_TKIP_MIC_ERR_FRAME_RECVD_EVENT, } WOW_WAKE_EVENT_TYPE; typedef enum wake_reason_e { @@ -11658,6 +11886,9 @@ typedef enum wake_reason_e { WOW_REASON_WLAN_DHCP_RENEW, WOW_REASON_SAP_OBSS_DETECTION, WOW_REASON_BSS_COLOR_COLLISION_DETECT, + WOW_REASON_TKIP_MIC_ERR_FRAME_RECVD_DETECT, + WOW_REASON_WLAN_MD, /* motion detected */ + WOW_REASON_WLAN_BL, /* baselining done */ WOW_REASON_DEBUG_TEST = 0xFF, } WOW_WAKE_REASON_TYPE; @@ -12976,6 +13207,8 @@ typedef enum WMI_VENDOR_OUI_ACTION_CONNECTION_1X1 = 0, /* Connect in 1X1 only */ WMI_VENDOR_OUI_ACTION_ITO_EXTENSION = 1, /* Extend the Immediate Time-Out (ITO) if data is not received from AP after beacon with TIM bit set */ WMI_VENDOR_OUI_ACTION_CCKM_1X1 = 2, /* TX (only) CCKM rates with 1 chain only */ + WMI_VENDOR_OUI_ACTION_ALT_ITO = 3, /* inactivity time-out */ + WMI_VENDOR_OUI_ACTION_SWITCH_TO_11N_MODE = 4, /* Switch from 11ac to 11n mode to avoid IOT issues with ONM frame */ /* Add any action before this line */ WMI_VENDOR_OUI_ACTION_MAX_ACTION_ID } wmi_vendor_oui_action_id; @@ -13596,8 +13829,15 @@ typedef struct { A_UINT32 tdls_puapsd_rx_frame_threshold; /**Duration (in ms) over which to check whether TDLS link needs to be torn down */ A_UINT32 teardown_notification_ms; - /**STA kickout threshold for TDLS peer */ + /** STA kickout threshold for TDLS peer */ A_UINT32 tdls_peer_kickout_threshold; + /* TDLS discovery WAKE timeout in ms. + * DUT will wake until this timeout to receive TDLS discovery response + * from peer. + * If tdls_discovery_wake_timeout is 0x0, the DUT will choose autonomously + * what wake timeout value to use. + */ + A_UINT32 tdls_discovery_wake_timeout; } wmi_tdls_set_state_cmd_fixed_param; /* WMI_TDLS_PEER_UPDATE_CMDID */ @@ -14293,10 +14533,13 @@ typedef struct #define LPI_IE_BITMAP_FLAGS 0x00200000 /* reserved as a bitmap to indicate more scan information; one such use being to indicate if the on-going scan is interrupted or not */ #define LPI_IE_BITMAP_CACHING_REQD 0x00400000 /* extscan will use this field to indicate if this frame info needs to be cached in LOWI LP or not */ #define LPI_IE_BITMAP_REPORT_CONTEXT_HUB 0x00800000 /* extscan will use this field to indicate to LOWI LP whether to report result to context hub or not. */ -#define LPI_IE_BITMAP_CHRE_ESS 0x010000000 /* ESS capability info for CHRE */ -#define LPI_IE_BITMAP_CHRE_SEC_MODE 0x020000000 /* Security capability info for CHRE */ -#define LPI_IE_BITMAP_CHRE_SUPPORTED_RATE 0x040000000 /* Hightest MCS corresponding NCC for TX and RX */ -#define LPI_IE_BITMAP_COUNTRY_STRING 0x080000000 /* send country string inside Country IE to LOWI LP */ +#define LPI_IE_BITMAP_CHRE_RADIO_CHAIN 0x01000000 /* include radio chain and rssi per chain information if this bit is set - for CHRE */ + +/* 0x02000000, 0x04000000, and 0x08000000 are unused / available */ + +#define LPI_IE_BITMAP_CHRE_ESS 0x10000000 /* ESS capability info for CHRE */ +#define LPI_IE_BITMAP_CHRE_SEC_MODE 0x20000000 /* Security capability info for CHRE */ +#define LPI_IE_BITMAP_CHRE_SUPPORTED_RATE 0x40000000 /* Highest MCS corresponding NCC for TX and RX */ #define LPI_IE_BITMAP_ALL 0xFFFFFFFF typedef struct { @@ -14361,6 +14604,8 @@ typedef struct { A_UINT32 num_ssids; /** number of bytes in ie data. 
In the TLV ie_data[] */ A_UINT32 ie_len; + /** Scan control flags extended (see WMI_SCAN_FLAG_EXT_xxx) */ + A_UINT32 scan_ctrl_flags_ext; /** * TLV (tag length value) parameters follow the scan_cmd @@ -14704,6 +14949,99 @@ typedef struct { */ } wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param; +/** + * ACK policy to be followed for the TID + */ +typedef enum { + /** Used when the host does not want to configure the ACK policy */ + WMI_PEER_TID_CONFIG_ACK_POLICY_IGNORE, + /** Allow ACK for the TID */ + WMI_PEER_TID_CONFIG_ACK, + /** Do not expect ACK for the TID */ + WMI_PEER_TID_CONFIG_NOACK, +} WMI_PEER_TID_CONFIG_ACK_POLICY; + +/** + * Aggregation control policy for the TID + */ +typedef enum { + /** Used when the host does not want to configure the aggregation policy */ + WMI_PEER_TID_CONFIG_AGGR_CONTROL_IGNORE, + /** Enable aggregation for the TID */ + WMI_PEER_TID_CONFIG_AGGR_CONTROL_ENABLE, + /** Disable aggregation for the TID */ + WMI_PEER_TID_CONFIG_AGGR_CONTROL_DISABLE, +} WMI_PEER_TID_CONFIG_AGGR_CONTROL; + +/** + * Rate control policy for the TID + */ +typedef enum { + /** Used when the host does not want to configure the rate control policy */ + WMI_PEER_TID_CONFIG_RATE_CONTROL_IGNORE, + /** Auto rate control */ + WMI_PEER_TID_CONFIG_RATE_CONTROL_AUTO, + /** Fixed rate control */ + WMI_PEER_TID_CONFIG_RATE_CONTROL_FIXED_RATE, +} WMI_PEER_TID_CONFIG_RATE_CONTROL; + +/** + * SW retry threshold for the TID + */ +typedef enum { + /** Used when the host does not want to configure the SW retry threshold */ + WMI_PEER_TID_SW_RETRY_IGNORE = 0, + WMI_PEER_TID_SW_RETRY_MIN = 1, + WMI_PEER_TID_SW_RETRY_MAX = 30, + /** No SW retry for the TID */ + WMI_PEER_TID_SW_RETRY_NO_RETRY = 0xFFFFFFFF, +} WMI_PEER_TID_CONFIG_SW_RETRY_THRESHOLD; + +/** + * Command format for the TID configuration + */ +typedef struct { + /** TLV tag and len; tag equals + * WMITLV_TAG_STRUC_wmi_peer_tid_configurations_cmd_fixed_param + */ + A_UINT32 tlv_header; + + /** vdev id */ + A_UINT32 vdev_id; + + /** peer MAC address */ + wmi_mac_addr peer_mac_address; + + /** TID number, generated by the caller. + * Valid range for QoS TID : 0-15 + * Valid range for non QOS/Mgmt TID: 16-19 + * Any other TID number is invalid. + */ + A_UINT32 tid_num; + + /** ACK policy - of type WMI_PEER_TID_CONFIG_ACK_POLICY */ + A_UINT32 ack_policy; + + /** Aggregation control - of type WMI_PEER_TID_CONFIG_AGGR_CONTROL */ + A_UINT32 aggr_control; + + /** Rate control - of type WMI_PEER_TID_CONFIG_RATE_CONTROL */ + A_UINT32 rate_control; + + /** Fixed rate control parameters - of type WMI_PEER_PARAM_FIXED_RATE. + * This is applicable only when rate_control is + * WMI_PEER_TID_CONFIG_RATE_CONTROL_FIXED_RATE + */ + A_UINT32 rcode_rcflags; + + /** MPDU SW retry threshold - of type WMI_PEER_TID_CONFIG_SW_RETRY_THRESHOLD + * This SW retry threshold limits the total number of retransmits of + * nacked or unacked MPDUs, but it is up to the FW to decide what + * tx rate to use during each retransmission. 
+ */ + A_UINT32 sw_retry_threshold; +} wmi_peer_tid_configurations_cmd_fixed_param; + typedef enum { WMI_PEER_IND_SMPS = 0x0, /* spatial multiplexing power save */ WMI_PEER_IND_OMN, /* operating mode notification */ @@ -14867,6 +15205,10 @@ typedef struct { wmi_mac_addr next_hop_mac_addr; } wmi_mhf_offload_routing_table_entry; +enum { + WMI_DFS_RADAR_PULSE_FLAG_MASK_PSIDX_DIFF_VALID = 0x00000001, +}; + typedef struct { /** tlv tag and len, tag equals * WMITLV_TAG_STRUC_wmi_dfs_radar_event */ @@ -14928,6 +15270,16 @@ typedef struct { /** Max pulse chirp velocity variance in delta bins */ A_INT32 pulse_delta_diff; + /** the difference in the FFT peak index between short FFT and the first long FFT + * psidx_diff = (first_long_fft_psidx - 4*first_short_fft_psidx), + */ + A_INT32 psidx_diff; + + /** pulse_flags: see WMI_DFS_RADAR_PULSE_FLAG_MASK enum values + * 0x0001 - set if psidx_diff is valid + */ + A_UINT32 pulse_flags; + } wmi_dfs_radar_event_fixed_param; enum { @@ -21383,6 +21735,14 @@ static INLINE A_UINT8 *wmi_id_to_name(A_UINT32 wmi_command) WMI_RETURN_STRING(WMI_TWT_DEL_DIALOG_CMDID); WMI_RETURN_STRING(WMI_TWT_PAUSE_DIALOG_CMDID); WMI_RETURN_STRING(WMI_TWT_RESUME_DIALOG_CMDID); + WMI_RETURN_STRING(WMI_REQUEST_ROAM_SCAN_STATS_CMDID); + WMI_RETURN_STRING(WMI_PEER_TID_CONFIGURATIONS_CMDID); + WMI_RETURN_STRING(WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID); + WMI_RETURN_STRING(WMI_GET_TPC_POWER_CMDID); + WMI_RETURN_STRING(WMI_MOTION_DET_CONFIG_PARAM_CMDID); + WMI_RETURN_STRING(WMI_MOTION_DET_BASE_LINE_CONFIG_PARAM_CMDID); + WMI_RETURN_STRING(WMI_MOTION_DET_START_STOP_CMDID); + WMI_RETURN_STRING(WMI_MOTION_DET_BASE_LINE_START_STOP_CMDID); } return "Invalid WMI cmd"; @@ -22062,8 +22422,8 @@ typedef struct { /* 1. wake_intvl_mantis must be <= 0xFFFF * 2. wake_intvl_us must be divided evenly by wake_intvl_mantis, * i.e., wake_intvl_us % wake_intvl_mantis == 0 - * 2. the quotient of wake_intvl_us/wake_intvl_mantis must be 2 to N-th(0<=N<=31) power, - i.e., wake_intvl_us/wake_intvl_mantis == 2^N, 0<=N<=31 + * 3. the quotient of wake_intvl_us/wake_intvl_mantis must be 2 to N-th(0<=N<=31) power, + * i.e., wake_intvl_us/wake_intvl_mantis == 2^N, 0<=N<=31 */ A_UINT32 wake_intvl_us; /* TWT Wake Interval in units of us */ A_UINT32 wake_intvl_mantis; /* TWT Wake Interval Mantissa */ @@ -22149,6 +22509,7 @@ typedef struct { A_UINT32 vdev_id; /* VDEV identifier */ A_UINT32 dialog_id; /* TWT dialog ID */ A_UINT32 sp_offset_us; /* this long time after TWT resumed the 1st SP will start */ + A_UINT32 next_twt_size; /* Next TWT subfield Size, refer to IEEE 802.11ax section "9.4.1.60 TWT Information field" */ } wmi_twt_resume_dialog_cmd_fixed_param; /* status code of resuming TWT dialog */ @@ -22262,8 +22623,10 @@ typedef struct { A_UINT32 pdev_id; /** ID of pdev whose DMA ring produced the data */ A_UINT32 mod_id; /* see WMI_DMA_RING_SUPPORTED_MODULE */ A_UINT32 num_buf_release_entry; + A_UINT32 num_meta_data_entry; /* This TLV is followed by another TLV of array of structs. - * wmi_dma_buf_release_entry entries; + * wmi_dma_buf_release_entry entries[num_buf_release_entry]; + * wmi_dma_buf_release_spectral_meta_data meta_data[num_meta_data_entry]; */ } wmi_dma_buf_release_fixed_param; @@ -22280,6 +22643,19 @@ typedef struct { A_UINT32 paddr_hi; } wmi_dma_buf_release_entry; +typedef struct { + A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_dma_buf_release_spectral_meta_data */ + /** + * meta data information.
+ * Host uses the noise floor values as one of the major parameter + * to classify the spectral data. + * This information will not be provided by ucode unlike the fft reports + * which gets DMAed to DDR buffer. + * Hence sending the NF values in dBm units as meta data information. + */ + A_INT32 noise_floor[WMI_MAX_CHAINS]; +} wmi_dma_buf_release_spectral_meta_data; + typedef struct { A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_runtime_dpd_recal_cmd_fixed_param */ A_UINT32 enable; /* Enable/disable */ @@ -22303,6 +22679,249 @@ typedef struct { A_UINT32 dpd_dur_max_ms; } wmi_runtime_dpd_recal_cmd_fixed_param; +typedef enum { + WMI_ROAM_TRIGGER_REASON_NONE = 0, + WMI_ROAM_TRIGGER_REASON_PER, + WMI_ROAM_TRIGGER_REASON_BMISS, + WMI_ROAM_TRIGGER_REASON_LOW_RSSI, + WMI_ROAM_TRIGGER_REASON_HIGH_RSSI, + WMI_ROAM_TRIGGER_REASON_PERIODIC, + WMI_ROAM_TRIGGER_REASON_MAWC, + WMI_ROAM_TRIGGER_REASON_DENSE, + WMI_ROAM_TRIGGER_REASON_BACKGROUND, + WMI_ROAM_TRIGGER_REASON_FORCED, + WMI_ROAM_TRIGGER_REASON_BTM, + WMI_ROAM_TRIGGER_REASON_UNIT_TEST, + WMI_ROAM_TRIGGER_REASON_MAX, +} WMI_ROAM_TRIGGER_REASON_ID; + +/* value for DENSE roam trigger */ +#define WMI_RX_TRAFFIC_ABOVE_THRESHOLD 0x1 +#define WMI_TX_TRAFFIC_ABOVE_THRESHOLD 0x2 + +typedef struct { + A_UINT32 trigger_id; /* id from WMI_ROAM_TRIGGER_REASON_ID */ + /* interpretation of trigger value is as follows, for different trigger IDs + * ID = PER -> value = PER percentage + * ID = LOW_RSSI -> value = rssi in dB wrt noise floor, + * ID = HIGH_RSSI -> value = rssi in dB wrt to noise floor, + * ID = DENSE -> value = specification if it is tx or rx traffic threshold, + * (see WMI_[RX,TX]_TRAFFIC_ABOVE_THRESHOLD) + * ID = PERIODIC -> value = periodicity in ms + * + * for other IDs trigger_value would be 0 (invalid) + */ + A_UINT32 trigger_value; +} wmi_roam_scan_trigger_reason; + +typedef struct { + A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_request_roam_scan_stats_cmd_fixed_param */ + A_UINT32 vdev_id; +} wmi_request_roam_scan_stats_cmd_fixed_param; + +typedef struct { + /* + * The timestamp is in units of ticks of a 19.2MHz clock. + * The timestamp is taken at roam scan start. + */ + A_UINT32 lower32bit; + A_UINT32 upper32bit; +} wmi_roaming_timestamp; + +typedef struct { + A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_roam_scan_stats_event_fixed_param */ + A_UINT32 vdev_id; + /* number of roam scans */ + A_UINT32 num_roam_scans; + /* This TLV is followed by TLV's: + * A_UINT32 client_id[num_roam_scans]; based on WMI_SCAN_CLIENT_ID + * wmi_roaming_timestamp timestamp[num_roam_scans]; clock ticks at the time of scan start + * A_UINT32 num_channels[num_roam_scans]; number of channels that are scanned + * A_UINT32 chan_info[]; channel frequencies (MHz) in each scan + * The num_channels[] elements specify how many elements there are + * within chan_info[] for each scan. + * For example, if num_channels = [2, 3] then chan_info will have 5 + * elements, with the first 2 elements from the first scan, and + * the last 3 elements from the second scan. 
+ * wmi_mac_addr old_bssid[num_roam_scans]; bssid we are connected to at the time of roaming + * A_UINT32 is_roaming_success[num_roam_scans]; value is 1 if roaming is successful, 0 if roaming failed + * wmi_mac_addr new_bssid[num_roam_scans]; bssid after roaming + * A_UINT32 num_of_roam_candidates[num_roam_scans]; number of candidates found in each roam scan + * wmi_roam_scan_trigger_reason roam_reason[num_roam_scans]; reason for each roam scan + * wmi_mac_addr bssid[]; bssids of candidates in each roam scan + * The num_of_roam_candidates[] elements specify how many elements + * there are within bssid[] for each scan. + * For example, if num_of_roam_candidates = [2, 3] then bssid will + * have 5 elements, with the first 2 elements from the first scan, + * and the last 3 elements from the second scan. + * A_UINT32 score[]; score of candidates in each roam scan + * The num_of_roam_candidates[] elements specify how many elements + * there are within score[] for each scan. + * For example, if num_of_roam_candidates = [2, 3] then score will + * have 5 elements, with the first 2 elements from the first scan, + * and the last 3 elements from the second scan. + * A_UINT32 channel[]; channel frequency (MHz) of candidates in each roam scan + * The num_of_roam_candidates[] elements specify how many elements + * there are within channel[] for each scan. + * For example, if num_of_roam_candidates = [2, 3] then channel will + * have 5 elements, with the first 2 elements from the first scan, + * and the last 3 elements from the second scan. + * A_UINT32 rssi[]; rssi in dB w.r.t. noise floor of candidates + * in each roam scan. + * The num_of_roam_candidates[] elements specify how many elements + * there are within rssi[] for each scan. + * For example, if num_of_roam_candidates = [2, 3] then rssi will + * have 5 elements, with the first 2 elements from the first scan, + * and the last 3 elements from the second scan. + */ +} wmi_roam_scan_stats_event_fixed_param; + +typedef struct { + A_UINT32 tlv_header; /* TLV tag and len; tag equals wmi_txpower_query_cmd_fixed_param */ + A_UINT32 request_id; /* unique request ID to distinguish the command / event set */ + + /* The mode value has the following meaning : + * 0 : 11a + * 1 : 11bg + * 2 : 11b + * 3 : 11g only + * 4 : 11a HT20 + * 5 : 11g HT20 + * 6 : 11a HT40 + * 7 : 11g HT40 + * 8 : 11a VHT20 + * 9 : 11a VHT40 + * 10 : 11a VHT80 + * 11 : 11g VHT20 + * 12 : 11g VHT40 + * 13 : 11g VHT80 + * 14 : unknown + */ + A_UINT32 mode; + A_UINT32 rate; /* rate index */ + A_UINT32 nss; /* number of spatial streams */ + A_UINT32 beamforming; /* beamforming parameter 0:disabled, 1:enabled */ + A_UINT32 chain_mask; /* mask for the antenna set to get power */ + A_UINT32 chain_index; /* index for the antenna */ +} wmi_get_tpc_power_cmd_fixed_param; + +typedef struct { + A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_get_tpc_power_evt_fixed_param */ + A_UINT32 request_id; /* request ID set by the command */ + A_INT32 tx_power; /* TX power for the specified HALPHY parameters in half dBm units */ +} wmi_get_tpc_power_evt_fixed_param; + +/* below structures are related to Motion Detection.
*/ +typedef struct { + /** TLV tag and len; tag equals + * WMITLV_TAG_STRUC_wmi_motion_det_config_params_cmd_fixed_param */ + A_UINT32 tlv_header; /** TLV Header */ + A_UINT32 vdev_id; /** Vdev ID */ + A_UINT32 time_t1; /** Time gap of successive bursts of + * measurement frames during coarse + * motion detection (in ms) */ + A_UINT32 time_t2; /** Time gap of successive bursts of + * measurement frames during fine + * motion detection (in ms) */ + A_UINT32 n1; /** number of measurement frames in one + * burst, for coarse detection */ + A_UINT32 n2; /** number of measurement frames in one + * burst, for fine detection */ + A_UINT32 time_t1_gap; /** gap between measurement frames in + * course detection (in ms) */ + A_UINT32 time_t2_gap; /** gap between measurement frames in + * fine detection (in ms) */ + A_UINT32 coarse_K; /** number of times motion detection has to + * be performed for coarse detection */ + A_UINT32 fine_K; /** number of times motion detection has to + * be performed for fine detection */ + A_UINT32 coarse_Q; /** number of times motion is expected + * to be detected for success case in + * coarse detection */ + A_UINT32 fine_Q; /** number of times motion is expected + * to be detected for success case in + * fine detection */ + A_UINT32 md_coarse_thr_high; /** higher threshold value (in percent) + * from host to FW, which will be used in + * coarse detection phase of motion detection. + * This is the threshold for the correlation + * of the old RF local-scattering environment + * with the current RF local-scattering + * environment. A value of 100(%) indicates + * that neither the transceiver nor any + * nearby objects have changed position. */ + A_UINT32 md_fine_thr_high; /** higher threshold value (in percent) + * from host to FW, which will be used in + * fine detection phase of motion detection. + * This is the threshold for correlation + * between the old and current RF environments, + * as explained above. */ + A_UINT32 md_coarse_thr_low; /** lower threshold value (in percent) + * for immediate detection of motion in + * coarse detection phase. + * This is the threshold for correlation + * between the old and current RF environments, + * as explained above. */ + A_UINT32 md_fine_thr_low; /** lower threshold value (in percent) + * for immediate detection of motion in + * fine detection phase. + * This is the threshold for correlation + * between the old and current RF environments, + * as explained above. 
*/ +} wmi_motion_det_config_params_cmd_fixed_param; + +typedef struct { + /** TLV tag and len; tag equals + * WMITLV_TAG_STRUC_wmi_motion_det_base_line_config_params_cmd_fixed_param */ + A_UINT32 tlv_header; /** TLV Header */ + A_UINT32 vdev_id; /** Vdev ID */ + A_UINT32 bl_time_t; /** time T for baseline (in ms) + * Every bl_time_t, bl_n packets are sent */ + A_UINT32 bl_packet_gap; /** gap between measurement frames for baseline + * (in ms) */ + A_UINT32 bl_n; /** number of measurement frames to be sent + * during one baseline */ + A_UINT32 bl_num_meas; /** number of times the baseline measurement + * to be done */ +} wmi_motion_det_base_line_config_params_cmd_fixed_param; + +typedef struct { + /** TLV tag and len; tag equals + * WMITLV_TAG_STRUC_wmi_motion_det_start_stop_cmd_fixed_param */ + A_UINT32 tlv_header; /** TLV Header */ + A_UINT32 vdev_id; /** Vdev ID */ + A_UINT32 enable; /** start = 1, stop =0 */ +} wmi_motion_det_start_stop_cmd_fixed_param; + +typedef struct { + /** TLV tag and len; tag equals + * WMITLV_TAG_STRUC_wmi_motion_det_base_line_start_stop_cmd_fixed_param */ + A_UINT32 tlv_header; /** TLV Header */ + A_UINT32 vdev_id; /** Vdev ID */ + A_UINT32 enable; /** start = 1, stop =0 */ +} wmi_motion_det_base_line_start_stop_cmd_fixed_param; + +typedef struct { + /** TLV tag and len; tag equals + * WMITLV_TAG_STRUC_wmi_motion_det_event */ + A_UINT32 tlv_header; /** TLV Header */ + A_UINT32 vdev_id; /** Vdev ID */ + A_UINT32 status; /** status = 1 -> motion detected */ +} wmi_motion_det_event; + +typedef struct { + /** TLV tag and len; tag equals + * WMITLV_TAG_STRUC_wmi_motion_det_base_line_event */ + A_UINT32 tlv_header; /** TLV Header */ + A_UINT32 vdev_id; /** Vdev ID */ + A_UINT32 bl_baseline_value; /** baseline correlation value calculated + * during baselining phase (in %) */ + A_UINT32 bl_max_corr_reserved; /** max corr value obtained during baselining + * phase (in %); reserved for future */ + A_UINT32 bl_min_corr_reserved; /** min corr value obtained during baselining + * phase (in %); reserved for future */ +} wmi_motion_det_base_line_event; + /* ADD NEW DEFS HERE */ diff --git a/drivers/staging/fw-api/fw/wmi_version.h b/drivers/staging/fw-api/fw/wmi_version.h index 2fd9952be7e536cfcf36fe64f1f02d58f5c21239..82a0019fceb49d73af6aa3d80d5d3ba773d969e6 100755 --- a/drivers/staging/fw-api/fw/wmi_version.h +++ b/drivers/staging/fw-api/fw/wmi_version.h @@ -36,7 +36,7 @@ #define __WMI_VER_MINOR_ 0 /** WMI revision number has to be incremented when there is a * change that may or may not break compatibility. */ -#define __WMI_REVISION_ 510 +#define __WMI_REVISION_ 536 /** The Version Namespace should not be normally changed. 
Only * host and firmware of the same WMI namespace will work diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h index d2bc9fd2cf8a53751d424230f9e47f6025f2fa62..6258739b423c7a67af0c7a4b825c1bf942123758 100644 --- a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h @@ -1804,6 +1804,7 @@ typedef struct { uint32_t wmm_caps; /* since this is 4 byte aligned, we don't declare it as tlv array */ uint32_t mcsset[WMI_HOST_ROAM_OFFLOAD_NUM_MCS_SET >> 2]; + uint32_t ho_delay_for_rx; } roam_offload_param; #define WMI_FILS_MAX_RRK_LENGTH 64 diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c index 026919ad7e65392422c1870b351dccd9ec6ae0be..cefd55f3b463794cf818f0fec20a8726465bcbbc 100644 --- a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c @@ -4678,6 +4678,8 @@ QDF_STATUS send_roam_scan_offload_mode_cmd_tlv(wmi_unified_t wmi_handle, roam_offload_params->rssi_cat_gap = roam_req->roam_rssi_cat_gap; roam_offload_params->select_5g_margin = roam_req->select_5ghz_margin; + roam_offload_params->handoff_delay_for_rx = + roam_req->roam_offload_params.ho_delay_for_rx; roam_offload_params->reassoc_failure_timeout = roam_req->reassoc_failure_timeout; diff --git a/drivers/staging/qcacld-3.0/core/cds/inc/cds_reg_service.h b/drivers/staging/qcacld-3.0/core/cds/inc/cds_reg_service.h index dc9e8f7d772393ce01a520da78b530d51b399bac..8d0180ccd4c4a9240b9b226fd89a149ce31a6c63 100644 --- a/drivers/staging/qcacld-3.0/core/cds/inc/cds_reg_service.h +++ b/drivers/staging/qcacld-3.0/core/cds/inc/cds_reg_service.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 
* @@ -390,6 +390,19 @@ QDF_STATUS cds_get_channel_list_with_power(struct channel_power *base_channels, uint8_t *num_base_channels); +/** + * cds_set_channel_state() - API to set the channel state in reg table + * @chan_num - input channel enum + * @state - state of the channel to be set + * CHANNEL_STATE_DISABLE + * CHANNEL_STATE_DFS + * CHANNEL_STATE_ENABLE + * CHANNEL_STATE_INVALID + * + * Return: Void + */ +void cds_set_channel_state(uint32_t chan_num, enum channel_state state); + enum channel_state cds_get_channel_state(uint32_t chan_num); QDF_STATUS cds_get_dfs_region(enum dfs_region *dfs_reg); QDF_STATUS cds_put_dfs_region(enum dfs_region dfs_reg); diff --git a/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c b/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c index b03b3eadaf74be069d2d5b96d723efa68a472b49..2c240a7ab9621a60e68f352e513a27514cb308c7 100644 --- a/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c +++ b/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c @@ -2681,7 +2681,7 @@ uint32_t cds_get_connectivity_stats_pkt_bitmap(void *context) QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR, "Magic cookie(%x) for adapter sanity verification is invalid", adapter->magic); - return QDF_STATUS_E_FAILURE; + return 0; } return adapter->pkt_type_bitmap; } diff --git a/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c b/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c index 50348b395edc17e1bbb28a609a3a775b627588c5..b560ddd3f3bf65bc0495aad87efa2f68efa1966e 100644 --- a/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c +++ b/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c @@ -2946,9 +2946,9 @@ bool cds_is_connection_in_progress(uint8_t *session_id, hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter); if ((eConnectionState_Associated == - hdd_sta_ctx->conn_info.connState) - && (false == - hdd_sta_ctx->conn_info.uIsAuthenticated)) { + hdd_sta_ctx->conn_info.connState) + && sme_is_sta_key_exchange_in_progress( + hdd_ctx->hHal, adapter->sessionId)) { sta_mac = (uint8_t *) &(adapter->macAddressCurrent.bytes[0]); cds_debug("client " MAC_ADDRESS_STR diff --git a/drivers/staging/qcacld-3.0/core/cds/src/cds_reg_service.c b/drivers/staging/qcacld-3.0/core/cds/src/cds_reg_service.c index 41357770a3a6aaa1080178f9cc06c245d16154b0..26c61cd7f0da94ae587fa3b81ef606b9ca8e1e9d 100644 --- a/drivers/staging/qcacld-3.0/core/cds/src/cds_reg_service.c +++ b/drivers/staging/qcacld-3.0/core/cds/src/cds_reg_service.c @@ -219,6 +219,16 @@ enum channel_enum cds_get_channel_enum(uint32_t chan_num) return INVALID_CHANNEL; } +void cds_set_channel_state(uint32_t chan_num, enum channel_state state) +{ + enum channel_enum chan_enum; + + chan_enum = cds_get_channel_enum(chan_num); + if (INVALID_CHANNEL == chan_enum) + return; + + reg_channels[chan_enum].state = state; +} /** * cds_get_channel_state() - get the channel state diff --git a/drivers/staging/qcacld-3.0/core/dp/htt/htt_t2h.c b/drivers/staging/qcacld-3.0/core/dp/htt/htt_t2h.c index f45f153f5ea1545c56c0fff0af9763664dc36339..0f432bf3a1d6fd9d4bcc68fdd0a8011bcc8be625 100644 --- a/drivers/staging/qcacld-3.0/core/dp/htt/htt_t2h.c +++ b/drivers/staging/qcacld-3.0/core/dp/htt/htt_t2h.c @@ -421,7 +421,16 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg, #ifndef REMOVE_PKT_LOG case HTT_T2H_MSG_TYPE_PKTLOG: { - pktlog_process_fw_msg(msg_word + 1); + uint32_t len = qdf_nbuf_len(htt_t2h_msg); + + if (len < sizeof(*msg_word) + sizeof(uint32_t)) { + qdf_print("%s: invalid nbuff len \n", __func__); + WARN_ON(1); 
+ break; + } + + /*len is reduced by sizeof(*msg_word)*/ + pktlog_process_fw_msg(msg_word + 1, len - sizeof(*msg_word)); break; } #endif diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_send.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_send.c index 3c656108fc7a959a2a378dd2acbbb0205289af8b..3155fdfd9bd68a47a42b039f277fa8b0f2eb6e5b 100644 --- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_send.c +++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_send.c @@ -542,6 +542,46 @@ void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits) /* UNPAUSE OS Q */ ol_tx_flow_ct_unpause_os_q(pdev); } +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +/** + * ol_tx_flow_pool_lock() - take flow pool lock + * @tx_desc: tx desc + * + * Return: None + */ +static inline +void ol_tx_flow_pool_lock(struct ol_tx_desc_t *tx_desc) +{ + struct ol_tx_flow_pool_t *pool; + + pool = tx_desc->pool; + qdf_spin_lock_bh(&pool->flow_pool_lock); +} + +/** + * ol_tx_flow_pool_unlock() - release flow pool lock + * @tx_desc: tx desc + * + * Return: None + */ +static inline +void ol_tx_flow_pool_unlock(struct ol_tx_desc_t *tx_desc) +{ + struct ol_tx_flow_pool_t *pool; + + pool = tx_desc->pool; + qdf_spin_unlock_bh(&pool->flow_pool_lock); +} +#else +static inline +void ol_tx_flow_pool_lock(struct ol_tx_desc_t *tx_desc) +{ +} +static inline +void ol_tx_flow_pool_unlock(struct ol_tx_desc_t *tx_desc) +{ +} +#endif /** * ol_tx_update_connectivity_stats() - update connectivity stats @@ -557,14 +597,31 @@ static void ol_tx_update_connectivity_stats(struct ol_tx_desc_t *tx_desc, enum htt_tx_status status) { void *osif_dev; + uint32_t pkt_type_bitmap; ol_txrx_stats_rx_fp stats_rx = NULL; uint8_t pkt_type = 0; qdf_assert(tx_desc); + + ol_tx_flow_pool_lock(tx_desc); + /* + * In cases when vdev has gone down and tx completion + * are received, leads to NULL vdev access. + * So, check for NULL before dereferencing it. 
+ */ + if (!tx_desc->vdev || + !tx_desc->vdev->osif_dev || + !tx_desc->vdev->stats_rx) { + ol_tx_flow_pool_unlock(tx_desc); + return; + } osif_dev = tx_desc->vdev->osif_dev; stats_rx = tx_desc->vdev->stats_rx; + ol_tx_flow_pool_unlock(tx_desc); - if (stats_rx) { + pkt_type_bitmap = cds_get_connectivity_stats_pkt_bitmap(osif_dev); + + if (pkt_type_bitmap) { if (status != htt_tx_status_download_fail) stats_rx(netbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type); @@ -590,7 +647,15 @@ static void ol_tx_update_arp_stats(struct ol_tx_desc_t *tx_desc, uint32_t tgt_ip; qdf_assert(tx_desc); + + ol_tx_flow_pool_lock(tx_desc); + if (!tx_desc->vdev) { + ol_tx_flow_pool_unlock(tx_desc); + return; + } + tgt_ip = cds_get_arp_stats_gw_ip(tx_desc->vdev->osif_dev); + ol_tx_flow_pool_unlock(tx_desc); if (tgt_ip == qdf_nbuf_get_arp_tgt_ip(netbuf)) { if (status != htt_tx_status_download_fail) @@ -675,7 +740,6 @@ ol_tx_completion_handler(ol_txrx_pdev_handle pdev, struct ol_tx_desc_t *tx_desc; uint32_t byte_cnt = 0; qdf_nbuf_t netbuf; - uint32_t pkt_type_bitmap; tp_ol_packetdump_cb packetdump_cb; uint32_t is_tx_desc_freed = 0; struct htt_tx_compl_ind_append_tx_tstamp *txtstamp_list = NULL; @@ -718,17 +782,14 @@ ol_tx_completion_handler(ol_txrx_pdev_handle pdev, } /* track connectivity stats */ - pkt_type_bitmap = cds_get_connectivity_stats_pkt_bitmap( - tx_desc->vdev->osif_dev); - if (pkt_type_bitmap) - ol_tx_update_connectivity_stats(tx_desc, netbuf, - status); + ol_tx_update_connectivity_stats(tx_desc, netbuf, + status); if (tx_desc->pkt_type != OL_TX_FRM_TSO) { packetdump_cb = pdev->ol_tx_packetdump_cb; if (packetdump_cb) packetdump_cb(netbuf, status, - tx_desc->vdev->vdev_id, TX_DATA_PKT); + tx_desc->vdev_id, TX_DATA_PKT); } DPTRACE(qdf_dp_trace_ptr(netbuf, diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c index 2d131986f63f3650a7db6cf683a32b29066c1c8c..2d51d5f78b0ca5b6dc6c90646bb1c5104b0d5481 100644 --- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c +++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c @@ -2204,7 +2204,7 @@ void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val) vdev->drop_unenc = val; } -#if defined(CONFIG_HL_SUPPORT) +#if defined(CONFIG_HL_SUPPORT) || defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) static void ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev) @@ -2223,14 +2223,31 @@ ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev) } #else - -static void -ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev) +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +static void ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev) { + struct ol_txrx_pdev_t *pdev = vdev->pdev; + struct ol_tx_flow_pool_t *pool; + int i; + struct ol_tx_desc_t *tx_desc; -} + qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock); + for (i = 0; i < pdev->tx_desc.pool_size; i++) { + tx_desc = ol_tx_desc_find(pdev, i); + if (!qdf_atomic_read(&tx_desc->ref_cnt)) + /* not in use */ + continue; -#endif + pool = tx_desc->pool; + qdf_spin_lock_bh(&pool->flow_pool_lock); + if (tx_desc->vdev == vdev) + tx_desc->vdev = NULL; + qdf_spin_unlock_bh(&pool->flow_pool_lock); + } + qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock); +} +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ +#endif /* CONFIG_HL_SUPPORT */ /** * ol_txrx_vdev_detach - Deallocate the specified data virtual diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h index 
c0c991e1b27a302d2e4ccddacdf712a7308dea7c..50bcf85f371a301665b18888b942dfe37d64d30e 100644 --- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h +++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h @@ -1386,6 +1386,11 @@ struct ol_rx_remote_data { uint8_t mac_id; }; +struct ol_fw_data { + void *data; + uint32_t len; +}; + #define INVALID_REORDER_INDEX 0xFFFF #endif /* _OL_TXRX_TYPES__H_ */ diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_assoc.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_assoc.h index 7ea7fabd26b2eed95809124bcf18dbbe83265cbb..885d9e48b5c76240be8ada85691b35f9a6b02610 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_assoc.h +++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_assoc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * @@ -240,6 +240,15 @@ bool hdd_conn_is_connected(hdd_station_ctx_t *pHddStaCtx); */ eCsrBand hdd_conn_get_connected_band(hdd_station_ctx_t *pHddStaCtx); +/** + * hdd_get_sta_connection_in_progress() - get STA for which connection + * is in progress + * @hdd_ctx: hdd context + * + * Return: hdd adpater for which connection is in progress + */ +hdd_adapter_t *hdd_get_sta_connection_in_progress(hdd_context_t *hdd_ctx); + /** * hdd_sme_roam_callback() - hdd sme roam callback * @pContext: pointer to adapter context @@ -371,4 +380,26 @@ static inline void hdd_save_gtk_params(hdd_adapter_t *adapter, } #endif +/** + * hdd_copy_ht_caps()- copy ht caps info from roam info to + * hdd station context. + * @hdd_ht_cap: pointer to Source ht_cap info of type ieee80211_ht_cap + * @roam_ht_cap: pointer to roam ht_caps info + * + * Return: None + */ +void hdd_copy_ht_caps(struct ieee80211_ht_cap *hdd_ht_cap, + tDot11fIEHTCaps *roam_ht_cap); + +/** + * hdd_copy_vht_caps()- copy vht caps info from roam info to + * hdd station context. + * @hdd_vht_cap: pointer to Source vht_cap info of type ieee80211_vht_cap + * @roam_vht_cap: pointer to roam vht_caps info + * + * Return: None + */ +void hdd_copy_vht_caps(struct ieee80211_vht_cap *hdd_vht_cap, + tDot11fIEVHTCaps *roam_vht_cap); + #endif diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h index 9daf952183fe9fdb110f11bf9014c62381e4a0d7..d4dd36ddcf27de1cf6b131bbafbc3f3630f26807 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h +++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * @@ -11495,6 +11495,8 @@ enum hw_filter_mode { * <ini> * gActionOUIConnect1x1 - Used to specify action OUIs for 1x1 connection * @Default: 000C43 00 25 42 001018 06 02FFF02C0000 BC 25 42 001018 06 02FF040C0000 BC 25 42 00037F 00 35 6C + * Note: User should strictly add new action OUIs at the end of this + * default value. * * Default OUIs: (All values in Hex) * OUI 1 : 000C43 @@ -11535,7 +11537,22 @@ enum hw_filter_mode { /* * <ini> * gActionOUIITOExtension - Used to extend in-activity time for specified APs - * @Default: Empty string + * @Default: 00037F 06 01010000FF7F FC 01 000AEB 02 0100 C0 01 + * Note: User should strictly add new action OUIs at the end of this + * default value. 
+ * + * Default OUIs: (All values in Hex) + * OUI 1: 00037F + * OUI data Len: 06 + * OUI Data: 01010000FF7F + * OUI data Mask: FC - 11111100 + * Info Mask : 01 - only OUI present in Info mask + * + * OUI 2: 000AEB + * OUI data Len: 02 + * OUI Data: 0100 + * OUI data Mask: C0 - 11000000 + * Info Mask : 01 - only OUI present in Info mask * * This ini is used to specify AP OUIs using which station's in-activity time * can be extended with the respective APs @@ -11549,7 +11566,7 @@ enum hw_filter_mode { * </ini> */ #define CFG_ACTION_OUI_ITO_EXTENSION_NAME "gActionOUIITOExtension" -#define CFG_ACTION_OUI_ITO_EXTENSION_DEFAULT "" +#define CFG_ACTION_OUI_ITO_EXTENSION_DEFAULT "00037F 06 01010000FF7F FC 01 000AEB 02 0100 C0 01" /* * <ini> @@ -13181,6 +13198,23 @@ enum hw_filter_mode { #define CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_MAX (300) #define CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_DEFAULT (3) +/* + * <ini> + * gTxSchDelay - Enable/Disable Tx sch delay + * @Min: 0 + * @Max: 5 + * @Default: 2 + * + * Usage: Internal/External + * + * </ini> + */ + +#define CFG_TX_SCH_DELAY_NAME "gTxSchDelay" +#define CFG_TX_SCH_DELAY_MIN (0) +#define CFG_TX_SCH_DELAY_MAX (5) +#define CFG_TX_SCH_DELAY_DEFAULT (2) + /*--------------------------------------------------------------------------- Type declarations -------------------------------------------------------------------------*/ @@ -14041,6 +14075,7 @@ struct hdd_config { uint32_t neighbor_report_offload_per_threshold_offset; uint32_t neighbor_report_offload_cache_timeout; uint32_t neighbor_report_offload_max_req_cap; + uint8_t enable_tx_sch_delay; }; #define VAR_OFFSET(_Struct, _Var) (offsetof(_Struct, _Var)) diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h index 793e7008fa03c99df49a9405f80148f3a6c2151a..788fcfeadcade92947986f82dc39098a743357a1 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h +++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h @@ -1833,6 +1833,33 @@ enum hdd_sta_smps_param { HDD_STA_SMPS_PARAM_DTIM_1CHRX_ENABLE = 5 }; +/** + * struct hdd_cache_channel_info - Structure of the channel info + * which needs to be cached + * @channel_num: channel number + * @reg_status: Current regulatory status of the channel + * Enable + * Disable + * DFS + * Invalid + * @wiphy_status: Current wiphy status + */ +struct hdd_cache_channel_info { + uint32_t channel_num; + enum channel_state reg_status; + uint32_t wiphy_status; +}; + +/** + * struct hdd_cache_channels - Structure of the channels to be cached + * @num_channels: Number of channels to be cached + * @channel_info: Structure of the channel info + */ +struct hdd_cache_channels { + uint32_t num_channels; + struct hdd_cache_channel_info *channel_info; +}; + /** Adapter structure definition */ struct hdd_context_s { /** Global CDS context */ @@ -2138,6 +2165,8 @@ struct hdd_context_s { struct mutex power_stats_lock; #endif qdf_atomic_t is_acs_allowed; + struct hdd_cache_channels *original_channels; + qdf_mutex_t cache_channel_lock; }; int hdd_validate_channel_and_bandwidth(hdd_adapter_t *adapter, @@ -2911,4 +2940,21 @@ hdd_station_info_t *hdd_get_stainfo(hdd_station_info_t *aStaInfo, int hdd_driver_memdump_init(void); void hdd_driver_memdump_deinit(void); +/** + * hdd_is_cli_iface_up() - check if there is any cli iface up + * @hdd_ctx: HDD context + * + * Return: return true if there is any cli iface(STA/P2P_CLI) is up + * else return false + */ +bool hdd_is_cli_iface_up(hdd_context_t *hdd_ctx); 
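The hdd_cache_channel_info / hdd_cache_channels pair added above describes a heap-allocated list of per-channel states that hdd_context_s keeps in original_channels and guards with cache_channel_lock. The sketch below is only an editorial illustration of that ownership model, not part of this patch: the demo_* helpers and the simplified stand-in types are assumptions for demonstration and do not reflect the driver's actual wlan_hdd_free_cache_channels() implementation.

#include <stdint.h>
#include <stdlib.h>

/* Simplified stand-ins for the driver structures shown above. */
struct demo_cache_channel_info {
	uint32_t channel_num;
	int reg_status;          /* stands in for enum channel_state */
	uint32_t wiphy_status;
};

struct demo_cache_channels {
	uint32_t num_channels;
	struct demo_cache_channel_info *channel_info;
};

/* Allocate a cache able to hold num_channels entries; NULL on failure. */
static struct demo_cache_channels *demo_alloc_cache_channels(uint32_t num_channels)
{
	struct demo_cache_channels *cache = calloc(1, sizeof(*cache));

	if (!cache)
		return NULL;

	cache->channel_info = calloc(num_channels, sizeof(*cache->channel_info));
	if (!cache->channel_info) {
		free(cache);
		return NULL;
	}

	cache->num_channels = num_channels;
	return cache;
}

/*
 * Release the entry array and then the container. In the driver this
 * would run with cache_channel_lock held so readers never observe a
 * half-freed list.
 */
static void demo_free_cache_channels(struct demo_cache_channels *cache)
{
	if (!cache)
		return;

	free(cache->channel_info);
	cache->channel_info = NULL;
	cache->num_channels = 0;
	free(cache);
}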
+ +/** + * wlan_hdd_free_cache_channels() - Free the cache channels list + * @hdd_ctx: Pointer to HDD context + * + * Return: None + */ +void wlan_hdd_free_cache_channels(hdd_context_t *hdd_ctx); + #endif /* end #if !defined(WLAN_HDD_MAIN_H) */ diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_tx_rx.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_tx_rx.h index bbc9bbd24b48b351d14593967d3d24aae5fbf88a..05bcf5fe2d36ca9b9ad316c74d9a1aab2026a71f 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_tx_rx.h +++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_tx_rx.h @@ -61,6 +61,14 @@ QDF_STATUS hdd_init_tx_rx(hdd_adapter_t *pAdapter); QDF_STATUS hdd_deinit_tx_rx(hdd_adapter_t *pAdapter); QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf); +/** + * hdd_reset_all_adapters_connectivity_stats() - reset connectivity stats + * @hdd_ctx: pointer to HDD Station Context + * + * Return: None + */ +void hdd_reset_all_adapters_connectivity_stats(hdd_context_t *hdd_ctx); + /** * hdd_tx_rx_collect_connectivity_stats_info() - collect connectivity stats * @skb: pointer to skb data diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c index 19d61390087afa59ad288bd4f8bfe1c5b2b6e199..7808ed9d7222b47f529de7b300e4a085124d33db 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c @@ -291,6 +291,49 @@ hdd_conn_get_connected_cipher_algo(hdd_station_ctx_t *pHddStaCtx, return fConnected; } +hdd_adapter_t *hdd_get_sta_connection_in_progress(hdd_context_t *hdd_ctx) +{ + hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL; + hdd_adapter_t *adapter = NULL; + QDF_STATUS status; + hdd_station_ctx_t *hdd_sta_ctx; + + if (!hdd_ctx) { + hdd_err("HDD context is NULL"); + return NULL; + } + + status = hdd_get_front_adapter(hdd_ctx, &adapter_node); + while (NULL != adapter_node && QDF_STATUS_SUCCESS == status) { + adapter = adapter_node->pAdapter; + if (!adapter) + goto end; + + hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter); + if ((QDF_STA_MODE == adapter->device_mode) || + (QDF_P2P_CLIENT_MODE == adapter->device_mode) || + (QDF_P2P_DEVICE_MODE == adapter->device_mode)) { + if (eConnectionState_Connecting == + hdd_sta_ctx->conn_info.connState) { + hdd_debug("session_id %d: Connection is in progress", + adapter->sessionId); + return adapter; + } else if ((eConnectionState_Associated == + hdd_sta_ctx->conn_info.connState) && + sme_is_sta_key_exchange_in_progress( + hdd_ctx->hHal, adapter->sessionId)) { + hdd_debug("session_id %d: Key exchange is in progress", + adapter->sessionId); + return adapter; + } + } +end: + status = hdd_get_next_adapter(hdd_ctx, adapter_node, &next); + adapter_node = next; + } + return NULL; +} + /** * hdd_remove_beacon_filter() - remove beacon filter * @adapter: Pointer to the hdd adapter @@ -338,14 +381,6 @@ static int hdd_add_beacon_filter(hdd_adapter_t *adapter) return 0; } -/** - * hdd_copy_vht_caps()- copy vht caps info from roam info to - * hdd station context. 
- * @hdd_sta_ctx: pointer to hdd station context - * @roam_info: pointer to roam info - * - * Return: None - */ void hdd_copy_ht_caps(struct ieee80211_ht_cap *hdd_ht_cap, tDot11fIEHTCaps *roam_ht_cap) { @@ -531,7 +566,7 @@ void hdd_copy_ht_caps(struct ieee80211_ht_cap *hdd_ht_cap, #define VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB_SHIFT 26 /** - * hdd_copy_ht_caps()- copy ht caps info from roam info to + * hdd_copy_vht_caps()- copy vht caps info from roam info to * hdd station context. * @hdd_sta_ctx: pointer to hdd station context * @roam_info: pointer to roam info @@ -5363,11 +5398,12 @@ static int32_t hdd_process_genie(hdd_adapter_t *pAdapter, #endif uint16_t gen_ie_len, uint8_t *gen_ie) { - tHalHandle halHandle = WLAN_HDD_GET_HAL_CTX(pAdapter); - tDot11fIERSN dot11RSNIE; - tDot11fIEWPA dot11WPAIE; + uint32_t ret; uint8_t *pRsnIe; uint16_t RSNIeLen; + tDot11fIERSN dot11RSNIE; + tDot11fIEWPA dot11WPAIE; + tHalHandle halHandle = WLAN_HDD_GET_HAL_CTX(pAdapter); /* * Clear struct of tDot11fIERSN and tDot11fIEWPA specifically @@ -5389,8 +5425,13 @@ static int32_t hdd_process_genie(hdd_adapter_t *pAdapter, pRsnIe = gen_ie + 2; RSNIeLen = gen_ie_len - 2; /* Unpack the RSN IE */ - dot11f_unpack_ie_rsn((tpAniSirGlobal) halHandle, - pRsnIe, RSNIeLen, &dot11RSNIE, false); + ret = dot11f_unpack_ie_rsn((tpAniSirGlobal) halHandle, + pRsnIe, RSNIeLen, &dot11RSNIE, + false); + if (DOT11F_FAILED(ret)) { + hdd_err("unpack failed, ret: 0x%x", ret); + return -EINVAL; + } /* Copy out the encryption and authentication types */ hdd_debug("pairwise cipher suite count: %d", dot11RSNIE.pwise_cipher_suite_count); @@ -5423,8 +5464,12 @@ static int32_t hdd_process_genie(hdd_adapter_t *pAdapter, pRsnIe = gen_ie + 2 + 4; RSNIeLen = gen_ie_len - (2 + 4); /* Unpack the WPA IE */ - dot11f_unpack_ie_wpa((tpAniSirGlobal) halHandle, + ret = dot11f_unpack_ie_wpa((tpAniSirGlobal) halHandle, pRsnIe, RSNIeLen, &dot11WPAIE, false); + if (DOT11F_FAILED(ret)) { + hdd_err("unpack failed, ret: 0x%x", ret); + return -EINVAL; + } /* Copy out the encryption and authentication types */ hdd_debug("WPA unicast cipher suite count: %d", dot11WPAIE.unicast_cipher_count); diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c index b468dadc79dd2439ce700feb624ba2b535874124..8b68ff40aecb0e1a1ddd3ddd86eb6f1acafd5561 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c @@ -5256,6 +5256,14 @@ struct reg_table_entry g_registry_table[] = { CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_DEFAULT, CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_MIN, CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_MAX), + + REG_VARIABLE(CFG_TX_SCH_DELAY_NAME, + WLAN_PARAM_Integer, + struct hdd_config, enable_tx_sch_delay, + VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT, + CFG_TX_SCH_DELAY_DEFAULT, + CFG_TX_SCH_DELAY_MIN, + CFG_TX_SCH_DELAY_MAX), }; /** @@ -7048,6 +7056,10 @@ void hdd_cfg_print(hdd_context_t *pHddCtx) CFG_OCE_WAN_SCORE_IDX15_TO_12_NAME, pHddCtx->config->oce_wan_score_slots15_to_12); + hdd_debug("Name = [%s] value = [%d]", + CFG_TX_SCH_DELAY_NAME, + pHddCtx->config->enable_tx_sch_delay); + hdd_cfg_print_11k_offload_params(pHddCtx); } diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c index 95358cb508b505ff05063c3b23bbc70095c40747..813a7eeffd0fd1acd6ef755f8ab581cf59833ca9 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c 
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c @@ -4766,8 +4766,8 @@ static int __wlan_hdd_cfg80211_keymgmt_set_key(struct wiphy *wiphy, return -EPERM; } - if ((data == NULL) || (data_len == 0) || - (data_len > SIR_ROAM_SCAN_PSK_SIZE)) { + if ((data == NULL) || (data_len <= 0) || + (data_len > SIR_ROAM_SCAN_PSK_SIZE)) { hdd_err("Invalid data"); return -EINVAL; } @@ -10882,6 +10882,11 @@ static int __wlan_hdd_cfg80211_set_nud_stats(struct wiphy *wiphy, ENTER(); + if (QDF_GLOBAL_FTM_MODE == hdd_get_conparam()) { + hdd_err("Command not allowed in FTM mode"); + return -EINVAL; + } + err = wlan_hdd_validate_context(hdd_ctx); if (0 != err) return err; @@ -11469,6 +11474,11 @@ static int __wlan_hdd_cfg80211_get_nud_stats(struct wiphy *wiphy, ENTER(); + if (QDF_GLOBAL_FTM_MODE == hdd_get_conparam()) { + hdd_err("Command not allowed in FTM mode"); + return -EINVAL; + } + err = wlan_hdd_validate_context(hdd_ctx); if (0 != err) return err; @@ -14186,19 +14196,10 @@ static bool wlan_hdd_is_duplicate_channel(uint8_t *arr, } #endif -/* - *wlan_hdd_send_sta_authorized_event: Function to send station authorized - *event to user space in case of SAP - *pAdapter: Pointer to the adapter - *@pHddCtx: HDD Context - *@mac_addr: MAC address of the STA for whic the Authorized event needs to - *be sent - *This api is used to send station authorized event to user space - */ -static QDF_STATUS wlan_hdd_send_sta_authorized_event( - hdd_adapter_t *pAdapter, - hdd_context_t *pHddCtx, - struct qdf_mac_addr mac_addr) +QDF_STATUS wlan_hdd_send_sta_authorized_event( + hdd_adapter_t *pAdapter, + hdd_context_t *pHddCtx, + const struct qdf_mac_addr *mac_addr) { struct sk_buff *vendor_event; uint32_t sta_flags = 0; @@ -14234,7 +14235,7 @@ static QDF_STATUS wlan_hdd_send_sta_authorized_event( } status = nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_STA_MAC, - QDF_MAC_ADDR_SIZE, mac_addr.bytes); + QDF_MAC_ADDR_SIZE, mac_addr->bytes); if (status) { hdd_err("STA MAC put fails"); kfree_skb(vendor_event); @@ -14321,7 +14322,7 @@ static int __wlan_hdd_change_station(struct wiphy *wiphy, status = wlan_hdd_send_sta_authorized_event( pAdapter, pHddCtx, - STAMacAddress); + &STAMacAddress); if (status != QDF_STATUS_SUCCESS) { return -EINVAL; } @@ -17650,16 +17651,7 @@ static int wlan_hdd_cfg80211_connect(struct wiphy *wiphy, return ret; } -/** - * wlan_hdd_disconnect() - hdd disconnect api - * @pAdapter: Pointer to adapter - * @reason: Disconnect reason code - * - * This function is used to issue a disconnect request to SME - * - * Return: 0 for success, non-zero for failure - */ -static int wlan_hdd_disconnect(hdd_adapter_t *pAdapter, u16 reason) +int wlan_hdd_disconnect(hdd_adapter_t *pAdapter, u16 reason) { int status, result = 0; unsigned long rc; @@ -17977,6 +17969,7 @@ static int wlan_hdd_cfg80211_set_privacy_ibss(hdd_adapter_t *pAdapter, struct cfg80211_ibss_params *params) { + uint32_t ret; int status = 0; hdd_wext_state_t *pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter); eCsrEncryptionType encryptionType = eCSR_ENCRYPT_TYPE_NONE; @@ -18010,13 +18003,22 @@ static int wlan_hdd_cfg80211_set_privacy_ibss(hdd_adapter_t *pAdapter, hdd_err("invalid ie len:%d", ie[1]); return -EINVAL; } - /* Unpack the WPA IE */ - /* Skip past the EID byte and length byte - and four byte WiFi OUI */ - dot11f_unpack_ie_wpa((tpAniSirGlobal) halHandle, - &ie[2 + 4], ie[1] - 4, - &dot11WPAIE, false); - /*Extract the multicast cipher, the encType for unicast - cipher for wpa-none is none */ + /* + * Unpack the WPA IE. 
Skip past the EID byte and + * length byte - and four byte WiFi OUI + */ + ret = dot11f_unpack_ie_wpa( + (tpAniSirGlobal) halHandle, + &ie[2 + 4], ie[1] - 4, + &dot11WPAIE, false); + if (DOT11F_FAILED(ret)) { + hdd_err("unpack failed ret: 0x%x", ret); + return -EINVAL; + } + /* + * Extract the multicast cipher, the encType for + * unicast cipher for wpa-none is none + */ encryptionType = hdd_translate_wpa_to_csr_encryption_type (dot11WPAIE.multicast_cipher); @@ -18713,8 +18715,6 @@ int __wlan_hdd_cfg80211_del_station(struct wiphy *wiphy, } pAdapter->aStaInfo[staId].isDeauthInProgress = true; - pAdapter->cache_sta_info[staId].reason_code = - pDelStaParams->reason_code; hdd_debug("Delete STA with MAC::" MAC_ADDRESS_STR, MAC_ADDR_ARRAY(mac)); @@ -20344,6 +20344,12 @@ static int __wlan_hdd_cfg80211_update_connect_params( fils_info->key_nai_length = req->fils_erp_username_len + sizeof(char) + req->fils_erp_realm_len; + if (fils_info->key_nai_length > + FILS_MAX_KEYNAME_NAI_LENGTH) { + hdd_err("Key NAI Length %d", + fils_info->key_nai_length); + return -EINVAL; + } if (req->fils_erp_username_len && req->fils_erp_username) { buf = fils_info->keyname_nai; qdf_mem_copy(buf, req->fils_erp_username, diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h index b72d89ab9ae62b93263ec75a79435cd7731c7a57..debfa7c078697ccb772ba24cefbbd1a7fcd82b24 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h @@ -607,6 +607,17 @@ void hdd_process_defer_disconnect(hdd_adapter_t *adapter); */ int wlan_hdd_try_disconnect(hdd_adapter_t *adapter); +/** + * wlan_hdd_disconnect() - hdd disconnect api + * @pAdapter: Pointer to adapter + * @reason: Disconnect reason code + * + * This function is used to issue a disconnect request to SME + * + * Return: 0 for success, non-zero for failure + */ +int wlan_hdd_disconnect(hdd_adapter_t *pAdapter, u16 reason); + /** * hdd_bt_activity_cb() - callback function to receive bt activity * @context: HDD context @@ -663,4 +674,32 @@ void wlan_hdd_save_gtk_offload_params(hdd_adapter_t *adapter, uint8_t *replay_ctr, bool big_endian, uint32_t ul_flags); + +/* + * wlan_hdd_send_sta_authorized_event() - Function to send station authorized + * event to user space in case of SAP + * @pAdapter: Pointer to the adapter + * @pHddCtx: HDD Context + * @mac_addr: MAC address of the STA for which the Authorized event needs to + * be sent + * + * This api is used to send station authorized event to user space + * + * Return: Returns QDF_STATUS_SUCCESS on success else rturns error value + */ + +QDF_STATUS wlan_hdd_send_sta_authorized_event( + hdd_adapter_t *pAdapter, + hdd_context_t *pHddCtx, + const struct qdf_mac_addr *mac_addr); + +/** + * wlan_hdd_restore_channels() - Restore the channels which were cached + * and disabled in wlan_hdd_disable_channels api. 
+ * @hdd_ctx: Pointer to the HDD context + * + * Return: 0 on success, Error code on failure + */ +int wlan_hdd_restore_channels(hdd_context_t *hdd_ctx); + #endif diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c index f6ace8b0b2ac79ac345d3c554f39ed73df7fbc50..5624731d91b29ecafd3b699af36462a44ca14164 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c @@ -1294,14 +1294,33 @@ static void hdd_cleanup_on_fw_down(void) ENTER(); hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD); - cds_set_fw_state(CDS_FW_STATE_DOWN); cds_set_target_ready(false); if (hdd_ctx != NULL) hdd_cleanup_scan_queue(hdd_ctx, NULL); wlan_hdd_purge_notifier(); EXIT(); +} +/** + * wlan_hdd_set_the_pld_uevent() - set the pld event + * @uevent: uevent status + * + * Return: void + */ +static void wlan_hdd_set_the_pld_uevent(struct pld_uevent_data *uevent) +{ + switch (uevent->uevent) { + case PLD_RECOVERY: + cds_set_recovery_in_progress(true); + break; + case PLD_FW_DOWN: + cds_set_fw_state(CDS_FW_STATE_DOWN); + break; + case PLD_FW_READY: + cds_set_target_ready(true); + break; + } } /** @@ -1314,12 +1333,26 @@ static void hdd_cleanup_on_fw_down(void) static void wlan_hdd_pld_uevent(struct device *dev, struct pld_uevent_data *uevent) { + enum cds_driver_state driver_state; + ENTER(); + mutex_lock(&hdd_init_deinit_lock); + hdd_info("pld event %d", uevent->uevent); + + driver_state = cds_get_driver_state(); + + if (driver_state == CDS_DRIVER_STATE_UNINITIALIZED || + cds_is_driver_loading()) { + wlan_hdd_set_the_pld_uevent(uevent); + goto uevent_not_allowed; + } + + wlan_hdd_set_the_pld_uevent(uevent); + switch (uevent->uevent) { case PLD_RECOVERY: - cds_set_recovery_in_progress(true); hdd_pld_ipa_uc_shutdown_pipes(); wlan_hdd_purge_notifier(); break; @@ -1327,9 +1360,10 @@ static void wlan_hdd_pld_uevent(struct device *dev, hdd_cleanup_on_fw_down(); break; case PLD_FW_READY: - cds_set_target_ready(true); break; } +uevent_not_allowed: + mutex_unlock(&hdd_init_deinit_lock); EXIT(); return; diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ext_scan.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ext_scan.c index b80b07002fd95d9c20787258538b5be0e9960068..ca4cd712ce2257fb638f45c093379a417d471aea 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ext_scan.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ext_scan.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 
* @@ -4021,6 +4021,13 @@ int wlan_hdd_cfg80211_set_epno_list(struct wiphy *wiphy, return ret; } +#define PARAM_ID QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ID +#define PARAM_REALM QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM +#define PARAM_ROAM_ID \ + QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_CNSRTM_ID +#define PARAM_ROAM_PLMN \ + QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_PLMN + /** * hdd_extscan_passpoint_fill_network_list() - passpoint fill network list * @hddctx: HDD context @@ -4039,7 +4046,8 @@ static int hdd_extscan_passpoint_fill_network_list( { struct nlattr *network[QCA_WLAN_VENDOR_ATTR_PNO_MAX + 1]; struct nlattr *networks; - int rem1, len; + int rem1; + size_t len; uint8_t index; uint32_t expected_networks; @@ -4068,49 +4076,47 @@ static int hdd_extscan_passpoint_fill_network_list( } /* Parse and fetch identifier */ - if (!network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ID]) { + if (!network[PARAM_ID]) { hdd_err("attr passpoint id failed"); return -EINVAL; } - req_msg->networks[index].id = nla_get_u32( - network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ID]); + req_msg->networks[index].id = nla_get_u32(network[PARAM_ID]); hdd_debug("Id %u", req_msg->networks[index].id); /* Parse and fetch realm */ - if (!network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM]) { + if (!network[PARAM_REALM]) { hdd_err("attr realm failed"); return -EINVAL; } - len = nla_len( - network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM]); - if (len < 0 || len > SIR_PASSPOINT_REALM_LEN) { - hdd_err("Invalid realm size %d", len); + len = nla_strlcpy(req_msg->networks[index].realm, + network[PARAM_REALM], + SIR_PASSPOINT_REALM_LEN); + /* Don't send partial realm to firmware */ + if (len >= SIR_PASSPOINT_REALM_LEN) { + hdd_err("user passed invalid realm, len:%zu", len); return -EINVAL; } - qdf_mem_copy(req_msg->networks[index].realm, - nla_data(network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM]), - len); - hdd_debug("realm len %d", len); + hdd_debug("realm: %s", req_msg->networks[index].realm); /* Parse and fetch roaming consortium ids */ - if (!network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_CNSRTM_ID]) { + if (!network[PARAM_ROAM_ID]) { hdd_err("attr roaming consortium ids failed"); return -EINVAL; } nla_memcpy(&req_msg->networks[index].roaming_consortium_ids, - network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_CNSRTM_ID], - sizeof(req_msg->networks[0].roaming_consortium_ids)); + network[PARAM_ROAM_ID], + sizeof(req_msg->networks[0].roaming_consortium_ids)); hdd_debug("roaming consortium ids"); /* Parse and fetch plmn */ - if (!network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_PLMN]) { + if (!network[PARAM_ROAM_PLMN]) { hdd_err("attr plmn failed"); return -EINVAL; } nla_memcpy(&req_msg->networks[index].plmn, - network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_PLMN], - SIR_PASSPOINT_PLMN_LEN); + network[PARAM_ROAM_PLMN], + SIR_PASSPOINT_PLMN_LEN); hdd_debug("plmn %02x:%02x:%02x)", req_msg->networks[index].plmn[0], req_msg->networks[index].plmn[1], @@ -4345,6 +4351,11 @@ int wlan_hdd_cfg80211_reset_passpoint_list(struct wiphy *wiphy, return ret; } +#undef PARAM_ID +#undef PARAM_REALM +#undef PARAM_ROAM_ID +#undef PARAM_ROAM_PLMN + /** * wlan_hdd_init_completion_extwow() - Initialize ext wow variable * @hdd_ctx: Global HDD context diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c index 
f520359b86183e6f66fdebde3beac94fba8e9c28..eabc11098f9fa5781336c572786d1472c56cc342 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c @@ -405,6 +405,10 @@ static int __hdd_hostapd_stop(struct net_device *dev) hdd_stop_adapter(hdd_ctx, adapter, true); clear_bit(DEVICE_IFACE_OPENED, &adapter->event_flags); + + if (!hdd_is_cli_iface_up(hdd_ctx)) + sme_scan_flush_result(hdd_ctx->hHal); + /* Stop all tx queues */ hdd_info("Disabling queues"); wlan_hdd_netif_queue_control(adapter, @@ -1543,7 +1547,7 @@ QDF_STATUS hdd_hostapd_sap_event_cb(tpSap_Event pSapEvent, struct ch_params_s sap_ch_param = {0}; eCsrPhyMode phy_mode; bool legacy_phymode; - tSap_StationDisassocCompleteEvent *disconnect_event; + tSap_StationDisassocCompleteEvent *disassoc_comp; hdd_station_info_t *stainfo; cds_context_type *cds_ctx; @@ -2153,34 +2157,21 @@ QDF_STATUS hdd_hostapd_sap_event_cb(tpSap_Event pSapEvent, hdd_green_ap_add_sta(pHddCtx); break; - case eSAP_STA_LOSTLINK_DETECTED: - disconnect_event = - &pSapEvent->sapevt.sapStationDisassocCompleteEvent; - - wlan_hdd_get_peer_rssi(pHostapdAdapter, - &disconnect_event->staMac, - HDD_WLAN_GET_PEER_RSSI_SOURCE_DRIVER); - - /* - * For user initiated disconnect, reason_code is updated while - * issuing the disconnect from HDD. - */ - if (disconnect_event->reason != eSAP_USR_INITATED_DISASSOC) { - stainfo = hdd_get_stainfo( - pHostapdAdapter->cache_sta_info, - disconnect_event->staMac); - if (stainfo) - stainfo->reason_code = - disconnect_event->reason_code; - } - return QDF_STATUS_SUCCESS; - case eSAP_STA_DISASSOC_EVENT: + disassoc_comp = + &pSapEvent->sapevt.sapStationDisassocCompleteEvent; memcpy(wrqu.addr.sa_data, - &pSapEvent->sapevt.sapStationDisassocCompleteEvent. 
- staMac, QDF_MAC_ADDR_SIZE); + &disassoc_comp->staMac, QDF_MAC_ADDR_SIZE); hdd_notice(" disassociated " MAC_ADDRESS_STR, MAC_ADDR_ARRAY(wrqu.addr.sa_data)); + stainfo = hdd_get_stainfo(pHostapdAdapter->cache_sta_info, + disassoc_comp->staMac); + if (stainfo) { + stainfo->rssi = disassoc_comp->rssi; + stainfo->tx_rate = disassoc_comp->tx_rate; + stainfo->rx_rate = disassoc_comp->rx_rate; + stainfo->reason_code = disassoc_comp->reason_code; + } qdf_status = qdf_event_set(&pHostapdState->qdf_sta_disassoc_event); if (!QDF_IS_STATUS_SUCCESS(qdf_status)) @@ -2644,11 +2635,11 @@ int hdd_softap_unpack_ie(tHalHandle halHandle, bool *pMFPRequired, uint16_t gen_ie_len, uint8_t *gen_ie) { - tDot11fIERSN dot11RSNIE; - tDot11fIEWPA dot11WPAIE; - + uint32_t ret; uint8_t *pRsnIe; uint16_t RSNIeLen; + tDot11fIERSN dot11RSNIE; + tDot11fIEWPA dot11WPAIE; if (NULL == halHandle) { hdd_err("Error haHandle returned NULL"); @@ -2671,8 +2662,13 @@ int hdd_softap_unpack_ie(tHalHandle halHandle, RSNIeLen = gen_ie_len - 2; /* Unpack the RSN IE */ memset(&dot11RSNIE, 0, sizeof(tDot11fIERSN)); - dot11f_unpack_ie_rsn((tpAniSirGlobal) halHandle, - pRsnIe, RSNIeLen, &dot11RSNIE, false); + ret = dot11f_unpack_ie_rsn((tpAniSirGlobal) halHandle, + pRsnIe, RSNIeLen, &dot11RSNIE, + false); + if (DOT11F_FAILED(ret)) { + hdd_err("unpack failed, ret: 0x%x", ret); + return -EINVAL; + } /* Copy out the encryption and authentication types */ hdd_debug("pairwise cipher suite count: %d", dot11RSNIE.pwise_cipher_suite_count); @@ -2707,8 +2703,12 @@ int hdd_softap_unpack_ie(tHalHandle halHandle, RSNIeLen = gen_ie_len - (2 + 4); /* Unpack the WPA IE */ memset(&dot11WPAIE, 0, sizeof(tDot11fIEWPA)); - dot11f_unpack_ie_wpa((tpAniSirGlobal) halHandle, + ret = dot11f_unpack_ie_wpa((tpAniSirGlobal) halHandle, pRsnIe, RSNIeLen, &dot11WPAIE, false); + if (DOT11F_FAILED(ret)) { + hdd_err("unpack failed, ret: 0x%x", ret); + return -EINVAL; + } /* Copy out the encryption and authentication types */ hdd_debug("WPA unicast cipher suite count: %d", dot11WPAIE.unicast_cipher_count); @@ -4461,7 +4461,6 @@ static __iw_softap_disassoc_sta(struct net_device *dev, uint8_t *peerMacAddr; int ret; struct tagCsrDelStaParams del_sta_params; - hdd_station_info_t *stainfo; ENTER_DEV(dev); @@ -4488,11 +4487,6 @@ static __iw_softap_disassoc_sta(struct net_device *dev, &del_sta_params); hdd_softap_sta_disassoc(pHostapdAdapter, &del_sta_params); - stainfo = hdd_get_stainfo(pHostapdAdapter->cache_sta_info, - del_sta_params.peerMacAddr); - if (stainfo) - stainfo->reason_code = del_sta_params.reason_code; - EXIT(); return 0; } @@ -5232,7 +5226,7 @@ static int __iw_get_ap_freq(struct net_device *dev, return -EIO; } status = hdd_wlan_get_freq(channel, &freq); - if (true == status) { + if (0 == status) { /* Set Exponent parameter as 6 (MHZ) in struct * iw_freq * iwlist & iwconfig command * shows frequency into proper @@ -5244,7 +5238,7 @@ static int __iw_get_ap_freq(struct net_device *dev, } else { channel = pHddApCtx->operatingChannel; status = hdd_wlan_get_freq(channel, &freq); - if (true == status) { + if (0 == status) { /* Set Exponent parameter as 6 (MHZ) in struct iw_freq * iwlist & iwconfig command shows frequency into proper * format (2.412 GHz instead of 246.2 MHz) @@ -6762,19 +6756,24 @@ static bool wlan_hdd_rate_is_11g(u8 rate) */ static bool wlan_hdd_get_sap_obss(hdd_adapter_t *pHostapdAdapter) { - uint8_t ht_cap_ie[DOT11F_IE_HTCAPS_MAX_LEN]; + uint32_t ret; + uint8_t *ie = NULL; tDot11fIEHTCaps dot11_ht_cap_ie = {0}; + uint8_t 
ht_cap_ie[DOT11F_IE_HTCAPS_MAX_LEN]; hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(pHostapdAdapter); beacon_data_t *beacon = pHostapdAdapter->sessionCtx.ap.beacon; - uint8_t *ie = NULL; ie = wlan_hdd_cfg80211_get_ie_ptr(beacon->tail, beacon->tail_len, WLAN_EID_HT_CAPABILITY); if (ie && ie[1]) { qdf_mem_copy(ht_cap_ie, &ie[2], DOT11F_IE_HTCAPS_MAX_LEN); - dot11f_unpack_ie_ht_caps((tpAniSirGlobal)hdd_ctx->hHal, - ht_cap_ie, ie[1], &dot11_ht_cap_ie, - false); + ret = dot11f_unpack_ie_ht_caps((tpAniSirGlobal)hdd_ctx->hHal, + ht_cap_ie, ie[1], + &dot11_ht_cap_ie, false); + if (DOT11F_FAILED(ret)) { + hdd_err("unpack failed, ret: 0x%x", ret); + return false; + } return dot11_ht_cap_ie.supportedChannelWidthSet; } @@ -7800,6 +7799,205 @@ static inline int wlan_hdd_set_udp_resp_offload(hdd_adapter_t *padapter, } #endif +static void hdd_check_and_disconnect_sta_on_invalid_channel( + hdd_context_t *hdd_ctx) +{ + hdd_adapter_t *sta_adapter; + uint8_t sta_chan; + + sta_chan = hdd_get_operating_channel(hdd_ctx, QDF_STA_MODE); + + if (!sta_chan) { + hdd_err("STA not connected"); + return; + } + + hdd_err("STA connected on chan %d", sta_chan); + + if (sme_is_channel_valid(hdd_ctx->hHal, sta_chan)) { + hdd_err("STA connected on chan %d and it is valid", sta_chan); + return; + } + + sta_adapter = hdd_get_adapter(hdd_ctx, QDF_STA_MODE); + + if (!sta_adapter) { + hdd_err("STA adapter does not exist"); + return; + } + + hdd_err("chan %d not valid, issue disconnect", sta_chan); + /* Issue Disconnect request */ + wlan_hdd_disconnect(sta_adapter, eCSR_DISCONNECT_REASON_DEAUTH); +} + +/** + * wlan_hdd_get_wiphy_channel() - Get wiphy channel + * @wiphy: Pointer to wiphy structure + * @freq: Frequency of the channel for which the wiphy hw value is required + * + * Return: wiphy channel for valid frequency else return NULL + */ +static struct ieee80211_channel *wlan_hdd_get_wiphy_channel( + struct wiphy *wiphy, + uint32_t freq) +{ + uint32_t band_num, channel_num; + struct ieee80211_channel *wiphy_channel = NULL; + + for (band_num = 0; band_num < HDD_NUM_NL80211_BANDS; band_num++) { + for (channel_num = 0; channel_num < + wiphy->bands[band_num]->n_channels; + channel_num++) { + wiphy_channel = &(wiphy->bands[band_num]-> + channels[channel_num]); + if (wiphy_channel->center_freq == freq) + return wiphy_channel; + } + } + return wiphy_channel; +} + +int wlan_hdd_restore_channels(hdd_context_t *hdd_ctx) +{ + struct hdd_cache_channels *cache_chann; + struct wiphy *wiphy; + int freq, status, rf_channel; + int i; + struct ieee80211_channel *wiphy_channel = NULL; + + ENTER(); + + if (!hdd_ctx) { + hdd_err("HDD Context is NULL"); + return -EINVAL; + } + + wiphy = hdd_ctx->wiphy; + if (!wiphy) { + hdd_err("Wiphy is NULL"); + return -EINVAL; + } + + qdf_mutex_acquire(&hdd_ctx->cache_channel_lock); + + cache_chann = hdd_ctx->original_channels; + + if (!cache_chann || !cache_chann->num_channels) { + qdf_mutex_release(&hdd_ctx->cache_channel_lock); + hdd_err("channel list is NULL or num channels are zero"); + return -EINVAL; + } + + for (i = 0; i < cache_chann->num_channels; i++) { + status = hdd_wlan_get_freq( + cache_chann->channel_info[i].channel_num, + &freq); + if (status) + continue; + + wiphy_channel = wlan_hdd_get_wiphy_channel(wiphy, freq); + if (!wiphy_channel) + continue; + rf_channel = wiphy_channel->hw_value; + /* + * Restore the orginal states of the channels + * only if we have cached non zero values + */ + if (cache_chann->channel_info[i].reg_status) + cds_set_channel_state(rf_channel, + cache_chann-> + 
channel_info[i].reg_status); + + if (cache_chann->channel_info[i].wiphy_status && wiphy_channel) + wiphy_channel->flags = + cache_chann->channel_info[i].wiphy_status; + } + + qdf_mutex_release(&hdd_ctx->cache_channel_lock); + + status = sme_update_channel_list(hdd_ctx->hHal); + if (status) + hdd_err("Can't Restore channel list"); + EXIT(); + + return 0; +} + +/** + * wlan_hdd_disable_channels() - Cache the channels + * and current state of the channels from the channel list + * received in the command and disable the channels on the + * wiphy and reg table. + * @hdd_ctx: Pointer to hdd context + * + * Return: 0 on success, Error code on failure + */ +static int wlan_hdd_disable_channels(hdd_context_t *hdd_ctx) +{ + struct hdd_cache_channels *cache_chann; + struct wiphy *wiphy; + int freq, status, rf_channel; + int i; + struct ieee80211_channel *wiphy_channel = NULL; + + ENTER(); + + if (!hdd_ctx) { + hdd_err("HDD Context is NULL"); + return -EINVAL; + } + + wiphy = hdd_ctx->wiphy; + if (!wiphy) { + hdd_err("Wiphy is NULL"); + return -EINVAL; + } + + qdf_mutex_acquire(&hdd_ctx->cache_channel_lock); + cache_chann = hdd_ctx->original_channels; + + if (!cache_chann || !cache_chann->num_channels) { + qdf_mutex_release(&hdd_ctx->cache_channel_lock); + hdd_err("channel list is NULL or num channels are zero"); + return -EINVAL; + } + + for (i = 0; i < cache_chann->num_channels; i++) { + status = hdd_wlan_get_freq( + cache_chann->channel_info[i].channel_num, + &freq); + if (status) + continue; + wiphy_channel = wlan_hdd_get_wiphy_channel(wiphy, freq); + if (!wiphy_channel) + continue; + rf_channel = wiphy_channel->hw_value; + /* + * Cache the current states of + * the channels + */ + cache_chann->channel_info[i].reg_status = + cds_get_channel_state( + rf_channel); + cache_chann->channel_info[i].wiphy_status = + wiphy_channel->flags; + hdd_debug("Disable channel %d reg_stat %d wiphy_stat 0x%x", + cache_chann->channel_info[i].channel_num, + cache_chann->channel_info[i].reg_status, + wiphy_channel->flags); + + cds_set_channel_state(rf_channel, CHANNEL_STATE_DISABLE); + wiphy_channel->flags |= IEEE80211_CHAN_DISABLED; + } + + qdf_mutex_release(&hdd_ctx->cache_channel_lock); + status = sme_update_channel_list(hdd_ctx->hHal); + + EXIT(); + return status; +} + /** * wlan_hdd_cfg80211_start_bss() - start bss * @pHostapdAdapter: Pointer to hostapd adapter @@ -7841,6 +8039,7 @@ int wlan_hdd_cfg80211_start_bss(hdd_adapter_t *pHostapdAdapter, enum dfs_mode mode; bool disable_fw_tdls_state = false; uint8_t ignore_cac = 0; + hdd_adapter_t *sta_adapter; ENTER(); @@ -7857,6 +8056,30 @@ int wlan_hdd_cfg80211_start_bss(hdd_adapter_t *pHostapdAdapter, } } + /* + * For STA+SAP concurrency support from GUI, first STA connection gets + * triggered and while it is in progress, SAP start also comes up. + * Once STA association is successful, STA connect event is sent to + * kernel which gets queued in kernel workqueue and supplicant won't + * process M1 received from AP and send M2 until this NL80211_CONNECT + * event is received. Workqueue is not scheduled as RTNL lock is already + * taken by hostapd thread which has issued start_bss command to driver. + * Driver cannot complete start_bss as the pending command at the head + * of the SME command pending list is hw_mode_update for STA session + * which cannot be processed as SME is in WAITforKey state for STA + * interface. 
The start_bss command for SAP interface is queued behind + * the hw_mode_update command and so it cannot be processed until + * hw_mode_update command is processed. This is causing a deadlock so + * disconnect the STA interface first if connection or key exchange is + * in progress and then start SAP interface. + */ + sta_adapter = hdd_get_sta_connection_in_progress(pHddCtx); + if (sta_adapter) { + hdd_debug("Disconnecting STA with session id: %d", + sta_adapter->sessionId); + wlan_hdd_disconnect(sta_adapter, eCSR_DISCONNECT_REASON_DEAUTH); + } + sme_config = qdf_mem_malloc(sizeof(tSmeConfigParams)); if (!sme_config) { hdd_err("failed to allocate memory"); @@ -7881,6 +8104,17 @@ int wlan_hdd_cfg80211_start_bss(hdd_adapter_t *pHostapdAdapter, } } + if (pHostapdAdapter->device_mode == QDF_SAP_MODE) { + /* + * Disable the channels received in command + * SET_DISABLE_CHANNEL_LIST + */ + status = wlan_hdd_disable_channels(pHddCtx); + if (!QDF_IS_STATUS_SUCCESS(status)) + hdd_err("Disable channel list fail"); + hdd_check_and_disconnect_sta_on_invalid_channel(pHddCtx); + } + pConfig = &pHostapdAdapter->sessionCtx.ap.sapConfig; pBeacon = pHostapdAdapter->sessionCtx.ap.beacon; @@ -8482,6 +8716,8 @@ int wlan_hdd_cfg80211_start_bss(hdd_adapter_t *pHostapdAdapter, error: if (sme_config) qdf_mem_free(sme_config); + if (pHostapdAdapter->device_mode == QDF_SAP_MODE) + wlan_hdd_restore_channels(pHddCtx); /* Revert the indoor to passive marking if START BSS fails */ if (iniConfig->disable_indoor_channel) { hdd_update_indoor_channel(pHddCtx, false); @@ -8563,6 +8799,18 @@ static int __wlan_hdd_cfg80211_stop_ap(struct wiphy *wiphy, if (0 != ret) return ret; + /* + * If a STA connection is in progress in another adapter, disconnect + * the STA and complete the SAP operation. STA will reconnect + * after SAP stop is done. + */ + staAdapter = hdd_get_sta_connection_in_progress(pHddCtx); + if (staAdapter) { + hdd_debug("Disconnecting STA with session id: %d", + staAdapter->sessionId); + wlan_hdd_disconnect(staAdapter, eCSR_DISCONNECT_REASON_DEAUTH); + } + if (pAdapter->device_mode == QDF_SAP_MODE) { wlan_hdd_del_station(pAdapter); hdd_green_ap_stop_bss(pHddCtx); diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.h b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.h index ebc6a7763114cc4ceac59c43ed4211e29570e6f9..6bff97de826e16232adb5156502f855e64000900 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.h +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 
* @@ -137,9 +137,5 @@ bool hdd_is_peer_associated(hdd_adapter_t *adapter, struct qdf_mac_addr *mac_addr); void hdd_sap_indicate_disconnect_for_sta(hdd_adapter_t *adapter); void hdd_sap_destroy_events(hdd_adapter_t *adapter); -void hdd_copy_ht_caps(struct ieee80211_ht_cap *hdd_ht_cap, - tDot11fIEHTCaps *roam_ht_cap); -void hdd_copy_vht_caps(struct ieee80211_vht_cap *hdd_vht_cap, - tDot11fIEVHTCaps *roam_vht_cap); #endif /* end #if !defined(WLAN_HDD_HOSTAPD_H) */ diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ioctl.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ioctl.c index 9addef2a4aac1b270cc969693d78fc1ac44e7526..12c208b0dfae12a8667c94cfa82499f19a491d9d 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ioctl.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ioctl.c @@ -853,56 +853,17 @@ static int hdd_parse_reassoc_command_v1_data(const uint8_t *pValue, void hdd_wma_send_fastreassoc_cmd(hdd_adapter_t *adapter, const tSirMacAddr bssid, int channel) { - QDF_STATUS status; hdd_wext_state_t *wext_state = WLAN_HDD_GET_WEXT_STATE_PTR(adapter); hdd_station_ctx_t *hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter); - tCsrRoamProfile *profile = &wext_state->roamProfile; - tSirMacAddr connected_bssid = {0}; - struct wma_roam_invoke_cmd *fastreassoc; - cds_msg_t msg = {0}; - - fastreassoc = qdf_mem_malloc(sizeof(*fastreassoc)); - if (NULL == fastreassoc) { - hdd_err("qdf_mem_malloc failed for fastreassoc"); - return; - } - if (hdd_sta_ctx) { - qdf_mem_copy(connected_bssid, - hdd_sta_ctx->conn_info.bssId.bytes, ETH_ALEN); - /* if both are same then set the flag */ - if (!qdf_mem_cmp(connected_bssid, bssid, ETH_ALEN)) { - fastreassoc->is_same_bssid = true; - hdd_debug("bssid same, bssid[%pM]", bssid); - } - } - fastreassoc->vdev_id = adapter->sessionId; - fastreassoc->bssid[0] = bssid[0]; - fastreassoc->bssid[1] = bssid[1]; - fastreassoc->bssid[2] = bssid[2]; - fastreassoc->bssid[3] = bssid[3]; - fastreassoc->bssid[4] = bssid[4]; - fastreassoc->bssid[5] = bssid[5]; - - status = sme_get_beacon_frm(WLAN_HDD_GET_HAL_CTX(adapter), profile, - bssid, &fastreassoc->frame_buf, - &fastreassoc->frame_len, - &channel); - fastreassoc->channel = channel; - if (QDF_STATUS_SUCCESS != status) { - hdd_warn("sme_get_beacon_frm failed"); - fastreassoc->frame_buf = NULL; - fastreassoc->frame_len = 0; - } + tCsrRoamProfile *profile = &wext_state->roamProfile; + tSirMacAddr connected_bssid; - msg.type = SIR_HAL_ROAM_INVOKE; - msg.reserved = 0; - msg.bodyptr = fastreassoc; - status = cds_mq_post_message(QDF_MODULE_ID_WMA, &msg); - if (QDF_STATUS_SUCCESS != status) { - hdd_err("Not able to post ROAM_INVOKE_CMD message to WMA"); - qdf_mem_free(fastreassoc); - } + qdf_mem_copy(connected_bssid, hdd_sta_ctx->conn_info.bssId.bytes, + ETH_ALEN); + sme_fast_reassoc(WLAN_HDD_GET_HAL_CTX(adapter), + profile, bssid, channel, adapter->sessionId, + connected_bssid); } #endif @@ -5195,9 +5156,6 @@ static int drv_cmd_get_ibss_peer_info_all(hdd_adapter_t *adapter, } hdd_debug("%s", &extra[numOfBytestoPrint]); } - - /* Free temporary buffer */ - qdf_mem_free(extra); } else { /* Command failed, log error */ hdd_err("GETIBSSPEERINFOALL command failed with status code %d", @@ -6346,7 +6304,7 @@ static int hdd_driver_rxfilter_comand_handler(uint8_t *command, value = command + 13; ret = kstrtou8(value, 10, &type); if (ret < 0) { - hdd_err("kstrtou8 failed invalid input value %d", type); + hdd_err("kstrtou8 failed invalid input value"); return -EINVAL; } @@ -6903,6 +6861,310 @@ static int 
drv_cmd_set_channel_switch(hdd_adapter_t *adapter, return 0; } +void wlan_hdd_free_cache_channels(hdd_context_t *hdd_ctx) +{ + ENTER(); + + if (!hdd_ctx->original_channels) + return; + + qdf_mutex_acquire(&hdd_ctx->cache_channel_lock); + hdd_ctx->original_channels->num_channels = 0; + qdf_mem_free(hdd_ctx->original_channels->channel_info); + hdd_ctx->original_channels->channel_info = NULL; + qdf_mem_free(hdd_ctx->original_channels); + hdd_ctx->original_channels = NULL; + qdf_mutex_release(&hdd_ctx->cache_channel_lock); + + EXIT(); +} + +/** + * hdd_alloc_chan_cache() - Allocate the memory to cache the channel + * info for the channels received in command SET_DISABLE_CHANNEL_LIST + * @hdd_ctx: Pointer to HDD context + * @num_chan: Number of channels for which memory needs to + * be allocated + * + * Return: 0 on success and error code on failure + */ +static int hdd_alloc_chan_cache(hdd_context_t *hdd_ctx, int num_chan) +{ + hdd_ctx->original_channels = + qdf_mem_malloc(sizeof(struct hdd_cache_channels)); + if (!hdd_ctx->original_channels) { + hdd_err("QDF_MALLOC_ERR"); + return -ENOMEM; + } + hdd_ctx->original_channels->num_channels = num_chan; + hdd_ctx->original_channels->channel_info = + qdf_mem_malloc(num_chan * + sizeof(struct hdd_cache_channel_info)); + if (!hdd_ctx->original_channels->channel_info) { + hdd_err("QDF_MALLOC_ERR"); + hdd_ctx->original_channels->num_channels = 0; + qdf_mem_free(hdd_ctx->original_channels); + hdd_ctx->original_channels = NULL; + return -ENOMEM; + } + return 0; +} + +/** + * hdd_parse_disable_chan_cmd() - Parse the channel list received + * in command. + * @adapter: pointer to hdd adapter + * @ptr: Pointer to the command string + * + * This function parses the channel list received in the command. + * command should be a string having format + * SET_DISABLE_CHANNEL_LIST <num of channels> + * <channels separated by spaces>. + * If the command comes multiple times than this function will compare + * the channels received in the command with the channles cached in the + * first command, if the channel list matches with the cached channles, + * it returns success otherwise returns failure. 
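+ * For example (the channel numbers below are only illustrative):
+ *     SET_DISABLE_CHANNEL_LIST 3 11 36 40
+ * caches channels 11, 36 and 40 so that they are disabled while SAP is
+ * running, and
+ *     SET_DISABLE_CHANNEL_LIST 0
+ * restores the original channel states and frees the cached list.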
+ * + * Return: 0 on success, Error code on failure + */ + +static int hdd_parse_disable_chan_cmd(hdd_adapter_t *adapter, uint8_t *ptr) +{ + hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(adapter); + uint8_t *param; + int j, i, temp_int, ret = 0, num_channels; + int parsed_channels[MAX_CHANNEL]; + bool is_command_repeated = false; + + if (NULL == hdd_ctx) { + hdd_err("HDD Context is NULL"); + return -EINVAL; + } + + param = strnchr(ptr, strlen(ptr), ' '); + /*no argument after the command*/ + if (NULL == param) + return -EINVAL; + + /*no space after the command*/ + else if (SPACE_ASCII_VALUE != *param) + return -EINVAL; + + param++; + + /*removing empty spaces*/ + while ((SPACE_ASCII_VALUE == *param) && ('\0' != *param)) + param++; + + /*no argument followed by spaces*/ + if ('\0' == *param) + return -EINVAL; + + /*getting the first argument ie the number of channels*/ + if (sscanf(param, "%d ", &temp_int) != 1) { + hdd_err("Cannot get number of channels from input"); + return -EINVAL; + } + + if (temp_int < 0 || temp_int > MAX_CHANNEL) { + hdd_err("Invalid Number of channel received"); + return -EINVAL; + } + + hdd_debug("Number of channel to disable are: %d", temp_int); + + if (!temp_int) { + if (!wlan_hdd_restore_channels(hdd_ctx)) { + /* + * Free the cache channels only when the command is + * received with num channels as 0 + */ + wlan_hdd_free_cache_channels(hdd_ctx); + } + return 0; + } + + qdf_mutex_acquire(&hdd_ctx->cache_channel_lock); + + if (!hdd_ctx->original_channels) { + if (hdd_alloc_chan_cache(hdd_ctx, temp_int)) { + ret = -ENOMEM; + goto mem_alloc_failed; + } + } else if (hdd_ctx->original_channels->num_channels != temp_int) { + hdd_err("Invalid Number of channels"); + ret = -EINVAL; + is_command_repeated = true; + goto parse_failed; + } else { + is_command_repeated = true; + } + num_channels = temp_int; + for (j = 0; j < num_channels; j++) { + /* + * param pointing to the beginning of first space + * after number of channels + */ + param = strpbrk(param, " "); + /*no channel list after the number of channels argument*/ + if (NULL == param) { + hdd_err("Invalid No of channel provided in the list"); + ret = -EINVAL; + goto parse_failed; + } + + param++; + + /*removing empty space*/ + while ((SPACE_ASCII_VALUE == *param) && ('\0' != *param)) + param++; + + if ('\0' == *param) { + hdd_err("No channel is provided in the list"); + ret = -EINVAL; + goto parse_failed; + } + + if (sscanf(param, "%d ", &temp_int) != 1) { + hdd_err("Cannot read channel number"); + ret = -EINVAL; + goto parse_failed; + } + + if (!IS_CHANNEL_VALID(temp_int)) { + hdd_err("Invalid channel number received"); + ret = -EINVAL; + goto parse_failed; + } + + hdd_debug("channel[%d] = %d", j, temp_int); + parsed_channels[j] = temp_int; + } + + /*extra arguments check*/ + param = strpbrk(param, " "); + if (NULL != param) { + while ((SPACE_ASCII_VALUE == *param) && ('\0' != *param)) + param++; + + if ('\0' != *param) { + hdd_err("Invalid argument received"); + ret = -EINVAL; + goto parse_failed; + } + } + + /* + * If command is received first time, cache the channels to + * be disabled else compare the channels received in the + * command with the cached channels, if channel list matches + * return success otherewise return failure. 
+ */ + if (!is_command_repeated) + for (j = 0; j < num_channels; j++) + hdd_ctx->original_channels-> + channel_info[j].channel_num = + parsed_channels[j]; + else { + for (i = 0; i < num_channels; i++) { + for (j = 0; j < num_channels; j++) + if (hdd_ctx->original_channels-> + channel_info[i].channel_num == + parsed_channels[j]) + break; + if (j == num_channels) { + ret = -EINVAL; + goto parse_failed; + } + } + ret = 0; + } +mem_alloc_failed: + + qdf_mutex_release(&hdd_ctx->cache_channel_lock); + EXIT(); + + return ret; + +parse_failed: + qdf_mutex_release(&hdd_ctx->cache_channel_lock); + if (!is_command_repeated) + wlan_hdd_free_cache_channels(hdd_ctx); + EXIT(); + + return ret; +} + +static int drv_cmd_set_disable_chan_list(hdd_adapter_t *adapter, + hdd_context_t *hdd_ctx, + uint8_t *command, + uint8_t command_len, + hdd_priv_data_t *priv_data) +{ + return hdd_parse_disable_chan_cmd(adapter, command); +} + +/** + * hdd_get_disable_ch_list() - get disable channel list + * @hdd_ctx: hdd context + * @buf: buffer to hold disable channel list + * @buf_len: buffer length + * + * Return: length of data copied to buf + */ +static int hdd_get_disable_ch_list(hdd_context_t *hdd_ctx, uint8_t *buf, + uint32_t buf_len) +{ + struct hdd_cache_channel_info *ch_list; + unsigned char i, num_ch; + int len = 0; + + qdf_mutex_acquire(&hdd_ctx->cache_channel_lock); + if (hdd_ctx->original_channels && + hdd_ctx->original_channels->num_channels && + hdd_ctx->original_channels->channel_info) { + num_ch = hdd_ctx->original_channels->num_channels; + + len = scnprintf(buf, buf_len, "%s %hhu", + "GET_DISABLE_CHANNEL_LIST", num_ch); + ch_list = hdd_ctx->original_channels->channel_info; + for (i = 0; (i < num_ch) && len <= buf_len; i++) { + len += scnprintf(buf + len, buf_len - len, + " %d", ch_list[i].channel_num); + } + } + qdf_mutex_release(&hdd_ctx->cache_channel_lock); + + return len; +} + +static int drv_cmd_get_disable_chan_list(hdd_adapter_t *adapter, + hdd_context_t *hdd_ctx, + uint8_t *command, + uint8_t command_len, + hdd_priv_data_t *priv_data) +{ + char extra[512] = {0}; + int max_len, copied_length; + + hdd_debug("Received Command to get disable Channels list"); + + max_len = QDF_MIN(priv_data->total_len, sizeof(extra)); + copied_length = hdd_get_disable_ch_list(hdd_ctx, extra, max_len); + if (copied_length == 0) { + hdd_err("disable channel list is not yet programmed"); + return -EINVAL; + } + + if (copy_to_user(priv_data->buf, &extra, copied_length + 1)) { + hdd_err("failed to copy data to user buffer"); + return -EFAULT; + } + + hdd_debug("data:%s", extra); + return 0; +} + /* * The following table contains all supported WLAN HDD * IOCTL driver commands and the handler for each of them. 
@@ -7013,6 +7275,8 @@ static const struct hdd_drv_cmd hdd_drv_cmds[] = { {"CHANNEL_SWITCH", drv_cmd_set_channel_switch, true}, {"SETANTENNAMODE", drv_cmd_set_antenna_mode, true}, {"GETANTENNAMODE", drv_cmd_get_antenna_mode, false}, + {"SET_DISABLE_CHANNEL_LIST", drv_cmd_set_disable_chan_list, true}, + {"GET_DISABLE_CHANNEL_LIST", drv_cmd_get_disable_chan_list, false}, {"STOP", drv_cmd_dummy, false}, }; diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c index 7efaab3a6c4959d499b7f4e41620b83832acf319..bd75d66edf33f33ffbffe66b6af78711ecfbdd62 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c @@ -2389,6 +2389,13 @@ static int __hdd_stop(struct net_device *dev) /* Make sure the interface is marked as closed */ clear_bit(DEVICE_IFACE_OPENED, &adapter->event_flags); + /* + * Upon wifi turn off, DUT has to flush the scan results so if + * this is the last cli iface, flush the scan database. + */ + if (!hdd_is_cli_iface_up(hdd_ctx)) + sme_scan_flush_result(hdd_ctx->hHal); + /* * Find if any iface is up. If any iface is up then can't put device to * sleep/power save mode @@ -3543,6 +3550,17 @@ int hdd_set_fw_params(hdd_adapter_t *adapter) hdd_err("Failed to set LPRx"); goto error; } + + ret = sme_cli_set_command( + adapter->sessionId, + WMI_PDEV_PARAM_TX_SCH_DELAY, + hdd_ctx->config->enable_tx_sch_delay, + PDEV_CMD); + if (ret) { + hdd_err("Failed to set WMI_PDEV_PARAM_TX_SCH_DELAY"); + goto error; + } + if (adapter->device_mode == QDF_STA_MODE) { sme_set_smps_cfg(adapter->sessionId, HDD_STA_SMPS_PARAM_UPPER_BRSSI_THRESH, @@ -4106,9 +4124,14 @@ QDF_STATUS hdd_stop_adapter(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter, hdd_ctx->hHal, adapter->sessionId, eCSR_DISCONNECT_REASON_IBSS_LEAVE); - else if (QDF_STA_MODE == adapter->device_mode) + else if (QDF_STA_MODE == adapter->device_mode) { qdf_ret_status = wlan_hdd_try_disconnect(adapter); + hdd_debug("Send disconnected event to userspace"); + wlan_hdd_cfg80211_indicate_disconnect( + adapter->dev, true, + WLAN_REASON_UNSPECIFIED); + } else qdf_ret_status = sme_roam_disconnect( hdd_ctx->hHal, @@ -5899,6 +5922,7 @@ static void hdd_wlan_exit(hdd_context_t *hdd_ctx) qdf_spinlock_destroy(&hdd_ctx->hdd_adapter_lock); qdf_spinlock_destroy(&hdd_ctx->sta_update_info_lock); qdf_spinlock_destroy(&hdd_ctx->connection_status_lock); + qdf_mutex_destroy(&hdd_ctx->cache_channel_lock); /* * Close CDS @@ -9750,6 +9774,8 @@ int hdd_wlan_stop_modules(hdd_context_t *hdd_ctx, bool ftm_mode) hdd_err("CNSS power down failed put device into Low power mode:%d", ret); } + /* Free the cache channels of the command SET_DISABLE_CHANNEL_LIST */ + wlan_hdd_free_cache_channels(hdd_ctx); /* Once the firmware sequence is completed reset this flag */ hdd_ctx->imps_enabled = false; hdd_ctx->driver_status = DRIVER_MODULES_CLOSED; @@ -9938,6 +9964,11 @@ int hdd_wlan_startup(struct device *dev) if (ret) goto err_hdd_free_context; + + ret = qdf_mutex_create(&hdd_ctx->cache_channel_lock); + if (QDF_IS_STATUS_ERROR(ret)) + goto err_hdd_free_context; + hdd_green_ap_init(hdd_ctx); hdd_init_spectral_scan(hdd_ctx); @@ -10365,8 +10396,6 @@ void hdd_softap_sta_disassoc(hdd_adapter_t *adapter, if (pDelStaParams->peerMacAddr.bytes[0] & 0x1) return; - wlan_hdd_get_peer_rssi(adapter, &pDelStaParams->peerMacAddr, - HDD_WLAN_GET_PEER_RSSI_SOURCE_DRIVER); wlansap_disassoc_sta(WLAN_HDD_GET_SAP_CTX_PTR(adapter), pDelStaParams); } @@ -12272,6 +12301,28 @@ 
void hdd_pld_ipa_uc_shutdown_pipes(void) hdd_ipa_uc_force_pipe_shutdown(hdd_ctx); } +bool hdd_is_cli_iface_up(hdd_context_t *hdd_ctx) +{ + hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL; + hdd_adapter_t *adapter; + QDF_STATUS status; + + status = hdd_get_front_adapter(hdd_ctx, &adapter_node); + while (NULL != adapter_node && QDF_STATUS_SUCCESS == status) { + adapter = adapter_node->pAdapter; + if ((adapter->device_mode == QDF_STA_MODE || + adapter->device_mode == QDF_P2P_CLIENT_MODE) && + qdf_atomic_test_bit(DEVICE_IFACE_OPENED, + &adapter->event_flags)){ + return true; + } + status = hdd_get_next_adapter(hdd_ctx, adapter_node, &next); + adapter_node = next; + } + + return false; +} + /* Register the module init/exit functions */ module_init(hdd_module_init); module_exit(hdd_module_exit); diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c index 4fad0cd258f3c0f20ac23b117c0af57fec894e90..1edea835029a0d2740409a35108700290124ca17 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c @@ -1453,6 +1453,20 @@ QDF_STATUS hdd_wlan_shutdown(void) hdd_debug("Invoking packetdump deregistration API"); wlan_deregister_txrx_packetdump(); + /* + * After SSR, the FW clears its txrx stats while the + * adapters on the host are intact, so the host counts + * are still available. If a Set stats command comes + * again, the host would keep counting from its last + * saved value, i.e. the count before SSR, while the FW + * would count from 0. That eventually reports a + * mismatch of packet counts between host and FW to the + * framework and creates ambiguity. Therefore, reset the + * host counts here so that after SSR both FW and host + * start counting from 0.
+ */ + hdd_reset_all_adapters_connectivity_stats(pHddCtx); + hdd_reset_all_adapters(pHddCtx); /* Flush cached rx frame queue */ diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_scan.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_scan.c index d8e264d19e0c247e5d28c9cb70547254dc84e701..4556e4e156e00435fe9a28d9ff1e6b28f57e1e8d 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_scan.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_scan.c @@ -305,6 +305,7 @@ static int hdd_add_scan_event_from_ies(struct hdd_scan_info *scanInfo, tCsrScanResultInfo *scan_result, char *current_event, char *last_event) { + int ret; hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(scanInfo->dev); tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter); tSirBssDescription *descriptor = &scan_result->BssDescriptor; @@ -334,9 +335,13 @@ static int hdd_add_scan_event_from_ies(struct hdd_scan_info *scanInfo, if (ie_length <= 0) return 0; - dot11f_unpack_beacon_i_es((tpAniSirGlobal) + ret = dot11f_unpack_beacon_i_es((tpAniSirGlobal) hHal, (uint8_t *) descriptor->ieFields, ie_length, &dot11BeaconIEs, false); + if (DOT11F_FAILED(ret)) { + hdd_err("unpack failed, ret: 0x%x", ret); + return -EINVAL; + } pDot11SSID = &dot11BeaconIEs.SSID; @@ -2619,7 +2624,7 @@ static int __wlan_hdd_cfg80211_vendor_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request = NULL; struct nlattr *attr; enum nl80211_band band; - uint8_t n_channels = 0, n_ssid = 0; + uint32_t n_channels = 0, n_ssid = 0; uint32_t tmp, count, j; size_t len, ie_len; struct ieee80211_channel *chan; diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c index e841c1637946af0ff530c059671ceab25b63cc1e..0eebb2b759f4700ab37138628bcecc7576121b16 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * @@ -913,6 +913,10 @@ QDF_STATUS hdd_softap_register_sta(hdd_adapter_t *pAdapter, pAdapter->aStaInfo[staId].tlSTAState = OL_TXRX_PEER_STATE_AUTH; pAdapter->sessionCtx.ap.uIsAuthenticated = true; + if (!qdf_is_macaddr_broadcast(pPeerMacAddress)) + qdf_status = wlan_hdd_send_sta_authorized_event( + pAdapter, pHddCtx, + pPeerMacAddress); } else { hdd_info("ULA auth StaId= %d. 
Changing TL state to CONNECTED at Join time", @@ -1016,6 +1020,8 @@ QDF_STATUS hdd_softap_stop_bss(hdd_adapter_t *pAdapter) } } } + if (pAdapter->device_mode == QDF_SAP_MODE) + wlan_hdd_restore_channels(pHddCtx); /* Mark the indoor channel (passive) to enable */ if (pHddCtx->config->disable_indoor_channel) { diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c index ce13f51bcf8a61391ef7527e1bb1670f9dc16675..56c5ed5729c09df070dae8e3f8028a6a87c8fe86 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c @@ -469,6 +469,56 @@ static void hdd_get_transmit_sta_id(hdd_adapter_t *adapter, } } +/** + * hdd_clear_tx_rx_connectivity_stats() - clear connectivity stats + * @hdd_ctx: pointer to HDD Station Context + * + * Return: None + */ +static void hdd_clear_tx_rx_connectivity_stats(hdd_adapter_t *adapter) +{ + QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG, + "Clear txrx connectivity stats"); + qdf_mem_zero(&adapter->hdd_stats.hdd_arp_stats, + sizeof(adapter->hdd_stats.hdd_arp_stats)); + qdf_mem_zero(&adapter->hdd_stats.hdd_dns_stats, + sizeof(adapter->hdd_stats.hdd_dns_stats)); + qdf_mem_zero(&adapter->hdd_stats.hdd_tcp_stats, + sizeof(adapter->hdd_stats.hdd_tcp_stats)); + qdf_mem_zero(&adapter->hdd_stats.hdd_icmpv4_stats, + sizeof(adapter->hdd_stats.hdd_icmpv4_stats)); + adapter->pkt_type_bitmap = 0; + adapter->track_arp_ip = 0; + qdf_mem_zero(adapter->dns_payload, adapter->track_dns_domain_len); + adapter->track_dns_domain_len = 0; + adapter->track_src_port = 0; + adapter->track_dest_port = 0; + adapter->track_dest_ipv4 = 0; +} + +void hdd_reset_all_adapters_connectivity_stats(hdd_context_t *hdd_ctx) +{ + hdd_adapter_list_node_t *adapterNode = NULL, *pNext = NULL; + QDF_STATUS status; + hdd_adapter_t *adapter; + + ENTER(); + + status = hdd_get_front_adapter(hdd_ctx, &adapterNode); + + while (NULL != adapterNode && QDF_STATUS_SUCCESS == status) { + adapter = adapterNode->pAdapter; + hdd_clear_tx_rx_connectivity_stats(adapter); + + status = hdd_get_next_adapter(hdd_ctx, adapterNode, &pNext); + adapterNode = pNext; + } + + EXIT(); + +} + + /** * hdd_tx_rx_is_dns_domain_name_match() - function to check whether dns * domain name in the received skb matches with the tracking dns domain diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wext.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wext.c index 50632e921505f889c9d2430bc33ea97827e6e356..824c9da2c7d57e4ce9c9740498e059caaf44f392 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wext.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wext.c @@ -3343,7 +3343,7 @@ int hdd_wlan_get_frag_threshold(hdd_adapter_t *pAdapter, * @channel: channel to be converted * @pfreq: where to store the frequency * - * Return: 1 on success, otherwise a negative errno + * Return: 0 on success, otherwise a negative errno */ int hdd_wlan_get_freq(uint32_t channel, uint32_t *pfreq) { @@ -3353,7 +3353,7 @@ int hdd_wlan_get_freq(uint32_t channel, uint32_t *pfreq) for (i = 0; i < FREQ_CHAN_MAP_TABLE_SIZE; i++) { if (channel == freq_chan_map[i].chan) { *pfreq = freq_chan_map[i].freq; - return 1; + return 0; } } } @@ -3913,9 +3913,8 @@ static void hdd_get_peer_rssi_cb(struct sir_peer_info_resp *sta_rssi, { struct statsContext *get_rssi_context; struct sir_peer_info *rssi_info; - uint8_t peer_num, i; + uint8_t peer_num; hdd_adapter_t *padapter; - hdd_station_info_t *stainfo; if 
((sta_rssi == NULL) || (context == NULL)) { hdd_err("Bad param, sta_rssi [%pK] context [%pK]", @@ -3959,19 +3958,6 @@ static void hdd_get_peer_rssi_cb(struct sir_peer_info_resp *sta_rssi, peer_num * sizeof(*rssi_info)); padapter->peer_sta_info.sta_num = peer_num; - for (i = 0; i < peer_num; i++) { - stainfo = hdd_get_stainfo(padapter->cache_sta_info, - rssi_info[i].peer_macaddr); - if (stainfo) { - stainfo->rssi = rssi_info[i].rssi; - stainfo->tx_rate = rssi_info[i].tx_rate; - stainfo->rx_rate = rssi_info[i].rx_rate; - hdd_info("rssi:%d tx_rate:%u rx_rate:%u %pM", - stainfo->rssi, stainfo->tx_rate, - stainfo->rx_rate, stainfo->macAddrSTA.bytes); - } - } - /* notify the caller */ complete(&get_rssi_context->completion); @@ -4855,7 +4841,7 @@ static int __iw_get_freq(struct net_device *dev, struct iw_request_info *info, return -EIO; } status = hdd_wlan_get_freq(channel, &freq); - if (true == status) { + if (0 == status) { /* Set Exponent parameter as 6 (MHZ) * in struct iw_freq iwlist & iwconfig * command shows frequency into proper diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h b/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h index 3f015517152e7f87b9526594616e10df88510088..42f71ca4593e141508afa92b7e1ce841cdd949a7 100644 --- a/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h +++ b/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h @@ -1021,6 +1021,10 @@ typedef struct sAniSirGlobal { /* 11k Offload Support */ bool is_11k_offload_supported; + + uint32_t peer_rssi; + uint32_t peer_txrate; + uint32_t peer_rxrate; } tAniSirGlobal; typedef enum { diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_session.h b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_session.h index e685a895ffa0f85ff2acee8ff2c0bc4678d9c682..09a17e8d08ef53d2ac6eccb1cde97ef6140ae84b 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_session.h +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_session.h @@ -509,6 +509,7 @@ typedef struct sPESession /* Added to Support BT-AMP */ bool ch_switch_in_progress; /* previous auth frame's sequence number */ uint16_t prev_auth_seq_num; + int8_t def_max_tx_pwr; } tPESession, *tpPESession; /*------------------------------------------------------------------------- diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c index d3ca0c144a0def07a8fceb2db9a33e33cac88126..b20e27fc1e289d5bd7c59e7177f53094a18a49b8 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -1062,8 +1062,9 @@ bool lim_process_fils_auth_frame2(tpAniSirGlobal mac_ctx, tpPESession pe_session, tSirMacAuthFrameBody *rx_auth_frm_body) { - bool pmkid_found = false; int i; + uint32_t ret; + bool pmkid_found = false; tDot11fIERSN dot11f_ie_rsn = {0}; if (rx_auth_frm_body->authAlgoNumber != eSIR_FILS_SK_WITHOUT_PFS) @@ -1072,10 +1073,14 @@ bool lim_process_fils_auth_frame2(tpAniSirGlobal mac_ctx, if (!pe_session->fils_info) return false; - dot11f_unpack_ie_rsn(mac_ctx, - &rx_auth_frm_body->rsn_ie.info[0], - rx_auth_frm_body->rsn_ie.length, - &dot11f_ie_rsn, 0); + ret = dot11f_unpack_ie_rsn(mac_ctx, + &rx_auth_frm_body->rsn_ie.info[0], + rx_auth_frm_body->rsn_ie.length, + &dot11f_ie_rsn, 0); + if (!DOT11F_SUCCEEDED(ret)) { + pe_err("unpack failed, ret: %d", ret); + return false; + } for (i = 0; i < dot11f_ie_rsn.pmkid_count; i++) { if (qdf_mem_cmp(dot11f_ie_rsn.pmkid[i], diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_sme_req_messages.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_sme_req_messages.c index 77e541e97da867e728b2834c32a0f1217b03dd6b..500c184cefd15028fcaecfa090e2cb62beefb6a7 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_sme_req_messages.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_sme_req_messages.c @@ -1898,6 +1898,7 @@ __lim_process_sme_join_req(tpAniSirGlobal mac_ctx, uint32_t *msg_buf) session->maxTxPower = lim_get_max_tx_power(reg_max, local_power_constraint, mac_ctx->roam.configParam.nTxPowerCap); + session->def_max_tx_pwr = session->maxTxPower; pe_debug("Reg max %d local power con %d max tx pwr %d", reg_max, local_power_constraint, session->maxTxPower); diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c index dd6f4a30129e6504c8201546200bc01a0eea8ff5..3dcfbc7e9154e17ee2382100e067f4c9f6c33558 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * @@ -1607,6 +1607,7 @@ lim_send_assoc_req_mgmt_frame(tpAniSirGlobal mac_ctx, tLimMlmAssocReq *mlm_assoc_req, tpPESession pe_session) { + int ret; tDot11fAssocRequest *frm; uint16_t caps; uint8_t *frame; @@ -1917,9 +1918,14 @@ lim_send_assoc_req_mgmt_frame(tpAniSirGlobal mac_ctx, * before packing the frm structure. In this way, the IE ordering * which the latest 802.11 spec mandates is maintained. 
*/ - if (add_ie_len) - dot11f_unpack_assoc_request(mac_ctx, add_ie, + if (add_ie_len) { + ret = dot11f_unpack_assoc_request(mac_ctx, add_ie, add_ie_len, frm, true); + if (DOT11F_FAILED(ret)) { + pe_err("unpack failed, ret: 0x%x", ret); + goto end; + } + } status = dot11f_get_packed_assoc_request_size(mac_ctx, frm, &payload); if (DOT11F_FAILED(status)) { diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_sme_req_utils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_sme_req_utils.c index ef022d79d815fe2bd1275e59f2cd10947bfa441d..c1116be4c1adf4dcf0006a378d86733e1b5748e6 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_sme_req_utils.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_sme_req_utils.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * @@ -232,6 +232,7 @@ lim_set_rs_nie_wp_aiefrom_sme_start_bss_req_message(tpAniSirGlobal mac_ctx, tpSirRSNie rsn_ie, tpPESession session) { + uint32_t ret; uint8_t wpa_idx = 0; uint32_t privacy, val; @@ -284,15 +285,26 @@ lim_set_rs_nie_wp_aiefrom_sme_start_bss_req_message(tpAniSirGlobal mac_ctx, } else if ((rsn_ie->length == rsn_ie->rsnIEdata[1] + 2) && (rsn_ie->rsnIEdata[0] == SIR_MAC_RSN_EID)) { pe_debug("Only RSN IE is present"); - dot11f_unpack_ie_rsn(mac_ctx, &rsn_ie->rsnIEdata[2], - (uint8_t) rsn_ie->length, - &session->gStartBssRSNIe, false); + ret = dot11f_unpack_ie_rsn(mac_ctx, &rsn_ie->rsnIEdata[2], + rsn_ie->rsnIEdata[1], + &session->gStartBssRSNIe, false); + if (!DOT11F_SUCCEEDED(ret)) { + pe_err("unpack failed, ret: %d", ret); + return false; + } + return true; + } else if ((rsn_ie->length == rsn_ie->rsnIEdata[1] + 2) && (rsn_ie->rsnIEdata[0] == SIR_MAC_WPA_EID)) { pe_debug("Only WPA IE is present"); - dot11f_unpack_ie_wpa(mac_ctx, &rsn_ie->rsnIEdata[6], - (uint8_t) rsn_ie->length - 4, - &session->gStartBssWPAIe, false); + ret = dot11f_unpack_ie_wpa(mac_ctx, &rsn_ie->rsnIEdata[6], + (uint8_t) rsn_ie->length - 4, + &session->gStartBssWPAIe, false); + if (!DOT11F_SUCCEEDED(ret)) { + pe_err("unpack failed, ret: %d", ret); + return false; + } + return true; } /* Check validity of WPA IE */ if (wpa_idx + 6 >= SIR_MAC_MAX_IE_LENGTH) @@ -310,12 +322,21 @@ lim_set_rs_nie_wp_aiefrom_sme_start_bss_req_message(tpAniSirGlobal mac_ctx, return false; } else { /* Both RSN and WPA IEs are present */ - dot11f_unpack_ie_rsn(mac_ctx, &rsn_ie->rsnIEdata[2], - (uint8_t) rsn_ie->length, - &session->gStartBssRSNIe, false); - dot11f_unpack_ie_wpa(mac_ctx, &rsn_ie->rsnIEdata[wpa_idx + 6], - rsn_ie->rsnIEdata[wpa_idx + 1] - 4, - &session->gStartBssWPAIe, false); + ret = dot11f_unpack_ie_rsn(mac_ctx, &rsn_ie->rsnIEdata[2], + rsn_ie->rsnIEdata[1], + &session->gStartBssRSNIe, false); + if (!DOT11F_SUCCEEDED(ret)) { + pe_err("unpack failed, ret: %d", ret); + return false; + } + ret = dot11f_unpack_ie_wpa(mac_ctx, + &rsn_ie->rsnIEdata[wpa_idx + 6], + rsn_ie->rsnIEdata[wpa_idx + 1] - 4, + &session->gStartBssWPAIe, false); + if (!DOT11F_SUCCEEDED(ret)) { + pe_err("unpack failed, ret: %d", ret); + return false; + } } return true; } diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c index 691c92ff7f33fab3587eca53ef4c2803e5ece56a..f809a5b257beaed78ba2a550a1d618247455c096 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c +++ 
b/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * @@ -268,7 +268,7 @@ rrm_process_link_measurement_request(tpAniSirGlobal pMac, } pHdr = WMA_GET_RX_MAC_HEADER(pRxPacketInfo); - LinkReport.txPower = lim_get_max_tx_power(pSessionEntry->maxTxPower, + LinkReport.txPower = lim_get_max_tx_power(pSessionEntry->def_max_tx_pwr, pLinkReq->MaxTxPower.maxTxPower, pMac->roam.configParam. nTxPowerCap); diff --git a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c index ef678b5e6fd244fe0d770847f82ed1fbd886babc..bbaffe76223a55a00cc2f2ee495fd6b9b8208dfb 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c @@ -5822,17 +5822,25 @@ tSirRetStatus populate_dot11f_assoc_res_wsc_ie(tpAniSirGlobal pMac, tDot11fIEWscAssocRes *pDot11f, tpSirAssocReq pRcvdAssocReq) { - tDot11fIEWscAssocReq parsedWscAssocReq = { 0, }; + uint32_t ret; uint8_t *wscIe; + tDot11fIEWscAssocReq parsedWscAssocReq = { 0, }; - wscIe = - limGetWscIEPtr(pMac, pRcvdAssocReq->addIE.addIEdata, + wscIe = limGetWscIEPtr(pMac, pRcvdAssocReq->addIE.addIEdata, pRcvdAssocReq->addIE.length); if (wscIe != NULL) { /* retreive WSC IE from given AssocReq */ - dot11f_unpack_ie_wsc_assoc_req(pMac, wscIe + 2 + 4, /* EID, length, OUI */ - wscIe[1] - 4, /* length without OUI */ - &parsedWscAssocReq, false); + ret = dot11f_unpack_ie_wsc_assoc_req(pMac, + /* EID, length, OUI */ + wscIe + 2 + 4, + /* length without OUI */ + wscIe[1] - 4, + &parsedWscAssocReq, false); + if (!DOT11F_SUCCEEDED(ret)) { + pe_err("unpack failed, ret: %d", ret); + return eSIR_HAL_INPUT_INVALID; + } + pDot11f->present = 1; /* version has to be 0x10 */ pDot11f->Version.present = 1; diff --git a/drivers/staging/qcacld-3.0/core/sap/inc/sap_api.h b/drivers/staging/qcacld-3.0/core/sap/inc/sap_api.h index 5880c9c8425a704c92f4efaa3d3ca11fd2c17836..30ae31d94a221293d0277033908954c23ee50976 100644 --- a/drivers/staging/qcacld-3.0/core/sap/inc/sap_api.h +++ b/drivers/staging/qcacld-3.0/core/sap/inc/sap_api.h @@ -145,7 +145,6 @@ typedef enum { */ eSAP_STA_DISASSOC_EVENT, - eSAP_STA_LOSTLINK_DETECTED, /* Event sent when user called wlansap_set_key_sta */ eSAP_STA_SET_KEY_EVENT, /* Event sent whenever there is MIC failure detected */ @@ -303,6 +302,9 @@ typedef struct sap_StationDisassocCompleteEvent_s { uint32_t statusCode; uint32_t reason_code; eSapDisassocReason reason; + int rssi; + int tx_rate; + int rx_rate; } tSap_StationDisassocCompleteEvent; typedef struct sap_StationSetKeyCompleteEvent_s { diff --git a/drivers/staging/qcacld-3.0/core/sap/src/sap_api_link_cntl.c b/drivers/staging/qcacld-3.0/core/sap/src/sap_api_link_cntl.c index ceda978f7434e013e8f63da5133d439d7f6dd8d8..b940349e3ccd55392d1f7fc41a8fc7f2b2d35e66 100644 --- a/drivers/staging/qcacld-3.0/core/sap/src/sap_api_link_cntl.c +++ b/drivers/staging/qcacld-3.0/core/sap/src/sap_api_link_cntl.c @@ -1075,11 +1075,7 @@ wlansap_roam_callback(void *ctx, tCsrRoamInfo *csr_roam_info, uint32_t roamId, eSAP_UPDATE_SCAN_RESULT, (void *) eSAP_STATUS_SUCCESS); break; - case eCSR_ROAM_LOSTLINK_DETECTED: - sap_signal_hdd_event(sap_ctx, csr_roam_info, - eSAP_STA_LOSTLINK_DETECTED, - (void 
*)eSAP_STATUS_SUCCESS); - break; + default: break; } diff --git a/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c b/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c index f70a178deeaecb288b1468e7f052639ad622af27..b0a8eab4f8a8fa883cb46a7d313136730ea12736 100644 --- a/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c +++ b/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c @@ -2986,22 +2986,6 @@ QDF_STATUS sap_signal_hdd_event(ptSapContext sap_ctx, break; - case eSAP_STA_LOSTLINK_DETECTED: - if (!csr_roaminfo) { - QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR, - FL("Invalid CSR Roam Info")); - return QDF_STATUS_E_INVAL; - } - sap_ap_event.sapHddEventCode = eSAP_STA_LOSTLINK_DETECTED; - disassoc_comp = - &sap_ap_event.sapevt.sapStationDisassocCompleteEvent; - - qdf_copy_macaddr(&disassoc_comp->staMac, - &csr_roaminfo->peerMac); - disassoc_comp->reason_code = csr_roaminfo->reasonCode; - - break; - case eSAP_STA_DISASSOC_EVENT: if (!csr_roaminfo) { @@ -3023,6 +3007,10 @@ QDF_STATUS sap_signal_hdd_event(ptSapContext sap_ctx, disassoc_comp->statusCode = csr_roaminfo->statusCode; disassoc_comp->status = (eSapStatus) context; + disassoc_comp->rssi = csr_roaminfo->rssi; + disassoc_comp->rx_rate = csr_roaminfo->rx_rate; + disassoc_comp->tx_rate = csr_roaminfo->tx_rate; + disassoc_comp->reason_code = csr_roaminfo->disassoc_reason; break; case eSAP_STA_SET_KEY_EVENT: diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h b/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h index 0ff0d3bdc669c1841464db22b7488a39fdd98caf..1136fd9c655cab2441e209627cf8a449d9f7c3b3 100644 --- a/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h +++ b/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h @@ -356,9 +356,8 @@ typedef struct tagCsrEseCckmInfo { #endif } tCsrEseCckmInfo; -#define CSR_DOT11F_IE_RSN_MAX_LEN (114) typedef struct tagCsrEseCckmIe { - uint8_t cckmIe[CSR_DOT11F_IE_RSN_MAX_LEN]; + uint8_t cckmIe[DOT11F_IE_RSN_MAX_LEN]; uint8_t cckmIeLen; } tCsrEseCckmIe; #endif /* FEATURE_WLAN_ESE */ @@ -1439,6 +1438,7 @@ typedef struct tagCsrRoamInfo { tSirResultCodes statusCode; /* this'd be our own defined or sent from otherBSS(per 802.11spec) */ uint32_t reasonCode; + uint8_t disassoc_reason; uint8_t staId; /* Peer stationId when connected */ /* * The DPU signatures will be sent eventually to TL to help it @@ -1547,6 +1547,9 @@ typedef struct tagCsrRoamInfo { #ifdef WLAN_FEATURE_FILS_SK struct fils_join_rsp_params *fils_join_rsp; #endif + int rssi; + int tx_rate; + int rx_rate; } tCsrRoamInfo; typedef struct tagCsrFreqScanInfo { diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/sme_api.h b/drivers/staging/qcacld-3.0/core/sme/inc/sme_api.h index 735366326f952ce73b0f9818dc336c4d1af9d036..2e665c8b25ec917063acab2f5c8e1422361860b7 100644 --- a/drivers/staging/qcacld-3.0/core/sme/inc/sme_api.h +++ b/drivers/staging/qcacld-3.0/core/sme/inc/sme_api.h @@ -1758,6 +1758,17 @@ QDF_STATUS sme_ipa_uc_stat_request(tHalHandle hal, QDF_STATUS sme_set_smps_cfg(uint32_t vdev_id, uint32_t param_id, uint32_t param_val); + +/** + * sme_get_peer_stats() - sme api to post peer info request + * @mac: mac handle + * @req: peer info request struct send to wma + * + * Return: QDF_STATUS_SUCCESS or non-zero on failure + */ +QDF_STATUS sme_get_peer_stats(tpAniSirGlobal mac, + struct sir_peer_info_req req); + /** * sme_get_peer_info() - sme api to get peer info * @hal: hal handle for getting global mac struct @@ -1929,4 +1940,31 @@ void sme_display_disconnect_stats(tHalHandle hal, uint8_t session_id); */ QDF_STATUS 
sme_set_vc_mode_config(uint32_t vc_bitmap); +/** + * sme_is_sta_key_exchange_in_progress() - checks whether the STA/P2P client + * session has key exchange in progress + * + * @hal: global hal handle + * @session_id: session id + * + * Return: true - if key exchange in progress + * false - if not in progress + */ +bool sme_is_sta_key_exchange_in_progress(tHalHandle hal, uint8_t session_id); + +/** + * sme_fast_reassoc() - invokes FAST REASSOC command + * @hal: handle returned by mac_open + * @profile: current connected profile + * @bssid: bssid to look for in scan cache + * @channel: channel on which reassoc should be send + * @vdev_id: vdev id + * @connected_bssid: bssid of currently connected profile + * + * Return: QDF_STATUS + */ +QDF_STATUS sme_fast_reassoc(tHalHandle hal, tCsrRoamProfile *profile, + const tSirMacAddr bssid, int channel, + uint8_t vdev_id, const tSirMacAddr connected_bssid); + #endif /* #if !defined( __SME_API_H ) */ diff --git a/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c b/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c index 37eb6b6cc65d7cee676c60b5fa32c97804ca2b16..6d7489b32f9bc232d28ff5ca07d081a5a3f60630 100644 --- a/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c +++ b/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c @@ -2460,6 +2460,9 @@ QDF_STATUS sme_process_msg(tHalHandle hHal, cds_msg_t *pMsg) { QDF_STATUS status = QDF_STATUS_E_FAILURE; tpAniSirGlobal pMac = PMAC_STRUCT(hHal); + struct sir_peer_info *peer_stats; + struct sir_peer_info_resp *peer_info_rsp; + #ifdef WLAN_FEATURE_ROAM_OFFLOAD tSmeCmd *sme_cmd = NULL; #endif @@ -2782,6 +2785,17 @@ QDF_STATUS sme_process_msg(tHalHandle hHal, cds_msg_t *pMsg) if (pMac->sme.pget_peer_info_ind_cb) pMac->sme.pget_peer_info_ind_cb(pMsg->bodyptr, pMac->sme.pget_peer_info_cb_context); + if (pMsg->bodyptr) { + peer_info_rsp = (struct sir_peer_info_resp *) + (pMsg->bodyptr); + peer_stats = (struct sir_peer_info *) + (peer_info_rsp->info); + if (peer_stats) { + pMac->peer_rssi = peer_stats[0].rssi; + pMac->peer_txrate = peer_stats[0].tx_rate; + pMac->peer_rxrate = peer_stats[0].rx_rate; + } + } qdf_mem_free(pMsg->bodyptr); break; case eWNI_SME_GET_PEER_INFO_EXT_IND: @@ -10895,6 +10909,38 @@ QDF_STATUS sme_get_link_speed(tHalHandle hHal, tSirLinkSpeedInfo *lsReq, return status; } +QDF_STATUS sme_get_peer_stats(tpAniSirGlobal mac, struct sir_peer_info_req req) +{ + QDF_STATUS qdf_status; + cds_msg_t message; + + qdf_status = sme_acquire_global_lock(&mac->sme); + if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { + sme_debug("Failed to get Lock"); + return qdf_status; + } + /* serialize the req through MC thread */ + message.bodyptr = qdf_mem_malloc(sizeof(req)); + if (NULL == message.bodyptr) { + QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR, + "%s: Memory allocation failed.", __func__); + sme_release_global_lock(&mac->sme); + return QDF_STATUS_E_NOMEM; + } + qdf_mem_copy(message.bodyptr, &req, sizeof(req)); + message.type = WMA_GET_PEER_INFO; + message.reserved = 0; + qdf_status = cds_mq_post_message(QDF_MODULE_ID_WMA, &message); + if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { + QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR, + "%s: Post get peer info msg fail", __func__); + qdf_mem_free(message.bodyptr); + qdf_status = QDF_STATUS_E_FAILURE; + } + sme_release_global_lock(&mac->sme); + return qdf_status; +} + QDF_STATUS sme_get_peer_info(tHalHandle hal, struct sir_peer_info_req req, void *context, void (*callbackfn)(struct sir_peer_info_resp *param, @@ -18758,3 +18804,82 @@ QDF_STATUS 
sme_destroy_config(tHalHandle hal) return status; } + +bool sme_is_sta_key_exchange_in_progress(tHalHandle hal, uint8_t session_id) +{ + tpAniSirGlobal mac_ctx = PMAC_STRUCT(hal); + + if (!CSR_IS_SESSION_VALID(mac_ctx, session_id)) { + sme_err("Invalid session id: %d", session_id); + return false; + } + + return CSR_IS_WAIT_FOR_KEY(mac_ctx, session_id); +} + +QDF_STATUS sme_fast_reassoc(tHalHandle hal, tCsrRoamProfile *profile, + const tSirMacAddr bssid, int channel, + uint8_t vdev_id, const tSirMacAddr connected_bssid) +{ + QDF_STATUS status; + struct wma_roam_invoke_cmd *fastreassoc; + cds_msg_t msg = {0}; + tpAniSirGlobal mac_ctx = PMAC_STRUCT(hal); + + fastreassoc = qdf_mem_malloc(sizeof(*fastreassoc)); + if (NULL == fastreassoc) { + sme_err("qdf_mem_malloc failed for fastreassoc"); + return QDF_STATUS_E_NOMEM; + } + /* if both are same then set the flag */ + if (!qdf_mem_cmp(connected_bssid, bssid, ETH_ALEN)) { + fastreassoc->is_same_bssid = true; + sme_debug("bssid same, bssid[%pM]", bssid); + } + fastreassoc->vdev_id = vdev_id; + fastreassoc->bssid[0] = bssid[0]; + fastreassoc->bssid[1] = bssid[1]; + fastreassoc->bssid[2] = bssid[2]; + fastreassoc->bssid[3] = bssid[3]; + fastreassoc->bssid[4] = bssid[4]; + fastreassoc->bssid[5] = bssid[5]; + + status = sme_get_beacon_frm(hal, profile, bssid, + &fastreassoc->frame_buf, + &fastreassoc->frame_len, + &channel); + + if (!channel) { + sme_err("channel retrieval from BSS desc fails!"); + qdf_mem_free(fastreassoc); + return QDF_STATUS_E_FAULT; + } + + fastreassoc->channel = channel; + if (QDF_STATUS_SUCCESS != status) { + sme_warn("sme_get_beacon_frm failed"); + fastreassoc->frame_buf = NULL; + fastreassoc->frame_len = 0; + } + + if (csr_is_auth_type_ese(mac_ctx->roam.roamSession[vdev_id]. + connectedProfile.AuthType)) { + sme_err("Beacon is not required for ESE"); + if (fastreassoc->frame_len) { + qdf_mem_free(fastreassoc->frame_buf); + fastreassoc->frame_buf = NULL; + fastreassoc->frame_len = 0; + } + } + + msg.type = SIR_HAL_ROAM_INVOKE; + msg.reserved = 0; + msg.bodyptr = fastreassoc; + status = cds_mq_post_message(QDF_MODULE_ID_WMA, &msg); + if (QDF_STATUS_SUCCESS != status) { + sme_err("Not able to post ROAM_INVOKE_CMD message to WMA"); + qdf_mem_free(fastreassoc); + } + + return status; +} diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c index fd67bd7b3dbf9d8f07f178c864018649a12edf66..84ecd885102c6fb4088b30a7c03a808e6c1f4ec9 100644 --- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c +++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c @@ -3788,11 +3788,6 @@ QDF_STATUS csr_roam_call_callback(tpAniSirGlobal pMac, uint32_t sessionId, pSession->connectedProfile.operationChannel = pRoamInfo->channelChangeRespEvent->newChannelNumber; - if (eCSR_ROAM_RESULT_LOSTLINK == u2 || - eCSR_ROAM_LOSTLINK_DETECTED == u1) { - sme_debug("eCSR_ROAM_RESULT_LOSTLINK "); - } - if (NULL != pSession->callback) { if (pRoamInfo) { pRoamInfo->sessionId = (uint8_t) sessionId; @@ -6769,6 +6764,10 @@ static void csr_roam_process_results_default(tpAniSirGlobal mac_ctx, break; case eCsrForcedDisassocSta: case eCsrForcedDeauthSta: + roam_info.rssi = mac_ctx->peer_rssi; + roam_info.tx_rate = mac_ctx->peer_txrate; + roam_info.rx_rate = mac_ctx->peer_rxrate; + csr_roam_state_change(mac_ctx, eCSR_ROAMING_STATE_JOINED, session_id); session = CSR_GET_SESSION(mac_ctx, session_id); @@ -6780,6 +6779,9 @@ static void csr_roam_process_results_default(tpAniSirGlobal 
mac_ctx, cmd->u.roamCmd.peerMac, sizeof(tSirMacAddr)); roam_info.reasonCode = eCSR_ROAM_RESULT_FORCED; + /* Update the MAC reason code */ + roam_info.disassoc_reason = cmd->u.roamCmd.reason; + roam_info.statusCode = eSIR_SME_SUCCESS; status = csr_roam_call_callback(mac_ctx, session_id, &roam_info, cmd->u.roamCmd.roamId, @@ -11351,6 +11353,11 @@ csr_roam_send_disconnect_done_indication(tpAniSirGlobal mac_ctx, tSirSmeRsp roam_info.statusCode = eSIR_SME_STA_NOT_ASSOCIATED; qdf_mem_copy(roam_info.peerMac.bytes, discon_ind->peer_mac, ETH_ALEN); + roam_info.rssi = mac_ctx->peer_rssi; + roam_info.tx_rate = mac_ctx->peer_txrate; + roam_info.rx_rate = mac_ctx->peer_rxrate; + roam_info.disassoc_reason = discon_ind->reason_code; + csr_roam_call_callback(mac_ctx, discon_ind->session_id, &roam_info, 0, eCSR_ROAM_LOSTLINK, eCSR_ROAM_RESULT_DISASSOC_IND); @@ -12688,12 +12695,16 @@ QDF_STATUS csr_roam_lost_link(tpAniSirGlobal pMac, uint32_t sessionId, sme_debug("RC: %d", roamInfo.reasonCode); - if (type == eWNI_SME_DISASSOC_IND || type == eWNI_SME_DEAUTH_IND) - csr_roam_call_callback(pMac, sessionId, &roamInfo, 0, - eCSR_ROAM_LOSTLINK_DETECTED, result); - else - csr_roam_call_callback(pMac, sessionId, NULL, 0, - eCSR_ROAM_LOSTLINK_DETECTED, result); + if (type == eWNI_SME_DISASSOC_IND || type == eWNI_SME_DEAUTH_IND) { + struct sir_peer_info_req req; + + req.sessionid = sessionId; + req.peer_macaddr = roamInfo.peerMac; + sme_get_peer_stats(pMac, req); + } + + csr_roam_call_callback(pMac, sessionId, NULL, 0, + eCSR_ROAM_LOSTLINK_DETECTED, result); if (eWNI_SME_DISASSOC_IND == type) status = csr_send_mb_disassoc_cnf_msg(pMac, pDisassocIndMsg); @@ -19283,6 +19294,7 @@ static void csr_update_score_params(tpAniSirGlobal mac_ctx, * @mac_ctx: MAC context * @session: Pointer to the CSR Roam Session * @req_buffer: Pointer to the RSO Request buffer + * @enabled: 11k offload enabled/disabled. 
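The csr_api_roam.c and sme_api.c hunks above replace the old eSAP_STA_LOSTLINK_DETECTED notification with a peer-stats request: on a DISASSOC/DEAUTH indication a sir_peer_info_req is posted through sme_get_peer_stats(), the WMA response is cached in the global MAC context, and the cached values are later copied into the roam info handed to HDD/SAP. A condensed, non-authoritative sketch of that flow (locking and error handling omitted):

/* 1. On lost link, ask WMA for the departing peer's stats. */
if (type == eWNI_SME_DISASSOC_IND || type == eWNI_SME_DEAUTH_IND) {
	struct sir_peer_info_req req;

	req.sessionid = sessionId;
	req.peer_macaddr = roamInfo.peerMac;
	sme_get_peer_stats(pMac, req);		/* posts WMA_GET_PEER_INFO */
}

/* 2. When the peer-info response arrives, cache the first peer's stats. */
pMac->peer_rssi   = peer_stats[0].rssi;
pMac->peer_txrate = peer_stats[0].tx_rate;
pMac->peer_rxrate = peer_stats[0].rx_rate;

/* 3. Later, copy the cached values into the roam info for the upper layers. */
roam_info.rssi    = mac_ctx->peer_rssi;
roam_info.tx_rate = mac_ctx->peer_txrate;
roam_info.rx_rate = mac_ctx->peer_rxrate;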
* * API to update 11k offload params to Roam Scan Offload request buffer * @@ -19290,7 +19302,8 @@ static void csr_update_score_params(tpAniSirGlobal mac_ctx, */ static void csr_update_11k_offload_params(tpAniSirGlobal mac_ctx, tCsrRoamSession *session, - tSirRoamOffloadScanReq *req_buffer) + tSirRoamOffloadScanReq *req_buffer, + bool enabled) { struct wmi_11k_offload_params *params = &req_buffer->offload_11k_params; tCsrConfig *csr_config = &mac_ctx->roam.configParam; @@ -19298,7 +19311,15 @@ static void csr_update_11k_offload_params(tpAniSirGlobal mac_ctx, &csr_config->neighbor_report_offload; params->vdev_id = session->sessionId; - params->offload_11k_bitmask = csr_config->offload_11k_enable_bitmask; + + if (enabled) { + params->offload_11k_bitmask = + csr_config->offload_11k_enable_bitmask; + } else { + params->offload_11k_bitmask = 0; + sme_debug("11k offload disabled in RSO"); + return; + } /* * If none of the parameters are enabled, then set the @@ -19310,6 +19331,7 @@ static void csr_update_11k_offload_params(tpAniSirGlobal mac_ctx, sme_err("No valid neighbor report offload params %x", neighbor_report_offload->params_bitmask); params->offload_11k_bitmask = 0; + return; } /* @@ -19636,11 +19658,19 @@ csr_roam_offload_scan(tpAniSirGlobal mac_ctx, uint8_t session_id, csr_update_driver_assoc_ies(mac_ctx, session, req_buf); csr_update_fils_params_rso(mac_ctx, session, req_buf); csr_update_score_params(mac_ctx, req_buf); - if (reason == REASON_CTX_INIT) - csr_update_11k_offload_params(mac_ctx, session, - req_buf); } + /* + * 11k offload is enabled during RSO Start after connect indication and + * 11k offload is disabled during RSO Stop after disconnect indication + */ + if (command == ROAM_SCAN_OFFLOAD_START && + reason == REASON_CTX_INIT) + csr_update_11k_offload_params(mac_ctx, session, req_buf, TRUE); + else if (command == ROAM_SCAN_OFFLOAD_STOP && + reason == REASON_DISCONNECTED) + csr_update_11k_offload_params(mac_ctx, session, req_buf, FALSE); + QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_DEBUG, "Assoc IE buffer:"); QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_DEBUG, diff --git a/drivers/staging/qcacld-3.0/core/sme/src/qos/sme_qos.c b/drivers/staging/qcacld-3.0/core/sme/src/qos/sme_qos.c index 4a833e62065fd78d3ea60396cab4244c78c3c22b..6a6cd7c6e4841d88df0029ce27cde8b3e3883e8a 100644 --- a/drivers/staging/qcacld-3.0/core/sme/src/qos/sme_qos.c +++ b/drivers/staging/qcacld-3.0/core/sme/src/qos/sme_qos.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 
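The csr_roam_offload_scan() change above ties 11k offload to the RSO lifecycle: the offload bitmask is programmed on RSO Start after connect and cleared on RSO Stop after disconnect, and csr_update_11k_offload_params() gains an enabled flag to express the two cases. A compact sketch of the gating, using the names from the diff:

/* Enable on RSO Start after connect, disable on RSO Stop after disconnect. */
if (command == ROAM_SCAN_OFFLOAD_START && reason == REASON_CTX_INIT)
	csr_update_11k_offload_params(mac_ctx, session, req_buf, TRUE);
else if (command == ROAM_SCAN_OFFLOAD_STOP && reason == REASON_DISCONNECTED)
	csr_update_11k_offload_params(mac_ctx, session, req_buf, FALSE);

/* Inside the helper, "disable" simply means sending a zero bitmask. */
if (enabled) {
	params->offload_11k_bitmask = csr_config->offload_11k_enable_bitmask;
} else {
	params->offload_11k_bitmask = 0;
	return;		/* nothing else to fill in for the disable case */
}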
* @@ -4111,7 +4111,6 @@ static QDF_STATUS sme_qos_del_ts_req(tpAniSirGlobal pMac, struct sme_qos_acinfo *pACInfo; tSirDeltsReq *pMsg; sme_QosWmmTspecInfo *pTspecInfo; - QDF_STATUS status = QDF_STATUS_E_FAILURE; #ifdef FEATURE_WLAN_DIAG_SUPPORT WLAN_HOST_DIAG_EVENT_DEF(qos, host_event_wlan_qos_payload_type); @@ -4181,23 +4180,23 @@ static QDF_STATUS sme_qos_del_ts_req(tpAniSirGlobal pMac, pTspecInfo->ts_info.up, pTspecInfo->ts_info.tid); qdf_mem_zero(&pACInfo->curr_QoSInfo[tspec_mask - 1], sizeof(sme_QosWmmTspecInfo)); - if (QDF_IS_STATUS_SUCCESS(cds_send_mb_message_to_mac(pMsg))) { - status = QDF_STATUS_SUCCESS; - QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_DEBUG, - "%s: %d: sme_qos_del_ts_req:Test: sent down a DELTS req to PE", - __func__, __LINE__); - /* event: EVENT_WLAN_QOS */ -#ifdef FEATURE_WLAN_DIAG_SUPPORT - qos.eventId = SME_QOS_DIAG_DELTS; - qos.reasonCode = SME_QOS_DIAG_USER_REQUESTED; - WLAN_HOST_DIAG_EVENT_REPORT(&qos, EVENT_WLAN_QOS); -#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + + if (!QDF_IS_STATUS_SUCCESS(cds_send_mb_message_to_mac(pMsg))) { + sme_err("DELTS req to PE failed"); + return QDF_STATUS_E_FAILURE; } - sme_set_tspec_uapsd_mask_per_session(pMac, - &pMsg->req.tspec.tsinfo, - sessionId); - return status; + sme_debug("sent down a DELTS req to PE"); +#ifdef FEATURE_WLAN_DIAG_SUPPORT + qos.eventId = SME_QOS_DIAG_DELTS; + qos.reasonCode = SME_QOS_DIAG_USER_REQUESTED; + WLAN_HOST_DIAG_EVENT_REPORT(&qos, EVENT_WLAN_QOS); +#endif + + sme_set_tspec_uapsd_mask_per_session(pMac, &pMsg->req.tspec.tsinfo, + sessionId); + + return QDF_STATUS_SUCCESS; } /* @@ -4939,8 +4938,12 @@ static QDF_STATUS sme_qos_process_handoff_assoc_req_ev(tpAniSirGlobal pMac, if (csr_roam_is11r_assoc(pMac, sessionId)) pSession->ftHandoffInProgress = true; #endif - /* If FT handoff is in progress, legacy handoff need not be enabled */ - if (!pSession->ftHandoffInProgress) + /* If FT handoff/ESE in progress, legacy handoff need not be enabled */ + if (!pSession->ftHandoffInProgress +#ifdef FEATURE_WLAN_ESE + && !csr_roam_is_ese_assoc(pMac, sessionId) +#endif + ) pSession->handoffRequested = true; /* this session no longer needs UAPSD */ diff --git a/drivers/staging/qcacld-3.0/core/utils/fwlog/dbglog_host.c b/drivers/staging/qcacld-3.0/core/utils/fwlog/dbglog_host.c index cf3d84a527edf43332e9be1056990fc3a324ce55..e75902a139703d82e574057406874d980bc17a1d 100644 --- a/drivers/staging/qcacld-3.0/core/utils/fwlog/dbglog_host.c +++ b/drivers/staging/qcacld-3.0/core/utils/fwlog/dbglog_host.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 
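Several earlier hunks in this patch (lim_process_fils.c, lim_send_management_frames.c, lim_sme_req_utils.c, parser_api.c) stop ignoring the return value of the dot11f_unpack_* helpers and reject the frame when unpacking fails. The recurring pattern, sketched from those hunks:

uint32_t ret;

ret = dot11f_unpack_ie_rsn(mac_ctx, &rsn_ie->rsnIEdata[2],
			   rsn_ie->rsnIEdata[1],	/* IE body length, not the full blob */
			   &session->gStartBssRSNIe, false);
if (!DOT11F_SUCCEEDED(ret)) {
	pe_err("unpack failed, ret: %d", ret);
	return false;	/* bail out instead of consuming a partial parse */
}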
* @@ -1480,7 +1480,7 @@ static int dbglog_print_raw_data(A_UINT32 *buffer, A_UINT32 length) char parseArgsString[DBGLOG_PARSE_ARGS_STRING_LENGTH]; char *dbgidString; - while (count < length) { + while ((count + 1) < length) { debugid = DBGLOG_GET_DBGID(buffer[count + 1]); moduleid = DBGLOG_GET_MODULEID(buffer[count + 1]); @@ -1493,6 +1493,9 @@ static int dbglog_print_raw_data(A_UINT32 *buffer, A_UINT32 length) OS_MEMZERO(parseArgsString, sizeof(parseArgsString)); totalWriteLen = 0; + if (!numargs || (count + numargs + 2 > length)) + goto skip_args_processing; + for (curArgs = 0; curArgs < numargs; curArgs++) { /* * Using sprintf_s instead of sprintf, @@ -1505,7 +1508,7 @@ static int dbglog_print_raw_data(A_UINT32 *buffer, A_UINT32 length) buffer[count + 2 + curArgs]); totalWriteLen += writeLen; } - +skip_args_processing: if (debugid < MAX_DBG_MSGS) { dbgidString = DBG_MSG_ARR[moduleid][debugid]; if (dbgidString != NULL) { @@ -1997,6 +2000,11 @@ int dbglog_parse_debug_logs(ol_scn_t scn, uint8_t *data, uint32_t datalen) len = param_buf->num_bufp; } + if (len < sizeof(dropped)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid length\n")); + return A_ERROR; + } + dropped = *((A_UINT32 *) datap); if (dropped > 0) { AR_DEBUG_PRINTF(ATH_DEBUG_TRC, diff --git a/drivers/staging/qcacld-3.0/core/utils/pktlog/include/pktlog_ac.h b/drivers/staging/qcacld-3.0/core/utils/pktlog/include/pktlog_ac.h index 7ffc9582dbd272daf98f611a0c94e68d2bfa57dd..e02af09e463df0db86067682f263c14ac6d90c36 100644 --- a/drivers/staging/qcacld-3.0/core/utils/pktlog/include/pktlog_ac.h +++ b/drivers/staging/qcacld-3.0/core/utils/pktlog/include/pktlog_ac.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * @@ -142,7 +142,7 @@ int pktlog_disable(struct hif_opaque_softc *scn); int pktlogmod_init(void *context); void pktlogmod_exit(void *context); int pktlog_htc_attach(void); -void pktlog_process_fw_msg(uint32_t *msg_word); +void pktlog_process_fw_msg(uint32_t *msg_word, uint32_t msg_len); #define ol_pktlog_attach(_scn) \ do { \ @@ -192,7 +192,7 @@ static inline int pktlog_htc_attach(void) { return 0; } -static inline void pktlog_process_fw_msg(uint32_t *msg_word) +static inline void pktlog_process_fw_msg(uint32_t *msg_word, uint32_t msg_len) { } #endif /* REMOVE_PKT_LOG */ #endif /* _PKTLOG_AC_H_ */ diff --git a/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_ac.c b/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_ac.c index f4fe6e10a849748867ec9d44d7e8f9865efb29cc..7f9ab37be9bab196983b62d35c95c58d7f14e9fa 100644 --- a/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_ac.c +++ b/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_ac.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 
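The dbglog_host.c hunks above harden the firmware debug-log parser: each record needs at least two header words, and the advertised argument count must fit inside the received buffer before any argument word is read. A simplified sketch of the check; dbglog_get_numargs(), consume_arg() and the record-advance step are placeholders, not the driver's exact code:

/* buffer holds 'length' 32-bit words received from firmware. */
while ((count + 1) < length) {		/* record needs both header words */
	debugid = DBGLOG_GET_DBGID(buffer[count + 1]);
	numargs = dbglog_get_numargs(buffer[count + 1]);	/* placeholder decoder */

	if (!numargs || (count + numargs + 2 > length))
		goto skip_args_processing;	/* args would run past the buffer */

	for (curArgs = 0; curArgs < numargs; curArgs++)
		consume_arg(buffer[count + 2 + curArgs]);	/* index < length by the check above */

skip_args_processing:
	count += numargs + 2;	/* advance to the next record (simplified) */
}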
* @@ -709,18 +709,21 @@ int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff) * * Return: None */ -void pktlog_process_fw_msg(uint32_t *buff) +void pktlog_process_fw_msg(uint32_t *buff, uint32_t len) { uint32_t *pl_hdr; uint32_t log_type; struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX); + struct ol_fw_data pl_fw_data; if (!txrx_pdev) { qdf_print("%s: txrx_pdev is NULL", __func__); return; } - pl_hdr = buff; + pl_fw_data.data = pl_hdr; + pl_fw_data.len = len; + log_type = (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; @@ -730,19 +733,19 @@ void pktlog_process_fw_msg(uint32_t *buff) || (log_type == PKTLOG_TYPE_TX_FRM_HDR) || (log_type == PKTLOG_TYPE_TX_VIRT_ADDR)) wdi_event_handler(WDI_EVENT_TX_STATUS, - txrx_pdev, pl_hdr); + txrx_pdev, &pl_fw_data); else if (log_type == PKTLOG_TYPE_RC_FIND) wdi_event_handler(WDI_EVENT_RATE_FIND, - txrx_pdev, pl_hdr); + txrx_pdev, &pl_fw_data); else if (log_type == PKTLOG_TYPE_RC_UPDATE) wdi_event_handler(WDI_EVENT_RATE_UPDATE, - txrx_pdev, pl_hdr); + txrx_pdev, &pl_fw_data); else if (log_type == PKTLOG_TYPE_RX_STAT) wdi_event_handler(WDI_EVENT_RX_DESC, - txrx_pdev, pl_hdr); + txrx_pdev, &pl_fw_data); else if (log_type == PKTLOG_TYPE_SW_EVENT) wdi_event_handler(WDI_EVENT_SW_EVENT, - txrx_pdev, pl_hdr); + txrx_pdev, &pl_fw_data); } @@ -759,6 +762,7 @@ static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt) struct ol_pktlog_dev_t *pdev = (struct ol_pktlog_dev_t *)context; qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext; uint32_t *msg_word; + uint32_t msg_len; /* check for successful message reception */ if (pkt->Status != QDF_STATUS_SUCCESS) { @@ -772,7 +776,8 @@ static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt) qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0); msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg); - pktlog_process_fw_msg(msg_word); + msg_len = qdf_nbuf_len(pktlog_t2h_msg); + pktlog_process_fw_msg(msg_word, msg_len); qdf_nbuf_free(pktlog_t2h_msg); } diff --git a/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_internal.c b/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_internal.c index 565729daa43b911f64b40ad52a7112b1c2bb79ad..7f89fb98fb50ef54541ebf0c106ddebe1bfac5ef 100644 --- a/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_internal.c +++ b/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_internal.c @@ -363,6 +363,8 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data) struct ath_pktlog_hdr pl_hdr; struct ath_pktlog_info *pl_info; uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; if (!txrx_pdev) { printk("Invalid pdev in %s\n", __func__); @@ -372,7 +374,19 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data) qdf_assert(data); pl_dev = txrx_pdev->pl_dev; - pl_tgt_hdr = (uint32_t *) data; + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + pl_tgt_hdr = (uint32_t *) fw_data->data; + if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } /* * Makes the short words (16 bits) portable b/w 
little endian * and big endian @@ -397,6 +411,11 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data) *(pl_tgt_hdr + ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET); pl_info = pl_dev->pl_info; + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + if (pl_hdr.log_type == PKTLOG_TYPE_TX_CTRL) { size_t log_size = sizeof(frm_hdr) + pl_hdr.size; void *txdesc_hdr_ctl = (void *) @@ -406,7 +425,7 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data) qdf_mem_copy(txdesc_hdr_ctl, &frm_hdr, sizeof(frm_hdr)); qdf_mem_copy((char *)txdesc_hdr_ctl + sizeof(frm_hdr), - ((void *)data + + ((void *)fw_data->data + sizeof(struct ath_pktlog_hdr)), pl_hdr.size); pl_hdr.size = log_size; @@ -423,7 +442,7 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data) log_size, &pl_hdr); qdf_assert(txstat_log.ds_status); qdf_mem_copy(txstat_log.ds_status, - ((void *)data + sizeof(struct ath_pktlog_hdr)), + ((void *)fw_data->data + sizeof(struct ath_pktlog_hdr)), pl_hdr.size); cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, txstat_log.ds_status); @@ -442,6 +461,8 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data) struct ath_pktlog_hdr pl_hdr; struct ath_pktlog_info *pl_info; uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; if (!txrx_pdev) { qdf_print("Invalid pdev in %s\n", __func__); @@ -451,7 +472,19 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data) qdf_assert(data); pl_dev = txrx_pdev->pl_dev; - pl_tgt_hdr = (uint32_t *) data; + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + pl_tgt_hdr = (uint32_t *) fw_data->data; + if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } /* * Makes the short words (16 bits) portable b/w little endian * and big endian @@ -473,12 +506,12 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data) if (pl_hdr.log_type == PKTLOG_TYPE_TX_FRM_HDR) { /* Valid only for the TX CTL */ - process_ieee_hdr(data + sizeof(pl_hdr)); + process_ieee_hdr(fw_data->data + sizeof(pl_hdr)); } if (pl_hdr.log_type == PKTLOG_TYPE_TX_VIRT_ADDR) { A_UINT32 desc_id = (A_UINT32) - *((A_UINT32 *) (data + sizeof(pl_hdr))); + *((A_UINT32 *) (fw_data->data + sizeof(pl_hdr))); A_UINT32 vdev_id = desc_id; /* if the pkt log msg is for the bcn frame the vdev id @@ -530,8 +563,13 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data) pl_hdr.size = (pl_hdr.size > sizeof(txctl_log.priv.txdesc_ctl)) ? 
sizeof(txctl_log.priv.txdesc_ctl) : pl_hdr.size; + + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } qdf_mem_copy((void *)&txctl_log.priv.txdesc_ctl, - ((void *)data + sizeof(struct ath_pktlog_hdr)), + ((void *)fw_data->data + sizeof(struct ath_pktlog_hdr)), pl_hdr.size); qdf_assert(txctl_log.txdesc_hdr_ctl); qdf_mem_copy(txctl_log.txdesc_hdr_ctl, &txctl_log.priv, @@ -550,7 +588,7 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data) pktlog_getbuf(pl_dev, pl_info, log_size, &pl_hdr); qdf_assert(txstat_log.ds_status); qdf_mem_copy(txstat_log.ds_status, - ((void *)data + sizeof(struct ath_pktlog_hdr)), + ((void *)fw_data->data + sizeof(struct ath_pktlog_hdr)), pl_hdr.size); cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, txstat_log.ds_status); @@ -564,12 +602,12 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data) log_size = sizeof(pl_msdu_info.priv); if (pl_dev->mt_pktlog_enabled == false) - fill_ieee80211_hdr_data(txrx_pdev, &pl_msdu_info, data); + fill_ieee80211_hdr_data(txrx_pdev, &pl_msdu_info, fw_data->data); pl_msdu_info.ath_msdu_info = pktlog_getbuf(pl_dev, pl_info, log_size, &pl_hdr); qdf_mem_copy((void *)&pl_msdu_info.priv.msdu_id_info, - ((void *)data + sizeof(struct ath_pktlog_hdr)), + ((void *)fw_data->data + sizeof(struct ath_pktlog_hdr)), sizeof(pl_msdu_info.priv.msdu_id_info)); qdf_mem_copy(pl_msdu_info.ath_msdu_info, &pl_msdu_info.priv, sizeof(pl_msdu_info.priv)); @@ -650,6 +688,8 @@ A_STATUS process_rx_info(void *pdev, void *data) struct ath_pktlog_hdr pl_hdr; size_t log_size; uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; if (!pdev) { printk("Invalid pdev in %s", __func__); @@ -657,7 +697,20 @@ A_STATUS process_rx_info(void *pdev, void *data) } pl_dev = ((struct ol_txrx_pdev_t *)pdev)->pl_dev; pl_info = pl_dev->pl_info; - pl_tgt_hdr = (uint32_t *) data; + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + pl_tgt_hdr = (uint32_t *) fw_data->data; + if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & ATH_PKTLOG_HDR_FLAGS_MASK) >> ATH_PKTLOG_HDR_FLAGS_SHIFT; @@ -685,12 +738,17 @@ A_STATUS process_rx_info(void *pdev, void *data) pl_hdr.type_specific_data = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET); #endif + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + log_size = pl_hdr.size; rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info, log_size, &pl_hdr); qdf_mem_copy(rxstat_log.rx_desc, - (void *)data + sizeof(struct ath_pktlog_hdr), pl_hdr.size); + (void *)fw_data->data + sizeof(struct ath_pktlog_hdr), pl_hdr.size); cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rxstat_log.rx_desc); return A_OK; @@ -702,6 +760,8 @@ A_STATUS process_rate_find(void *pdev, void *data) struct ath_pktlog_hdr pl_hdr; struct ath_pktlog_info *pl_info; size_t log_size; + uint32_t len; + struct ol_fw_data *fw_data; /* * Will be uncommented when the rate control find @@ -720,7 +780,19 @@ A_STATUS 
process_rate_find(void *pdev, void *data) return A_ERROR; } - pl_tgt_hdr = (uint32_t *) data; + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + pl_tgt_hdr = (uint32_t *) fw_data->data; + if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } /* * Makes the short words (16 bits) portable b/w little endian * and big endian @@ -758,8 +830,12 @@ A_STATUS process_rate_find(void *pdev, void *data) rcf_log.rcFind = (void *)pktlog_getbuf(pl_dev, pl_info, log_size, &pl_hdr); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } qdf_mem_copy(rcf_log.rcFind, - ((char *)data + sizeof(struct ath_pktlog_hdr)), + ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)), pl_hdr.size); cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcf_log.rcFind); @@ -772,6 +848,8 @@ A_STATUS process_sw_event(void *pdev, void *data) struct ath_pktlog_hdr pl_hdr; struct ath_pktlog_info *pl_info; size_t log_size; + uint32_t len; + struct ol_fw_data *fw_data; /* * Will be uncommented when the rate control find @@ -790,7 +868,19 @@ A_STATUS process_sw_event(void *pdev, void *data) return A_ERROR; } - pl_tgt_hdr = (uint32_t *) data; + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + pl_tgt_hdr = (uint32_t *) fw_data->data; + if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } /* * Makes the short words (16 bits) portable b/w little endian * and big endian @@ -829,8 +919,12 @@ A_STATUS process_sw_event(void *pdev, void *data) sw_event.sw_event = (void *)pktlog_getbuf(pl_dev, pl_info, log_size, &pl_hdr); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } qdf_mem_copy(sw_event.sw_event, - ((char *)data + sizeof(struct ath_pktlog_hdr)), + ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)), pl_hdr.size); return A_OK; @@ -844,6 +938,8 @@ A_STATUS process_rate_update(void *pdev, void *data) struct ath_pktlog_info *pl_info; struct ath_pktlog_rc_update rcu_log; uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; if (!pdev) { printk("Invalid pdev in %s\n", __func__); @@ -853,7 +949,19 @@ A_STATUS process_rate_update(void *pdev, void *data) printk("Invalid data in %s\n", __func__); return A_ERROR; } - pl_tgt_hdr = (uint32_t *) data; + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + pl_tgt_hdr = (uint32_t *) fw_data->data; + if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len 
< (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } /* * Makes the short words (16 bits) portable b/w little endian * and big endian @@ -896,8 +1004,12 @@ A_STATUS process_rate_update(void *pdev, void *data) */ rcu_log.txRateCtrl = (void *)pktlog_getbuf(pl_dev, pl_info, log_size, &pl_hdr); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } qdf_mem_copy(rcu_log.txRateCtrl, - ((char *)data + sizeof(struct ath_pktlog_hdr)), + ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)), pl_hdr.size); cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcu_log.txRateCtrl); return A_OK; diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c index 78a8d4181b8f88331f6fcaa70d4632320a7626e3..f8cdc65244793b3647669ae5baf14722ebbbe991 100644 --- a/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c +++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c @@ -1011,7 +1011,8 @@ int wma_vdev_start_resp_handler(void *handle, uint8_t *cmd_param_info, if ((resp_event->vdev_id < wma->max_bssid) && (qdf_atomic_read( &wma->interfaces[resp_event->vdev_id].vdev_restart_params.hidden_ssid_restart_in_progress)) - && (wma_is_vdev_in_ap_mode(wma, resp_event->vdev_id) == true)) { + && (wma_is_vdev_in_ap_mode(wma, resp_event->vdev_id) == true) + && (req_msg->msg_type == WMA_HIDDEN_SSID_VDEV_RESTART)) { tpHalHiddenSsidVdevRestart hidden_ssid_restart = (tpHalHiddenSsidVdevRestart)req_msg->user_data; WMA_LOGE("%s: vdev restart event recevied for hidden ssid set using IOCTL", @@ -2679,13 +2680,13 @@ int wma_vdev_delete_handler(void *handle, uint8_t *cmd_param_info, event->vdev_id); return -EINVAL; } + qdf_mc_timer_stop(&req_msg->event_timeout); + qdf_mc_timer_destroy(&req_msg->event_timeout); wma_release_wakelock(&wma->wmi_cmd_rsp_wake_lock); /* Send response to upper layers */ wma_vdev_detach_callback(req_msg->user_data); - qdf_mc_timer_stop(&req_msg->event_timeout); - qdf_mc_timer_destroy(&req_msg->event_timeout); qdf_mem_free(req_msg); return status; @@ -4750,9 +4751,9 @@ static void wma_del_tdls_sta(tp_wma_handle wma, tpDeleteStaParams del_sta) WMA_DELETE_STA_TIMEOUT); if (!msg) { WMA_LOGE(FL("Failed to allocate vdev_id %d"), - peerStateParams->vdevId); + del_sta->smesessionId); wma_remove_req(wma, - peerStateParams->vdevId, + del_sta->smesessionId, WMA_DELETE_STA_RSP_START); del_sta->status = QDF_STATUS_E_NOMEM; goto send_del_rsp; diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c index c7b87254af1f04f47656326959db551659ad5a84..da3df9629c1410a9cbd5324802ad43ec4de7f5f5 100644 --- a/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c +++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c @@ -1878,7 +1878,11 @@ QDF_STATUS wma_send_offload_11k_params(WMA_HANDLE handle, return QDF_STATUS_E_NOSUPPORT; } - if (!params->neighbor_report_params.ssid.length) { + /* + * If 11k enable command and ssid length is 0, drop it + */ + if (params->offload_11k_bitmask && + !params->neighbor_report_params.ssid.length) { WMA_LOGD("%s: SSID Len 0", __func__); return QDF_STATUS_E_INVAL; } @@ -2018,11 +2022,14 @@ QDF_STATUS wma_process_roaming_config(tp_wma_handle wma_handle, break; } + /* + * Send 11k offload enable to FW as part of RSO Start + */ if (roam_req->reason == 
REASON_CTX_INIT) { qdf_status = wma_send_offload_11k_params(wma_handle, &roam_req->offload_11k_params); if (qdf_status != QDF_STATUS_SUCCESS) { - WMA_LOGE("11k offload params not sent, status %d", + WMA_LOGE("11k offload enable not sent, status %d", qdf_status); break; } @@ -2057,6 +2064,20 @@ QDF_STATUS wma_process_roaming_config(tp_wma_handle wma_handle, WMA_LOGD("Dont send RSO stop during roam sync"); break; } + + /* + * Send 11k offload disable command to FW as part of RSO Stop + */ + if (roam_req->reason == REASON_DISCONNECTED) { + qdf_status = wma_send_offload_11k_params(wma_handle, + &roam_req->offload_11k_params); + if (qdf_status != QDF_STATUS_SUCCESS) { + WMA_LOGE("11k offload disable not sent, status %d", + qdf_status); + break; + } + } + wma_handle->suitable_ap_hb_failure = false; if (wma_handle->roam_offload_enabled) { uint32_t mode; @@ -2436,7 +2457,16 @@ static int wma_fill_roam_synch_buffer(tp_wma_handle wma, fils_info = (wmi_roam_fils_synch_tlv_param *) (param_buf->roam_fils_synch_info); - if (param_buf->roam_fils_synch_info) { + if (fils_info) { + if ((fils_info->kek_len > SIR_KEK_KEY_LEN_FILS) || + (fils_info->pmk_len > SIR_PMK_LEN)) { + WMA_LOGE("%s: Invalid kek_len %d or pmk_len %d", + __func__, + fils_info->kek_len, + fils_info->pmk_len); + return -EINVAL; + } + roam_synch_ind_ptr->kek_len = fils_info->kek_len; qdf_mem_copy(roam_synch_ind_ptr->kek, fils_info->kek, fils_info->kek_len); @@ -2605,6 +2635,12 @@ int wma_roam_synch_event_handler(void *handle, uint8_t *event, goto cleanup_label; } + if (synch_event->vdev_id >= wma->max_bssid) { + WMA_LOGE("%s: received invalid vdev_id %d", + __func__, synch_event->vdev_id); + return status; + } + if (synch_event->bcn_probe_rsp_len > param_buf->num_bcn_probe_rsp_frame || synch_event->reassoc_req_len > @@ -2617,11 +2653,6 @@ int wma_roam_synch_event_handler(void *handle, uint8_t *event, synch_event->reassoc_rsp_len); goto cleanup_label; } - if (synch_event->vdev_id >= wma->max_bssid) { - WMA_LOGE("%s: received invalid vdev_id %d", - __func__, synch_event->vdev_id); - goto cleanup_label; - } wma_peer_debug_log(synch_event->vdev_id, DEBUG_ROAM_SYNCH_IND, DEBUG_INVALID_PEER_ID, NULL, NULL, @@ -4877,12 +4908,12 @@ int wma_extscan_change_results_event_handler(void *handle, tSirWifiSignificantChange *dest_ap; wmi_extscan_wlan_change_result_bssid *src_chglist; - int numap; + uint32_t numap; int i, k; uint8_t *src_rssi; int count = 0; int moredata; - int rssi_num = 0; + uint32_t rssi_num = 0; tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE); uint32_t buf_len; bool excess_data = false; @@ -4914,8 +4945,17 @@ int wma_extscan_change_results_event_handler(void *handle, WMA_LOGE("%s: Invalid num of entries in page: %d", __func__, numap); return -EINVAL; } - for (i = 0; i < numap; i++) + for (i = 0; i < numap; i++) { + if (src_chglist->num_rssi_samples > (UINT_MAX - rssi_num)) { + WMA_LOGE("%s: Invalid num of rssi samples %d numap %d rssi_num %d", + __func__, src_chglist->num_rssi_samples, + numap, rssi_num); + return -EINVAL; + } rssi_num += src_chglist->num_rssi_samples; + src_chglist++; + } + src_chglist = param_buf->bssid_signal_descriptor_list; if (event->first_entry_index + event->num_entries_in_page < event->total_entries) { @@ -5010,6 +5050,8 @@ int wma_passpoint_match_event_handler(void *handle, struct wifi_passpoint_match *dest_match; tSirWifiScanResult *dest_ap; uint8_t *buf_ptr; + uint32_t buf_len = 0; + bool excess_data = false; tpAniSirGlobal mac = cds_get_context(QDF_MODULE_ID_PE); if (!mac) { @@ -5029,13 +5071,26 @@ 
int wma_passpoint_match_event_handler(void *handle, event = param_buf->fixed_param; buf_ptr = (uint8_t *)param_buf->fixed_param; - /* - * All the below lengths are UINT32 and summing up and checking - * against a constant should not be an issue. - */ - if ((sizeof(*event) + event->ie_length + event->anqp_length) > - WMI_SVC_MSG_MAX_SIZE || - (event->ie_length + event->anqp_length) > param_buf->num_bufp) { + do { + if (event->ie_length > (WMI_SVC_MSG_MAX_SIZE)) { + excess_data = true; + break; + } else { + buf_len = event->ie_length; + } + + if (event->anqp_length > (WMI_SVC_MSG_MAX_SIZE)) { + excess_data = true; + break; + } else { + buf_len += event->anqp_length; + } + + } while (0); + + if (excess_data || buf_len > (WMI_SVC_MSG_MAX_SIZE - sizeof(*event)) || + buf_len > (WMI_SVC_MSG_MAX_SIZE - sizeof(*dest_match)) || + (event->ie_length + event->anqp_length) > param_buf->num_bufp) { WMA_LOGE("IE Length: %u or ANQP Length: %u is huge, num_bufp: %u", event->ie_length, event->anqp_length, param_buf->num_bufp); @@ -5048,8 +5103,8 @@ int wma_passpoint_match_event_handler(void *handle, event->ssid.ssid_len = SIR_MAC_MAX_SSID_LENGTH; } - dest_match = qdf_mem_malloc(sizeof(*dest_match) + - event->ie_length + event->anqp_length); + dest_match = qdf_mem_malloc(sizeof(*dest_match) + buf_len); + if (!dest_match) { WMA_LOGE("%s: qdf_mem_malloc failed", __func__); return -EINVAL; diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c index 5eadeeae76d0ed6b87794dbc149e9eb7cd6e3471..001849e9b1c8f71dd227a7341138d9ef29eb03e4 100644 --- a/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c +++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c @@ -3574,19 +3574,25 @@ int wma_unified_debug_print_event_handler(void *handle, uint8_t *datap, uint32_t datalen; param_buf = (WMI_DEBUG_PRINT_EVENTID_param_tlvs *) datap; - if (!param_buf) { + if (!param_buf || !param_buf->data) { WMA_LOGE("Get NULL point message from FW"); return -ENOMEM; } data = param_buf->data; datalen = param_buf->num_data; + if (datalen > WMI_SVC_MSG_MAX_SIZE) { + WMA_LOGE("Received data len %d exceeds max value %d", + datalen, WMI_SVC_MSG_MAX_SIZE); + return QDF_STATUS_E_FAILURE; + } + data[datalen - 1] = '\0'; #ifdef BIG_ENDIAN_HOST { - if (datalen > BIG_ENDIAN_MAX_DEBUG_BUF) { + if (datalen >= BIG_ENDIAN_MAX_DEBUG_BUF) { WMA_LOGE("%s Invalid data len %d, limiting to max", __func__, datalen); - datalen = BIG_ENDIAN_MAX_DEBUG_BUF; + datalen = BIG_ENDIAN_MAX_DEBUG_BUF - 1; } char dbgbuf[BIG_ENDIAN_MAX_DEBUG_BUF] = { 0 }; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index b1298f093f13f5d7e09590bbc3e1be6f889e9c95..824c0830a9de0a4dec6d4666d4405a7ab625ae63 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -609,15 +609,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, } else if (header->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) { + struct usb_interface_assoc_descriptor *d; + + d = (struct usb_interface_assoc_descriptor *)header; + if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) { + dev_warn(ddev, + "config %d has an invalid interface association descriptor of length %d, skipping\n", + cfgno, d->bLength); + continue; + } + if (iad_num == USB_MAXIADS) { dev_warn(ddev, "found more Interface " "Association Descriptors " "than allocated for in " "configuration %d\n", cfgno); } else { - config->intf_assoc[iad_num] = - (struct usb_interface_assoc_descriptor - *)header; + config->intf_assoc[iad_num] = d; 
iad_num++; } @@ -918,10 +926,12 @@ int usb_get_bos_descriptor(struct usb_device *dev) for (i = 0; i < num; i++) { buffer += length; cap = (struct usb_dev_cap_header *)buffer; - length = cap->bLength; - if (total_len < length) + if (total_len < sizeof(*cap) || total_len < cap->bLength) { + dev->bos->desc->bNumDeviceCaps = i; break; + } + length = cap->bLength; total_len -= length; if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 8e641b5893edb7a6195c34e43248540fbff39aa6..70ce58fd2cb0f642fd3fd435ceaad00c01e10286 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -12,6 +12,7 @@ #include <linux/nls.h> #include <linux/device.h> #include <linux/scatterlist.h> +#include <linux/usb/cdc.h> #include <linux/usb/quirks.h> #include <linux/usb/hcd.h> /* for usbcore internals */ #include <asm/byteorder.h> @@ -2027,3 +2028,159 @@ int usb_driver_set_configuration(struct usb_device *udev, int config) return 0; } EXPORT_SYMBOL_GPL(usb_driver_set_configuration); + +/** + * cdc_parse_cdc_header - parse the extra headers present in CDC devices + * @hdr: the place to put the results of the parsing + * @intf: the interface for which parsing is requested + * @buffer: pointer to the extra headers to be parsed + * @buflen: length of the extra headers + * + * This evaluates the extra headers present in CDC devices which + * bind the interfaces for data and control and provide details + * about the capabilities of the device. + * + * Return: number of descriptors parsed or -EINVAL + * if the header is contradictory beyond salvage + */ + +int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, + struct usb_interface *intf, + u8 *buffer, + int buflen) +{ + /* duplicates are ignored */ + struct usb_cdc_union_desc *union_header = NULL; + + /* duplicates are not tolerated */ + struct usb_cdc_header_desc *header = NULL; + struct usb_cdc_ether_desc *ether = NULL; + struct usb_cdc_mdlm_detail_desc *detail = NULL; + struct usb_cdc_mdlm_desc *desc = NULL; + + unsigned int elength; + int cnt = 0; + + memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header)); + hdr->phonet_magic_present = false; + while (buflen > 0) { + elength = buffer[0]; + if (!elength) { + dev_err(&intf->dev, "skipping garbage byte\n"); + elength = 1; + goto next_desc; + } + if ((buflen < elength) || (elength < 3)) { + dev_err(&intf->dev, "invalid descriptor buffer length\n"); + break; + } + if (buffer[1] != USB_DT_CS_INTERFACE) { + dev_err(&intf->dev, "skipping garbage\n"); + goto next_desc; + } + + switch (buffer[2]) { + case USB_CDC_UNION_TYPE: /* we've found it */ + if (elength < sizeof(struct usb_cdc_union_desc)) + goto next_desc; + if (union_header) { + dev_err(&intf->dev, "More than one union descriptor, skipping ...\n"); + goto next_desc; + } + union_header = (struct usb_cdc_union_desc *)buffer; + break; + case USB_CDC_COUNTRY_TYPE: + if (elength < sizeof(struct usb_cdc_country_functional_desc)) + goto next_desc; + hdr->usb_cdc_country_functional_desc = + (struct usb_cdc_country_functional_desc *)buffer; + break; + case USB_CDC_HEADER_TYPE: + if (elength != sizeof(struct usb_cdc_header_desc)) + goto next_desc; + if (header) + return -EINVAL; + header = (struct usb_cdc_header_desc *)buffer; + break; + case USB_CDC_ACM_TYPE: + if (elength < sizeof(struct usb_cdc_acm_descriptor)) + goto next_desc; + hdr->usb_cdc_acm_descriptor = + (struct usb_cdc_acm_descriptor *)buffer; + break; + case USB_CDC_ETHERNET_TYPE: + if (elength != sizeof(struct 
usb_cdc_ether_desc)) + goto next_desc; + if (ether) + return -EINVAL; + ether = (struct usb_cdc_ether_desc *)buffer; + break; + case USB_CDC_CALL_MANAGEMENT_TYPE: + if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor)) + goto next_desc; + hdr->usb_cdc_call_mgmt_descriptor = + (struct usb_cdc_call_mgmt_descriptor *)buffer; + break; + case USB_CDC_DMM_TYPE: + if (elength < sizeof(struct usb_cdc_dmm_desc)) + goto next_desc; + hdr->usb_cdc_dmm_desc = + (struct usb_cdc_dmm_desc *)buffer; + break; + case USB_CDC_MDLM_TYPE: + if (elength < sizeof(struct usb_cdc_mdlm_desc *)) + goto next_desc; + if (desc) + return -EINVAL; + desc = (struct usb_cdc_mdlm_desc *)buffer; + break; + case USB_CDC_MDLM_DETAIL_TYPE: + if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *)) + goto next_desc; + if (detail) + return -EINVAL; + detail = (struct usb_cdc_mdlm_detail_desc *)buffer; + break; + case USB_CDC_NCM_TYPE: + if (elength < sizeof(struct usb_cdc_ncm_desc)) + goto next_desc; + hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer; + break; + case USB_CDC_MBIM_TYPE: + if (elength < sizeof(struct usb_cdc_mbim_desc)) + goto next_desc; + + hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer; + break; + case USB_CDC_MBIM_EXTENDED_TYPE: + if (elength < sizeof(struct usb_cdc_mbim_extended_desc)) + break; + hdr->usb_cdc_mbim_extended_desc = + (struct usb_cdc_mbim_extended_desc *)buffer; + break; + case CDC_PHONET_MAGIC_NUMBER: + hdr->phonet_magic_present = true; + break; + default: + /* + * there are LOTS more CDC descriptors that + * could legitimately be found here. + */ + dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n", + buffer[2], elength); + goto next_desc; + } + cnt++; +next_desc: + buflen -= elength; + buffer += elength; + } + hdr->usb_cdc_union_desc = union_header; + hdr->usb_cdc_header_desc = header; + hdr->usb_cdc_mdlm_detail_desc = detail; + hdr->usb_cdc_mdlm_desc = desc; + hdr->usb_cdc_ether_desc = ether; + return cnt; +} + +EXPORT_SYMBOL(cdc_parse_cdc_header); diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index b4483f8b83980213f4823041f3ce9ac6ecacdb46..82ff7054a67203f451a7822be20138173e7a2245 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -3450,6 +3450,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on) if (on) { dev_dbg(mdwc->dev, "%s: turn on host\n", __func__); + pm_runtime_get_sync(mdwc->dev); mdwc->hs_phy->flags |= PHY_HOST_MODE; if (dwc->maximum_speed == USB_SPEED_SUPER) { mdwc->ss_phy->flags |= PHY_HOST_MODE; @@ -3458,7 +3459,6 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on) } usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH); - pm_runtime_get_sync(mdwc->dev); dbg_event(0xFF, "StrtHost gync", atomic_read(&mdwc->dev->power.usage_count)); if (!IS_ERR(mdwc->vbus_reg)) diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c index 7950f25136a59b26210d24961614a880baec7144..fc1979e09a032f2239dc1f3bf4d770d5628906f7 100644 --- a/drivers/usb/gadget/function/f_accessory.c +++ b/drivers/usb/gadget/function/f_accessory.c @@ -879,6 +879,12 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev, u16 w_length = le16_to_cpu(ctrl->wLength); unsigned long flags; + /* + * If instance is not created which is the case in power off charging + * mode, dev will be NULL. Hence return error if it is the case. + */ + if (!dev) + return -ENODEV; /* * printk(KERN_INFO "acc_ctrlrequest " * "%02x.%02x v%04x i%04x l%u\n", diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index c001dcda62fd904d45470fe8d318d423f60fa3d5..792d3575558764ef006b5933abccc589080fabb8 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -49,6 +49,7 @@ static struct gsi_inst_status { /* Deregister misc device and free instance structures */ static void gsi_inst_clean(struct gsi_opts *opts); +static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port); static void ipa_disconnect_handler(struct gsi_data_port *d_port); static int gsi_ctrl_send_notification(struct f_gsi *gsi); static int gsi_alloc_trb_buffer(struct f_gsi *gsi); @@ -501,14 +502,11 @@ static void ipa_disconnect_handler(struct gsi_data_port *d_port) */ usb_gsi_ep_op(d_port->in_ep, (void *)&block_db, GSI_EP_OP_SET_CLR_BLOCK_DBL); - gsi->in_ep_desc_backup = gsi->d_port.in_ep->desc; usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_DISABLE); } - if (gsi->d_port.out_ep) { - gsi->out_ep_desc_backup = gsi->d_port.out_ep->desc; + if (gsi->d_port.out_ep) usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_DISABLE); - } gsi->d_port.net_ready_trigger = false; } @@ -614,6 +612,7 @@ static void ipa_work_handler(struct work_struct *w) struct usb_gadget *gadget = gsi->gadget; struct device *dev; struct device *gad_dev; + bool block_db; event = read_event(d_port); @@ -676,28 +675,6 @@ static void ipa_work_handler(struct work_struct *w) break; } - /* - * Update desc and reconfigure USB GSI OUT and IN - * endpoint for RNDIS Adaptor enable case. 
- */ - if (d_port->out_ep && !d_port->out_ep->desc && - gsi->out_ep_desc_backup) { - d_port->out_ep->desc = gsi->out_ep_desc_backup; - d_port->out_ep->ep_intr_num = 1; - log_event_dbg("%s: OUT ep_op_config", __func__); - usb_gsi_ep_op(d_port->out_ep, - &d_port->out_request, GSI_EP_OP_CONFIG); - } - - if (d_port->in_ep && !d_port->in_ep->desc && - gsi->in_ep_desc_backup) { - d_port->in_ep->desc = gsi->in_ep_desc_backup; - d_port->in_ep->ep_intr_num = 2; - log_event_dbg("%s: IN ep_op_config", __func__); - usb_gsi_ep_op(d_port->in_ep, - &d_port->in_request, GSI_EP_OP_CONFIG); - } - ipa_connect_channels(d_port); ipa_data_path_enable(d_port); d_port->sm_state = STATE_CONNECTED; @@ -759,7 +736,15 @@ static void ipa_work_handler(struct work_struct *w) if (event == EVT_HOST_NRDY) { log_event_dbg("%s: ST_CON_HOST_NRDY\n", __func__); - ipa_disconnect_handler(d_port); + block_db = true; + /* stop USB ringing doorbell to GSI(OUT_EP) */ + usb_gsi_ep_op(d_port->in_ep, (void *)&block_db, + GSI_EP_OP_SET_CLR_BLOCK_DBL); + gsi_rndis_ipa_reset_trigger(d_port); + usb_gsi_ep_op(d_port->in_ep, NULL, + GSI_EP_OP_ENDXFER); + usb_gsi_ep_op(d_port->out_ep, NULL, + GSI_EP_OP_ENDXFER); } ipa_disconnect_work_handler(d_port); @@ -1467,6 +1452,27 @@ static void gsi_rndis_open(struct f_gsi *rndis) rndis_signal_connect(rndis->params); } +static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port) +{ + struct f_gsi *rndis = d_port_to_gsi(d_port); + unsigned long flags; + + if (!rndis) { + log_event_err("%s: gsi prot ctx is %pK", __func__, rndis); + return; + } + + spin_lock_irqsave(&rndis->d_port.lock, flags); + if (!rndis) { + log_event_err("%s: No RNDIS instance", __func__); + spin_unlock_irqrestore(&rndis->d_port.lock, flags); + return; + } + + rndis->d_port.net_ready_trigger = false; + spin_unlock_irqrestore(&rndis->d_port.lock, flags); +} + void gsi_rndis_flow_ctrl_enable(bool enable, struct rndis_params *param) { struct f_gsi *rndis = param->v; diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 3806e7014199d13c892f32e8a03c1b37e286bddb..2938153fe7b1dc86c8140ca48327e40efb63d670 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c @@ -189,6 +189,7 @@ static int usb_console_setup(struct console *co, char *options) tty_kref_put(tty); reset_open_count: port->port.count = 0; + info->port = NULL; usb_autopm_put_interface(serial->interface); error_get_interface: usb_serial_put(serial); diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index f58caa9e6a27e6e1a7161a66e5e9a97698cc1e1a..a155cd02bce240db83742f0299ab8ea12a56e7a8 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf) intf->desc.bInterfaceProtocol == USB_PR_UAS); } -static int uas_find_uas_alt_setting(struct usb_interface *intf) +static struct usb_host_interface *uas_find_uas_alt_setting( + struct usb_interface *intf) { int i; @@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf) struct usb_host_interface *alt = &intf->altsetting[i]; if (uas_is_interface(alt)) - return alt->desc.bAlternateSetting; + return alt; } - return -ENODEV; + return NULL; } static int uas_find_endpoints(struct usb_host_interface *alt, @@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf, struct usb_device *udev = interface_to_usbdev(intf); struct usb_hcd *hcd = bus_to_hcd(udev->bus); unsigned long flags = id->driver_info; - int r, alt; - + 
struct usb_host_interface *alt; + int r; alt = uas_find_uas_alt_setting(intf); - if (alt < 0) + if (!alt) return 0; - r = uas_find_endpoints(&intf->altsetting[alt], eps); + r = uas_find_endpoints(alt, eps); if (r < 0) return 0; diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index e26e32169a364e2903edf49829b9262c8ba877d6..f952635ebe5f4316780472a76854b61ec8f7d081 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -849,14 +849,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids); static int uas_switch_interface(struct usb_device *udev, struct usb_interface *intf) { - int alt; + struct usb_host_interface *alt; alt = uas_find_uas_alt_setting(intf); - if (alt < 0) - return alt; + if (!alt) + return -ENODEV; - return usb_set_interface(udev, - intf->altsetting[0].desc.bInterfaceNumber, alt); + return usb_set_interface(udev, alt->desc.bInterfaceNumber, + alt->desc.bAlternateSetting); } static int uas_configure_endpoints(struct uas_dev_info *devinfo) diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h index 5548f0f09f8a19e14a775509c4dd10403e5331c0..1ccb27113c119a5aef1aa1e586014e197dc27432 100644 --- a/drivers/video/fbdev/msm/mdss.h +++ b/drivers/video/fbdev/msm/mdss.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -255,6 +255,13 @@ struct mdss_scaler_block { u32 *dest_scaler_off; u32 *dest_scaler_lut_off; struct mdss_mdp_qseed3_lut_tbl lut_tbl; + + /* + * Lock is mainly to serialize access to LUT. + * LUT values come asynchronously from userspace + * via ioctl. + */ + struct mutex scaler_lock; }; struct mdss_data_type; diff --git a/drivers/video/fbdev/msm/mdss_debug_xlog.c b/drivers/video/fbdev/msm/mdss_debug_xlog.c index aeefc81657b045e80b44303a230e5efa6d8156ad..10d747962a917085991bd8eaba6e1457530710e3 100644 --- a/drivers/video/fbdev/msm/mdss_debug_xlog.c +++ b/drivers/video/fbdev/msm/mdss_debug_xlog.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -755,6 +755,11 @@ static ssize_t mdss_xlog_dump_read(struct file *file, char __user *buff, if (__mdss_xlog_dump_calc_range()) { len = mdss_xlog_dump_entry(xlog_buf, MDSS_XLOG_BUF_MAX); + if (len < 0 || len > count) { + pr_err("len is more than the size of user buffer\n"); + return 0; + } + if (copy_to_user(buff, xlog_buf, len)) return -EFAULT; *ppos += len; diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c index 7b9862a066be691dffc973084fd13f5da79d26b4..4528bacc3d0109072bc00c9f7e1f298e782f9c9e 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.c +++ b/drivers/video/fbdev/msm/mdss_dsi.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -772,7 +772,7 @@ static ssize_t mdss_dsi_cmd_state_read(struct file *file, char __user *buf, if (blen < 0) return 0; - if (copy_to_user(buf, buffer, blen)) + if (copy_to_user(buf, buffer, min(count, (size_t)blen+1))) return -EFAULT; *ppos += blen; diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c index 410d36a3ac31ece63544acb899bf94190687de45..0efb79d0b0e49c459fe444baf17f3e950ba6fa7b 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.c +++ b/drivers/video/fbdev/msm/mdss_mdp.c @@ -1,7 +1,7 @@ /* * MDSS MDP Interface (used by framebuffer core) * - * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved. * Copyright (C) 2007 Google Incorporated * * This software is licensed under the terms of the GNU General Public @@ -2033,8 +2033,8 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata) mdata->hflip_buffer_reused = true; /* prevent disable of prefill calculations */ mdata->min_prefill_lines = 0xffff; - /* clock gating feature is enabled by default */ - mdata->enable_gate = true; + /* clock gating feature is disabled by default */ + mdata->enable_gate = false; mdata->pixel_ram_size = 0; mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_FLAT; @@ -2436,6 +2436,8 @@ static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata, ret = mdss_mdp_ds_addr_setup(mdata); } + mutex_init(&mdata->scaler_off->scaler_lock); + return ret; } diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c index 2fabb78baffd35bcf612419d1dc104b7fae6fba4..32c4b7c4218b8341a9120b9410c47b0db54e2a2a 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c +++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -3301,9 +3301,8 @@ int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en) goto end; } - if (!ctl->panel_data->panel_info.cont_splash_enabled - && (!mdss_mdp_ctl_is_power_on(ctl) || - mdss_panel_is_power_on_ulp(ctl->power_state))) { + if (!ctl->panel_data->panel_info.cont_splash_enabled && + !mdss_mdp_ctl_is_power_on(ctl)) { pr_debug("fb%d vsync pending first update en=%d, ctl power state:%d\n", mfd->index, en, ctl->power_state); rc = -EPERM; @@ -6660,14 +6659,18 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata, if (!mdata->scaler_off) return -EFAULT; + mutex_lock(&mdata->scaler_off->scaler_lock); + qseed3_lut_tbl = &mdata->scaler_off->lut_tbl; if ((lut_tbl->dir_lut_size != DIR_LUT_IDX * DIR_LUT_COEFFS * sizeof(uint32_t)) || (lut_tbl->cir_lut_size != CIR_LUT_IDX * CIR_LUT_COEFFS * sizeof(uint32_t)) || (lut_tbl->sep_lut_size != - SEP_LUT_IDX * SEP_LUT_COEFFS * sizeof(uint32_t))) - return -EINVAL; + SEP_LUT_IDX * SEP_LUT_COEFFS * sizeof(uint32_t))) { + mutex_unlock(&mdata->scaler_off->scaler_lock); + return -EINVAL; + } if (!qseed3_lut_tbl->dir_lut) { qseed3_lut_tbl->dir_lut = devm_kzalloc(&mdata->pdev->dev, @@ -6675,7 +6678,7 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata, GFP_KERNEL); if (!qseed3_lut_tbl->dir_lut) { ret = -ENOMEM; - goto fail; + goto err; } } @@ -6685,7 +6688,7 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata, GFP_KERNEL); if (!qseed3_lut_tbl->cir_lut) { ret = -ENOMEM; - goto fail; + goto fail_free_dir_lut; } } @@ -6695,44 +6698,52 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata, GFP_KERNEL); if (!qseed3_lut_tbl->sep_lut) { ret = -ENOMEM; - goto fail; + goto fail_free_cir_lut; } } /* Invalidate before updating */ qseed3_lut_tbl->valid = false; - if (copy_from_user(qseed3_lut_tbl->dir_lut, (void *)(unsigned long)lut_tbl->dir_lut, lut_tbl->dir_lut_size)) { ret = -EINVAL; - goto err; + goto fail_free_sep_lut; } if (copy_from_user(qseed3_lut_tbl->cir_lut, (void *)(unsigned long)lut_tbl->cir_lut, lut_tbl->cir_lut_size)) { ret = -EINVAL; - goto err; + goto fail_free_sep_lut; } if (copy_from_user(qseed3_lut_tbl->sep_lut, (void *)(unsigned long)lut_tbl->sep_lut, lut_tbl->sep_lut_size)) { ret = -EINVAL; - goto err; + goto fail_free_sep_lut; } qseed3_lut_tbl->valid = true; + mutex_unlock(&mdata->scaler_off->scaler_lock); + return ret; -fail: - kfree(qseed3_lut_tbl->dir_lut); - kfree(qseed3_lut_tbl->cir_lut); - kfree(qseed3_lut_tbl->sep_lut); +fail_free_sep_lut: + devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->sep_lut); +fail_free_cir_lut: + devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->cir_lut); +fail_free_dir_lut: + devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->dir_lut); err: + qseed3_lut_tbl->dir_lut = NULL; + qseed3_lut_tbl->cir_lut = NULL; + qseed3_lut_tbl->sep_lut = NULL; qseed3_lut_tbl->valid = false; + mutex_unlock(&mdata->scaler_off->scaler_lock); + return ret; } diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c index c4eaf2778c5d9773bc9282e64582778693ff46c3..5b9798e2c24eed82e0e6a851023549e2c8dd57f8 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_pp.c +++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1611,11 +1611,16 @@ int mdss_mdp_scaler_lut_cfg(struct mdp_scale_data_v2 *scaler, }; mdata = mdss_mdp_get_mdata(); + + mutex_lock(&mdata->scaler_off->scaler_lock); + lut_tbl = &mdata->scaler_off->lut_tbl; if ((!lut_tbl) || (!lut_tbl->valid)) { + mutex_unlock(&mdata->scaler_off->scaler_lock); pr_err("%s:Invalid QSEED3 LUT TABLE\n", __func__); return -EINVAL; } + if ((scaler->lut_flag & SCALER_LUT_DIR_WR) || (scaler->lut_flag & SCALER_LUT_Y_CIR_WR) || (scaler->lut_flag & SCALER_LUT_UV_CIR_WR) || @@ -1663,6 +1668,8 @@ int mdss_mdp_scaler_lut_cfg(struct mdp_scale_data_v2 *scaler, } } + mutex_unlock(&mdata->scaler_off->scaler_lock); + return 0; } diff --git a/fs/block_dev.c b/fs/block_dev.c index d3c296d4eb25b5080d7252587c02b911b92f63d3..43b80ca84d9cff76c3dd691c0ab2fc2e0d4c60d9 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -558,6 +558,8 @@ static void bdev_evict_inode(struct inode *inode) } list_del_init(&bdev->bd_list); spin_unlock(&bdev_lock); + /* Detach inode from wb early as bdi_put() may free bdi->wb */ + inode_detach_wb(inode); if (bdev->bd_bdi != &noop_backing_dev_info) { bdi_put(bdev->bd_bdi); bdev->bd_bdi = &noop_backing_dev_info; @@ -1221,8 +1223,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) bdev->bd_disk = disk; bdev->bd_queue = disk->queue; bdev->bd_contains = bdev; - if (bdev->bd_bdi == &noop_backing_dev_info) - bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info); bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0; if (!partno) { @@ -1294,6 +1294,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) (bdev->bd_part->nr_sects % (PAGE_SIZE / 512))) bdev->bd_inode->i_flags &= ~S_DAX; } + + if (bdev->bd_bdi == &noop_backing_dev_info) + bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info); } else { if (bdev->bd_contains == bdev) { ret = 0; @@ -1325,8 +1328,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) bdev->bd_disk = NULL; bdev->bd_part = NULL; bdev->bd_queue = NULL; - bdi_put(bdev->bd_bdi); - bdev->bd_bdi = &noop_backing_dev_info; if (bdev != bdev->bd_contains) __blkdev_put(bdev->bd_contains, mode, 1); bdev->bd_contains = NULL; @@ -1548,12 +1549,6 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) kill_bdev(bdev); bdev_write_inode(bdev); - /* - * Detaching bdev inode from its wb in __destroy_inode() - * is too late: the queue which embeds its bdi (along with - * root wb) can be gone as soon as we put_disk() below. 
- */ - inode_detach_wb(bdev->bd_inode); } if (bdev->bd_contains == bdev) { if (disk->fops->release) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ca7d46de5ca39e675308a25be10f9070c28ed0eb..028f38f0906ca3a5de6bd10636217affde8f2edf 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1947,8 +1947,10 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, err = copy_out_args(cs, &req->out, nbytes); if (req->in.h.opcode == FUSE_CANONICAL_PATH) { - req->out.h.error = kern_path((char *)req->out.args[0].value, 0, - req->canonical_path); + char *path = (char *)req->out.args[0].value; + + path[req->out.args[0].size - 1] = 0; + req->out.h.error = kern_path(path, 0, req->canonical_path); } fuse_copy_finish(cs); diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h index 88b92b2f18721890c0dcb62da6263d66c1255d3d..d9fb3ce1874eaaf76acec357272ae20729cc5ab0 100644 --- a/fs/sdcardfs/sdcardfs.h +++ b/fs/sdcardfs/sdcardfs.h @@ -656,7 +656,7 @@ static inline bool str_n_case_eq(const char *s1, const char *s2, size_t len) static inline bool qstr_case_eq(const struct qstr *q1, const struct qstr *q2) { - return q1->len == q2->len && str_case_eq(q1->name, q2->name); + return q1->len == q2->len && str_n_case_eq(q1->name, q2->name, q2->len); } #define QSTR_LITERAL(string) QSTR_INIT(string, sizeof(string)-1) diff --git a/include/linux/refcount.h b/include/linux/refcount.h new file mode 100644 index 0000000000000000000000000000000000000000..600aadf9cca445a437f97a5c71a21febf537845f --- /dev/null +++ b/include/linux/refcount.h @@ -0,0 +1,294 @@ +#ifndef _LINUX_REFCOUNT_H +#define _LINUX_REFCOUNT_H + +/* + * Variant of atomic_t specialized for reference counts. + * + * The interface matches the atomic_t interface (to aid in porting) but only + * provides the few functions one should use for reference counting. + * + * It differs in that the counter saturates at UINT_MAX and will not move once + * there. This avoids wrapping the counter and causing 'spurious' + * use-after-free issues. + * + * Memory ordering rules are slightly relaxed wrt regular atomic_t functions + * and provide only what is strictly required for refcounts. + * + * The increments are fully relaxed; these will not provide ordering. The + * rationale is that whatever is used to obtain the object we're increasing the + * reference count on will provide the ordering. For locked data structures, + * its the lock acquire, for RCU/lockless data structures its the dependent + * load. + * + * Do note that inc_not_zero() provides a control dependency which will order + * future stores against the inc, this ensures we'll never modify the object + * if we did not in fact acquire a reference. + * + * The decrements will provide release order, such that all the prior loads and + * stores will be issued before, it also provides a control dependency, which + * will order us against the subsequent free(). + * + * The control dependency is against the load of the cmpxchg (ll/sc) that + * succeeded. This means the stores aren't fully ordered, but this is fine + * because the 1->0 transition indicates no concurrency. + * + * Note that the allocator is responsible for ordering things between free() + * and alloc(). 
+ * + */ + +#include <linux/atomic.h> +#include <linux/bug.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> + +#ifdef CONFIG_DEBUG_REFCOUNT +#define REFCOUNT_WARN(cond, str) WARN_ON(cond) +#define __refcount_check __must_check +#else +#define REFCOUNT_WARN(cond, str) (void)(cond) +#define __refcount_check +#endif + +typedef struct refcount_struct { + atomic_t refs; +} refcount_t; + +#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), } + +static inline void refcount_set(refcount_t *r, unsigned int n) +{ + atomic_set(&r->refs, n); +} + +static inline unsigned int refcount_read(const refcount_t *r) +{ + return atomic_read(&r->refs); +} + +static inline __refcount_check +bool refcount_add_not_zero(unsigned int i, refcount_t *r) +{ + unsigned int old, new, val = atomic_read(&r->refs); + + for (;;) { + if (!val) + return false; + + if (unlikely(val == UINT_MAX)) + return true; + + new = val + i; + if (new < val) + new = UINT_MAX; + old = atomic_cmpxchg_relaxed(&r->refs, val, new); + if (old == val) + break; + + val = old; + } + + REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); + + return true; +} + +static inline void refcount_add(unsigned int i, refcount_t *r) +{ + REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); +} + +/* + * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN. + * + * Provides no memory ordering, it is assumed the caller has guaranteed the + * object memory to be stable (RCU, etc.). It does provide a control dependency + * and thereby orders future stores. See the comment on top. + */ +static inline __refcount_check +bool refcount_inc_not_zero(refcount_t *r) +{ + unsigned int old, new, val = atomic_read(&r->refs); + + for (;;) { + new = val + 1; + + if (!val) + return false; + + if (unlikely(!new)) + return true; + + old = atomic_cmpxchg_relaxed(&r->refs, val, new); + if (old == val) + break; + + val = old; + } + + REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); + + return true; +} + +/* + * Similar to atomic_inc(), will saturate at UINT_MAX and WARN. + * + * Provides no memory ordering, it is assumed the caller already has a + * reference on the object, will WARN when this is not so. + */ +static inline void refcount_inc(refcount_t *r) +{ + REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); +} + +/* + * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to + * decrement when saturated at UINT_MAX. + * + * Provides release memory ordering, such that prior loads and stores are done + * before, and provides a control dependency such that free() must come after. + * See the comment on top. + */ +static inline __refcount_check +bool refcount_sub_and_test(unsigned int i, refcount_t *r) +{ + unsigned int old, new, val = atomic_read(&r->refs); + + for (;;) { + if (unlikely(val == UINT_MAX)) + return false; + + new = val - i; + if (new > val) { + REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n"); + return false; + } + + old = atomic_cmpxchg_release(&r->refs, val, new); + if (old == val) + break; + + val = old; + } + + return !new; +} + +static inline __refcount_check +bool refcount_dec_and_test(refcount_t *r) +{ + return refcount_sub_and_test(1, r); +} + +/* + * Similar to atomic_dec(), it will WARN on underflow and fail to decrement + * when saturated at UINT_MAX. + * + * Provides release memory ordering, such that prior loads and stores are done + * before. 
+ */ +static inline +void refcount_dec(refcount_t *r) +{ + REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); +} + +/* + * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the + * success thereof. + * + * Like all decrement operations, it provides release memory order and provides + * a control dependency. + * + * It can be used like a try-delete operator; this explicit case is provided + * and not cmpxchg in generic, because that would allow implementing unsafe + * operations. + */ +static inline __refcount_check +bool refcount_dec_if_one(refcount_t *r) +{ + return atomic_cmpxchg_release(&r->refs, 1, 0) == 1; +} + +/* + * No atomic_t counterpart, it decrements unless the value is 1, in which case + * it will return false. + * + * Was often done like: atomic_add_unless(&var, -1, 1) + */ +static inline __refcount_check +bool refcount_dec_not_one(refcount_t *r) +{ + unsigned int old, new, val = atomic_read(&r->refs); + + for (;;) { + if (unlikely(val == UINT_MAX)) + return true; + + if (val == 1) + return false; + + new = val - 1; + if (new > val) { + REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n"); + return true; + } + + old = atomic_cmpxchg_release(&r->refs, val, new); + if (old == val) + break; + + val = old; + } + + return true; +} + +/* + * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail + * to decrement when saturated at UINT_MAX. + * + * Provides release memory ordering, such that prior loads and stores are done + * before, and provides a control dependency such that free() must come after. + * See the comment on top. + */ +static inline __refcount_check +bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) +{ + if (refcount_dec_not_one(r)) + return false; + + mutex_lock(lock); + if (!refcount_dec_and_test(r)) { + mutex_unlock(lock); + return false; + } + + return true; +} + +/* + * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to + * decrement when saturated at UINT_MAX. + * + * Provides release memory ordering, such that prior loads and stores are done + * before, and provides a control dependency such that free() must come after. + * See the comment on top. 
+ */ +static inline __refcount_check +bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) +{ + if (refcount_dec_not_one(r)) + return false; + + spin_lock(lock); + if (!refcount_dec_and_test(r)) { + spin_unlock(lock); + return false; + } + + return true; +} + +#endif /* _LINUX_REFCOUNT_H */ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d0b5ca5d4e080346e8a657c6c6aee540e47776e7..6c1cbbedc79c0a17fb17c8a757f2f6f64beb7fb1 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -224,6 +224,7 @@ static inline void inode_attach_wb(struct inode *inode, struct page *page) static inline void inode_detach_wb(struct inode *inode) { if (inode->i_wb) { + WARN_ON_ONCE(!(inode->i_state & I_CLEAR)); wb_put(inode->i_wb); inode->i_wb = NULL; } diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index d3ab3f57aa9f9adc61a8cbc0ef5954f2808902dc..b6c14b1ebdaf61fdbb24cf31c77881584403efdd 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -717,6 +717,7 @@ struct usb_interface_assoc_descriptor { __u8 iFunction; } __attribute__ ((packed)); +#define USB_DT_INTERFACE_ASSOCIATION_SIZE 8 /*-------------------------------------------------------------------------*/ diff --git a/include/uapi/media/msm_camera.h b/include/uapi/media/msm_camera.h index 39e6927d9b7e4e2659d3b93ded45ff0883d967a7..fd0937ffb1e5b477e7c0994da578104bc738aa5e 100644 --- a/include/uapi/media/msm_camera.h +++ b/include/uapi/media/msm_camera.h @@ -1,4 +1,5 @@ -/* Copyright (c) 2009-2012, 2014-2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2009-2012, 2014-2016, 2018 The Linux Foundation. + * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1386,6 +1387,7 @@ struct msm_camera_csiphy_params { uint16_t lane_mask; uint8_t combo_mode; uint8_t csid_core; + uint64_t data_rate; }; struct msm_camera_csi2_params { diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h index 08605aca474d5adc4cf70cb0a40050fdba50f2d9..ac454ca9a7fca8de7695209eb7634b77a9fabe33 100644 --- a/include/uapi/media/msm_camsensor_sdk.h +++ b/include/uapi/media/msm_camsensor_sdk.h @@ -367,6 +367,7 @@ struct msm_camera_csiphy_params { unsigned char csid_core; unsigned int csiphy_clk; unsigned char csi_3phase; + uint64_t data_rate; }; struct msm_camera_i2c_seq_reg_array { diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c index 70a556f0dd0614750fcc37fedfca87bba68d418f..53ffa09bf22e1c106f25f47243af493a35ad4819 100644 --- a/kernel/sched/hmp.c +++ b/kernel/sched/hmp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2880,11 +2880,15 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event, update_task_burst(p, rq, event, runtime); update_cpu_busy_time(p, rq, event, wallclock, irqtime); update_task_pred_demand(rq, p, event); -done: + + if (exiting_task(p)) + goto done; + trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime, rq->cc.cycles, rq->cc.time, p->grp ? 
&rq->grp_time : NULL); +done: p->ravg.mark_start = wallclock; } diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index dd08161c84f5212d38398029ac6a32f0ffbd58b2..091803732d37c76942fbf2f7787dbe7c282e2f91 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -682,6 +682,19 @@ source "lib/Kconfig.kmemcheck" source "lib/Kconfig.kasan" +config DEBUG_REFCOUNT + bool "Verbose refcount checks" + help + Say Y here if you want reference counters (refcount_t and kref) to + generate WARNs on dubious usage. Without this refcount_t will still + be a saturating counter and avoid Use-After-Free by turning it into + a resource leak Denial-Of-Service. + + Use of this option will increase kernel text size but will alert the + admin of potential abuse. + + If in doubt, say "N". + endmenu # "Memory Debugging" config ARCH_HAS_KCOV diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c index d4b1d9a197f53188b7ad3a87a8a9b77d8b8f504b..bfb76a84be7343c769e44286e441a2e0229c64db 100644 --- a/net/ipc_router/ipc_router_core.c +++ b/net/ipc_router/ipc_router_core.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -224,6 +224,25 @@ void msm_ipc_router_set_ws_allowed(bool flag) is_wakeup_source_allowed = flag; } +/** + * is_sensor_port() - Check if the remote port is sensor service or not + * @rport: Pointer to the remote port. + * + * Return: true if the remote port is sensor service else false. + */ +static int is_sensor_port(struct msm_ipc_router_remote_port *rport) +{ + u32 svcid = 0; + + if (rport && rport->server) { + svcid = rport->server->name.service; + if (svcid == 400 || (svcid >= 256 && svcid <= 320)) + return true; + } + + return false; +} + static void init_routing_table(void) { int i; @@ -277,7 +296,7 @@ static uint32_t ipc_router_calc_checksum(union rr_control_msg *msg) */ static void skb_copy_to_log_buf(struct sk_buff_head *skb_head, unsigned int pl_len, unsigned int hdr_offset, - uint64_t *log_buf) + unsigned char *log_buf) { struct sk_buff *temp_skb; unsigned int copied_len = 0, copy_len = 0; @@ -357,7 +376,8 @@ static void ipc_router_log_msg(void *log_ctx, uint32_t xchng_type, else if (hdr->version == IPC_ROUTER_V2) hdr_offset = sizeof(struct rr_header_v2); } - skb_copy_to_log_buf(skb_head, buf_len, hdr_offset, &pl_buf); + skb_copy_to_log_buf(skb_head, buf_len, hdr_offset, + (unsigned char *)&pl_buf); if (port_ptr && rport_ptr && (port_ptr->type == CLIENT_PORT) && (rport_ptr->server != NULL)) { @@ -2730,7 +2750,6 @@ static void do_read_data(struct work_struct *work) struct rr_packet *pkt = NULL; struct msm_ipc_port *port_ptr; struct msm_ipc_router_remote_port *rport_ptr; - int ret; struct msm_ipc_router_xprt_info *xprt_info = container_of(work, @@ -2738,16 +2757,7 @@ static void do_read_data(struct work_struct *work) read_data); while ((pkt = rr_read(xprt_info)) != NULL) { - if (pkt->length < calc_rx_header_size(xprt_info) || - pkt->length > MAX_IPC_PKT_SIZE) { - IPC_RTR_ERR("%s: Invalid pkt length %d\n", - __func__, pkt->length); - goto read_next_pkt1; - } - ret = extract_header(pkt); - if (ret < 0) - goto read_next_pkt1; hdr = &(pkt->hdr); if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) && @@ -4194,6 +4204,7 @@ void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt, { struct msm_ipc_router_xprt_info *xprt_info 
= xprt->priv; struct msm_ipc_router_xprt_work *xprt_work; + struct msm_ipc_router_remote_port *rport_ptr = NULL; struct rr_packet *pkt; int ret; @@ -4246,16 +4257,40 @@ void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt, if (!pkt) return; + if (pkt->length < calc_rx_header_size(xprt_info) || + pkt->length > MAX_IPC_PKT_SIZE) { + IPC_RTR_ERR("%s: Invalid pkt length %d\n", + __func__, pkt->length); + release_pkt(pkt); + return; + } + + ret = extract_header(pkt); + if (ret < 0) { + release_pkt(pkt); + return; + } + pkt->ws_need = false; + + if (pkt->hdr.type == IPC_ROUTER_CTRL_CMD_DATA) + rport_ptr = ipc_router_get_rport_ref(pkt->hdr.src_node_id, + pkt->hdr.src_port_id); + mutex_lock(&xprt_info->rx_lock_lhb2); list_add_tail(&pkt->list, &xprt_info->pkt_list); - if (!xprt_info->dynamic_ws) { - __pm_stay_awake(&xprt_info->ws); - pkt->ws_need = true; - } else { - if (is_wakeup_source_allowed) { + /* check every pkt is from SENSOR services or not and + * avoid holding both edge and port specific wake-up sources + */ + if (!is_sensor_port(rport_ptr)) { + if (!xprt_info->dynamic_ws) { __pm_stay_awake(&xprt_info->ws); pkt->ws_need = true; + } else { + if (is_wakeup_source_allowed) { + __pm_stay_awake(&xprt_info->ws); + pkt->ws_need = true; + } } } mutex_unlock(&xprt_info->rx_lock_lhb2); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index ca1031411aa790fee4eb725e529621470479f57d..7541427537d0102c489cd0bc38fc66459d26ec04 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -500,11 +500,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int err; struct ip_options_data opt_copy; struct raw_frag_vec rfv; + int hdrincl; err = -EMSGSIZE; if (len > 0xFFFF) goto out; + /* hdrincl should be READ_ONCE(inet->hdrincl) + * but READ_ONCE() doesn't work with bit fields + */ + hdrincl = inet->hdrincl; /* * Check the flags. */ @@ -579,7 +584,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) /* Linux does not mangle headers on raw sockets, * so that IP options + IP_HDRINCL is non-sense. */ - if (inet->hdrincl) + if (hdrincl) goto done; if (ipc.opt->opt.srr) { if (!daddr) @@ -601,9 +606,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, - inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, + hdrincl ? IPPROTO_RAW : sk->sk_protocol, inet_sk_flowi_flags(sk) | - (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), + (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), daddr, saddr, 0, 0, sk->sk_uid); if (!saddr && ipc.oif) { @@ -612,7 +617,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto done; } - if (!inet->hdrincl) { + if (!hdrincl) { rfv.msg = msg; rfv.hlen = 0; @@ -637,7 +642,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto do_confirm; back_from_confirm: - if (inet->hdrincl) + if (hdrincl) err = raw_send_hdrinc(sk, &fl4, msg, len, &rt, msg->msg_flags); diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index ede54061c554a355fe341837bee3e49dbd4d8381..939821821fcb86be5acf0ed95f708da7c8e8cafe 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -158,10 +158,13 @@ struct sock *xt_socket_lookup_slow_v4(struct net *net, #endif if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) { - struct udphdr _hdr, *hp; + struct udphdr *hp; + struct tcphdr _hdr; hp = skb_header_pointer(skb, ip_hdrlen(skb), - sizeof(_hdr), &_hdr); + iph->protocol == IPPROTO_UDP ? 
+ sizeof(*hp) : sizeof(_hdr), + &_hdr); if (hp == NULL) return NULL; @@ -360,9 +363,11 @@ struct sock *xt_socket_lookup_slow_v6(struct net *net, } if (tproto == IPPROTO_UDP || tproto == IPPROTO_TCP) { - struct udphdr _hdr, *hp; + struct udphdr *hp; + struct tcphdr _hdr; - hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); + hp = skb_header_pointer(skb, thoff, tproto == IPPROTO_UDP ? + sizeof(*hp) : sizeof(_hdr), &_hdr); if (hp == NULL) return NULL; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2141d047301da60c90f7fe433eb7ee7831397dc7..f7bf6e1cec7ad8e8df323b343f3418091387a0b0 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -223,6 +223,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, struct sock *sk = skb->sk; int ret = -ENOMEM; + if (!net_eq(dev_net(dev), sock_net(sk))) + return 0; + dev_hold(dev); if (is_vmalloc_addr(skb->head)) diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index 2b0f0ac498d2c02c92a62d8e749a103f5b1849c1..5a58f9f380958f886de0f41bb0f34e07fb0502ae 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c @@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, } create_info = (struct hci_create_pipe_resp *)skb->data; + if (create_info->pipe >= NFC_HCI_MAX_PIPES) { + status = NFC_HCI_ANY_E_NOK; + goto exit; + } + /* Save the new created pipe and bind with local gate, * the description for skb->data[3] is destination gate id * but since we received this cmd from host controller, we @@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, } delete_info = (struct hci_delete_pipe_noti *)skb->data; + if (delete_info->pipe >= NFC_HCI_MAX_PIPES) { + status = NFC_HCI_ANY_E_NOK; + goto exit; + } + hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE; hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST; break; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f8d6a0ca9c03375f3ade59f7fabc5e281263bb3f..7e18940a6ca3a0cff6d6fa186274d7b9167d7433 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1694,7 +1694,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) match->flags = flags; INIT_LIST_HEAD(&match->list); spin_lock_init(&match->lock); - atomic_set(&match->sk_ref, 0); + refcount_set(&match->sk_ref, 0); fanout_init_data(match); match->prot_hook.type = po->prot_hook.type; match->prot_hook.dev = po->prot_hook.dev; @@ -1708,10 +1708,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) match->prot_hook.type == po->prot_hook.type && match->prot_hook.dev == po->prot_hook.dev) { err = -ENOSPC; - if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) { + if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) { __dev_remove_pack(&po->prot_hook); po->fanout = match; - atomic_inc(&match->sk_ref); + refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); __fanout_link(sk, po); err = 0; } @@ -1740,7 +1740,7 @@ static struct packet_fanout *fanout_release(struct sock *sk) if (f) { po->fanout = NULL; - if (atomic_dec_and_test(&f->sk_ref)) + if (refcount_dec_and_test(&f->sk_ref)) list_del(&f->list); else f = NULL; diff --git a/net/packet/internal.h b/net/packet/internal.h index 9ee46314b7d76df47d683c252a92ce97398d592b..94d1d405a11667ad95e61e49d4b66bdf31a6488a 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -1,6 +1,8 @@ #ifndef __PACKET_INTERNAL_H__ #define __PACKET_INTERNAL_H__ +#include <linux/refcount.h> + struct packet_mclist { struct packet_mclist *next; 
int ifindex; @@ -86,7 +88,7 @@ struct packet_fanout { struct list_head list; struct sock *arr[PACKET_FANOUT_MAX]; spinlock_t lock; - atomic_t sk_ref; + refcount_t sk_ref; struct packet_type prot_hook ____cacheline_aligned_in_smp; }; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 3ebf3b652d600d75719468018ee8fbfc25114b27..fe5012e8ffc71ba887c8afece6f05fb5f0d83c13 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4423,6 +4423,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) struct socket *sock; int err = 0; + /* Do not peel off from one netns to another one. */ + if (!net_eq(current->nsproxy->net_ns, sock_net(sk))) + return -EINVAL; + if (!asoc) return -EINVAL; diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 31898856682e50a3b3dfd32b9d8ef17ab59e2e3d..d85dcd9e7d3884f29f8acba825f54a92851672f9 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -141,23 +141,22 @@ static int valid_ecryptfs_desc(const char *ecryptfs_desc) */ static int valid_master_desc(const char *new_desc, const char *orig_desc) { - if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) { - if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN) - goto out; - if (orig_desc) - if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN)) - goto out; - } else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) { - if (strlen(new_desc) == KEY_USER_PREFIX_LEN) - goto out; - if (orig_desc) - if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN)) - goto out; - } else - goto out; + int prefix_len; + + if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) + prefix_len = KEY_TRUSTED_PREFIX_LEN; + else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) + prefix_len = KEY_USER_PREFIX_LEN; + else + return -EINVAL; + + if (!new_desc[prefix_len]) + return -EINVAL; + + if (orig_desc && strncmp(new_desc, orig_desc, prefix_len)) + return -EINVAL; + return 0; -out: - return -EINVAL; } /* diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c index 090f468ad48f2900ef45f1690565e32aeea97c3e..ee137e4219a63e8f390290574cf16450d84c4fcf 100644 --- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c +++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2796,7 +2796,7 @@ static void wcd_imped_config(struct snd_soc_codec *codec, 0x20, 0x00); snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_NCP_VCTRL, - 0x07, 0x04); + 0x07, 0x07); } break; } @@ -2865,7 +2865,11 @@ static int msm_anlg_cdc_hphl_dac_event(struct snd_soc_dapm_widget *w, MSM89XX_PMIC_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x00); break; case SND_SOC_DAPM_POST_PMD: - wcd_imped_config(codec, impedl, false); + if (!ret) + wcd_imped_config(codec, impedl, false); + else + dev_dbg(codec->dev, "Failed to get mbhc impedance %d\n", + ret); snd_soc_update_bits(codec, MSM89XX_PMIC_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x00); snd_soc_update_bits(codec, diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c index 29c218013a07b1ba96faf262e115ed9e7dda40d4..b682c37a15373930e75b27b5cbf883e300861d74 100644 --- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c +++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -31,6 +31,7 @@ #define WCD_PROCFS_ENTRY_MAX_LEN 16 #define WCD_934X_RAMDUMP_START_ADDR 0x20100000 #define WCD_934X_RAMDUMP_SIZE ((1024 * 1024) - 128) +#define WCD_DSP_CNTL_MAX_COUNT 2 #define WCD_CNTL_MUTEX_LOCK(codec, lock) \ { \ @@ -909,11 +910,11 @@ static ssize_t wcd_miscdev_write(struct file *filep, const char __user *ubuf, { struct wcd_dsp_cntl *cntl = container_of(filep->private_data, struct wcd_dsp_cntl, miscdev); - char val[count]; + char val[WCD_DSP_CNTL_MAX_COUNT]; bool vote; int ret = 0; - if (count == 0 || count > 2) { + if (count == 0 || count > WCD_DSP_CNTL_MAX_COUNT) { pr_err("%s: Invalid count = %zd\n", __func__, count); ret = -EINVAL; goto done; diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c index 580ad4fa2bad2271c34981a6434ea79c1c83c42e..4416dc32ceabe3edd91a04a4c00515a1876e777c 100644 --- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -3714,6 +3714,7 @@ static int msm_compr_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol, struct msm_compr_audio *prtd; int ret = 0; struct msm_adsp_event_data *event_data = NULL; + uint64_t actual_payload_len = 0; if (fe_id >= MSM_FRONTEND_DAI_MAX) { pr_err("%s Received invalid fe_id %lu\n", @@ -3751,6 +3752,16 @@ static int msm_compr_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol, goto done; } + actual_payload_len = sizeof(struct msm_adsp_event_data) + + event_data->payload_len; + if (actual_payload_len >= U32_MAX) { + pr_err("%s payload length 0x%X exceeds limit", + __func__, event_data->payload_len); + ret = -EINVAL; + goto done; + } + + if ((sizeof(struct msm_adsp_event_data) + event_data->payload_len) >= sizeof(ucontrol->value.bytes.data)) { pr_err("%s param length=%d exceeds limit", diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c index 7e022619c0979c07ec8b018e87b4ff1202171e97..08aca2e9da41fb5c83957c36a4f8ec872159c8af 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -52,6 +52,7 @@ static struct audio_locks the_locks; static const DECLARE_TLV_DB_LINEAR(msm_pcm_vol_gain, 0, PCM_MASTER_VOL_MAX_STEPS); + struct snd_msm { struct snd_card *card; struct snd_pcm *pcm; @@ -1084,6 +1085,7 @@ static int msm_pcm_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol, struct msm_audio *prtd; int ret = 0; struct msm_adsp_event_data *event_data = NULL; + uint64_t actual_payload_len = 0; if (!pdata) { pr_err("%s pdata is NULL\n", __func__); @@ -1120,6 +1122,15 @@ static int msm_pcm_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol, goto done; } + actual_payload_len = sizeof(struct msm_adsp_event_data) + + event_data->payload_len; + if (actual_payload_len >= U32_MAX) { + pr_err("%s payload length 0x%X exceeds limit", + __func__, event_data->payload_len); + ret = -EINVAL; + goto done; + } + if ((sizeof(struct msm_adsp_event_data) + event_data->payload_len) >= sizeof(ucontrol->value.bytes.data)) { pr_err("%s param length=%d exceeds limit", diff --git a/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c index 18cac342405472b540e93b8110252e72c4b3eb16..72dd751bb0d847dc42c75f2c9c4b7ffe3b5f4307 100644 --- a/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -645,6 +645,7 @@ static int msm_transcode_stream_cmd_put(struct snd_kcontrol *kcontrol, struct msm_transcode_loopback *prtd; int ret = 0; struct msm_adsp_event_data *event_data = NULL; + uint64_t actual_payload_len = 0; if (fe_id >= MSM_FRONTEND_DAI_MAX) { pr_err("%s Received invalid fe_id %lu\n", @@ -682,6 +683,16 @@ static int msm_transcode_stream_cmd_put(struct snd_kcontrol *kcontrol, goto done; } + actual_payload_len = sizeof(struct msm_adsp_event_data) + + event_data->payload_len; + if (actual_payload_len >= U32_MAX) { + pr_err("%s payload length 0x%X exceeds limit", + __func__, event_data->payload_len); + ret = -EINVAL; + goto done; + } + + if ((sizeof(struct msm_adsp_event_data) + event_data->payload_len) >= sizeof(ucontrol->value.bytes.data)) { pr_err("%s param length=%d exceeds limit", diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c index 2bce557e33338d5133216e238440c942f807fc7e..05a00db2fd777b368a850edf8902f0ae520b18cc 100644 --- a/sound/soc/msm/qdsp6v2/q6asm.c +++ b/sound/soc/msm/qdsp6v2/q6asm.c @@ -46,6 +46,7 @@ #define FALSE 0x00 #define SESSION_MAX 9 #define ASM_MAX_CHANNELS 8 + enum { ASM_TOPOLOGY_CAL = 0, ASM_CUSTOM_TOP_CAL, @@ -285,6 +286,11 @@ static ssize_t audio_output_latency_dbgfs_read(struct file *file, pr_err("%s: out_buffer is null\n", __func__); return 0; } + if (count < OUT_BUFFER_SIZE) { + pr_err("%s: read size %d exceeds buf size %zd\n", __func__, + OUT_BUFFER_SIZE, count); + return 0; + } snprintf(out_buffer, OUT_BUFFER_SIZE, "%ld,%ld,%ld,%ld,%ld,%ld,",\ out_cold_tv.tv_sec, out_cold_tv.tv_usec, out_warm_tv.tv_sec,\ out_warm_tv.tv_usec, out_cont_tv.tv_sec, out_cont_tv.tv_usec); @@ -338,6 +344,11 @@ static ssize_t audio_input_latency_dbgfs_read(struct file *file, pr_err("%s: in_buffer is null\n", __func__); return 0; } + if (count < IN_BUFFER_SIZE) { + pr_err("%s: read size %d exceeds buf size %zd\n", __func__, + IN_BUFFER_SIZE, count); + return 0; + } snprintf(in_buffer, IN_BUFFER_SIZE, "%ld,%ld,",\ in_cont_tv.tv_sec, in_cont_tv.tv_usec); return simple_read_from_buffer(buf, IN_BUFFER_SIZE, ppos, @@ -1158,7 +1169,9 @@ int q6asm_send_stream_cmd(struct audio_client *ac, { char *asm_params = NULL; struct apr_hdr hdr; - int sz, rc; + int rc; + uint32_t sz = 0; + uint64_t actual_sz = 0; if (!data || !ac) { pr_err("%s: %s is NULL\n", __func__, @@ -1175,7 +1188,15 @@ int q6asm_send_stream_cmd(struct audio_client *ac, goto done; } - sz = sizeof(struct apr_hdr) + data->payload_len; + actual_sz = sizeof(struct apr_hdr) + data->payload_len; + if (actual_sz > U32_MAX) { + pr_err("%s: payload size 0x%X exceeds limit\n", + __func__, data->payload_len); + rc = -EINVAL; + goto done; + } + + sz = (uint32_t)actual_sz; asm_params = kzalloc(sz, GFP_KERNEL); if (!asm_params) { rc = -ENOMEM; diff --git a/sound/soc/msm/sdm660-common.c b/sound/soc/msm/sdm660-common.c index 1817b2fe2c72f1f91a6879fd3fac7d03ae5b7e10..5aa4623937b1a2d876cdf0f1b66a33f076ea99ac 100644 --- a/sound/soc/msm/sdm660-common.c +++ b/sound/soc/msm/sdm660-common.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2553,11 +2553,9 @@ void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream) mutex_lock(&mi2s_intf_conf[index].lock); if (--mi2s_intf_conf[index].ref_cnt == 0) { ret = msm_mi2s_set_sclk(substream, false); - if (ret < 0) { + if (ret < 0) pr_err("%s:clock disable failed for MI2S (%d); ret=%d\n", __func__, index, ret); - mi2s_intf_conf[index].ref_cnt++; - } if (mi2s_intf_conf[index].msm_is_ext_mclk) { mi2s_mclk[index].enable = 0; pr_debug("%s: Disabling mclk, clk_freq_in_hz = %u\n", diff --git a/sound/usb/card.c b/sound/usb/card.c index 23ea575f3bcf47a5588be4a8c1d4d52168910d13..d0868a5395e08cf0978c6763f8b5f339f135ae21 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -316,6 +316,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) case UAC_VERSION_1: { void *control_header; struct uac1_ac_header_descriptor *h1; + int rest_bytes; control_header = snd_usb_find_csint_desc(host_iface->extra, host_iface->extralen, NULL, UAC_HEADER); @@ -325,11 +326,30 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) } h1 = control_header; + rest_bytes = (void *)(host_iface->extra + + host_iface->extralen) - control_header; + + /* just to be sure -- this shouldn't hit at all */ + if (rest_bytes <= 0) { + dev_err(&dev->dev, "invalid control header\n"); + return -EINVAL; + } + + if (rest_bytes < sizeof(*h1)) { + dev_err(&dev->dev, "too short v1 buffer descriptor\n"); + return -EINVAL; + } + if (!h1->bInCollection) { dev_info(&dev->dev, "skipping empty audio interface (v1)\n"); return -EINVAL; } + if (rest_bytes < h1->bLength) { + dev_err(&dev->dev, "invalid buffer length (v1)\n"); + return -EINVAL; + } + if (h1->bLength < sizeof(*h1) + h1->bInCollection) { dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n"); return -EINVAL; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 08c6bb2e5826bd9e858658a3498a8d16d783a410..4fab7e7f9a629a11836ce0986bcc851cc461e683 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -2434,6 +2434,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid) static void snd_usb_mixer_free(struct usb_mixer_interface *mixer) { + /* kill pending URBs */ + snd_usb_mixer_disconnect(mixer); + kfree(mixer->id_elems); if (mixer->urb) { kfree(mixer->urb->transfer_buffer); @@ -2860,8 +2863,13 @@ _error: void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer) { - usb_kill_urb(mixer->urb); - usb_kill_urb(mixer->rc_urb); + if (mixer->disconnected) + return; + if (mixer->urb) + usb_kill_urb(mixer->urb); + if (mixer->rc_urb) + usb_kill_urb(mixer->rc_urb); + mixer->disconnected = true; } #ifdef CONFIG_PM diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index 3417ef347e40432482b84de271a3bb98c8724297..096eccb42d3ebb4e14dbfbe3155ea402c4fd369f 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -22,6 +22,8 @@ struct usb_mixer_interface { struct urb *rc_urb; struct usb_ctrlrequest *rc_setup_packet; u8 rc_buffer[6]; + + bool disconnected; }; #define MAX_CHANNELS 16 /* max logical channels */
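
The diag DCI, WMI event, USB configuration/BOS, and UAC header changes above all converge on one defensive idea: a length field read from an untrusted buffer must be validated against the bytes actually present before it is used to copy or advance. A minimal sketch of that pattern, using a hypothetical two-byte type/length record layout that is not taken from any of the files above:

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical record stream: each record is <type:1><len:1><payload:len-2>. */
static int parse_records(const u8 *buf, size_t buflen)
{
	size_t off = 0;

	while (off + 2 <= buflen) {		/* need the type and length bytes */
		u8 len = buf[off + 1];

		if (len < 2 || len > buflen - off)
			return -EINVAL;		/* record would overrun the buffer */

		/* handle record: type is buf[off], payload starts at buf + off + 2 */

		off += len;			/* len >= 2 guarantees forward progress */
	}

	return 0;
}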
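
cdc_parse_cdc_header() above is exported for CDC class drivers to share one hardened descriptor walker. A caller sketch, assuming the declaration lands in <linux/usb/cdc.h> as in mainline; the bind function name is illustrative only:

#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>

/* Hypothetical class-driver bind step; error handling kept minimal. */
static int example_cdc_bind(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_cdc_parsed_header hdr;
	int cnt;

	cnt = cdc_parse_cdc_header(&hdr, intf, alt->extra, alt->extralen);
	if (cnt < 0)
		return cnt;		/* contradictory (duplicate) descriptors */

	if (!hdr.usb_cdc_union_desc)
		return -ENODEV;		/* this hypothetical driver needs a union descriptor */

	/*
	 * hdr.usb_cdc_header_desc, hdr.usb_cdc_ether_desc, etc. may be NULL
	 * and must be checked before use; cnt is the count of parsed descriptors.
	 */
	return 0;
}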
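
The new include/linux/refcount.h is consumed above by net/packet (packet_fanout::sk_ref). A minimal usage sketch of the API it defines; struct foo and the foo_alloc/foo_get/foo_put helpers are illustrative, not part of the patch:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t ref;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->ref, 1);	/* caller holds the first reference */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->ref);			/* WARNs on 0 when CONFIG_DEBUG_REFCOUNT is set */
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->ref))	/* release ordering before the free */
		kfree(f);
}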
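
The raw_sendmsg() change snapshots inet->hdrincl into a local because READ_ONCE() cannot be applied to a bit field; when the flag is a plain integer the same idea can use READ_ONCE() directly, as in the sketch below (struct example_ctx and its field are hypothetical):

#include <linux/compiler.h>

struct example_ctx {
	int hdrincl;				/* plain int, so READ_ONCE() is usable */
};

static int example_uses_snapshot(struct example_ctx *ctx)
{
	int hdrincl = READ_ONCE(ctx->hdrincl);	/* read once, up front */

	/* every later branch tests the local copy, never ctx->hdrincl again */
	return hdrincl ? 1 : 0;
}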