Commit 594609dd authored by Shubhraprakash Das, committed by Iliyan Malchev

msm: kgsl: Check for mmu pagefault before recovery


Check whether there is a pagefault before running recovery.
If recovery runs before the bottom half of the pagefault handler
then there could be a pending pagefault at the end of recovery
that can stall the IOMMU. With the IOMMU stalled, the GPU would
only read back zeroes even after recovery.

CRs-Fixed: 642562
Change-Id: I78fb225b2ee57e87ac6ebd1f2c9bca18aa81d942
Signed-off-by: Shubhraprakash Das <sadas@codeaurora.org>
parent 7a6b0d74
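In outline, the patch makes the recovery path poll the IOMMU fault status register (FSR) and invoke the pagefault handler itself when the IOMMU driver's bottom half has not run yet, so recovery never completes with the IOMMU still stalled. A minimal C sketch of that ordering, using hypothetical stand-ins (mmu_fault, read_fsr(), handle_fault(), run_recovery()) rather than the real KGSL code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int mmu_fault;		/* stands in for mmu->fault */
static bool fsr_set = true;		/* pretend the FSR reports a fault */

static bool read_fsr(void) { return fsr_set; }

static void handle_fault(void)
{
	/* Bottom-half work: record the fault, then clear and resume. */
	atomic_store(&mmu_fault, 1);
	fsr_set = false;
	printf("pagefault handled before recovery\n");
}

static void run_recovery(void)
{
	printf("recovery runs with the fault accounted for\n");
}

int main(void)
{
	/*
	 * The fix in one line: before recovery, check the FSR and run the
	 * fault handler if the bottom half has not executed yet; otherwise
	 * the IOMMU can still be stalled when recovery finishes.
	 */
	if (!atomic_load(&mmu_fault) && read_fsr())
		handle_fault();
	run_recovery();
	return 0;
}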
@@ -965,6 +965,8 @@ static int dispatcher_do_fault(struct kgsl_device *device)
 		/* Skip the PM dump for a timeout because it confuses people */
 		set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
 	}
+	/* Set pagefault if it occurred */
+	kgsl_mmu_set_pagefault(&device->mmu);
 
 	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &base);
@@ -42,6 +42,7 @@ static struct kgsl_iommu_register_list kgsl_iommuv0_reg[KGSL_IOMMU_REG_MAX] = {
 	{ 0x10, 1 },			/* TTBR0 */
 	{ 0x14, 1 },			/* TTBR1 */
 	{ 0x20, 1 },			/* FSR */
+	{ 0x28, 1 },			/* FAR */
 	{ 0x800, 1 },			/* TLBIALL */
 	{ 0x820, 1 },			/* RESUME */
 	{ 0x03C, 1 },			/* TLBLKCR */
@@ -59,6 +60,7 @@ static struct kgsl_iommu_register_list kgsl_iommuv1_reg[KGSL_IOMMU_REG_MAX] = {
 	{ 0x20, 1 },			/* TTBR0 */
 	{ 0x28, 1 },			/* TTBR1 */
 	{ 0x58, 1 },			/* FSR */
+	{ 0x60, 1 },			/* FAR_0 */
 	{ 0x618, 1 },			/* TLBIALL */
 	{ 0x008, 1 },			/* RESUME */
 	{ 0, 0 },			/* TLBLKCR not in V1 */
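These per-version tables map the enum kgsl_iommu_reg_map indices (extended in kgsl_iommu.h further below) to register offsets, which is what lets the CTX_REG macros address the new FAR entry. A reduced sketch of that lookup scheme, with simplified types and an assumed CTX_SHIFT value rather than the driver's real macros:

#include <stdint.h>

struct reg_entry {
	unsigned int reg_offset;
	unsigned int ctx_reg;	/* 1 if the register is per-context */
};

enum reg_map { REG_TTBR0, REG_TTBR1, REG_FSR, REG_FAR, REG_MAX };

/* IOMMU v0 layout, matching the first table in the diff (truncated). */
static const struct reg_entry v0_regs[REG_MAX] = {
	[REG_TTBR0] = { 0x10, 1 },
	[REG_TTBR1] = { 0x14, 1 },
	[REG_FSR]   = { 0x20, 1 },
	[REG_FAR]   = { 0x28, 1 },
};

/* Compute a context register address the way the CTX macros do:
 * base + table offset + (ctx << CTX_SHIFT). The shift of 12 (one 4K
 * page per context bank) is an assumption for this sketch. */
static inline uintptr_t ctx_reg_addr(uintptr_t base, int ctx, enum reg_map r)
{
	const unsigned int CTX_SHIFT = 12;

	return base + v0_regs[r].reg_offset + ((uintptr_t)ctx << CTX_SHIFT);
}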
@@ -339,11 +341,13 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	device = mmu->device;
 	adreno_dev = ADRENO_DEVICE(device);
-	if (atomic_read(&mmu->fault)) {
-		if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
-			ret = -EBUSY;
+	/*
+	 * If mmu fault not set then set it and continue else
+	 * exit this function since another thread has already set
+	 * it and will execute rest of this function for the fault.
+	 */
+	if (1 == atomic_cmpxchg(&mmu->fault, 0, 1))
 		goto done;
-	}
 
 	iommu_dev = get_iommu_device(iommu_unit, dev);
 	if (!iommu_dev) {
@@ -353,6 +357,16 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	}
 	iommu = mmu->priv;
+	fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+			iommu_dev->ctx_id, FSR);
+	/*
+	 * If fsr is not set then it means that we cleared the fault while the
+	 * bottom half called from IOMMU driver is running
+	 */
+	if (!fsr) {
+		atomic_set(&mmu->fault, 0);
+		goto done;
+	}
 
 	/*
 	 * set the fault bits and stuff before any printks so that if fault
 	 * handler runs then it will know it's dealing with a pagefault
@@ -375,7 +389,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 		context = NULL;
 	}
 
-	atomic_set(&mmu->fault, 1);
 	iommu_dev->fault = 1;
 
 	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) {
@@ -385,11 +398,9 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 		adreno_dispatcher_schedule(device);
 	}
 
-	ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+	ptbase = KGSL_IOMMU_GET_CTX_REG_LL(iommu, iommu_unit,
 			iommu_dev->ctx_id, TTBR0);
-	fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
-			iommu_dev->ctx_id, FSR);
 	fsynr0 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
 			iommu_dev->ctx_id, FSYNR0);
 	fsynr1 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
@@ -1928,7 +1939,7 @@ kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
 		return 0;
 	/* Return the current pt base by reading IOMMU pt_base register */
 	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
-	pt_base = KGSL_IOMMU_GET_CTX_REG(iommu, (&iommu->iommu_units[0]),
+	pt_base = KGSL_IOMMU_GET_CTX_REG_LL(iommu, (&iommu->iommu_units[0]),
 				KGSL_IOMMU_CONTEXT_USER,
 				TTBR0);
 	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
@@ -1998,7 +2009,7 @@ static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
 			}
 			mb();
-			temp = KGSL_IOMMU_GET_CTX_REG(iommu,
+			temp = KGSL_IOMMU_GET_CTX_REG_LL(iommu,
 				(&iommu->iommu_units[i]),
 				KGSL_IOMMU_CONTEXT_USER, TTBR0);
 		}
@@ -2093,6 +2104,101 @@ static int kgsl_iommu_get_num_iommu_units(struct kgsl_mmu *mmu)
 	return iommu->unit_count;
 }
 
+/*
+ * kgsl_iommu_set_pf_policy() - Set the pagefault policy for IOMMU
+ * @mmu: Pointer to mmu structure
+ * @pf_policy: The pagefault policy to set
+ *
+ * Check if the new policy indicated by pf_policy is the same as the
+ * current policy; if so, return, else set the new policy
+ */
+static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
+				unsigned int pf_policy)
+{
+	int i, j;
+	struct kgsl_iommu *iommu = mmu->priv;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(mmu->device);
+	int ret = 0;
+	unsigned int sctlr_val;
+
+	if ((adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) ==
+		(pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE))
+		return ret;
+	if (!msm_soc_version_supports_iommu_v1())
+		return ret;
+
+	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+
+	/* Need to idle device before changing options */
+	ret = mmu->device->ftbl->idle(mmu->device);
+	if (ret) {
+		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+		return ret;
+	}
+
+	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		for (j = 0; j < iommu_unit->dev_count; j++) {
+			sctlr_val = KGSL_IOMMU_GET_CTX_REG(iommu,
+					iommu_unit,
+					iommu_unit->dev[j].ctx_id,
+					SCTLR);
+			if (pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+				sctlr_val &= ~(0x1 <<
+					KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
+			else
+				sctlr_val |= (0x1 <<
+					KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
+			KGSL_IOMMU_SET_CTX_REG(iommu,
+					iommu_unit,
+					iommu_unit->dev[j].ctx_id,
+					SCTLR, sctlr_val);
+		}
+	}
+
+	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+	return ret;
+}
+
+/**
+ * kgsl_iommu_set_pagefault() - Checks if an IOMMU device has faulted
+ * @mmu: MMU pointer of the device
+ *
+ * This function is called to set the pagefault bits for the device so
+ * that recovery can run with the pagefault in consideration
+ */
+static void kgsl_iommu_set_pagefault(struct kgsl_mmu *mmu)
+{
+	int i, j;
+	struct kgsl_iommu *iommu = mmu->priv;
+	unsigned int fsr;
+
+	/* If a fault is already detected then return early */
+	if (atomic_read(&mmu->fault))
+		return;
+
+	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+
+	/* Loop through all IOMMU devices to check for a fault */
+	for (i = 0; i < iommu->unit_count; i++) {
+		for (j = 0; j < iommu->iommu_units[i].dev_count; j++) {
+			fsr = KGSL_IOMMU_GET_CTX_REG(iommu,
+				(&(iommu->iommu_units[i])),
+				iommu->iommu_units[i].dev[j].ctx_id, FSR);
+			if (fsr) {
+				uint64_t far =
+					KGSL_IOMMU_GET_CTX_REG_LL(iommu,
+					(&(iommu->iommu_units[i])),
+					iommu->iommu_units[i].dev[j].ctx_id,
+					FAR);
+				kgsl_iommu_fault_handler(NULL,
+					iommu->iommu_units[i].dev[j].dev, far, 0, NULL);
+				break;
+			}
+		}
+	}
+
+	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+}
+
 struct kgsl_mmu_ops iommu_ops = {
 	.mmu_init = kgsl_iommu_init,
 	.mmu_close = kgsl_iommu_close,
@@ -2118,6 +2224,8 @@ struct kgsl_mmu_ops iommu_ops = {
 	.mmu_cleanup_pt = NULL,
 	.mmu_sync_lock = kgsl_iommu_sync_lock,
 	.mmu_sync_unlock = kgsl_iommu_sync_unlock,
+	.mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
+	.mmu_set_pagefault = kgsl_iommu_set_pagefault
 };
 
 struct kgsl_mmu_pt_ops iommu_pt_ops = {
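The handler above serializes concurrent callers with atomic_cmpxchg, which returns the old value: only the thread that flips mmu->fault from 0 to 1 runs the rest of the function; everyone else sees 1 and exits. The same single-winner election in standalone C11 (a sketch of the pattern, not KGSL code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int fault;

/* Only the caller that flips fault 0 -> 1 owns the pagefault work;
 * every later caller sees the old value 1 and backs off. */
static void fault_handler(int tid)
{
	int expected = 0;

	if (!atomic_compare_exchange_strong(&fault, &expected, 1)) {
		printf("caller %d: fault already owned, exiting\n", tid);
		return;
	}
	printf("caller %d: owns the fault, handling it\n", tid);
}

int main(void)
{
	fault_handler(1);	/* wins the race, handles the fault */
	fault_handler(2);	/* loses, backs off */
	return 0;
}

This is why the patch can call kgsl_iommu_fault_handler() from both the IOMMU driver's bottom half and the recovery path without double-handling a single fault.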
@@ -70,6 +70,7 @@ enum kgsl_iommu_reg_map {
 	KGSL_IOMMU_CTX_TTBR0,
 	KGSL_IOMMU_CTX_TTBR1,
 	KGSL_IOMMU_CTX_FSR,
+	KGSL_IOMMU_CTX_FAR,
 	KGSL_IOMMU_CTX_TLBIALL,
 	KGSL_IOMMU_CTX_RESUME,
 	KGSL_IOMMU_CTX_TLBLKCR,
@@ -111,7 +112,7 @@ enum kgsl_iommu_units {
 			iommu->ctx_offset)
 
 #define KGSL_IOMMU_GET_CTX_REG_LL(iommu, iommu_unit, ctx, REG)		\
-		readl_relaxed(						\
+		readll_relaxed(						\
 		iommu_unit->reg_map.hostptr +				\
 		iommu->iommu_reg_list[KGSL_IOMMU_CTX_##REG].reg_offset +\
 		(ctx << KGSL_IOMMU_CTX_SHIFT) +				\
 		iommu->ctx_offset)
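TTBR0 and FAR can be wider than 32 bits, which is why the _LL accessor switches from readl_relaxed() to readll_relaxed(): a 32-bit read would truncate the value. A generic illustration of the lo/hi composition a 64-bit register read amounts to (an illustrative assumption, not the msm kernel's readll_relaxed implementation):

#include <stdint.h>

/* Compose a 64-bit register value from two 32-bit halves located at
 * addr and addr + 4 (low word first is assumed here, as is typical
 * for little-endian register pairs). */
static inline uint64_t read64_from_pair(const volatile uint32_t *addr)
{
	uint64_t lo = addr[0];
	uint64_t hi = addr[1];

	return lo | (hi << 32);
}

Whether the two halves can tear when read separately is an architecture-level concern that the kernel's accessor, not this sketch, has to address.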
@@ -178,6 +178,8 @@ struct kgsl_mmu_ops {
 	unsigned int (*mmu_sync_unlock)
 			(struct kgsl_mmu *mmu, unsigned int *cmds);
 	int (*mmu_hw_halt_supported)(struct kgsl_mmu *mmu, int iommu_unit_num);
+	int (*mmu_set_pf_policy)(struct kgsl_mmu *mmu, unsigned int pf_policy);
+	void (*mmu_set_pagefault)(struct kgsl_mmu *mmu);
 };
struct kgsl_mmu_pt_ops {
@@ -481,4 +483,19 @@ static inline int kgsl_mmu_sync_unlock(struct kgsl_mmu *mmu,
 	return 0;
 }
 
+static inline int kgsl_mmu_set_pagefault_policy(struct kgsl_mmu *mmu,
+					unsigned int pf_policy)
+{
+	if (mmu->mmu_ops && mmu->mmu_ops->mmu_set_pf_policy)
+		return mmu->mmu_ops->mmu_set_pf_policy(mmu, pf_policy);
+	else
+		return 0;
+}
+
+static inline void kgsl_mmu_set_pagefault(struct kgsl_mmu *mmu)
+{
+	if (mmu->mmu_ops && mmu->mmu_ops->mmu_set_pagefault)
+		return mmu->mmu_ops->mmu_set_pagefault(mmu);
+}
+
 #endif /* __KGSL_MMU_H */