diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index ddf275c56cc28c8e6ebcdc1549878f1123eb16a1..3f39264dd1d5a70850b1b85b6ae1621c326a45e6 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -965,6 +965,8 @@ static int dispatcher_do_fault(struct kgsl_device *device)
 		/* Skip the PM dump for a timeout because it confuses people */
 		set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
 	}
+	/* Set pagefault if it occurred */
+	kgsl_mmu_set_pagefault(&device->mmu);
 
 	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &base);
 
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 717164fe1fde8ac3c18f8ea15ba57d500f43b251..4c2862bb7919b1aea342f401bd71de90f6580d64 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -42,6 +42,7 @@ static struct kgsl_iommu_register_list kgsl_iommuv0_reg[KGSL_IOMMU_REG_MAX] = {
 	{ 0x10, 1 },			/* TTBR0 */
 	{ 0x14, 1 },			/* TTBR1 */
 	{ 0x20, 1 },			/* FSR */
+	{ 0x28, 1 },			/* FAR */
 	{ 0x800, 1 },			/* TLBIALL */
 	{ 0x820, 1 },			/* RESUME */
 	{ 0x03C, 1 },			/* TLBLKCR */
@@ -59,6 +60,7 @@ static struct kgsl_iommu_register_list kgsl_iommuv1_reg[KGSL_IOMMU_REG_MAX] = {
 	{ 0x20, 1 },			/* TTBR0 */
 	{ 0x28, 1 },			/* TTBR1 */
 	{ 0x58, 1 },			/* FSR */
+	{ 0x60, 1 },			/* FAR_0 */
 	{ 0x618, 1 },			/* TLBIALL */
 	{ 0x008, 1 },			/* RESUME */
 	{ 0, 0 },			/* TLBLKCR not in V1 */
@@ -339,11 +341,13 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	device = mmu->device;
 	adreno_dev = ADRENO_DEVICE(device);
 
-	if (atomic_read(&mmu->fault)) {
-		if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
-			ret = -EBUSY;
+	/*
+	 * If the mmu fault is not yet set then set it and continue, else
+	 * exit this function since another thread has already set it and
+	 * will execute the rest of this function for the fault.
+	 */
+	if (1 == atomic_cmpxchg(&mmu->fault, 0, 1))
 		goto done;
-	}
 
 	iommu_dev = get_iommu_device(iommu_unit, dev);
 	if (!iommu_dev) {
@@ -353,6 +357,16 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	}
 	iommu = mmu->priv;
+	fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+		iommu_dev->ctx_id, FSR);
+	/*
+	 * If the FSR is not set then it means we cleared the fault while the
+	 * bottom half called from the IOMMU driver was running
+	 */
+	if (!fsr) {
+		atomic_set(&mmu->fault, 0);
+		goto done;
+	}
 
 	/*
 	 * set the fault bits and stuff before any printks so that if fault
 	 * handler runs then it will know it's dealing with a pagefault
@@ -375,7 +389,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 		context = NULL;
 	}
 
-	atomic_set(&mmu->fault, 1);
 	iommu_dev->fault = 1;
 
 	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) {
@@ -385,11 +398,9 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 		adreno_dispatcher_schedule(device);
 	}
 
-	ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+	ptbase = KGSL_IOMMU_GET_CTX_REG_LL(iommu, iommu_unit,
 			iommu_dev->ctx_id, TTBR0);
 
-	fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
-		iommu_dev->ctx_id, FSR);
 	fsynr0 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
 		iommu_dev->ctx_id, FSYNR0);
 	fsynr1 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
@@ -1928,7 +1939,7 @@ kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
 		return 0;
 	/* Return the current pt base by reading IOMMU pt_base register */
 	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
-	pt_base = KGSL_IOMMU_GET_CTX_REG(iommu, (&iommu->iommu_units[0]),
+	pt_base = KGSL_IOMMU_GET_CTX_REG_LL(iommu, (&iommu->iommu_units[0]),
 				KGSL_IOMMU_CONTEXT_USER, TTBR0);
 	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
 
@@ -1998,7 +2009,7 @@ static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
 			}
 			mb();
-			temp = KGSL_IOMMU_GET_CTX_REG(iommu,
+			temp = KGSL_IOMMU_GET_CTX_REG_LL(iommu,
 				(&iommu->iommu_units[i]),
 				KGSL_IOMMU_CONTEXT_USER, TTBR0);
 		}
 
@@ -2093,6 +2104,101 @@ static int kgsl_iommu_get_num_iommu_units(struct kgsl_mmu *mmu)
 	return iommu->unit_count;
 }
 
+/*
+ * kgsl_iommu_set_pf_policy() - Set the pagefault policy for IOMMU
+ * @mmu: Pointer to mmu structure
+ * @pf_policy: The pagefault policy to set
+ *
+ * Check if the new policy indicated by pf_policy is the same as the
+ * current policy; if it is, return, else apply the new policy
+ */
+static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
+				unsigned int pf_policy)
+{
+	int i, j;
+	struct kgsl_iommu *iommu = mmu->priv;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(mmu->device);
+	int ret = 0;
+	unsigned int sctlr_val;
+
+	if ((adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) ==
+		(pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE))
+		return ret;
+	if (!msm_soc_version_supports_iommu_v1())
+		return ret;
+
+	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+
+	/* Need to idle device before changing options */
+	ret = mmu->device->ftbl->idle(mmu->device);
+	if (ret) {
+		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+		return ret;
+	}
+
+	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		for (j = 0; j < iommu_unit->dev_count; j++) {
+			sctlr_val = KGSL_IOMMU_GET_CTX_REG(iommu,
+					iommu_unit,
+					iommu_unit->dev[j].ctx_id,
+					SCTLR);
+			if (pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+				sctlr_val &= ~(0x1 <<
+					KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
+			else
+				sctlr_val |= (0x1 <<
+					KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
+			KGSL_IOMMU_SET_CTX_REG(iommu,
+					iommu_unit,
+					iommu_unit->dev[j].ctx_id,
+					SCTLR, sctlr_val);
+		}
+	}
+	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+	return ret;
+}
+
+/**
+ * kgsl_iommu_set_pagefault() - Checks if an IOMMU device has faulted
+ * @mmu: MMU pointer of the device
+ *
+ * This function is called to set the pagefault bits for the device so
+ * that recovery can run with the pagefault taken into consideration
+ */
+static void kgsl_iommu_set_pagefault(struct kgsl_mmu *mmu)
+{
+	int i, j;
+	struct kgsl_iommu *iommu = mmu->priv;
+	unsigned int fsr;
+
+	/* If a fault was already detected then return early */
+	if (atomic_read(&mmu->fault))
+		return;
+
+	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+	/* Loop through all IOMMU devices to check for fault */
+	for (i = 0; i < iommu->unit_count; i++) {
+		for (j = 0; j < iommu->iommu_units[i].dev_count; j++) {
+			fsr = KGSL_IOMMU_GET_CTX_REG(iommu,
+				(&(iommu->iommu_units[i])),
+				iommu->iommu_units[i].dev[j].ctx_id, FSR);
+			if (fsr) {
+				uint64_t far =
+					KGSL_IOMMU_GET_CTX_REG_LL(iommu,
+					(&(iommu->iommu_units[i])),
+					iommu->iommu_units[i].dev[j].ctx_id,
+					FAR);
+				kgsl_iommu_fault_handler(NULL,
+					iommu->iommu_units[i].dev[j].dev, far, 0, NULL);
+				break;
+			}
+		}
+	}
+
+	kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+}
+
 struct kgsl_mmu_ops iommu_ops = {
 	.mmu_init = kgsl_iommu_init,
 	.mmu_close = kgsl_iommu_close,
@@ -2118,6 +2224,8 @@ struct kgsl_mmu_ops iommu_ops = {
 	.mmu_cleanup_pt = NULL,
 	.mmu_sync_lock = kgsl_iommu_sync_lock,
 	.mmu_sync_unlock = kgsl_iommu_sync_unlock,
+	.mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
+	.mmu_set_pagefault = kgsl_iommu_set_pagefault
 };
 
 struct kgsl_mmu_pt_ops iommu_pt_ops = {
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 2ff665a2c97cae14b79c322f5652f6624038b14e..8fd494c6dbda5d766f5c29392ef1c76a89724c76 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -70,6 +70,7 @@ enum kgsl_iommu_reg_map {
 	KGSL_IOMMU_CTX_TTBR0,
 	KGSL_IOMMU_CTX_TTBR1,
 	KGSL_IOMMU_CTX_FSR,
+	KGSL_IOMMU_CTX_FAR,
 	KGSL_IOMMU_CTX_TLBIALL,
 	KGSL_IOMMU_CTX_RESUME,
 	KGSL_IOMMU_CTX_TLBLKCR,
@@ -111,7 +112,7 @@ enum kgsl_iommu_units {
 			iommu->ctx_offset)
 
 #define KGSL_IOMMU_GET_CTX_REG_LL(iommu, iommu_unit, ctx, REG)		\
-		readl_relaxed(						\
+		readll_relaxed(						\
 		iommu_unit->reg_map.hostptr +				\
 		iommu->iommu_reg_list[KGSL_IOMMU_CTX_##REG].reg_offset +\
 		(ctx << KGSL_IOMMU_CTX_SHIFT) +				\
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 5fcc6f4b19089288ad77e06751c0c4e89c1eee24..7a6c7f2af6f476d896fae8d54b078d9b6c6a945a 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -178,6 +178,8 @@ struct kgsl_mmu_ops {
 	unsigned int (*mmu_sync_unlock)
 			(struct kgsl_mmu *mmu, unsigned int *cmds);
 	int (*mmu_hw_halt_supported)(struct kgsl_mmu *mmu, int iommu_unit_num);
+	int (*mmu_set_pf_policy)(struct kgsl_mmu *mmu, unsigned int pf_policy);
+	void (*mmu_set_pagefault)(struct kgsl_mmu *mmu);
 };
 
 struct kgsl_mmu_pt_ops {
@@ -481,4 +483,19 @@ static inline int kgsl_mmu_sync_unlock(struct kgsl_mmu *mmu,
 	return 0;
 }
 
+static inline int kgsl_mmu_set_pagefault_policy(struct kgsl_mmu *mmu,
+						unsigned int pf_policy)
+{
+	if (mmu->mmu_ops && mmu->mmu_ops->mmu_set_pf_policy)
+		return mmu->mmu_ops->mmu_set_pf_policy(mmu, pf_policy);
+	else
+		return 0;
+}
+
+static inline void kgsl_mmu_set_pagefault(struct kgsl_mmu *mmu)
+{
+	if (mmu->mmu_ops && mmu->mmu_ops->mmu_set_pagefault)
+		return mmu->mmu_ops->mmu_set_pagefault(mmu);
+}
+
 #endif /* __KGSL_MMU_H */
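
Reviewer note (not part of the patch): the core of the kgsl_iommu_fault_handler change is the switch from separate atomic_read()/atomic_set() calls to a single atomic_cmpxchg(&mmu->fault, 0, 1), which makes "first thread in wins" and "mark the fault" one indivisible step; kgsl_iommu_set_pagefault can then safely re-enter the handler from the recovery path. The standalone sketch below illustrates only that latch pattern, using C11 <stdatomic.h> as a stand-in for the kernel's atomic_t API; fault_latch and try_take_fault are illustrative names, not symbols from the driver.

/* Illustrative sketch of the cmpxchg fault latch, assuming a C11 host toolchain. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int fault_latch = 0;	/* plays the role of mmu->fault */

/* Returns 1 if this caller won the latch and should run the fault handler body. */
static int try_take_fault(void)
{
	int expected = 0;

	/*
	 * Analogous to "1 == atomic_cmpxchg(&mmu->fault, 0, 1)" in the patch:
	 * only the caller that swaps 0 -> 1 proceeds, every later caller
	 * observes 1 and bails out (the patched handler does "goto done").
	 */
	return atomic_compare_exchange_strong(&fault_latch, &expected, 1);
}

int main(void)
{
	printf("first caller handles fault: %d\n", try_take_fault());	/* prints 1 */
	printf("second caller skips:        %d\n", !try_take_fault());	/* prints 1 */

	/*
	 * Clearing the latch, as the recovery path does with
	 * atomic_set(&mmu->fault, 0), re-arms it for the next fault.
	 */
	atomic_store(&fault_latch, 0);
	printf("re-armed, handled again:    %d\n", try_take_fault());	/* prints 1 */
	return 0;
}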