msm: kgsl: Move global memory region to 0x100000000

On a 64bit kernel, a 32bit user application is not
restricted to the 3GB limit of virtual memory. It is
allowed to access the complete 4GB range.

Move the global memory region to 0x100000000, outside
of the 32bit range, on a 64bit kernel to increase the
virtual memory range available to a 32bit application
running on a 64bit kernel. This also moves the secure
memory region to 0xF0000000.

Change-Id: I017ac0c052b4d9466f9f1a66af4a83f0636450cb
Signed-off-by: Deepak Kumar <dkumar@codeaurora.org>
Signed-off-by: Urvashi Agrawal <urvaagra@codeaurora.org>
tirimbino
Deepak Kumar 7 years ago committed by Gerrit - the friendly Code Review server
parent f7672f445f
commit 33be71f8e0
  1. 4
      drivers/gpu/msm/adreno.c
  2. 4
      drivers/gpu/msm/adreno_a5xx.c
  3. 4
      drivers/gpu/msm/adreno_a6xx.c
  4. 43
      drivers/gpu/msm/kgsl_iommu.c
  5. 13
      drivers/gpu/msm/kgsl_iommu.h

@@ -1517,7 +1517,7 @@ static void _set_secvid(struct kgsl_device *device)
adreno_writereg64(adreno_dev,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
KGSL_IOMMU_SECURE_BASE);
KGSL_IOMMU_SECURE_BASE(&device->mmu));
adreno_writereg(adreno_dev,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
KGSL_IOMMU_SECURE_SIZE);
@@ -2106,7 +2106,7 @@ static int adreno_getproperty(struct kgsl_device *device,
* anything to mmap().
*/
shadowprop.gpuaddr =
(unsigned int) device->memstore.gpuaddr;
(unsigned long)device->memstore.gpuaddr;
shadowprop.size = device->memstore.size;
/* GSL needs this to be set, even if it
* appears to be meaningless

@@ -2424,8 +2424,8 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
A5XX_CP_RB_CNTL_DEFAULT);
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
rb->buffer_desc.gpuaddr);
adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
ret = a5xx_microcode_load(adreno_dev);
if (ret)

@@ -1067,8 +1067,8 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev,
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
A6XX_CP_RB_CNTL_DEFAULT);
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
rb->buffer_desc.gpuaddr);
adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
ret = a6xx_microcode_load(adreno_dev);
if (ret)

@@ -38,9 +38,10 @@
#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
#define ADDR_IN_GLOBAL(_a) \
(((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \
((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE)))
#define ADDR_IN_GLOBAL(_mmu, _a) \
(((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)) && \
((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) + \
KGSL_IOMMU_GLOBAL_MEM_SIZE)))
/*
* Flag to set SMMU memory attributes required to
@@ -185,8 +186,8 @@ int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
entry->pagetable = pagetable;
entry->gpuaddr = KGSL_IOMMU_SECURE_BASE + secure_global_size;
entry->gpuaddr = KGSL_IOMMU_SECURE_BASE(&device->mmu) +
secure_global_size;
ret = kgsl_mmu_map(pagetable, entry);
if (ret == 0)
@@ -225,7 +226,8 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
KGSL_IOMMU_GLOBAL_MEM_SIZE))
return;
memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE + global_pt_alloc;
memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + global_pt_alloc;
memdesc->priv |= KGSL_MEMDESC_GLOBAL;
global_pt_alloc += memdesc->size;
@@ -643,7 +645,7 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
/* Set the maximum possible size as an initial value */
nextentry->gpuaddr = (uint64_t) -1;
if (ADDR_IN_GLOBAL(faultaddr)) {
if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
_get_global_entries(faultaddr, preventry, nextentry);
} else if (context) {
private = context->proc_priv;
@@ -1034,13 +1036,13 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
struct kgsl_iommu_pt *pt)
{
if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
pt->compat_va_end = KGSL_IOMMU_SECURE_END;
pt->va_start = KGSL_IOMMU_SECURE_BASE;
pt->va_end = KGSL_IOMMU_SECURE_END;
pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
} else {
pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
pt->compat_va_end = KGSL_IOMMU_SVM_END32;
pt->compat_va_end = KGSL_IOMMU_SECURE_BASE(mmu);
pt->va_start = KGSL_IOMMU_VA_BASE64;
pt->va_end = KGSL_IOMMU_VA_END64;
}
@@ -1049,7 +1051,7 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
pagetable->name != KGSL_MMU_SECURE_PT) {
if ((BITS_PER_LONG == 32) || is_compat_task()) {
pt->svm_start = KGSL_IOMMU_SVM_BASE32;
pt->svm_end = KGSL_IOMMU_SVM_END32;
pt->svm_end = KGSL_IOMMU_SECURE_BASE(mmu);
} else {
pt->svm_start = KGSL_IOMMU_SVM_BASE64;
pt->svm_end = KGSL_IOMMU_SVM_END64;
@@ -1063,19 +1065,19 @@ static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
{
if (mmu->secured) {
if (pagetable->name == KGSL_MMU_SECURE_PT) {
pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
pt->compat_va_end = KGSL_IOMMU_SECURE_END;
pt->va_start = KGSL_IOMMU_SECURE_BASE;
pt->va_end = KGSL_IOMMU_SECURE_END;
pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
} else {
pt->va_start = KGSL_IOMMU_SVM_BASE32;
pt->va_end = KGSL_IOMMU_SECURE_BASE;
pt->va_end = KGSL_IOMMU_SECURE_BASE(mmu);
pt->compat_va_start = pt->va_start;
pt->compat_va_end = pt->va_end;
}
} else {
pt->va_start = KGSL_IOMMU_SVM_BASE32;
pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE;
pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
pt->compat_va_start = pt->va_start;
pt->compat_va_end = pt->va_end;
}
@@ -2385,7 +2387,8 @@ static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
struct rb_node *node;
/* Make sure the requested address doesn't fall in the global range */
if (ADDR_IN_GLOBAL(gpuaddr) || ADDR_IN_GLOBAL(gpuaddr + size))
if (ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr) ||
ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr + size))
return -ENOMEM;
spin_lock(&pagetable->lock);

@@ -24,12 +24,17 @@
* are mapped into all pagetables.
*/
#define KGSL_IOMMU_GLOBAL_MEM_SIZE (20 * SZ_1M)
#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xf8000000
#define KGSL_IOMMU_GLOBAL_MEM_BASE32 0xf8000000
#define KGSL_IOMMU_GLOBAL_MEM_BASE64 TASK_SIZE_32
#define KGSL_IOMMU_GLOBAL_MEM_BASE(__mmu) \
(MMU_FEATURE(__mmu, KGSL_MMU_64BIT) ? \
KGSL_IOMMU_GLOBAL_MEM_BASE64 : KGSL_IOMMU_GLOBAL_MEM_BASE32)
#define KGSL_IOMMU_SECURE_SIZE SZ_256M
#define KGSL_IOMMU_SECURE_END KGSL_IOMMU_GLOBAL_MEM_BASE
#define KGSL_IOMMU_SECURE_BASE \
(KGSL_IOMMU_GLOBAL_MEM_BASE - KGSL_IOMMU_SECURE_SIZE)
#define KGSL_IOMMU_SECURE_END(_mmu) KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)
#define KGSL_IOMMU_SECURE_BASE(_mmu) \
(KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) - KGSL_IOMMU_SECURE_SIZE)
#define KGSL_IOMMU_SVM_BASE32 0x300000
#define KGSL_IOMMU_SVM_END32 (0xC0000000 - SZ_16M)

Loading…
Cancel
Save