From 373f56a7bb00109169151364d34f5a8fc161362e Mon Sep 17 00:00:00 2001
From: Swathi Sridhar
Date: Wed, 26 Sep 2018 14:23:05 -0700
Subject: [PATCH] iommu: arm-smmu: Preallocate memory for map only on failure

page allocation failure: order:0, mode:0x2088020(GFP_ATOMIC|__GFP_ZERO)
Call trace:
[] dump_backtrace+0x0/0x248
[] show_stack+0x18/0x28
[] dump_stack+0x98/0xc0
[] warn_alloc+0x114/0x134
[] __alloc_pages_nodemask+0x3e8/0xd30
[] alloc_pages_exact+0x4c/0xa4
[] arm_smmu_alloc_pages_exact+0x188/0x1bc
[] io_pgtable_alloc_pages_exact+0x30/0xa0
[] __arm_lpae_alloc_pages+0x40/0x1c8
[] __arm_lpae_map+0x224/0x3b4
[] __arm_lpae_map+0x108/0x3b4
[] arm_lpae_map+0x78/0x9c
[] arm_smmu_map+0x80/0xdc
[] iommu_map+0x118/0x284
[] cam_smmu_alloc_firmware+0x188/0x3c0
[] cam_icp_mgr_hw_open+0x88/0x874
[] cam_icp_mgr_acquire_hw+0x2d4/0xc9c
[] cam_context_acquire_dev_to_hw+0xb0/0x26c
[] __cam_icp_acquire_dev_in_available+0x1c/0xf0
[] cam_context_handle_acquire_dev+0x5c/0x1a8
[] cam_node_handle_ioctl+0x30c/0xdc8
[] cam_subdev_compat_ioctl+0xe4/0x1dc
[] subdev_compat_ioctl32+0x40/0x68
[] v4l2_compat_ioctl32+0x64/0x1780

To avoid order-0 page allocation failures such as the one above during
the SMMU map operation, the existing implementation preallocates the
required memory with GFP_KERNEL before every map call, so that enough
page table memory is available and the atomic allocation during the map
operation succeeds. This is not necessary for every single map call,
since the atomic allocation succeeds most of the time. Hence,
preallocate the necessary memory only when the map operation fails due
to insufficient memory, and retry the map operation with the
preallocated memory. This solution applies only to map calls made from
a non-atomic context.

Change-Id: I417f311c2224eb863d6c99612b678bbb2dd3db58
Signed-off-by: Swathi Sridhar
Signed-off-by: Ruchit
---

(An illustrative sketch of the retry-on-failure pattern, not part of
this patch, follows the diff.)

 drivers/iommu/arm-smmu.c | 59 ++++++++++++++++++++++------------------
 1 file changed, 32 insertions(+), 27 deletions(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 63030db8f7ec..b2f9f35685f1 100755
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -2935,24 +2935,6 @@ static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
 	}
 }
 
-static void arm_smmu_prealloc_memory_sg(struct arm_smmu_domain *smmu_domain,
-					struct scatterlist *sgl, int nents,
-					struct list_head *pool)
-{
-	int i;
-	size_t size = 0;
-	struct scatterlist *sg;
-
-	if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
-			arm_smmu_has_secure_vmid(smmu_domain))
-		return;
-
-	for_each_sg(sgl, sg, nents, i)
-		size += sg->length;
-
-	arm_smmu_prealloc_memory(smmu_domain, size, pool);
-}
-
 static void arm_smmu_release_prealloc_memory(
 	struct arm_smmu_domain *smmu_domain, struct list_head *list)
 {
@@ -3050,19 +3032,29 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	if (arm_smmu_is_slave_side_secure(smmu_domain))
 		return msm_secure_smmu_map(domain, iova, paddr, size, prot);
 
-	arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
 	arm_smmu_secure_domain_lock(smmu_domain);
-
 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
-	list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
 	ret = ops->map(ops, iova, paddr, size, prot);
-	list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 
+	/* If the map call failed due to insufficient memory,
+	 * retry with preallocated memory to see if the map
+	 * call succeeds.
+	 */
+	if (ret == -ENOMEM) {
+		arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
+		spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+		list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
+		ret = ops->map(ops, iova, paddr, size, prot);
+		list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
+		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
+		arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
+
+	}
+
 	arm_smmu_assign_table(smmu_domain);
 	arm_smmu_secure_domain_unlock(smmu_domain);
-	arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
 
 	return ret;
 }
 
@@ -3140,7 +3132,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	if (arm_smmu_is_slave_side_secure(smmu_domain))
 		return msm_secure_smmu_map_sg(domain, iova, sg, nents, prot);
 
-	arm_smmu_prealloc_memory_sg(smmu_domain, sg, nents, &nonsecure_pool);
+
 	arm_smmu_secure_domain_lock(smmu_domain);
 
 	__saved_iova_start = iova;
@@ -3159,12 +3151,26 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 		}
 
 		spin_lock_irqsave(&smmu_domain->cb_lock, flags);
-		list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
 		ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
 				prot, &size);
-		list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
 		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 
+
+		if (ret == -ENOMEM) {
+			arm_smmu_prealloc_memory(smmu_domain,
+					batch_size, &nonsecure_pool);
+			spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+			list_splice_init(&nonsecure_pool,
+					&smmu_domain->nonsecure_pool);
+			ret = ops->map_sg(ops, iova, sg_start,
+					idx_end - idx_start, prot, &size);
+			list_splice_init(&smmu_domain->nonsecure_pool,
+					&nonsecure_pool);
+			spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
+			arm_smmu_release_prealloc_memory(smmu_domain,
+					&nonsecure_pool);
+		}
+
 		/* Returns 0 on error */
 		if (!ret) {
 			size_to_unmap = iova + size - __saved_iova_start;
@@ -3184,7 +3190,6 @@ out:
 		iova = __saved_iova_start;
 	}
 	arm_smmu_secure_domain_unlock(smmu_domain);
-	arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
 
 	return iova - __saved_iova_start;
 }
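
Below is an illustrative, userspace-only sketch of the retry-on-failure
pattern this patch applies. It is not part of the patch and does not use
the kernel or SMMU driver APIs; all names here (try_map, prealloc_pool,
map_with_retry) are hypothetical stand-ins for ops->map(),
arm_smmu_prealloc_memory() and the retry block added above.

/*
 * Sketch only: model "preallocate and retry only when the first attempt
 * fails with -ENOMEM", assuming a hypothetical mapper that fails when no
 * spare page-table memory is available.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096

/* Hypothetical stand-in for ops->map(): fails with -ENOMEM unless a
 * preallocated pool is available.
 */
static int try_map(void *pool)
{
	return pool ? 0 : -ENOMEM;
}

/* Hypothetical stand-in for arm_smmu_prealloc_memory(): allocate the
 * worst-case page-table memory up front, in a sleepable context.
 */
static void *prealloc_pool(size_t npages)
{
	return calloc(npages, PAGE_SZ);
}

static int map_with_retry(size_t npages)
{
	void *pool = NULL;
	int ret;

	ret = try_map(pool);			/* common case: no prealloc */
	if (ret == -ENOMEM) {
		pool = prealloc_pool(npages);	/* only on failure */
		ret = try_map(pool);		/* retry with the pool */
		free(pool);			/* release whatever is left */
	}
	return ret;
}

int main(void)
{
	printf("map_with_retry: %d\n", map_with_retry(4));
	return 0;
}

The point mirrors the commit message: the GFP_KERNEL preallocation cost
is paid only on the slow path, while the common case maps with no extra
allocation at all.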