iommu: arm-smmu: fix check for need to preallocate memory

commit b312b4f0e2f9 ("iommu: arm-smmu: Preallocate memory for map
only on failure") introduced the following two errors:

  1. The return code checked when map_sg fails, to decide whether to
     preallocate, is wrong: the check should be for 0 and not -ENOMEM,
     since map_sg returns 0 on failure. So the preallocation never
     happens when map_sg fails.

  2. map_sg could have mapped some elements of the sglist and then
     failed. With the check fixed, we would call map_sg again for the
     same size, which would lead to a double map of the previously
     mapped elements in the sglist.

Fix this by returning the actual error code from arm_lpae_map_sg()
and checking it against -ENOMEM to decide whether or not to
preallocate. Also, unmap any partial iovas that were mapped
previously.
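
The corrected flow in arm_smmu_map_sg() then looks roughly like the
sketch below. This is a simplified view: locking and batching are
elided, and the exact map_sg() argument list (sg_start, nents) is
assumed from context rather than quoted verbatim from the driver.

	ret = ops->map_sg(ops, iova, sg_start, nents, prot, &size);
	if (ret == -ENOMEM) {
		/* undo any partial mapping before the retry re-maps it */
		if (size)
			arm_smmu_unmap(domain, iova, size);
		arm_smmu_prealloc_memory(smmu_domain, batch_size,
					 &nonsecure_pool);
		ret = ops->map_sg(ops, iova, sg_start, nents, prot, &size);
	}
	/* map_sg() now returns a -ve errno on failure instead of 0 */
	if (ret < 0)
		goto out;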

Change-Id: Ifee7c0bed6b9cf1c35ebb4a03d51a1a80ab0ed58
Signed-off-by: Sudarshan Rajagopalan <sudaraja@codeaurora.org>
Signed-off-by: Ruchit <ruchitmarathe@gmail.com>
 drivers/iommu/arm-smmu.c       | 12
 drivers/iommu/io-pgtable-arm.c |  5
 drivers/iommu/io-pgtable.h     |  4
 3 files changed

--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -3155,8 +3155,13 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 					  prot, &size);
 		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 		if (ret == -ENOMEM) {
+			/* unmap any partially mapped iova */
+			if (size) {
+				arm_smmu_secure_domain_unlock(smmu_domain);
+				arm_smmu_unmap(domain, iova, size);
+				arm_smmu_secure_domain_lock(smmu_domain);
+			}
 			arm_smmu_prealloc_memory(smmu_domain,
 						 batch_size, &nonsecure_pool);
 			spin_lock_irqsave(&smmu_domain->cb_lock, flags);
@@ -3171,8 +3176,8 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 						 &nonsecure_pool);
 		}
-		/* Returns 0 on error */
-		if (!ret) {
+		/* Returns -ve val on error */
+		if (ret < 0) {
 			size_to_unmap = iova + size - __saved_iova_start;
 			goto out;
 		}
@@ -3180,6 +3185,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 		iova += batch_size;
 		idx_start = idx_end;
 		sg_start = sg_end;
+		size = 0;
 	}
 out:

--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -634,7 +634,8 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
 	arm_lpae_iopte prot;
 	struct scatterlist *s;
 	size_t mapped = 0;
-	int i, ret;
+	int i;
+	int ret = -EINVAL;
 	unsigned int min_pagesz;
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 	struct map_state ms;
@@ -705,7 +706,7 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
 out_err:
 	/* Return the size of the partial mapping so that they can be undone */
 	*size = mapped;
-	return 0;
+	return ret;
 }
 
 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,

--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -156,8 +156,8 @@ struct io_pgtable_cfg {
  *
  * @map:		Map a physically contiguous memory region.
  * @map_sg:		Map a scatterlist. Returns the number of bytes mapped,
- *			or 0 on failure. The size parameter contains the size
- *			of the partial mapping in case of failure.
+ *			or -ve val on failure. The size parameter contains the
+ *			size of the partial mapping in case of failure.
  * @unmap:		Unmap a physically contiguous memory region.
 * @iova_to_phys:	Translate iova to physical address.
 * @is_iova_coherent:	Checks coherency of given IOVA. Returns True if coherent
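
For callers of the map_sg op, the contract change means testing for a
negative errno rather than for zero. A minimal hypothetical caller
sketch (only the map_sg and unmap ops come from the table documented
above; the argument names are illustrative):

	size_t size = 0;
	int ret = ops->map_sg(ops, iova, sg, nents, prot, &size);

	if (ret < 0) {
		/* 'size' holds the bytes partially mapped; undo them */
		if (size)
			ops->unmap(ops, iova, size);
		return ret;
	}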
