iommu: msm: Rewrite to improve clarity and performance

The scope of this driver's lock usage is extremely wide, leading to
excessively long lock hold times. Additionally, a critical path performs
excessive linked-list traversal and unnecessary dynamic memory
allocation, causing poor performance across the board.

Fix all of this by greatly reducing the scope of the locks used and by
significantly reducing the number of operations performed when
msm_dma_map_sg_attrs() is called. The entire driver's code is overhauled
for better cleanliness and performance.
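
As an illustration, the core of the new map path now looks roughly like
the excerpt below (a simplified, comment-annotated fragment of the
msm_dma_map_sg_attrs() hunk further down, not a standalone function):

	map = msm_iommu_map_lookup(data, dev);	/* short walk of this buffer's own list */
	if (map) {
		struct scatterlist *d = sgl, *s = map->sgl;

		map->refcount++;	/* reuse the cached IOMMU mapping */
		do {			/* copy the saved DMA addresses; no remap, no allocation */
			d->dma_address = s->dma_address;
			d->dma_length = s->dma_length;
		} while ((s = sg_next(s)) && s->dma_length && (d = sg_next(d)));
	}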

Note that ION must be modified to pass a known structure via the private
dma_buf pointer, so that the IOMMU driver can prevent races when
operating on the same buffer concurrently. This is the only way to
eliminate said buffer races without hurting the IOMMU driver's
performance.
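
Concretely, the known structure is a small msm_iommu_data embedded in
struct ion_buffer and exported through exp_info.priv; ION recovers its
buffer with container_of(). A minimal sketch, mirroring the ion.c, ion.h
and msm_dma_iommu_mapping.h hunks below:

	struct msm_iommu_data {
		struct list_head map_list;	/* this buffer's iommu mappings */
		struct mutex lock;		/* serializes map/unmap on this buffer */
	};

	/* ION embeds it in its buffer and hands out a pointer to it: */
	exp_info.priv = &buffer->iommu_data;

	/* ...and recovers the buffer in its dma_buf callbacks: */
	struct ion_buffer *buffer = container_of(dmabuf->priv,
						 struct ion_buffer, iommu_data);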

Some additional members are added to the device struct as well to make
these various performance improvements possible.
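
Specifically, struct device grows a per-device list of mappings plus a
lock protecting it, set up in device_initialize() (see the
include/linux/device.h and drivers/base/core.c hunks below):

	/* added to struct device: */
	struct list_head iommu_map_list;	/* every msm_iommu_map made for this device */
	struct mutex iommu_map_lock;		/* serializes this device's map/unmap paths */

	/* and initialized in device_initialize(): */
	INIT_LIST_HEAD(&dev->iommu_map_list);
	mutex_init(&dev->iommu_map_lock);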

This also removes the manual cache maintenance since ION already handles
it.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Signed-off-by: Ruchit <ruchitmarathe@gmail.com>
parent 26d3c8aa56
commit b95daf2c6c
 arch/arm64/mm/dma-mapping.c            |   3
 drivers/base/core.c                    |   2
 drivers/iommu/msm_dma_iommu_mapping.c  | 538
 drivers/staging/android/ion/ion.c      |  45
 drivers/staging/android/ion/ion.h      |   2
 include/linux/device.h                 |   3
 include/linux/msm_dma_iommu_mapping.h  |  42
 7 files changed

@@ -2206,8 +2206,7 @@ void arm_iommu_detach_device(struct device *dev)
* ION defers dma_unmap calls. Ensure they have all completed prior to
* setting dma_ops to NULL.
*/
if (msm_dma_unmap_all_for_dev(dev))
dev_warn(dev, "IOMMU detach with outstanding mappings\n");
msm_dma_unmap_all_for_dev(dev);
iommu_detach_group(mapping->domain, dev->iommu_group);
dev->archdata.mapping = NULL;

@@ -1440,6 +1440,8 @@ void device_initialize(struct device *dev)
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
dev->links.status = DL_DEV_NO_DRIVER;
INIT_LIST_HEAD(&dev->iommu_map_list);
mutex_init(&dev->iommu_map_lock);
}
EXPORT_SYMBOL_GPL(device_initialize);

@@ -1,481 +1,165 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2020 Sultan Alsawaf <sultan@kerneltoast.com>.
*/
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/dma-buf.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <asm/barrier.h>
#include <linux/msm_dma_iommu_mapping.h>
/**
* struct msm_iommu_map - represents a mapping of an ion buffer to an iommu
* @lnode - list node to exist in the buffer's list of iommu mappings
* @dev - Device this is mapped to. Used as key
* @sgl - The scatterlist for this mapping
* @nents - Number of entries in sgl
* @dir - The direction for the map.
* @meta - Backpointer to the meta this guy belongs to.
* @ref - for reference counting this mapping
* @attrs - dma mapping attributes
* @buf_start_addr - address of start of buffer
*
* Represents a mapping of one dma_buf buffer to a particular device
* and address range. There may exist other mappings of this buffer in
* different devices. All mappings will have the same cacheability and security.
*/
struct msm_iommu_map {
struct list_head lnode;
struct rb_node node;
struct device *dev;
struct msm_iommu_data *data;
struct list_head data_node;
struct list_head dev_node;
struct scatterlist *sgl;
unsigned int nents;
enum dma_data_direction dir;
struct msm_iommu_meta *meta;
struct kref ref;
unsigned long attrs;
dma_addr_t buf_start_addr;
};
struct msm_iommu_meta {
struct rb_node node;
struct list_head iommu_maps;
struct kref ref;
struct mutex lock;
void *buffer;
int nents;
int refcount;
};
static struct rb_root iommu_root;
static DEFINE_MUTEX(msm_iommu_map_mutex);
static void msm_iommu_meta_add(struct msm_iommu_meta *meta)
static struct msm_iommu_map *msm_iommu_map_lookup(struct msm_iommu_data *data,
struct device *dev)
{
struct rb_root *root = &iommu_root;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct msm_iommu_meta *entry;
struct msm_iommu_map *map;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct msm_iommu_meta, node);
if (meta->buffer < entry->buffer)
p = &(*p)->rb_left;
else if (meta->buffer > entry->buffer)
p = &(*p)->rb_right;
else
pr_err("%s: dma_buf %p already exists\n", __func__,
entry->buffer);
}
rb_link_node(&meta->node, parent, p);
rb_insert_color(&meta->node, root);
}
static struct msm_iommu_meta *msm_iommu_meta_lookup(void *buffer)
{
struct rb_root *root = &iommu_root;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct msm_iommu_meta *entry = NULL;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct msm_iommu_meta, node);
if (buffer < entry->buffer)
p = &(*p)->rb_left;
else if (buffer > entry->buffer)
p = &(*p)->rb_right;
else
return entry;
list_for_each_entry(map, &data->map_list, data_node) {
if (map->dev == dev)
return map;
}
return NULL;
}
static void msm_iommu_add(struct msm_iommu_meta *meta,
struct msm_iommu_map *iommu)
static void msm_iommu_map_free(struct msm_iommu_map *map)
{
INIT_LIST_HEAD(&iommu->lnode);
list_add(&iommu->lnode, &meta->iommu_maps);
}
static struct msm_iommu_map *msm_iommu_lookup(struct msm_iommu_meta *meta,
struct device *dev)
{
struct msm_iommu_map *entry;
list_for_each_entry(entry, &meta->iommu_maps, lnode) {
if (entry->dev == dev)
return entry;
}
struct sg_table table = {
.sgl = map->sgl,
.nents = map->nents,
.orig_nents = map->nents
};
return NULL;
dma_unmap_sg_attrs(map->dev, map->sgl, map->nents, map->dir,
map->attrs | DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(&table);
list_del(&map->data_node);
list_del(&map->dev_node);
kfree(map);
}
static struct msm_iommu_meta *msm_iommu_meta_create(struct dma_buf *dma_buf)
static struct scatterlist *clone_sgl(struct scatterlist *sgl, int nents)
{
struct msm_iommu_meta *meta;
meta = kzalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&meta->iommu_maps);
meta->buffer = dma_buf->priv;
kref_init(&meta->ref);
mutex_init(&meta->lock);
msm_iommu_meta_add(meta);
struct scatterlist *d, *s;
struct sg_table table;
return meta;
}
sg_alloc_table(&table, nents, GFP_KERNEL | __GFP_NOFAIL);
for (d = table.sgl, s = sgl;
nents > SG_MAX_SINGLE_ALLOC; nents -= SG_MAX_SINGLE_ALLOC - 1,
d = sg_chain_ptr(&d[SG_MAX_SINGLE_ALLOC - 1]),
s = sg_chain_ptr(&s[SG_MAX_SINGLE_ALLOC - 1]))
memcpy(d, s, (SG_MAX_SINGLE_ALLOC - 1) * sizeof(*d));
static void msm_iommu_meta_put(struct msm_iommu_meta *meta);
if (nents)
memcpy(d, s, nents * sizeof(*d));
static struct scatterlist *clone_sgl(struct scatterlist *sg, int nents)
{
struct scatterlist *next, *s;
int i;
struct sg_table table;
if (sg_alloc_table(&table, nents, GFP_KERNEL))
return NULL;
next = table.sgl;
for_each_sg(sg, s, nents, i) {
*next = *s;
next = sg_next(next);
}
return table.sgl;
}
static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_buf *dma_buf,
unsigned long attrs)
int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, struct dma_buf *dmabuf,
unsigned long attrs)
{
struct msm_iommu_map *iommu_map;
struct msm_iommu_meta *iommu_meta = NULL;
int ret = 0;
bool extra_meta_ref_taken = false;
int late_unmap = !(attrs & DMA_ATTR_NO_DELAYED_UNMAP);
mutex_lock(&msm_iommu_map_mutex);
iommu_meta = msm_iommu_meta_lookup(dma_buf->priv);
if (!iommu_meta) {
iommu_meta = msm_iommu_meta_create(dma_buf);
if (IS_ERR(iommu_meta)) {
mutex_unlock(&msm_iommu_map_mutex);
ret = PTR_ERR(iommu_meta);
goto out;
}
if (late_unmap) {
kref_get(&iommu_meta->ref);
extra_meta_ref_taken = true;
}
} else {
kref_get(&iommu_meta->ref);
}
mutex_unlock(&msm_iommu_map_mutex);
mutex_lock(&iommu_meta->lock);
iommu_map = msm_iommu_lookup(iommu_meta, dev);
if (!iommu_map) {
iommu_map = kmalloc(sizeof(*iommu_map), GFP_KERNEL);
if (!iommu_map) {
ret = -ENOMEM;
goto out_unlock;
}
ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs);
if (!ret) {
kfree(iommu_map);
goto out_unlock;
}
iommu_map->sgl = clone_sgl(sg, nents);
if (!iommu_map->sgl) {
kfree(iommu_map);
ret = -ENOMEM;
goto out_unlock;
}
iommu_map->nents = nents;
iommu_map->dev = dev;
iommu_map->dir = dir;
iommu_map->attrs = attrs;
iommu_map->buf_start_addr = sg_phys(sg);
kref_init(&iommu_map->ref);
if (late_unmap)
kref_get(&iommu_map->ref);
iommu_map->meta = iommu_meta;
msm_iommu_add(iommu_meta, iommu_map);
struct msm_iommu_data *data = dmabuf->priv;
struct msm_iommu_map *map;
mutex_lock(&dev->iommu_map_lock);
mutex_lock(&data->lock);
map = msm_iommu_map_lookup(data, dev);
if (map) {
struct scatterlist *d = sgl, *s = map->sgl;
map->refcount++;
do {
d->dma_address = s->dma_address;
d->dma_length = s->dma_length;
} while ((s = sg_next(s)) && s->dma_length && (d = sg_next(d)));
if (is_device_dma_coherent(dev))
dmb(ish);
} else {
if (nents == iommu_map->nents &&
dir == iommu_map->dir &&
(attrs & ~DMA_ATTR_SKIP_CPU_SYNC) ==
(iommu_map->attrs & ~DMA_ATTR_SKIP_CPU_SYNC) &&
sg_phys(sg) == iommu_map->buf_start_addr) {
struct scatterlist *sg_tmp = sg;
struct scatterlist *map_sg;
int i;
for_each_sg(iommu_map->sgl, map_sg, nents, i) {
sg_dma_address(sg_tmp) = sg_dma_address(map_sg);
sg_dma_len(sg_tmp) = sg_dma_len(map_sg);
if (sg_dma_len(map_sg) == 0)
break;
sg_tmp = sg_next(sg_tmp);
if (sg_tmp == NULL)
break;
}
kref_get(&iommu_map->ref);
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
dma_sync_sg_for_device(dev, iommu_map->sgl,
iommu_map->nents, iommu_map->dir);
if (is_device_dma_coherent(dev))
/*
* Ensure all outstanding changes for coherent
* buffers are applied to the cache before any
* DMA occurs.
*/
dmb(ish);
ret = nents;
if (dma_map_sg_attrs(dev, sgl, nents, dir, attrs)) {
map = kmalloc(sizeof(*map), GFP_KERNEL | __GFP_NOFAIL);
map->sgl = clone_sgl(sgl, nents);
map->data = data;
map->dev = dev;
map->dir = dir;
map->nents = nents;
map->refcount = 2;
map->attrs = attrs;
list_add(&map->data_node, &data->map_list);
list_add(&map->dev_node, &dev->iommu_map_list);
} else {
bool start_diff = (sg_phys(sg) !=
iommu_map->buf_start_addr);
dev_err(dev, "lazy map request differs:\n"
"req dir:%d, original dir:%d\n"
"req nents:%d, original nents:%d\n"
"req map attrs:%lu, original map attrs:%lu\n"
"req buffer start address differs:%d\n",
dir, iommu_map->dir, nents,
iommu_map->nents, attrs, iommu_map->attrs,
start_diff);
ret = -EINVAL;
nents = 0;
}
}
mutex_unlock(&iommu_meta->lock);
return ret;
out_unlock:
mutex_unlock(&iommu_meta->lock);
out:
if (!IS_ERR(iommu_meta)) {
if (extra_meta_ref_taken)
msm_iommu_meta_put(iommu_meta);
msm_iommu_meta_put(iommu_meta);
}
return ret;
}
/*
* We are not taking a reference to the dma_buf here. It is expected that
* clients hold reference to the dma_buf until they are done with mapping and
* unmapping.
*/
int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, struct dma_buf *dma_buf,
unsigned long attrs)
{
int ret;
if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: dev pointer is invalid\n", __func__);
return -EINVAL;
}
if (IS_ERR_OR_NULL(sg)) {
pr_err("%s: sg table pointer is invalid\n", __func__);
return -EINVAL;
}
if (IS_ERR_OR_NULL(dma_buf)) {
pr_err("%s: dma_buf pointer is invalid\n", __func__);
return -EINVAL;
}
ret = __msm_dma_map_sg(dev, sg, nents, dir, dma_buf, attrs);
return ret;
}
EXPORT_SYMBOL(msm_dma_map_sg_attrs);
static void msm_iommu_meta_destroy(struct kref *kref)
{
struct msm_iommu_meta *meta = container_of(kref, struct msm_iommu_meta,
ref);
if (!list_empty(&meta->iommu_maps)) {
WARN(1, "%s: DMA Buffer %p being destroyed with outstanding iommu mappins!\n",
__func__, meta->buffer);
}
rb_erase(&meta->node, &iommu_root);
kfree(meta);
}
static void msm_iommu_meta_put(struct msm_iommu_meta *meta)
{
/*
* Need to lock here to prevent race against map/unmap
*/
mutex_lock(&msm_iommu_map_mutex);
kref_put(&meta->ref, msm_iommu_meta_destroy);
mutex_unlock(&msm_iommu_map_mutex);
}
static void msm_iommu_map_release(struct kref *kref)
{
struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
ref);
struct sg_table table;
mutex_unlock(&data->lock);
mutex_unlock(&dev->iommu_map_lock);
table.nents = table.orig_nents = map->nents;
table.sgl = map->sgl;
list_del(&map->lnode);
/* Skip an additional cache maintenance on the dma unmap path */
if (!(map->attrs & DMA_ATTR_SKIP_CPU_SYNC))
map->attrs |= DMA_ATTR_SKIP_CPU_SYNC;
dma_unmap_sg_attrs(map->dev, map->sgl, map->nents, map->dir,
map->attrs);
sg_free_table(&table);
kfree(map);
return nents;
}
void msm_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir,
struct dma_buf *dma_buf, unsigned long attrs)
struct dma_buf *dmabuf, unsigned long attrs)
{
struct msm_iommu_map *iommu_map;
struct msm_iommu_meta *meta;
mutex_lock(&msm_iommu_map_mutex);
meta = msm_iommu_meta_lookup(dma_buf->priv);
if (!meta) {
WARN(1, "%s: (%p) was never mapped\n", __func__, dma_buf);
mutex_unlock(&msm_iommu_map_mutex);
goto out;
}
mutex_unlock(&msm_iommu_map_mutex);
mutex_lock(&meta->lock);
iommu_map = msm_iommu_lookup(meta, dev);
if (!iommu_map) {
WARN(1, "%s: (%p) was never mapped for device %p\n", __func__,
dma_buf, dev);
mutex_unlock(&meta->lock);
goto out;
}
if (dir != iommu_map->dir)
WARN(1, "%s: (%pK) dir:%d differs from original dir:%d\n",
__func__, dma_buf, dir, iommu_map->dir);
if (attrs && ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0))
dma_sync_sg_for_cpu(dev, iommu_map->sgl, iommu_map->nents, dir);
iommu_map->attrs = attrs;
kref_put(&iommu_map->ref, msm_iommu_map_release);
mutex_unlock(&meta->lock);
msm_iommu_meta_put(meta);
out:
return;
struct msm_iommu_data *data = dmabuf->priv;
struct msm_iommu_map *map;
mutex_lock(&dev->iommu_map_lock);
mutex_lock(&data->lock);
map = msm_iommu_map_lookup(data, dev);
if (map && !--map->refcount)
msm_iommu_map_free(map);
mutex_unlock(&data->lock);
mutex_unlock(&dev->iommu_map_lock);
}
EXPORT_SYMBOL(msm_dma_unmap_sg_attrs);
int msm_dma_unmap_all_for_dev(struct device *dev)
void msm_dma_unmap_all_for_dev(struct device *dev)
{
int ret = 0;
struct msm_iommu_meta *meta;
struct rb_root *root;
struct rb_node *meta_node;
mutex_lock(&msm_iommu_map_mutex);
root = &iommu_root;
meta_node = rb_first(root);
while (meta_node) {
struct msm_iommu_map *iommu_map;
struct msm_iommu_map *iommu_map_next;
struct msm_iommu_map *map, *tmp;
meta = rb_entry(meta_node, struct msm_iommu_meta, node);
mutex_lock(&meta->lock);
list_for_each_entry_safe(iommu_map, iommu_map_next,
&meta->iommu_maps, lnode)
if (iommu_map->dev == dev)
if (!kref_put(&iommu_map->ref,
msm_iommu_map_release))
ret = -EINVAL;
mutex_lock(&dev->iommu_map_lock);
list_for_each_entry_safe(map, tmp, &dev->iommu_map_list, dev_node) {
struct msm_iommu_data *data = map->data;
mutex_unlock(&meta->lock);
meta_node = rb_next(meta_node);
mutex_lock(&data->lock);
msm_iommu_map_free(map);
mutex_unlock(&data->lock);
}
mutex_unlock(&msm_iommu_map_mutex);
return ret;
mutex_unlock(&dev->iommu_map_lock);
}
/*
* Only to be called by ION code when a buffer is freed
*/
void msm_dma_buf_freed(void *buffer)
void msm_dma_buf_freed(struct msm_iommu_data *data)
{
struct msm_iommu_map *iommu_map;
struct msm_iommu_map *iommu_map_next;
struct msm_iommu_meta *meta;
struct msm_iommu_map *map, *tmp;
int retry = 0;
mutex_lock(&msm_iommu_map_mutex);
meta = msm_iommu_meta_lookup(buffer);
if (!meta) {
/* Already unmapped (assuming no late unmapping) */
mutex_unlock(&msm_iommu_map_mutex);
return;
}
mutex_unlock(&msm_iommu_map_mutex);
mutex_lock(&meta->lock);
do {
mutex_lock(&data->lock);
list_for_each_entry_safe(map, tmp, &data->map_list, data_node) {
struct device *dev = map->dev;
list_for_each_entry_safe(iommu_map, iommu_map_next, &meta->iommu_maps,
lnode)
kref_put(&iommu_map->ref, msm_iommu_map_release);
if (!list_empty(&meta->iommu_maps)) {
WARN(1, "%s: DMA buffer %p destroyed with outstanding iommu mappings\n",
__func__, meta->buffer);
}
INIT_LIST_HEAD(&meta->iommu_maps);
mutex_unlock(&meta->lock);
if (!mutex_trylock(&dev->iommu_map_lock)) {
retry = 1;
break;
}
msm_iommu_meta_put(meta);
msm_iommu_map_free(map);
mutex_unlock(&dev->iommu_map_lock);
}
mutex_unlock(&data->lock);
} while (retry--);
}

@@ -128,6 +128,8 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
if (!buffer)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&buffer->iommu_data.map_list);
mutex_init(&buffer->iommu_data.lock);
buffer->heap = heap;
buffer->flags = flags;
@@ -212,7 +214,7 @@ static void _ion_buffer_destroy(struct ion_buffer *buffer)
struct ion_heap *heap = buffer->heap;
struct ion_device *dev = buffer->dev;
msm_dma_buf_freed(buffer);
msm_dma_buf_freed(&buffer->iommu_data);
mutex_lock(&dev->buffer_lock);
rb_erase(&buffer->node, &dev->buffers);
@@ -305,7 +307,8 @@ static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
{
struct ion_dma_buf_attachment *a;
struct sg_table *table;
struct ion_buffer *buffer = dmabuf->priv;
struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer),
iommu_data);
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a)
@@ -335,7 +338,8 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct ion_dma_buf_attachment *a = attachment->priv;
struct ion_buffer *buffer = dmabuf->priv;
struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer),
iommu_data);
mutex_lock(&buffer->lock);
list_del(&a->list);
@@ -352,7 +356,8 @@ static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
struct ion_dma_buf_attachment *a = attachment->priv;
struct sg_table *table;
int count, map_attrs;
struct ion_buffer *buffer = attachment->dmabuf->priv;
struct ion_buffer *buffer = container_of(attachment->dmabuf->priv,
typeof(*buffer), iommu_data);
table = a->table;
@@ -402,7 +407,8 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
int map_attrs;
struct ion_buffer *buffer = attachment->dmabuf->priv;
struct ion_buffer *buffer = container_of(attachment->dmabuf->priv,
typeof(*buffer), iommu_data);
struct ion_dma_buf_attachment *a = attachment->priv;
map_attrs = attachment->dma_map_attrs;
@@ -491,7 +497,8 @@ static const struct vm_operations_struct ion_vma_ops = {
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer),
iommu_data);
int ret = 0;
if (!buffer->heap->ops->map_user) {
@@ -521,7 +528,8 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer),
iommu_data);
_ion_buffer_destroy(buffer);
kfree(dmabuf->exp_name);
@@ -529,7 +537,8 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer),
iommu_data);
void *vaddr = ERR_PTR(-EINVAL);
if (buffer->heap->ops->map_kernel) {
@@ -546,7 +555,8 @@ static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer),
iommu_data);
if (buffer->heap->ops->map_kernel) {
mutex_lock(&buffer->lock);
@@ -662,7 +672,8 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction,
bool sync_only_mapped)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer),
iommu_data);
struct ion_dma_buf_attachment *a;
int ret = 0;
@@ -759,7 +770,8 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction,
bool sync_only_mapped)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer),
iommu_data);
struct ion_dma_buf_attachment *a;
int ret = 0;
@@ -876,7 +888,8 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
unsigned int offset,
unsigned int len)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer),
iommu_data);
struct ion_dma_buf_attachment *a;
int ret = 0;
@@ -958,7 +971,8 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
unsigned int offset,
unsigned int len)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer),
iommu_data);
struct ion_dma_buf_attachment *a;
int ret = 0;
@@ -1038,7 +1052,8 @@ out:
static int ion_dma_buf_get_flags(struct dma_buf *dmabuf,
unsigned long *flags)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer),
iommu_data);
*flags = buffer->flags;
return 0;
@@ -1118,7 +1133,7 @@ struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask,
exp_info.ops = &dma_buf_ops;
exp_info.size = buffer->size;
exp_info.flags = O_RDWR;
exp_info.priv = buffer;
exp_info.priv = &buffer->iommu_data;
exp_info.exp_name = kasprintf(GFP_KERNEL, "%s-%s-%d-%s", KBUILD_MODNAME,
heap->name, current->tgid, task_comm);

@@ -29,6 +29,7 @@
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/bitops.h>
#include <linux/msm_dma_iommu_mapping.h>
#include "ion_kernel.h"
#include "../uapi/ion.h"
#include "../uapi/msm_ion.h"
@@ -143,6 +144,7 @@ struct ion_buffer {
pid_t pid;
char thread_comm[TASK_COMM_LEN];
pid_t tid;
struct msm_iommu_data iommu_data;
};
void ion_buffer_destroy(struct ion_buffer *buffer);

@@ -971,6 +971,9 @@ struct device {
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
struct list_head iommu_map_list;
struct mutex iommu_map_lock;
};
static inline struct device *kobj_to_dev(struct kobject *kobj)

@@ -19,6 +19,11 @@
#include <linux/dma-mapping.h>
#ifdef CONFIG_QCOM_LAZY_MAPPING
struct msm_iommu_data {
struct list_head map_list;
struct mutex lock;
};
/*
* This function is not taking a reference to the dma_buf here. It is expected
* that clients hold reference to the dma_buf until they are done with mapping
@@ -28,50 +33,17 @@ int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, struct dma_buf *dma_buf,
unsigned long attrs);
/*
* This function takes an extra reference to the dma_buf.
* What this means is that calling msm_dma_unmap_sg will not result in buffer's
* iommu mapping being removed, which means that subsequent calls to lazy map
* will simply re-use the existing iommu mapping.
* The iommu unmapping of the buffer will occur when the ION buffer is
* destroyed.
* Using lazy mapping can provide a performance benefit because subsequent
* mappings are faster.
*
* The limitation of using this API are that all subsequent iommu mappings
* must be the same as the original mapping, ie they must map the same part of
* the buffer with the same dma data direction. Also there can't be multiple
* mappings of different parts of the buffer.
*/
static inline int msm_dma_map_sg_lazy(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
struct dma_buf *dma_buf)
{
return msm_dma_map_sg_attrs(dev, sg, nents, dir, dma_buf, 0);
}
static inline int msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_buf *dma_buf)
{
unsigned long attrs;
attrs = DMA_ATTR_NO_DELAYED_UNMAP;
return msm_dma_map_sg_attrs(dev, sg, nents, dir, dma_buf, attrs);
}
void msm_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir,
struct dma_buf *dma_buf, unsigned long attrs);
int msm_dma_unmap_all_for_dev(struct device *dev);
void msm_dma_unmap_all_for_dev(struct device *dev);
/*
* Below is private function only to be called by framework (ION) and not by
* clients.
*/
void msm_dma_buf_freed(void *buffer);
void msm_dma_buf_freed(struct msm_iommu_data *data);
#else /*CONFIG_QCOM_LAZY_MAPPING*/

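For reference, a hypothetical client of this API (the names and error
handling below are illustrative and not part of the patch) maps a
dma-buf's scatterlist once and relies on the driver to cache and reuse
the IOMMU mapping on subsequent calls:

	int nents;

	nents = msm_dma_map_sg_attrs(dev, table->sgl, table->nents,
				     DMA_BIDIRECTIONAL, dmabuf, 0);
	if (nents <= 0)
		return -ENOMEM;	/* illustrative error handling */

	/* ... device performs DMA ... */

	msm_dma_unmap_sg_attrs(dev, table->sgl, nents, DMA_BIDIRECTIONAL,
			       dmabuf, 0);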