@@ -317,292 +317,6 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif
/* -----------------------------------------------------------------------------
 * DMA-BUF
 */

/*
 * Per-attachment state. @dir doubles as a "mapped" flag: it stays DMA_NONE
 * until the buffer is first mapped through this attachment.
 */
struct drm_gem_cma_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};
static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
				     struct dma_buf_attachment *attach)
{
	struct drm_gem_cma_dmabuf_attachment *cma_attach;

	cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
	if (!cma_attach)
		return -ENOMEM;

	cma_attach->dir = DMA_NONE;
	attach->priv = cma_attach;

	return 0;
}
static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
				      struct dma_buf_attachment *attach)
{
	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
	struct sg_table *sgt;

	if (cma_attach == NULL)
		return;

	sgt = &cma_attach->sgt;

	if (cma_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
			     cma_attach->dir);

	sg_free_table(sgt);
	kfree(cma_attach);
	attach->priv = NULL;
}
static struct sg_table *
drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
		       enum dma_data_direction dir)
{
	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
	struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
	struct drm_device *drm = cma_obj->base.dev;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	unsigned int i;
	int nents, ret;

	DRM_DEBUG_PRIME("\n");

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (cma_attach->dir == dir)
		return &cma_attach->sgt;

	/* Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (WARN_ON(cma_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = &cma_attach->sgt;

	ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&drm->struct_mutex);

	/* Copy the exporter's scatterlist entries into the new table. */
	rd = cma_obj->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with iommu.\n");
		sg_free_table(sgt);
		sgt = ERR_PTR(-EIO);
		goto done;
	}

	cma_attach->dir = dir;
	attach->priv = cma_attach;

	DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);

done:
	mutex_unlock(&drm->struct_mutex);
	return sgt;
}
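/*
 * Editorial note: the unmap callback below is intentionally empty. The
 * mapping created in drm_gem_cma_dmabuf_map() is cached in the attachment
 * and only torn down in drm_gem_cma_dmabuf_detach(), so repeated map calls
 * with the same direction are cheap.
 */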
static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
				     struct sg_table *sgt,
				     enum dma_data_direction dir)
{
	/* Nothing to do. */
}
static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
{
	struct drm_gem_cma_object *cma_obj = dmabuf->priv;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/*
	 * drm_gem_cma_dmabuf_release() being called means that the dma-buf
	 * file's f_count has dropped to zero. Drop the reference that was
	 * taken in drm_prime_handle_to_fd().
	 */
	if (cma_obj->base.export_dma_buf == dmabuf) {
		cma_obj->base.export_dma_buf = NULL;

		/*
		 * Drop this GEM object's refcount to release the allocated
		 * buffer and resources.
		 */
		drm_gem_object_unreference_unlocked(&cma_obj->base);
	}
}
static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
					    unsigned long page_num)
{
	/* TODO */
	return NULL;
}

static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf,
					     unsigned long page_num, void *addr)
{
	/* TODO */
}

static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf,
				     unsigned long page_num)
{
	/* TODO */
	return NULL;
}

static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf,
				      unsigned long page_num, void *addr)
{
	/* TODO */
}
static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
				   struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
	struct drm_gem_object *gem_obj = &cma_obj->base;
	struct drm_device *dev = gem_obj->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	mutex_unlock(&dev->struct_mutex);
	if (ret < 0)
		return ret;

	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf)
{
	struct drm_gem_cma_object *cma_obj = dmabuf->priv;

	return cma_obj->vaddr;
}
static struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
	.attach		= drm_gem_cma_dmabuf_attach,
	.detach		= drm_gem_cma_dmabuf_detach,
	.map_dma_buf	= drm_gem_cma_dmabuf_map,
	.unmap_dma_buf	= drm_gem_cma_dmabuf_unmap,
	.kmap		= drm_gem_cma_dmabuf_kmap,
	.kmap_atomic	= drm_gem_cma_dmabuf_kmap_atomic,
	.kunmap		= drm_gem_cma_dmabuf_kunmap,
	.kunmap_atomic	= drm_gem_cma_dmabuf_kunmap_atomic,
	.mmap		= drm_gem_cma_dmabuf_mmap,
	.vmap		= drm_gem_cma_dmabuf_vmap,
	.release	= drm_gem_cma_dmabuf_release,
};
struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm,
					  struct drm_gem_object *obj,
					  int flags)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
			      cma_obj->base.size, flags);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export);
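/*
 * Illustrative sketch, not part of this file: a CMA-based driver would
 * typically plug the export and import helpers into its struct drm_driver.
 * "foo_driver" below is a hypothetical name used only for this example.
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
 *		.gem_prime_export	= drm_gem_cma_dmabuf_export,
 *		.gem_prime_import	= drm_gem_cma_dmabuf_import,
 *		...
 *	};
 */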
struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm,
						 struct dma_buf *dma_buf)
{
	struct drm_gem_cma_object *cma_obj;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* Is this one of our own objects? */
	if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) {
		struct drm_gem_object *obj;

		cma_obj = dma_buf->priv;
		obj = &cma_obj->base;

		/* Is it from our device? */
		if (obj->dev == drm) {
			/*
			 * Importing a dma-buf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dma-buf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(drm, dma_buf->size);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	/* Attach to the buffer and map it. Make sure the mapping is contiguous
	 * on the device memory bus, as that's all we support.
	 */
	attach = dma_buf_attach(dma_buf, drm->dev);
	if (IS_ERR(attach)) {
		ret = -EINVAL;
		goto error_gem_free;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
		goto error_buf_detach;
	}

	if (sgt->nents != 1) {
		ret = -EINVAL;
		goto error_buf_unmap;
	}

	cma_obj->base.import_attach = attach;
	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr,
			dma_buf->size);

	return &cma_obj->base;

error_buf_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
error_buf_detach:
	dma_buf_detach(dma_buf, attach);
error_gem_free:
	drm_gem_cma_free_object(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import);
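/*
 * For reference, userspace reaches the import path above through the PRIME
 * ioctls. A minimal sketch, assuming "fd" holds a valid dma-buf file
 * descriptor and "drm_fd" an open DRM device node:
 *
 *	struct drm_prime_handle args = {
 *		.fd = fd,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *
 * On success args.handle names the imported GEM object; in the kernel,
 * drm_gem_prime_fd_to_handle() ends up calling the driver's
 * ->gem_prime_import() hook, i.e. drm_gem_cma_dmabuf_import() here.
 */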
/* low-level interface prime helpers */

struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{