@@ -219,16 +219,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	ttm_eu_backoff_reservation(&ticket, &list);
 }
 
-static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
-{
-	if (r == -EDEADLK) {
-		r = amdgpu_gpu_reset(adev);
-		if (!r)
-			r = -EAGAIN;
-	}
-	return r;
-}
-
 /*
  * GEM ioctls.
  */
@@ -249,20 +239,17 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
 		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
 		      AMDGPU_GEM_CREATE_SHADOW |
-		      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
-		r = -EINVAL;
-		goto error_unlock;
-	}
+		      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
+		return -EINVAL;
+
 	/* reject invalid gem domains */
 	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
 				 AMDGPU_GEM_DOMAIN_GTT |
 				 AMDGPU_GEM_DOMAIN_VRAM |
 				 AMDGPU_GEM_DOMAIN_GDS |
 				 AMDGPU_GEM_DOMAIN_GWS |
-				 AMDGPU_GEM_DOMAIN_OA)) {
-		r = -EINVAL;
-		goto error_unlock;
-	}
+				 AMDGPU_GEM_DOMAIN_OA))
+		return -EINVAL;
 
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
@@ -274,10 +261,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 			size = size << AMDGPU_GWS_SHIFT;
 		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
 			size = size << AMDGPU_OA_SHIFT;
-		else {
-			r = -EINVAL;
-			goto error_unlock;
-		}
+		else
+			return -EINVAL;
 	}
 	size = roundup(size, PAGE_SIZE);
 
@@ -286,21 +271,17 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 				     args->in.domain_flags,
 				     kernel, &gobj);
 	if (r)
-		goto error_unlock;
+		return r;
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_unreference_unlocked(gobj);
 	if (r)
-		goto error_unlock;
+		return r;
 
 	memset(args, 0, sizeof(*args));
 	args->out.handle = handle;
 	return 0;
-
-error_unlock:
-	r = amdgpu_gem_handle_lockup(adev, r);
-	return r;
 }
 
 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
@@ -334,7 +315,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			     AMDGPU_GEM_DOMAIN_CPU, 0,
 			     0, &gobj);
 	if (r)
-		goto handle_lockup;
+		return r;
 
 	bo = gem_to_amdgpu_bo(gobj);
 	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
@@ -374,7 +355,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_unreference_unlocked(gobj);
 	if (r)
-		goto handle_lockup;
+		return r;
 
 	args->handle = handle;
 	return 0;
@@ -388,9 +369,6 @@ unlock_mmap_sem:
 release_object:
 	drm_gem_object_unreference_unlocked(gobj);
 
-handle_lockup:
-	r = amdgpu_gem_handle_lockup(adev, r);
-
 	return r;
 }
@@ -456,7 +434,6 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
 int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
 	union drm_amdgpu_gem_wait_idle *args = data;
 	struct drm_gem_object *gobj;
 	struct amdgpu_bo *robj;
@@ -484,7 +461,6 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		r = ret;
 
 	drm_gem_object_unreference_unlocked(gobj);
-	r = amdgpu_gem_handle_lockup(adev, r);
 	return r;
 }
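
For context, here is a minimal sketch of the error-handling pattern this patch removes, using a hypothetical do_gem_work() helper in place of the real ioctl bodies above. Before the change, every failure path jumped to a label that routed the error through amdgpu_gem_handle_lockup(), which converted -EDEADLK (a reservation deadlock) into a GPU reset followed by -EAGAIN so userspace would retry; after the change, errors simply propagate to the caller.

/*
 * Sketch only, not the real kernel code: do_gem_work() is a
 * hypothetical stand-in for the body of a GEM ioctl.
 */

/* Before: every error detours through the lockup handler. */
static int example_ioctl_old(struct amdgpu_device *adev)
{
	int r;

	r = do_gem_work();
	if (r)
		goto error_unlock;

	return 0;

error_unlock:
	/* -EDEADLK: reset the GPU, then return -EAGAIN for a retry. */
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

/* After: the wrapper is gone and errors are returned unchanged. */
static int example_ioctl_new(void)
{
	int r;

	r = do_gem_work();
	if (r)
		return r;

	return 0;
}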