@@ -243,6 +243,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 			nouveau_fence_unref((void *)&prev_fence);
 		}
 
+		if (unlikely(nvbo->validate_mapped)) {
+			ttm_bo_kunmap(&nvbo->kmap);
+			nvbo->validate_mapped = false;
+		}
+
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
 		ttm_bo_unreserve(&nvbo->bo);
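The validate_mapped flag unmapped here is new in this patch: when the kernel has to kmap a buffer itself while patching relocations or jump commands (see the reloc_apply and pre-NV20 submission hunks below), it records that fact so the common teardown path can undo the mapping. A minimal standalone sketch of that pattern, using illustrative types rather than the kernel's struct nouveau_bo / TTM types:

    #include <stdbool.h>
    #include <stddef.h>

    struct buf {
        void *cpu_map;        /* non-NULL once mapped                      */
        bool validate_mapped; /* mapping was created by the submit path    */
    };

    /* Map only when a fixup actually needs CPU access, and remember it. */
    static void patch_word(struct buf *b, void *backing)
    {
        if (!b->cpu_map) {
            b->cpu_map = backing;      /* stands in for ttm_bo_kmap()      */
            b->validate_mapped = true;
        }
        /* ... write the fixed-up word through b->cpu_map ... */
    }

    /* Common teardown: undo only the mappings this path created. */
    static void finish(struct buf *b)
    {
        if (b->validate_mapped) {
            b->cpu_map = NULL;         /* stands in for ttm_bo_kunmap()    */
            b->validate_mapped = false;
        }
    }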
@@ -302,11 +307,14 @@ retry:
 			if (ret == -EAGAIN)
 				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
 			drm_gem_object_unreference(gem);
-			if (ret)
+			if (ret) {
+				NV_ERROR(dev, "fail reserve\n");
 				return ret;
+			}
 			goto retry;
 		}
 
+		b->user_priv = (uint64_t)(unsigned long)nvbo;
 		nvbo->reserved_by = file_priv;
 		nvbo->pbbo_index = i;
 		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -336,8 +344,10 @@ retry:
 			}
 
 			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-			if (ret)
+			if (ret) {
+				NV_ERROR(dev, "fail wait_cpu\n");
 				return ret;
+			}
 			goto retry;
 		}
 	}
@@ -351,6 +361,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 {
 	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
 				(void __force __user *)(uintptr_t)user_pbbo_ptr;
+	struct drm_device *dev = chan->dev;
 	struct nouveau_bo *nvbo;
 	int ret, relocs = 0;
@@ -362,39 +373,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			spin_lock(&nvbo->bo.lock);
 			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
 			spin_unlock(&nvbo->bo.lock);
-			if (unlikely(ret))
+			if (unlikely(ret)) {
+				NV_ERROR(dev, "fail wait other chan\n");
 				return ret;
+			}
 		}
 
 		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail set_domain\n");
 			return ret;
+		}
 
 		nvbo->channel = chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
 				      false, false);
 		nvbo->channel = NULL;
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail ttm_validate\n");
 			return ret;
+		}
 
-		if (nvbo->bo.offset == b->presumed_offset &&
+		if (nvbo->bo.offset == b->presumed.offset &&
 		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
 		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 			continue;
 
 		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 		else
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
-		b->presumed_offset = nvbo->bo.offset;
-		b->presumed_ok = 0;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+		b->presumed.offset = nvbo->bo.offset;
+		b->presumed.valid = 0;
 		relocs++;
 
-		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+				     &b->presumed, sizeof(b->presumed)))
 			return -EFAULT;
 	}
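The presumed_* fields used above are collapsed into a presumed sub-struct on the userspace-visible BO descriptor, so only that fragment has to be copied back after validation instead of the whole entry. As a reference point, the reworked descriptor looks roughly like the sketch below; the exact layout lives in the nouveau_drm.h side of this patch, which is not part of this hunk, so treat the field order as illustrative:

    #include <stdint.h>

    /* Illustrative layout only -- see the matching nouveau_drm.h change. */
    struct drm_nouveau_gem_pushbuf_bo_presumed {
        uint32_t valid;   /* replaces the old presumed_ok flag              */
        uint32_t domain;  /* NOUVEAU_GEM_DOMAIN_VRAM or ..._GART            */
        uint64_t offset;  /* GPU offset the relocations were computed for   */
    };

    struct drm_nouveau_gem_pushbuf_bo {
        uint64_t user_priv;
        uint32_t handle;
        uint32_t read_domains;
        uint32_t write_domains;
        uint32_t valid_domains;
        struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
    };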
@@ -408,6 +426,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			     uint64_t user_buffers, int nr_buffers,
 			     struct validate_op *op, int *apply_relocs)
 {
+	struct drm_device *dev = chan->dev;
 	int ret, relocs = 0;
 
 	INIT_LIST_HEAD(&op->vram_list);
@@ -418,11 +437,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 		return 0;
 
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		NV_ERROR(dev, "validate_init\n");
 		return ret;
+	}
 
 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -430,6 +452,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -437,6 +460,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -465,59 +489,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 }
 
 static int
-nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
-				struct drm_nouveau_gem_pushbuf_bo *bo,
-				unsigned nr_relocs, uint64_t ptr_relocs,
-				unsigned nr_dwords, unsigned first_dword,
-				uint32_t *pushbuf, bool is_iomem)
+nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+				struct drm_nouveau_gem_pushbuf *req,
+				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
 	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
-	struct drm_device *dev = chan->dev;
 	int ret = 0;
 	unsigned i;
 
-	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
 	if (IS_ERR(reloc))
 		return PTR_ERR(reloc);
 
-	for (i = 0; i < nr_relocs; i++) {
+	for (i = 0; i < req->nr_relocs; i++) {
 		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 		struct drm_nouveau_gem_pushbuf_bo *b;
+		struct nouveau_bo *nvbo;
 		uint32_t data;
 
-		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
-		    r->reloc_index >= first_dword + nr_dwords) {
-			NV_ERROR(dev, "Bad relocation %d\n", i);
-			NV_ERROR(dev, "  bo: %d max %d\n", r->bo_index, nr_bo);
-			NV_ERROR(dev, "  id: %d max %d\n", r->reloc_index, nr_dwords);
+		if (unlikely(r->bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
 
 		b = &bo[r->bo_index];
-		if (b->presumed_ok)
+		if (b->presumed.valid)
 			continue;
 
+		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc container bo index invalid\n");
+			ret = -EINVAL;
+			break;
+		}
+		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+		if (unlikely(r->reloc_bo_offset + 4 >
+			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+			NV_ERROR(dev, "reloc outside of bo\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		if (!nvbo->kmap.virtual) {
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+					  &nvbo->kmap);
+			if (ret) {
+				NV_ERROR(dev, "failed kmap for reloc\n");
+				break;
+			}
+			nvbo->validate_mapped = true;
+		}
+
 		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
-			data = b->presumed_offset + r->data;
+			data = b->presumed.offset + r->data;
 		else
 		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
-			data = (b->presumed_offset + r->data) >> 32;
+			data = (b->presumed.offset + r->data) >> 32;
 		else
 			data = r->data;
 
 		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
-			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
 				data |= r->tor;
 			else
 				data |= r->vor;
 		}
 
-		if (is_iomem)
-			iowrite32_native(data, (void __force __iomem *)
-						&pushbuf[r->reloc_index]);
-		else
-			pushbuf[r->reloc_index] = data;
+		spin_lock(&nvbo->bo.lock);
+		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+		spin_unlock(&nvbo->bo.lock);
+		if (ret) {
+			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
+			break;
+		}
+
+		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
 
 	kfree(reloc);
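The value the kernel writes above is what userspace would otherwise have had to patch in itself: the low or high 32 bits of the buffer's (presumed) GPU offset plus a constant, optionally OR'd with a placement-dependent value. A minimal userspace-side sketch of that computation, using simplified stand-in names rather than the real UAPI structs and flag macros:

    #include <stdint.h>

    #define RELOC_LOW   0x1
    #define RELOC_HIGH  0x2
    #define RELOC_OR    0x4
    #define DOMAIN_GART 0x2

    struct reloc {             /* stand-in for drm_nouveau_gem_pushbuf_reloc */
        uint32_t flags;
        uint32_t data;         /* constant added to the offset               */
        uint32_t vor, tor;     /* OR'd in for VRAM / GART placement          */
    };

    static uint32_t reloc_value(uint64_t offset, uint32_t domain,
                                const struct reloc *r)
    {
        uint32_t data;

        if (r->flags & RELOC_LOW)
            data = (uint32_t)(offset + r->data);
        else if (r->flags & RELOC_HIGH)
            data = (uint32_t)((offset + r->data) >> 32);
        else
            data = r->data;

        if (r->flags & RELOC_OR)
            data |= (domain == DOMAIN_GART) ? r->tor : r->vor;

        return data;
    }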
@@ -528,125 +575,50 @@ int
 nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_gem_pushbuf *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+	struct drm_nouveau_gem_pushbuf_push *push;
+	struct drm_nouveau_gem_pushbuf_bo *bo;
 	struct nouveau_channel *chan;
 	struct validate_op op;
-	struct nouveau_fence *fence = 0;
-	uint32_t *pushbuf = NULL;
-	int ret = 0, do_reloc = 0, i;
+	struct nouveau_fence *fence = 0;
+	int i, j, ret = 0, do_reloc = 0;
 
 	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
 	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
 
-	if (req->nr_dwords >= chan->dma.max ||
-	    req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, "  dwords : %d max %d\n", req->nr_dwords,
-			 chan->dma.max - 1);
-		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
-		return -EINVAL;
-	}
-
-	pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
-	if (IS_ERR(pushbuf))
-		return PTR_ERR(pushbuf);
-
-	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo)) {
-		kfree(pushbuf);
-		return PTR_ERR(bo);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-
-	/* Validate buffer list */
-	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
-					   req->nr_buffers, &op, &do_reloc);
-	if (ret)
-		goto out;
-
-	/* Apply any relocations that are required */
-	if (do_reloc) {
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
-						      bo, req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords, 0,
-						      pushbuf, false);
-		if (ret)
-			goto out;
-	}
-
-	/* Emit push buffer to the hw
-	 */
-	ret = RING_SPACE(chan, req->nr_dwords);
-	if (ret)
-		goto out;
-
-	OUT_RINGp(chan, pushbuf, req->nr_dwords);
+	req->vram_available = dev_priv->fb_aper_free;
+	req->gart_available = dev_priv->gart_info.aper_free;
+	if (unlikely(req->nr_push == 0))
+		goto out_next;
 
-	ret = nouveau_fence_new(chan, &fence, true);
-	if (ret) {
-		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
-		WIND_RING(chan);
-		goto out;
+	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
+		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
+			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		return -EINVAL;
 	}
 
-	if (nouveau_gem_pushbuf_sync(chan)) {
-		ret = nouveau_fence_wait(fence, NULL, false, false);
-		if (ret) {
-			for (i = 0; i < req->nr_dwords; i++)
-				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
-			NV_ERROR(dev, "^^ above push buffer is fail :(\n");
-		}
+	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
+		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
+			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		return -EINVAL;
 	}
 
-out:
-	validate_fini(&op, fence);
-	nouveau_fence_unref((void **)&fence);
-	mutex_unlock(&dev->struct_mutex);
-	kfree(pushbuf);
-	kfree(bo);
-	return ret;
-}
-
-int
-nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
-	struct nouveau_channel *chan;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *pbbo;
-	struct validate_op op;
-	struct nouveau_fence *fence = 0;
-	int i, ret = 0, do_reloc = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
-
-	if (unlikely(req->handle == 0))
-		goto out_next;
-
-	if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
+	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
+		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
+			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 		return -EINVAL;
 	}
 
+	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+	if (IS_ERR(push))
+		return PTR_ERR(push);
+
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo))
+	if (IS_ERR(bo)) {
+		kfree(push);
 		return PTR_ERR(bo);
+	}
 
 	mutex_lock(&dev->struct_mutex);
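With this change the old PUSHBUF, PUSHBUF_CALL and PUSHBUF_CALL2 paths collapse into a single ioctl that takes three user arrays: the BO list, the relocation list, and a list of push segments (bo_index/offset/length into already-listed buffers). A rough userspace-side sketch of filling such a request; the struct layouts are paraphrased from the accompanying nouveau_drm.h changes rather than shown in this hunk, so treat the details as illustrative:

    #include <stdint.h>
    #include <string.h>

    /* Illustrative layouts -- the real ones live in nouveau_drm.h. */
    struct push_seg {
        uint32_t bo_index;     /* index into the buffers array  */
        uint32_t pad;
        uint64_t offset;       /* byte offset within that BO    */
        uint64_t length;       /* byte length of the segment    */
    };

    struct pushbuf_req {
        uint32_t channel;
        uint32_t nr_buffers;  uint64_t buffers;  /* userptr to BO descriptors */
        uint32_t nr_relocs;   uint64_t relocs;   /* userptr to relocations    */
        uint32_t nr_push;     uint64_t push;     /* userptr to push segments  */
        uint32_t suffix0, suffix1;
        uint64_t vram_available, gart_available;
    };

    static void fill_request(struct pushbuf_req *req, uint32_t chan_id,
                             uint64_t bo_list, uint32_t nr_bo,
                             uint64_t seg_list, uint32_t nr_seg)
    {
        memset(req, 0, sizeof(*req));
        req->channel    = chan_id;
        req->buffers    = bo_list;
        req->nr_buffers = nr_bo;
        req->push       = seg_list;
        req->nr_push    = nr_seg;
        /* relocs/nr_relocs are only needed when presumed offsets were wrong */
    }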
@@ -658,94 +630,9 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	/* Validate DMA push buffer */
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem) {
-		NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
-		ret = -EINVAL;
-		goto out;
-	}
-	pbbo = nouveau_gem_object(gem);
-
-	if ((req->offset & 3) || req->nr_dwords < 2 ||
-	    (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
-	    (unsigned long)req->nr_dwords >
-	     ((unsigned long)(pbbo->bo.mem.size - req->offset) >> 2)) {
-		NV_ERROR(dev, "pb call misaligned or out of bounds: "
-			      "%d + %d * 4 > %ld\n",
-			 req->offset, req->nr_dwords, pbbo->bo.mem.size);
-		ret = -EINVAL;
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
-			     chan->fence.sequence);
-	if (ret) {
-		NV_ERROR(dev, "resv pb: %d\n", ret);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
-	ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
-	if (ret) {
-		NV_ERROR(dev, "validate pb: %d\n", ret);
-		ttm_bo_unreserve(&pbbo->bo);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	list_add_tail(&pbbo->entry, &op.both_list);
-
-	/* If presumed return address doesn't match, we need to map the
-	 * push buffer and fix it..
-	 */
-	if (dev_priv->card_type < NV_20) {
-		uint32_t retaddy;
-
-		if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
-			ret = nouveau_dma_wait(chan, 0, 4 + NOUVEAU_DMA_SKIPS);
-			if (ret) {
-				NV_ERROR(dev, "jmp_space: %d\n", ret);
-				goto out;
-			}
-		}
-
-		retaddy  = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
-		retaddy |= 0x20000000;
-		if (retaddy != req->suffix0) {
-			req->suffix0 = retaddy;
-			do_reloc = 1;
-		}
-	}
-
 	/* Apply any relocations that are required */
 	if (do_reloc) {
-		void *pbvirt;
-		bool is_iomem;
-
-		ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
-				  &pbbo->kmap);
-		if (ret) {
-			NV_ERROR(dev, "kmap pb: %d\n", ret);
-			goto out;
-		}
-
-		pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
-						      req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords,
-						      req->offset / 4,
-						      pbvirt, is_iomem);
-
-		if (dev_priv->card_type < NV_20) {
-			nouveau_bo_wr32(pbbo,
-					req->offset / 4 + req->nr_dwords - 2,
-					req->suffix0);
-		}
-
-		ttm_bo_kunmap(&pbbo->kmap);
+		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
 		if (ret) {
 			NV_ERROR(dev, "reloc apply: %d\n", ret);
 			goto out;
@@ -753,36 +640,74 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 	}
 
 	if (chan->dma.ib_max) {
-		ret = nouveau_dma_wait(chan, 2, 6);
+		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
 		if (ret) {
 			NV_INFO(dev, "nv50cal_space: %d\n", ret);
 			goto out;
 		}
 
-		nv50_dma_push(chan, pbbo, req->offset, req->nr_dwords);
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+
+			nv50_dma_push(chan, nvbo, push[i].offset,
+				      push[i].length);
+		}
 	} else
 	if (dev_priv->card_type >= NV_20) {
-		ret = RING_SPACE(chan, 2);
+		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
 			NV_ERROR(dev, "cal_space: %d\n", ret);
 			goto out;
 		}
 
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				req->offset) | 2);
-		OUT_RING(chan, 0);
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+
+			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+					push[i].offset) | 2);
+			OUT_RING(chan, 0);
+		}
 	} else {
-		ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 		if (ret) {
 			NV_ERROR(dev, "jmp_space: %d\n", ret);
 			goto out;
 		}
 
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				req->offset) | 0x20000000);
-		OUT_RING(chan, 0);
-
-		/* Space the jumps apart with NOPs. */
-		for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+			uint32_t cmd;
+
+			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+			cmd |= 0x20000000;
+			if (unlikely(cmd != req->suffix0)) {
+				if (!nvbo->kmap.virtual) {
+					ret = ttm_bo_kmap(&nvbo->bo, 0,
+							  nvbo->bo.mem.
+							  num_pages,
+							  &nvbo->kmap);
+					if (ret) {
+						WIND_RING(chan);
+						goto out;
+					}
+					nvbo->validate_mapped = true;
+				}
+
+				nouveau_bo_wr32(nvbo, (push[i].offset +
+						push[i].length - 8) / 4, cmd);
+			}
+
+			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+					push[i].offset) | 0x20000000);
+			OUT_RING(chan, 0);
+			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
 				OUT_RING(chan, 0);
+		}
 	}
 
 	ret = nouveau_fence_new(chan, &fence, true);
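The submission path now walks req->nr_push segments instead of a single buffer: on channels with an indirect buffer ring (chan->dma.ib_max) each segment becomes an nv50_dma_push() entry; on NV20+ each is emitted as a call (GPU address | 2); on older chips each is emitted as a jump (GPU address | 0x20000000) and the kernel patches a return jump into the last two words of the segment whenever the address userspace guessed in req->suffix0 is stale. A small sketch of that last computation, with hypothetical helper names, mirroring the hunk above:

    #include <stdint.h>

    /* Jump command back into the channel's main ring, two words past the
     * current put pointer. */
    static uint32_t jump_cmd(uint32_t pushbuf_base, uint32_t dma_cur)
    {
        return (pushbuf_base + ((dma_cur + 2) << 2)) | 0x20000000;
    }

    /* The return jump occupies the last 8 bytes of the segment; this is
     * the 32-bit word index nouveau_bo_wr32() is given. */
    static uint32_t jump_word_index(uint64_t seg_offset, uint64_t seg_length)
    {
        return (uint32_t)((seg_offset + seg_length - 8) / 4);
    }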
@@ -797,6 +722,7 @@ out:
 	nouveau_fence_unref((void **)&fence);
 	mutex_unlock(&dev->struct_mutex);
 	kfree(bo);
+	kfree(push);
 
 out_next:
 	if (chan->dma.ib_max) {
@@ -815,19 +741,6 @@ out_next:
 	return ret;
 }
 
-int
-nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-
-	req->vram_available = dev_priv->fb_aper_free;
-	req->gart_available = dev_priv->gart_info.aper_free;
-
-	return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
-}
-
 static inline uint32_t
 domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
 {
@@ -841,74 +754,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
 	return flags;
 }
 
-int
-nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
-		return -EINVAL;
-	}
-
-	if (!DRM_SUSER(DRM_CURPROC))
-		return -EPERM;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-	nvbo = nouveau_gem_object(gem);
-
-	ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
-	if (ret)
-		goto out;
-
-	req->offset = nvbo->bo.offset;
-	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-		req->domain = NOUVEAU_GEM_DOMAIN_GART;
-	else
-		req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
-out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
-nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	int ret;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-
-	ret = nouveau_bo_unpin(nouveau_gem_object(gem));
-
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
 int
 nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)