@@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	u32 c = 0;
 
 	rbo->placement.fpfn = 0;
-	rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
+	rbo->placement.lpfn = 0;
 	rbo->placement.placement = rbo->placements;
 	rbo->placement.busy_placement = rbo->placements;
 	if (domain & RADEON_GEM_DOMAIN_VRAM)
@@ -91,7 +91,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 {
 	struct radeon_bo *bo;
 	enum ttm_bo_type type;
-	int page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	unsigned long max_size = 0;
 	int r;
 
 	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -104,6 +105,14 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 	}
 	*bo_ptr = NULL;
 
+	/* maximum bo size is the minimum between visible vram and gtt size */
+	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	if ((page_align << PAGE_SHIFT) >= max_size) {
+		printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
+		       __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
+		return -ENOMEM;
+	}
+
 retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
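
For readers following the arithmetic in the new check, here is a minimal standalone sketch (not part of the patch) of the same computation: round a byte count up to whole pages, cap it at min(visible VRAM, GTT), and convert the page count to MiB the way the printk does. The sizes below are hypothetical example values, not read from real hardware, and the helper names are illustrative only.

	/* Sketch of the size-cap arithmetic used by the patch. */
	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		/* Hypothetical example values. */
		unsigned long visible_vram_size = 256UL << 20;	/* 256 MiB CPU-visible VRAM */
		unsigned long gtt_size = 512UL << 20;		/* 512 MiB GTT */
		unsigned long bytes = 300UL << 20;		/* requested allocation: 300 MiB */

		/* Round up to whole pages, as roundup(..., PAGE_SIZE) >> PAGE_SHIFT does. */
		unsigned long page_align = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
		/* The cap is the smaller of the visible VRAM aperture and the GTT size. */
		unsigned long max_size = min_ul(visible_vram_size, gtt_size);

		if ((page_align << PAGE_SHIFT) >= max_size) {
			/* page_align >> (20 - PAGE_SHIFT) converts a page count to MiB. */
			printf("alloc size %luM bigger than %luMb limit\n",
			       page_align >> (20 - PAGE_SHIFT), max_size >> 20);
			return 1;
		}
		printf("allocation fits\n");
		return 0;
	}

With the values above the request is rejected, since 300 MiB exceeds the 256 MiB cap imposed by the smaller of the two sizes.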