@@ -1635,6 +1635,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_ggtt_view view = i915_ggtt_view_normal;
 	pgoff_t page_offset;
 	unsigned long pfn;
 	int ret = 0;
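For orientation: the new view local defaults to the normal (full) GGTT view and is only switched to a partial view in the oversized-object path added below. The stand-in below sketches just the fields this patch relies on; the real struct i915_ggtt_view in the i915 driver carries more view types and parameters, so treat this as an approximation, not the driver's definition.

/* Approximate stand-in for the i915_ggtt_view fields used by this patch. */
enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_PARTIAL,
};

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;
	union {
		struct {
			unsigned long offset;	/* first page of the object to map */
			unsigned int size;	/* length of the view in pages */
		} partial;
	} params;
};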
@@ -1667,8 +1668,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto unlock;
 	}
 
-	/* Now bind it into the GTT if needed */
-	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+	/* Use a partial view if the object is bigger than the aperture. */
+	if (obj->base.size >= dev_priv->gtt.mappable_end) {
+		static const unsigned int chunk_size = 256; /* 1 MiB */
+		memset(&view, 0, sizeof(view));
+		view.type = I915_GGTT_VIEW_PARTIAL;
+		view.params.partial.offset = rounddown(page_offset, chunk_size);
+		view.params.partial.size =
+			min_t(unsigned int,
+			      chunk_size,
+			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
+			      view.params.partial.offset);
+	}
+
+	/* Now pin it into the GTT if needed */
+	ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
 	if (ret)
 		goto unlock;
 
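The chunk arithmetic above rounds the faulting page offset down to a 256-page (1 MiB) boundary and clamps the view to the pages remaining in the mapping. Below is a minimal standalone sketch of that math, with rounddown() and min_t() open-coded and made-up example values; it is only an illustration, not kernel code.

#include <stdio.h>

#define CHUNK_SIZE	256			/* pages, i.e. 1 MiB of 4 KiB pages */

int main(void)
{
	unsigned long vma_pages = 1048576;	/* example: a 4 GiB GTT mmap */
	unsigned long page_offset = 300000;	/* example: faulting page in the VMA */

	/* rounddown(page_offset, chunk_size) */
	unsigned long offset = page_offset - (page_offset % CHUNK_SIZE);
	/* min_t(unsigned int, chunk_size, vma_pages - offset) */
	unsigned int size = CHUNK_SIZE < vma_pages - offset ?
			    CHUNK_SIZE : (unsigned int)(vma_pages - offset);

	printf("partial view: offset %lu pages, size %u pages\n", offset, size);
	return 0;
}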
@@ -1681,30 +1695,50 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto unpin;
 
 	/* Finally, remap it using the new GTT offset */
-	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+	pfn = dev_priv->gtt.mappable_base +
+		i915_gem_obj_ggtt_offset_view(obj, &view);
 	pfn >>= PAGE_SHIFT;
 
-	if (!obj->fault_mappable) {
-		unsigned long size = min_t(unsigned long,
-					   vma->vm_end - vma->vm_start,
-					   obj->base.size);
-		int i;
+	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
+		/* Overwriting existing pages in the partial view does not
+		 * cause us any trouble as TLBs are still valid, because the
+		 * fault is due to userspace losing part of the mapping or
+		 * never having accessed it before (within this partial
+		 * view's range).
+		 */
+		unsigned long base = vma->vm_start +
+				     (view.params.partial.offset << PAGE_SHIFT);
+		unsigned int i;
 
-		for (i = 0; i < size >> PAGE_SHIFT; i++) {
-			ret = vm_insert_pfn(vma,
-					    (unsigned long)vma->vm_start + i * PAGE_SIZE,
-					    pfn + i);
+		for (i = 0; i < view.params.partial.size; i++) {
+			ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
 			if (ret)
 				break;
 		}
 
 		obj->fault_mappable = true;
-	} else
-		ret = vm_insert_pfn(vma,
-				    (unsigned long)vmf->virtual_address,
-				    pfn + page_offset);
+	} else {
+		if (!obj->fault_mappable) {
+			unsigned long size = min_t(unsigned long,
+						   vma->vm_end - vma->vm_start,
+						   obj->base.size);
+			int i;
+
+			for (i = 0; i < size >> PAGE_SHIFT; i++) {
+				ret = vm_insert_pfn(vma,
+						    (unsigned long)vma->vm_start + i * PAGE_SIZE,
+						    pfn + i);
+				if (ret)
+					break;
+			}
+
+			obj->fault_mappable = true;
+		} else
+			ret = vm_insert_pfn(vma,
+					    (unsigned long)vmf->virtual_address,
+					    pfn + page_offset);
+	}
 unpin:
-	i915_gem_object_ggtt_unpin(obj);
+	i915_gem_object_ggtt_unpin_view(obj, &view);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 out:
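Note that in the partial branch the PTEs are inserted starting at the user address of the chunk (vma->vm_start plus the partial offset in bytes), not at vma->vm_start, and the backing pfn comes from wherever the partial view was bound inside the aperture. A small sketch of that address arithmetic follows, using invented example addresses; only the relationships mirror the patch.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long vm_start = 0x7f0000000000UL;	/* example GTT mmap start */
	unsigned long mappable_base = 0xe0000000UL;	/* example aperture base */
	unsigned long ggtt_offset = 0x200000UL;		/* example partial view binding */
	unsigned long partial_offset = 299776;		/* pages, from the sketch above */
	unsigned int i;

	unsigned long pfn = (mappable_base + ggtt_offset) >> PAGE_SHIFT;
	unsigned long base = vm_start + (partial_offset << PAGE_SHIFT);

	for (i = 0; i < 4; i++)		/* the real loop runs partial.size times */
		printf("user va 0x%lx -> pfn 0x%lx\n",
		       base + i * PAGE_SIZE, pfn + i);
	return 0;
}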
@@ -1897,11 +1931,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
 		goto unlock;
 	}
 
-	if (obj->base.size > dev_priv->gtt.mappable_end) {
-		ret = -E2BIG;
-		goto out;
-	}
-
 	if (obj->madv != I915_MADV_WILLNEED) {
 		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
 		ret = -EFAULT;
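The hunk above drops the -E2BIG check from i915_gem_mmap_gtt(), so a GEM object larger than the mappable aperture can now be GTT-mmapped and faulted in 1 MiB partial views on demand. A hedged userspace sketch of that flow is below; error handling is omitted, and the device path and 2 GiB size are arbitrary examples (the ioctls and structs are the existing i915 uAPI, not additions of this patch).

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_i915_gem_create create = { .size = 2ULL << 30 };
	struct drm_i915_gem_mmap_gtt map = { 0 };
	uint8_t *ptr;

	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);	/* object bigger than the aperture */
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &map);	/* fake offset to pass to mmap() */

	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, map.offset);
	ptr[0] = 1;			/* faults in the first partial view */
	ptr[create.size - 1] = 1;	/* faults in the last partial view */

	munmap(ptr, create.size);
	return 0;
}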