@@ -315,7 +315,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 
        desc = ctx->desc_template;                              /* bits  3-4  */
        desc |= engine->ctx_desc_template;                      /* bits  0-11 */
-       desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+       desc |= ce->state->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
                                                                /* bits 12-31 */
        desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;           /* bits 32-52 */
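The descriptor's address field now reads the GGTT offset straight from the context-state VMA (ce->state->node.start) instead of going through the cached ce->lrc_vma, which this patch retires. As a reference for the bit-range comments above, a minimal sketch of how the 64-bit descriptor is composed (variable names here are illustrative, not from the driver):

        u64 desc = template;                            /* bits  0-11: engine/ctx flags */
        desc |= ggtt_offset & GENMASK_ULL(31, 12);      /* bits 12-31: LRC GGTT address */
        desc |= (u64)hw_id << GEN8_CTX_ID_SHIFT;        /* bits 32-52: shift is 32 */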
@@ -763,7 +763,6 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
                                struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ctx->i915;
        struct intel_context *ce = &ctx->engine[engine->id];
        void *vaddr;
        u32 *lrc_reg_state;
@@ -774,16 +773,15 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
        if (ce->pin_count++)
                return 0;
 
-       ret = i915_gem_object_ggtt_pin(ce->state, NULL,
-                                      0, GEN8_LR_CONTEXT_ALIGN,
-                                      PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+       ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
+                          PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
        if (ret)
                goto err;
 
-       vaddr = i915_gem_object_pin_map(ce->state, I915_MAP_WB);
+       vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
-               goto unpin_ctx_obj;
+               goto unpin_vma;
        }
 
        lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
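Two related substitutions here: pinning becomes a VMA operation (i915_vma_pin on ce->state, with PIN_GLOBAL now spelling out the GGTT binding that the old i915_gem_object_ggtt_pin helper implied), while CPU mapping stays an object operation, so i915_gem_object_pin_map is handed the backing object, ce->state->obj.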
@@ -792,24 +790,25 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
        if (ret)
                goto unpin_map;
 
-       ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
        intel_lr_context_descriptor_update(ctx, engine);
 
        lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start;
        ce->lrc_reg_state = lrc_reg_state;
-       ce->state->dirty = true;
+       ce->state->obj->dirty = true;
 
        /* Invalidate GuC TLB. */
-       if (i915.enable_guc_submission)
+       if (i915.enable_guc_submission) {
+               struct drm_i915_private *dev_priv = ctx->i915;
                I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+       }
 
        i915_gem_context_get(ctx);
        return 0;
 
 unpin_map:
-       i915_gem_object_unpin_map(ce->state);
-unpin_ctx_obj:
-       i915_gem_object_ggtt_unpin(ce->state);
+       i915_gem_object_unpin_map(ce->state->obj);
+unpin_vma:
+       __i915_vma_unpin(ce->state);
 err:
        ce->pin_count = 0;
        return ret;
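The unwind labels follow the rename: unpin_map undoes the CPU mapping of the object, unpin_vma undoes the VMA pin via __i915_vma_unpin (the unchecked variant, presumably fine on this path where the pin is known to be held). The acquire/release pairing, as a sketch assuming the calls above:

        /* acquire */
        ret = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
        ptr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        /* release, strictly in reverse order */
        i915_gem_object_unpin_map(vma->obj);
        __i915_vma_unpin(vma);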
@@ -828,12 +827,8 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 
        intel_ring_unpin(ce->ring);
 
-       i915_gem_object_unpin_map(ce->state);
-       i915_gem_object_ggtt_unpin(ce->state);
-
-       ce->lrc_vma = NULL;
-       ce->lrc_desc = 0;
-       ce->lrc_reg_state = NULL;
+       i915_gem_object_unpin_map(ce->state->obj);
+       i915_vma_unpin(ce->state);
 
        i915_gem_context_put(ctx);
 }
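Besides switching to the VMA unpin, the teardown stops clearing ce->lrc_desc and ce->lrc_reg_state: both are unconditionally rewritten by the next intel_lr_context_pin, and ce->lrc_vma no longer exists as a separate field.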
@@ -1747,19 +1742,18 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 }
 
 static int
-lrc_setup_hws(struct intel_engine_cs *engine,
-             struct drm_i915_gem_object *dctx_obj)
+lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
 {
        void *hws;
 
        /* The HWSP is part of the default context object in LRC mode. */
-       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
-                                      LRC_PPHWSP_PN * PAGE_SIZE;
-       hws = i915_gem_object_pin_map(dctx_obj, I915_MAP_WB);
+       engine->status_page.gfx_addr =
+               vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+       hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(hws))
                return PTR_ERR(hws);
        engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
-       engine->status_page.obj = dctx_obj;
+       engine->status_page.obj = vma->obj;
 
        return 0;
 }
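lrc_setup_hws now receives the default context's VMA directly: the status page's GGTT address is read off vma->node.start and its CPU pointer comes from mapping vma->obj, both offset by LRC_PPHWSP_PN pages into the context image, replacing the i915_gem_obj_ggtt_offset lookup on the object.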
@@ -2131,6 +2125,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 {
        struct drm_i915_gem_object *ctx_obj;
        struct intel_context *ce = &ctx->engine[engine->id];
+       struct i915_vma *vma;
        uint32_t context_size;
        struct intel_ring *ring;
        int ret;
@@ -2148,6 +2143,12 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                return PTR_ERR(ctx_obj);
        }
 
+       vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto error_deref_obj;
+       }
+
        ring = intel_engine_create_ring(engine, ctx->ring_size);
        if (IS_ERR(ring)) {
                ret = PTR_ERR(ring);
@@ -2161,7 +2162,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
        }
 
        ce->ring = ring;
-       ce->state = ctx_obj;
+       ce->state = vma;
        ce->initialised = engine->init_context == NULL;
 
        return 0;
@@ -2170,8 +2171,6 @@ error_ring_free:
        intel_ring_free(ring);
 error_deref_obj:
        i915_gem_object_put(ctx_obj);
-       ce->ring = NULL;
-       ce->state = NULL;
        return ret;
 }
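The freshly allocated context object is immediately wrapped in a VMA in the global GTT (ctx->i915->ggtt.base), and ce->state now stores that VMA rather than the object. The ce->ring/ce->state resets disappear from the error path because those fields are only assigned once every allocation has succeeded, so on failure they were never set; the error path puts only ctx_obj, on the assumption that an unbound VMA is reclaimed along with its object.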
@@ -2182,24 +2181,23 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 
        for_each_engine(engine, dev_priv) {
                struct intel_context *ce = &ctx->engine[engine->id];
-               struct drm_i915_gem_object *ctx_obj = ce->state;
                void *vaddr;
                uint32_t *reg_state;
 
-               if (!ctx_obj)
+               if (!ce->state)
                        continue;
 
-               vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
+               vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
                if (WARN_ON(IS_ERR(vaddr)))
                        continue;
+
                reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-               ctx_obj->dirty = true;
 
                reg_state[CTX_RING_HEAD+1] = 0;
                reg_state[CTX_RING_TAIL+1] = 0;
 
-               i915_gem_object_unpin_map(ctx_obj);
+               ce->state->obj->dirty = true;
+               i915_gem_object_unpin_map(ce->state->obj);
 
                ce->ring->head = 0;
                ce->ring->tail = 0;
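The reset path keeps the same shape but goes through the VMA: the saved ring registers (CTX_RING_HEAD/CTX_RING_TAIL) in the context image are zeroed through a temporary CPU mapping of ce->state->obj, the object is marked dirty after the writes rather than before them so the update is not lost, and the software-side ring head/tail are rewound to match.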