@@ -535,15 +535,35 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	uint16_t adj_xri;
 	struct lpfc_node_rrq *rrq;
 	int empty;
+	uint32_t did = 0;
+	if (!ndlp)
+		return -EINVAL;
+	if (!phba->cfg_enable_rrq)
+		return -EINVAL;
+	if (phba->pport->load_flag & FC_UNLOADING) {
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+		goto out;
+	}
+	did = ndlp->nlp_DID;
 	/*
 	 * set the active bit even if there is no mem available.
 	 */
 	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
-	if (!ndlp)
-		return -EINVAL;
+	if (NLP_CHK_FREE_REQ(ndlp))
+		goto out;
+	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+		goto out;
 	if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
-		return -EINVAL;
+		goto out;
 	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
 	if (rrq) {
 		rrq->send_rrq = send_rrq;
@@ -554,14 +574,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 		rrq->vport = ndlp->vport;
 		rrq->rxid = rxid;
 		empty = list_empty(&phba->active_rrq_list);
-		if (phba->cfg_enable_rrq && send_rrq)
-			/*
-			 * We need the xri before we can add this to the
-			 * phba active rrq list.
-			 */
-			rrq->send_rrq = send_rrq;
-		else
-			rrq->send_rrq = 0;
+		rrq->send_rrq = send_rrq;
 		list_add_tail(&rrq->list, &phba->active_rrq_list);
 		if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
 			phba->hba_flag |= HBA_RRQ_ACTIVE;
@@ -570,40 +583,49 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 		}
 		return 0;
 	}
-	return -ENOMEM;
+out:
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
+			" DID:0x%x Send:%d\n",
+			xritag, rxid, did, send_rrq);
+	return -EINVAL;
 }
 /**
- * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
  * @phba: Pointer to HBA context object.
  * @xritag: xri used in this exchange.
  * @rrq: The RRQ to be cleared.
  *
  * This function is called with hbalock held. This function
  **/
-static void
-__lpfc_clr_rrq_active(struct lpfc_hba *phba,
-			uint16_t xritag,
-			struct lpfc_node_rrq *rrq)
+void
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
+		    uint16_t xritag,
+		    struct lpfc_node_rrq *rrq)
 {
 	uint16_t adj_xri;
-	struct lpfc_nodelist *ndlp;
+	struct lpfc_nodelist *ndlp = NULL;
-	ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
+	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
 	/* The target DID could have been swapped (cable swap)
 	 * we should use the ndlp from the findnode if it is
 	 * available.
 	 */
-	if (!ndlp)
+	if ((!ndlp) && rrq->ndlp)
 		ndlp = rrq->ndlp;
 	if (!ndlp)
 		goto out;
 	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
 	if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
 		rrq->send_rrq = 0;
 		rrq->xritag = 0;
 		rrq->rrq_stop_time = 0;
 	}
 out:
 	mempool_free(rrq, phba->rrq_pool);
 }
@@ -628,34 +650,34 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
 	struct lpfc_node_rrq *nextrrq;
 	unsigned long next_time;
 	unsigned long iflags;
+	LIST_HEAD(send_rrq);
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
 	next_time = jiffies + HZ * (phba->fc_ratov + 1);
 	list_for_each_entry_safe(rrq, nextrrq,
-			&phba->active_rrq_list, list) {
-		if (time_after(jiffies, rrq->rrq_stop_time)) {
-			list_del(&rrq->list);
-			if (!rrq->send_rrq)
-				/* this call will free the rrq */
-				__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
-			else {
-				/* if we send the rrq then the completion handler
-				 *  will clear the bit in the xribitmap.
-				 */
-				spin_unlock_irqrestore(&phba->hbalock, iflags);
-				if (lpfc_send_rrq(phba, rrq)) {
-					lpfc_clr_rrq_active(phba, rrq->xritag,
-							    rrq);
-				}
-				spin_lock_irqsave(&phba->hbalock, iflags);
-			}
-		} else if (time_before(rrq->rrq_stop_time, next_time))
+				 &phba->active_rrq_list, list) {
+		if (time_after(jiffies, rrq->rrq_stop_time))
+			list_move(&rrq->list, &send_rrq);
+		else if (time_before(rrq->rrq_stop_time, next_time))
 			next_time = rrq->rrq_stop_time;
 	}
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	if (!list_empty(&phba->active_rrq_list))
 		mod_timer(&phba->rrq_tmr, next_time);
+	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
+		list_del(&rrq->list);
+		if (!rrq->send_rrq)
+			/* this call will free the rrq */
+			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+		else if (lpfc_send_rrq(phba, rrq)) {
+			/* if we send the rrq then the completion handler
+			 *  will clear the bit in the xribitmap.
+			 */
+			lpfc_clr_rrq_active(phba, rrq->xritag,
+					    rrq);
+		}
+	}
 }
 /**
@@ -693,29 +715,37 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
 /**
  * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
  * @vport: Pointer to vport context object.
- *
- * Remove all active RRQs for this vport from the phba->active_rrq_list and
- * clear the rrq.
+ * @ndlp: Pointer to the lpfc_node_list structure.
+ * If ndlp is NULL Remove all active RRQs for this vport from the
+ * phba->active_rrq_list and clear the rrq.
+ * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
  **/
 void
-lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_node_rrq *rrq;
 	struct lpfc_node_rrq *nextrrq;
 	unsigned long iflags;
+	LIST_HEAD(rrq_list);
 	if (phba->sli_rev != LPFC_SLI_REV4)
 		return;
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
-		if (rrq->vport == vport) {
-			list_del(&rrq->list);
-			__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
-		}
+	if (!ndlp) {
+		lpfc_sli4_vport_delete_els_xri_aborted(vport);
+		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
 	}
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
+		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
+			list_move(&rrq->list, &rrq_list);
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
+		list_del(&rrq->list);
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	}
 }
 /**
@@ -733,24 +763,27 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
 	struct lpfc_node_rrq *nextrrq;
 	unsigned long next_time;
 	unsigned long iflags;
+	LIST_HEAD(rrq_list);
 	if (phba->sli_rev != LPFC_SLI_REV4)
 		return;
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
 	next_time = jiffies + HZ * (phba->fc_ratov * 2);
-	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+	list_splice_init(&phba->active_rrq_list, &rrq_list);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
 		list_del(&rrq->list);
-		__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 	}
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	if (!list_empty(&phba->active_rrq_list))
 		mod_timer(&phba->rrq_tmr, next_time);
 }
 /**
- * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
  * @phba: Pointer to HBA context object.
  * @ndlp: Targets nodelist pointer for this exchange.
  * @xritag the xri in the bitmap to test.
@@ -759,8 +792,8 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
  * returns 0 = rrq not active for this xri
  *         1 = rrq is valid for this xri.
  **/
-static int
-__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+int
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 			uint16_t xritag)
 {
 	uint16_t adj_xri;
@@ -802,52 +835,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	return ret;
 }
-/**
- * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @xritag: xri used in this exchange.
- * @rrq: The RRQ to be cleared.
- *
- * This function is takes the hbalock.
- **/
-void
-lpfc_clr_rrq_active(struct lpfc_hba *phba,
-		    uint16_t xritag,
-		    struct lpfc_node_rrq *rrq)
-{
-	unsigned long iflags;
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	__lpfc_clr_rrq_active(phba, xritag, rrq);
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
-	return;
-}
-/**
- * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @ndlp: Targets nodelist pointer for this exchange.
- * @xritag the xri in the bitmap to test.
- *
- * This function takes the hbalock.
- * returns 0 = rrq not active for this xri
- *         1 = rrq is valid for this xri.
- **/
-int
-lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
-			uint16_t xritag)
-{
-	int ret;
-	unsigned long iflags;
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
-	return ret;
-}
 /**
  * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
  * @phba: Pointer to HBA context object.
@@ -885,7 +872,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
 		return NULL;
 	adj_xri = sglq->sli4_xritag -
 			phba->sli4_hba.max_cfg_param.xri_base;
-	if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
+	if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
 		/* This xri has an rrq outstanding for this DID.
 		 * put it back in the list and get another xri.
 		 */