@@ -868,7 +868,7 @@ done:
 
 /*
  * Back up requester to resend the last un-ACKed request.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
  */
 static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
 {
@@ -911,7 +911,8 @@ static void rc_timeout(unsigned long arg)
 	struct qib_ibport *ibp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
+	spin_lock_irqsave(&qp->r_lock, flags);
+	spin_lock(&qp->s_lock);
 	if (qp->s_flags & QIB_S_TIMER) {
 		ibp = to_iport(qp->ibqp.device, qp->port_num);
 		ibp->n_rc_timeouts++;
@@ -920,7 +921,8 @@ static void rc_timeout(unsigned long arg)
 		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
 		qib_schedule_send(qp);
 	}
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->r_lock, flags);
 }
 
 /*
@@ -1414,10 +1416,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 
 	spin_lock_irqsave(&qp->s_lock, flags);
 
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-		goto ack_done;
-
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
 		goto ack_done;
@@ -1661,9 +1659,6 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 		ibp->n_rc_dupreq++;
 
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this now that we hold the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto unlock_done;
 
 		for (i = qp->r_head_ack_queue; ; i = prev) {
 			if (i == qp->s_tail_ack_queue)
@@ -1878,9 +1873,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	psn = be32_to_cpu(ohdr->bth[2]);
 	opcode >>= 24;
 
-	/* Prevent simultaneous processing after APM on different CPUs */
-	spin_lock(&qp->r_lock);
-
 	/*
 	 * Process responses (ACKs) before anything else.  Note that the
 	 * packet sequence number will be for something in the send work
@@ -1891,14 +1883,14 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
 		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
 				hdrsize, pmtu, rcd);
-		goto runlock;
+		return;
 	}
 
 	/* Compute 24 bits worth of difference. */
 	diff = qib_cmp24(psn, qp->r_psn);
 	if (unlikely(diff)) {
 		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
-			goto runlock;
+			return;
 		goto send_ack;
 	}
 
@@ -2090,9 +2082,6 @@ send_last:
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2146,7 +2135,7 @@ send_last:
 
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
-		goto srunlock;
+		goto sunlock;
 	}
 
 	case OP(COMPARE_SWAP):
@@ -2165,9 +2154,6 @@ send_last:
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2213,7 +2199,7 @@ send_last:
 
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
-		goto srunlock;
+		goto sunlock;
 	}
 
 	default:
@@ -2227,7 +2213,7 @@ send_last:
 	/* Send an ACK if requested or required. */
 	if (psn & (1 << 31))
 		goto send_ack;
-	goto runlock;
+	return;
 
 rnr_nak:
 	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
@@ -2238,7 +2224,7 @@ rnr_nak:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_op_err:
 	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
@@ -2250,7 +2236,7 @@ nack_op_err:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_inv_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2264,7 +2250,7 @@ nack_inv:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_acc_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2274,13 +2260,6 @@ nack_acc:
 	qp->r_ack_psn = qp->r_psn;
 send_ack:
 	qib_send_rc_ack(qp);
-runlock:
-	spin_unlock(&qp->r_lock);
-	return;
-
-srunlock:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	spin_unlock(&qp->r_lock);
 	return;
 
 sunlock: