@ -147,12 +147,12 @@ struct cm_id_private {
__be32 rq_psn ;
int timeout_ms ;
enum ib_mtu path_mtu ;
__be16 pkey ;
u8 private_data_len ;
u8 max_cm_retries ;
u8 peer_to_peer ;
u8 responder_resources ;
u8 initiator_depth ;
u8 local_ack_timeout ;
u8 retry_count ;
u8 rnr_retry_count ;
u8 service_timeout ;
@ -690,7 +690,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
* timewait before notifying the user that we ' ve exited timewait .
*/
cm_id_priv - > id . state = IB_CM_TIMEWAIT ;
wait_time = cm_convert_to_ms ( cm_id_priv - > local_ack_timeout ) ;
wait_time = cm_convert_to_ms ( cm_id_priv - > av . packet_life_time + 1 ) ;
queue_delayed_work ( cm . wq , & cm_id_priv - > timewait_info - > work . work ,
msecs_to_jiffies ( wait_time ) ) ;
cm_id_priv - > timewait_info = NULL ;
@ -1009,6 +1009,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
cm_id_priv - > responder_resources = param - > responder_resources ;
cm_id_priv - > retry_count = param - > retry_count ;
cm_id_priv - > path_mtu = param - > primary_path - > mtu ;
cm_id_priv - > pkey = param - > primary_path - > pkey ;
cm_id_priv - > qp_type = param - > qp_type ;
ret = cm_alloc_msg ( cm_id_priv , & cm_id_priv - > msg ) ;
@ -1023,8 +1024,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
cm_id_priv - > local_qpn = cm_req_get_local_qpn ( req_msg ) ;
cm_id_priv - > rq_psn = cm_req_get_starting_psn ( req_msg ) ;
cm_id_priv - > local_ack_timeout =
cm_req_get_primary_local_ack_timeout ( req_msg ) ;
spin_lock_irqsave ( & cm_id_priv - > lock , flags ) ;
ret = ib_post_send_mad ( cm_id_priv - > msg , NULL ) ;
@ -1409,9 +1408,8 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv - > initiator_depth = cm_req_get_resp_res ( req_msg ) ;
cm_id_priv - > responder_resources = cm_req_get_init_depth ( req_msg ) ;
cm_id_priv - > path_mtu = cm_req_get_path_mtu ( req_msg ) ;
cm_id_priv - > pkey = req_msg - > pkey ;
cm_id_priv - > sq_psn = cm_req_get_starting_psn ( req_msg ) ;
cm_id_priv - > local_ack_timeout =
cm_req_get_primary_local_ack_timeout ( req_msg ) ;
cm_id_priv - > retry_count = cm_req_get_retry_count ( req_msg ) ;
cm_id_priv - > rnr_retry_count = cm_req_get_rnr_retry_count ( req_msg ) ;
cm_id_priv - > qp_type = cm_req_get_qp_type ( req_msg ) ;
@ -1715,7 +1713,7 @@ static int cm_establish_handler(struct cm_work *work)
unsigned long flags ;
int ret ;
/* See comment in ib_ cm_establish about lookup. */
/* See comment in cm_establish about lookup. */
cm_id_priv = cm_acquire_id ( work - > local_id , work - > remote_id ) ;
if ( ! cm_id_priv )
return - EINVAL ;
@ -2401,11 +2399,16 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
cm_id_priv = container_of ( cm_id , struct cm_id_private , id ) ;
spin_lock_irqsave ( & cm_id_priv - > lock , flags ) ;
if ( cm_id - > state ! = IB_CM_ESTABLISHED | |
cm_id - > lap_state ! = IB_CM_LAP_IDLE ) {
( cm_id - > lap_state ! = IB_CM_LAP_UNINIT & &
cm_id - > lap_state ! = IB_CM_LAP_IDLE ) ) {
ret = - EINVAL ;
goto out ;
}
ret = cm_init_av_by_path ( alternate_path , & cm_id_priv - > alt_av ) ;
if ( ret )
goto out ;
ret = cm_alloc_msg ( cm_id_priv , & msg ) ;
if ( ret )
goto out ;
@ -2430,7 +2433,8 @@ out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
EXPORT_SYMBOL ( ib_send_cm_lap ) ;
static void cm_format_path_from_lap ( struct ib_sa_path_rec * path ,
static void cm_format_path_from_lap ( struct cm_id_private * cm_id_priv ,
struct ib_sa_path_rec * path ,
struct cm_lap_msg * lap_msg )
{
memset ( path , 0 , sizeof * path ) ;
@ -2442,10 +2446,10 @@ static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
path - > hop_limit = lap_msg - > alt_hop_limit ;
path - > traffic_class = cm_lap_get_traffic_class ( lap_msg ) ;
path - > reversible = 1 ;
/* pkey is same as in REQ */
path - > pkey = cm_id_priv - > pkey ;
path - > sl = cm_lap_get_sl ( lap_msg ) ;
path - > mtu_selector = IB_SA_EQ ;
/* mtu is same as in REQ */
path - > mtu = cm_id_priv - > path_mtu ;
path - > rate_selector = IB_SA_EQ ;
path - > rate = cm_lap_get_packet_rate ( lap_msg ) ;
path - > packet_life_time_selector = IB_SA_EQ ;
@ -2471,7 +2475,7 @@ static int cm_lap_handler(struct cm_work *work)
param = & work - > cm_event . param . lap_rcvd ;
param - > alternate_path = & work - > path [ 0 ] ;
cm_format_path_from_lap ( param - > alternate_path , lap_msg ) ;
cm_format_path_from_lap ( cm_id_priv , param - > alternate_path , lap_msg ) ;
work - > cm_event . private_data = & lap_msg - > private_data ;
spin_lock_irqsave ( & cm_id_priv - > lock , flags ) ;
@ -2479,6 +2483,7 @@ static int cm_lap_handler(struct cm_work *work)
goto unlock ;
switch ( cm_id_priv - > id . lap_state ) {
case IB_CM_LAP_UNINIT :
case IB_CM_LAP_IDLE :
break ;
case IB_CM_MRA_LAP_SENT :
@ -2501,6 +2506,10 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv - > id . lap_state = IB_CM_LAP_RCVD ;
cm_id_priv - > tid = lap_msg - > hdr . tid ;
cm_init_av_for_response ( work - > port , work - > mad_recv_wc - > wc ,
work - > mad_recv_wc - > recv_buf . grh ,
& cm_id_priv - > av ) ;
cm_init_av_by_path ( param - > alternate_path , & cm_id_priv - > alt_av ) ;
ret = atomic_inc_and_test ( & cm_id_priv - > work_count ) ;
if ( ! ret )
list_add_tail ( & work - > list , & cm_id_priv - > work_list ) ;
@ -3039,7 +3048,7 @@ static void cm_work_handler(void *data)
cm_free_work ( work ) ;
}
int ib_ cm_establish( struct ib_cm_id * cm_id )
static int cm_establish ( struct ib_cm_id * cm_id )
{
struct cm_id_private * cm_id_priv ;
struct cm_work * work ;
@ -3087,7 +3096,44 @@ int ib_cm_establish(struct ib_cm_id *cm_id)
out :
return ret ;
}
EXPORT_SYMBOL ( ib_cm_establish ) ;
/*
 * Switch the connection's primary address vector to the previously loaded
 * alternate path.  Called from ib_cm_notify() on an IB_EVENT_PATH_MIG event.
 * Only legal while the connection is ESTABLISHED and a LAP exchange is not
 * in flight (lap_state UNINIT, i.e. alternate path came from the REQ, or
 * IDLE, i.e. a prior LAP completed); otherwise returns -EINVAL.
 */
static int cm_migrate ( struct ib_cm_id * cm_id )
{
struct cm_id_private * cm_id_priv ;
unsigned long flags ;
int ret = 0 ;
cm_id_priv = container_of ( cm_id , struct cm_id_private , id ) ;
/* av/alt_av and the lap_state check must be atomic w.r.t. CM handlers */
spin_lock_irqsave ( & cm_id_priv - > lock , flags ) ;
if ( cm_id - > state = = IB_CM_ESTABLISHED & &
( cm_id - > lap_state = = IB_CM_LAP_UNINIT | |
cm_id - > lap_state = = IB_CM_LAP_IDLE ) ) {
/* UNINIT collapses to IDLE: the alternate is now the active path */
cm_id - > lap_state = IB_CM_LAP_IDLE ;
/* struct copy: alternate address vector becomes the primary one */
cm_id_priv - > av = cm_id_priv - > alt_av ;
} else
ret = - EINVAL ;
spin_unlock_irqrestore ( & cm_id_priv - > lock , flags ) ;
return ret ;
}
/*
 * ib_cm_notify - inform the CM of a QP event affecting the connection.
 * @cm_id: connection identifier
 * @event: IB_EVENT_COMM_EST (communication established, e.g. first message
 *         arrived before the RTU) or IB_EVENT_PATH_MIG (hardware completed
 *         path migration to the alternate path).
 *
 * Dispatches to cm_establish() or cm_migrate(); any other event type is
 * rejected with -EINVAL.  Replaces the old exported ib_cm_establish()
 * entry point (see the cm_establish hunk above in this patch).
 */
int ib_cm_notify ( struct ib_cm_id * cm_id , enum ib_event_type event )
{
int ret ;
switch ( event ) {
case IB_EVENT_COMM_EST :
ret = cm_establish ( cm_id ) ;
break ;
case IB_EVENT_PATH_MIG :
ret = cm_migrate ( cm_id ) ;
break ;
default :
ret = - EINVAL ;
}
return ret ;
}
EXPORT_SYMBOL ( ib_cm_notify ) ;
static void cm_recv_handler ( struct ib_mad_agent * mad_agent ,
struct ib_mad_recv_wc * mad_recv_wc )
@ -3220,6 +3266,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
if ( cm_id_priv - > alt_av . ah_attr . dlid ) {
* qp_attr_mask | = IB_QP_ALT_PATH ;
qp_attr - > alt_port_num = cm_id_priv - > alt_av . port - > port_num ;
qp_attr - > alt_pkey_index = cm_id_priv - > alt_av . pkey_index ;
qp_attr - > alt_timeout =
cm_id_priv - > alt_av . packet_life_time + 1 ;
qp_attr - > alt_ah_attr = cm_id_priv - > alt_av . ah_attr ;
}
ret = 0 ;
@ -3246,19 +3295,31 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
case IB_CM_REP_SENT :
case IB_CM_MRA_REP_RCVD :
case IB_CM_ESTABLISHED :
* qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN ;
qp_attr - > sq_psn = be32_to_cpu ( cm_id_priv - > sq_psn ) ;
if ( cm_id_priv - > qp_type = = IB_QPT_RC ) {
* qp_attr_mask | = IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
IB_QP_RNR_RETRY |
IB_QP_MAX_QP_RD_ATOMIC ;
qp_attr - > timeout = cm_id_priv - > local_ack_timeout ;
qp_attr - > retry_cnt = cm_id_priv - > retry_count ;
qp_attr - > rnr_retry = cm_id_priv - > rnr_retry_count ;
qp_attr - > max_rd_atomic = cm_id_priv - > initiator_depth ;
}
if ( cm_id_priv - > alt_av . ah_attr . dlid ) {
* qp_attr_mask | = IB_QP_PATH_MIG_STATE ;
if ( cm_id_priv - > id . lap_state = = IB_CM_LAP_UNINIT ) {
* qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN ;
qp_attr - > sq_psn = be32_to_cpu ( cm_id_priv - > sq_psn ) ;
if ( cm_id_priv - > qp_type = = IB_QPT_RC ) {
* qp_attr_mask | = IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
IB_QP_RNR_RETRY |
IB_QP_MAX_QP_RD_ATOMIC ;
qp_attr - > timeout =
cm_id_priv - > av . packet_life_time + 1 ;
qp_attr - > retry_cnt = cm_id_priv - > retry_count ;
qp_attr - > rnr_retry = cm_id_priv - > rnr_retry_count ;
qp_attr - > max_rd_atomic =
cm_id_priv - > initiator_depth ;
}
if ( cm_id_priv - > alt_av . ah_attr . dlid ) {
* qp_attr_mask | = IB_QP_PATH_MIG_STATE ;
qp_attr - > path_mig_state = IB_MIG_REARM ;
}
} else {
* qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE ;
qp_attr - > alt_port_num = cm_id_priv - > alt_av . port - > port_num ;
qp_attr - > alt_pkey_index = cm_id_priv - > alt_av . pkey_index ;
qp_attr - > alt_timeout =
cm_id_priv - > alt_av . packet_life_time + 1 ;
qp_attr - > alt_ah_attr = cm_id_priv - > alt_av . ah_attr ;
qp_attr - > path_mig_state = IB_MIG_REARM ;
}
ret = 0 ;