@@ -71,30 +71,30 @@
 struct workqueue_struct *rds_wq;
 EXPORT_SYMBOL_GPL(rds_wq);
 
-void rds_connect_path_complete(struct rds_connection *conn, int curr)
+void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
 {
-        if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) {
+        if (!rds_conn_path_transition(cp, curr, RDS_CONN_UP)) {
                 printk(KERN_WARNING "%s: Cannot transition to state UP, "
                                 "current state is %d\n",
                                 __func__,
-                                atomic_read(&conn->c_state));
-                rds_conn_drop(conn);
+                                atomic_read(&cp->cp_state));
+                rds_conn_path_drop(cp);
                 return;
         }
 
         rdsdebug("conn %p for %pI4 to %pI4 complete\n",
-                 conn, &conn->c_laddr, &conn->c_faddr);
+                 cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr);
 
-        conn->c_reconnect_jiffies = 0;
-        set_bit(0, &conn->c_map_queued);
-        queue_delayed_work(rds_wq, &conn->c_send_w, 0);
-        queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+        cp->cp_reconnect_jiffies = 0;
+        set_bit(0, &cp->cp_conn->c_map_queued);
+        queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+        queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
 }
 EXPORT_SYMBOL_GPL(rds_connect_path_complete);
 
 void rds_connect_complete(struct rds_connection *conn)
 {
-        rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+        rds_connect_path_complete(&conn->c_path[0], RDS_CONN_CONNECTING);
 }
 EXPORT_SYMBOL_GPL(rds_connect_complete);
 
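For context, the wrapper above forwards the legacy per-connection entry point to path 0 of the new per-path state. The exact kernel layout is not visible in this hunk, so the following is only a compilable sketch of the shape the code assumes: per-path fields (cp_state, cp_flags, cp_reconnect_jiffies, the cp_*_w work items) hang off struct rds_conn_path, which keeps a cp_conn back-pointer, while connection-wide fields such as c_laddr, c_faddr and c_map_queued stay in struct rds_connection. Field list and array size here are illustrative, not the real headers.

#include <stdio.h>

struct rds_connection;                            /* owner, defined below */

struct rds_conn_path {                            /* per-path state (sketch) */
        struct rds_connection *cp_conn;           /* back-pointer used as cp->cp_conn above */
        int                    cp_state;          /* an atomic_t in the kernel */
        unsigned long          cp_flags;
        unsigned long          cp_reconnect_jiffies;
        /* cp_send_w, cp_recv_w, cp_conn_w, cp_down_w work items also live here */
};

struct rds_connection {                           /* connection-wide state (sketch) */
        unsigned long          c_map_queued;      /* still reached via cp->cp_conn */
        struct rds_conn_path   c_path[1];         /* illustrative size: at least path 0 */
};

int main(void)
{
        struct rds_connection conn = { 0 };
        struct rds_conn_path *cp = &conn.c_path[0];   /* what rds_connect_complete() passes */

        cp->cp_conn = &conn;                          /* back-pointer the workers rely on */
        printf("conn %p, path0 %p\n", (void *)cp->cp_conn, (void *)cp);
        return 0;
}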
@@ -116,46 +116,52 @@ EXPORT_SYMBOL_GPL(rds_connect_complete);
  * We should *always* start with a random backoff; otherwise a broken connection
  * will always take several iterations to be re-established.
  */
-void rds_queue_reconnect(struct rds_connection *conn)
+void rds_queue_reconnect(struct rds_conn_path *cp)
 {
         unsigned long rand;
+        struct rds_connection *conn = cp->cp_conn;
 
         rdsdebug("conn %p for %pI4 to %pI4 reconnect jiffies %lu\n",
                  conn, &conn->c_laddr, &conn->c_faddr,
-                 conn->c_reconnect_jiffies);
+                 cp->cp_reconnect_jiffies);
 
-        set_bit(RDS_RECONNECT_PENDING, &conn->c_flags);
-        if (conn->c_reconnect_jiffies == 0) {
-                conn->c_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
-                queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
+        set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
+        if (cp->cp_reconnect_jiffies == 0) {
+                cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
+                queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
                 return;
         }
 
         get_random_bytes(&rand, sizeof(rand));
         rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
-                 rand % conn->c_reconnect_jiffies, conn->c_reconnect_jiffies,
+                 rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
                  conn, &conn->c_laddr, &conn->c_faddr);
-        queue_delayed_work(rds_wq, &conn->c_conn_w,
-                           rand % conn->c_reconnect_jiffies);
+        queue_delayed_work(rds_wq, &cp->cp_conn_w,
+                           rand % cp->cp_reconnect_jiffies);
 
-        conn->c_reconnect_jiffies = min(conn->c_reconnect_jiffies * 2,
+        cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2,
                                         rds_sysctl_reconnect_max_jiffies);
 }
 
 void rds_connect_worker(struct work_struct *work)
 {
-        struct rds_connection *conn = container_of(work, struct rds_connection, c_conn_w.work);
+        struct rds_conn_path *cp = container_of(work,
+                                                struct rds_conn_path,
+                                                cp_conn_w.work);
+        struct rds_connection *conn = cp->cp_conn;
         int ret;
 
-        clear_bit(RDS_RECONNECT_PENDING, &conn->c_flags);
-        if (rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
+        clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
+        if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
                 ret = conn->c_trans->conn_connect(conn);
                 rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n",
                         conn, &conn->c_laddr, &conn->c_faddr, ret);
 
                 if (ret) {
-                        if (rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_DOWN))
-                                rds_queue_reconnect(conn);
+                        if (rds_conn_path_transition(cp,
+                                                     RDS_CONN_CONNECTING,
+                                                     RDS_CONN_DOWN))
+                                rds_queue_reconnect(cp);
                         else
                                 rds_conn_error(conn, "RDS: connect failed\n");
                 }
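The backoff policy itself is unchanged by the renaming above, but it is easy to lose in the noise, so here is a standalone model of just that arithmetic: the first attempt is queued immediately while the ceiling is seeded, later attempts wait a random number of jiffies below the current ceiling, and the ceiling doubles after each attempt up to a cap. The constants and rand() are illustrative stand-ins for rds_sysctl_reconnect_min/max_jiffies and get_random_bytes(), not the kernel values.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the RDS reconnect sysctls. */
#define RECONNECT_MIN_JIFFIES     8UL
#define RECONNECT_MAX_JIFFIES  1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long reconnect_jiffies = 0;   /* mirrors cp->cp_reconnect_jiffies */

        for (int attempt = 0; attempt < 6; attempt++) {
                unsigned long delay;

                if (reconnect_jiffies == 0) {
                        /* First attempt: seed the ceiling and queue with no delay. */
                        reconnect_jiffies = RECONNECT_MIN_JIFFIES;
                        delay = 0;
                } else {
                        /* Random delay below the ceiling, then double the ceiling. */
                        delay = (unsigned long)rand() % reconnect_jiffies;
                        reconnect_jiffies = min_ul(reconnect_jiffies * 2,
                                                   RECONNECT_MAX_JIFFIES);
                }
                printf("attempt %d: delay %lu jiffies, next ceiling %lu\n",
                       attempt, delay, reconnect_jiffies);
        }
        return 0;
}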
@@ -164,22 +170,24 @@ void rds_connect_worker(struct work_struct *work)
 
 void rds_send_worker(struct work_struct *work)
 {
-        struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work);
+        struct rds_conn_path *cp = container_of(work,
+                                                struct rds_conn_path,
+                                                cp_send_w.work);
         int ret;
 
-        if (rds_conn_state(conn) == RDS_CONN_UP) {
-                clear_bit(RDS_LL_SEND_FULL, &conn->c_flags);
-                ret = rds_send_xmit(conn);
+        if (rds_conn_path_state(cp) == RDS_CONN_UP) {
+                clear_bit(RDS_LL_SEND_FULL, &cp->cp_flags);
+                ret = rds_send_xmit(cp->cp_conn);
                 cond_resched();
-                rdsdebug("conn %p ret %d\n", conn, ret);
+                rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
                 switch (ret) {
                 case -EAGAIN:
                         rds_stats_inc(s_send_immediate_retry);
-                        queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+                        queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
                         break;
                 case -ENOMEM:
                         rds_stats_inc(s_send_delayed_retry);
-                        queue_delayed_work(rds_wq, &conn->c_send_w, 2);
+                        queue_delayed_work(rds_wq, &cp->cp_send_w, 2);
                 default:
                         break;
                 }
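Each worker above now recovers its rds_conn_path (rather than the rds_connection) from the embedded delayed work via container_of(). A self-contained userspace model of that pointer arithmetic, using trimmed stand-in types rather than the kernel definitions:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: step back from the member's address
 * by the member's offset to reach the enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct  { int pending; };
struct delayed_work { struct work_struct work; };

struct rds_conn_path {                 /* trimmed stand-in */
        void                *cp_conn;
        struct delayed_work  cp_send_w;
};

static void send_worker(struct work_struct *work)
{
        struct rds_conn_path *cp = container_of(work,
                                                struct rds_conn_path,
                                                cp_send_w.work);

        printf("recovered cp at %p\n", (void *)cp);
}

int main(void)
{
        struct rds_conn_path path = { 0 };

        /* The workqueue core hands the handler &path.cp_send_w.work. */
        send_worker(&path.cp_send_w.work);
        return 0;
}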
@@ -188,20 +196,22 @@ void rds_send_worker(struct work_struct *work)
 
 void rds_recv_worker(struct work_struct *work)
 {
-        struct rds_connection *conn = container_of(work, struct rds_connection, c_recv_w.work);
+        struct rds_conn_path *cp = container_of(work,
+                                                struct rds_conn_path,
+                                                cp_recv_w.work);
         int ret;
 
-        if (rds_conn_state(conn) == RDS_CONN_UP) {
-                ret = conn->c_trans->recv(conn);
-                rdsdebug("conn %p ret %d\n", conn, ret);
+        if (rds_conn_path_state(cp) == RDS_CONN_UP) {
+                ret = cp->cp_conn->c_trans->recv(cp->cp_conn);
+                rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
                 switch (ret) {
                 case -EAGAIN:
                         rds_stats_inc(s_recv_immediate_retry);
-                        queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+                        queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
                         break;
                 case -ENOMEM:
                         rds_stats_inc(s_recv_delayed_retry);
-                        queue_delayed_work(rds_wq, &conn->c_recv_w, 2);
+                        queue_delayed_work(rds_wq, &cp->cp_recv_w, 2);
                 default:
                         break;
                 }
@@ -210,9 +220,11 @@ void rds_recv_worker(struct work_struct *work)
 
 void rds_shutdown_worker(struct work_struct *work)
 {
-        struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
+        struct rds_conn_path *cp = container_of(work,
+                                                struct rds_conn_path,
+                                                cp_down_w);
 
-        rds_conn_shutdown(conn);
+        rds_conn_shutdown(cp->cp_conn);
 }
 
 void rds_threads_exit(void)