@@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
+		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
@@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
 	skb = child->dequeue(child);
 	if (skb) {
 		qdisc_bstats_update(sch, skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 		sch->q.qlen--;
 	} else {
 		if (!red_is_idling(&q->vars))
@@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch)
 	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
 		q->stats.other++;
 		qdisc_qstats_drop(sch);
+		sch->qstats.backlog -= len;
 		sch->q.qlen--;
 		return len;
 	}
@@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch)
 	struct red_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 	red_restart(&q->vars);
 }