@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
+	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * very first SKB on the 'temp' list.
	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
+	}

-	return 0;
+	return event_eor;
 }

 /* Add a new event for propagation to the ULP.  */
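The new return contract: sctp_ulpq_tail_data() now reports 1 when the event it queued carried MSG_EOR, i.e. the receiver got a complete message, and 0 otherwise. MSG_EOR is the same end-of-record flag user space sees from recvmsg() on an SCTP socket; a minimal sketch of checking it (the descriptor and buffer names are illustrative, not part of this patch):

#include <string.h>
#include <sys/socket.h>

/* Returns bytes read; sets *complete when the kernel has handed over
 * the final piece of an SCTP record (msg_flags contains MSG_EOR). */
static ssize_t read_piece(int fd, char *buf, size_t len, int *complete)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg;
	ssize_t n;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	n = recvmsg(fd, &msg, 0);
	*complete = (n >= 0 && (msg.msg_flags & MSG_EOR)) ? 1 : 0;
	return n;
}

During partial delivery the early pieces arrive without MSG_EOR, which mirrors the event_eor == 0 case above.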
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
+			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
				next_tsn++;
-			else
+				last_frag = pos;
+			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
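The middle-fragment change above fixes a subtle bookkeeping bug: when the TSN run continued, the old code advanced next_tsn but left last_frag pointing at the first fragment. A short trace of the corrected behaviour, with illustrative TSNs (not taken from the patch):

/* Fragments queued with TSNs 10, 11, 12, all SCTP_DATA_MIDDLE_FRAG:
 *
 *   TSN 10: !first_frag      -> first_frag = pos, last_frag = pos,
 *                               next_tsn = 11
 *   TSN 11: next_tsn == ctsn -> next_tsn = 12, last_frag = pos
 *                               (the update the old code never made)
 *   TSN 12: next_tsn == ctsn -> next_tsn = 13, last_frag = pos
 *
 * Before the fix, the run handed on at 'done:' ended at TSN 10 even
 * though TSNs 11..12 were contiguous and deliverable. */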
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
			} else
				goto done;
			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag)
+				return NULL;
+			else
+				goto done;
+			break;
+
		default:
			return NULL;
		}
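Both retrieval helpers key off the fragment bits of the DATA chunk flags. For reference, the classification behind SCTP_DATA_FRAG_MASK, with the values defined in include/linux/sctp.h (B and E are the beginning/end bits of RFC 4960, section 3.3.1):

enum {
	SCTP_DATA_MIDDLE_FRAG	= 0x00,	/* neither B nor E set       */
	SCTP_DATA_LAST_FRAG	= 0x01,	/* E: last fragment          */
	SCTP_DATA_FIRST_FRAG	= 0x02,	/* B: first fragment         */
	SCTP_DATA_NOT_FRAG	= 0x03,	/* B|E: unfragmented message */
	SCTP_DATA_FRAG_MASK	= 0x03,
};

So the new SCTP_DATA_LAST_FRAG case covers the one combination that previously fell through to the default and aborted the scan even when a usable run had already been found.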
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
 {
	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
+	__u32 tsn, last_tsn;
+	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

-	while ((skb = __skb_dequeue_tail(list)) != NULL) {
-		freed += skb_headlen(skb);
+	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

+		/* Don't renege below the Cumulative TSN ACK Point. */
+		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+			break;
+
+		/* Events in ordering queue may have multiple fragments
+		 * corresponding to additional TSNs.  Sum the total
+		 * freed space; find the last TSN.
+		 */
+		freed += skb_headlen(skb);
+		flist = skb_shinfo(skb)->frag_list;
+		for (last = flist; flist; flist = flist->next) {
+			last = flist;
+			freed += skb_headlen(last);
+		}
+		if (last)
+			last_tsn = sctp_skb2event(last)->tsn;
+		else
+			last_tsn = tsn;
+		/* Unlink the event, then renege all applicable TSNs. */
+		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
+		while (TSN_lte(tsn, last_tsn)) {
+			sctp_tsnmap_renege(tsnmap, tsn);
+			tsn++;
+		}
+
		if (freed >= needed)
			return freed;
	}
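The rewritten loop matters when a queued entry is a reassembled event: its remaining fragments hang off skb_shinfo(skb)->frag_list, each fragment owning its own TSN. Reneging only the head TSN, as the old code did, both under-counted the freed space and left stale TSNs marked as received. A user-space sketch of the new accounting over a simplified fragment chain (the struct and names are stand-ins, not kernel types):

#include <stdio.h>

struct frag { unsigned int tsn, len; struct frag *next; };

/* Sum the space held by every fragment and renege the whole TSN
 * span, mirroring the patched sctp_ulpq_renege_list() logic. */
static unsigned int renege_event(struct frag *head)
{
	unsigned int freed = head->len, last_tsn = head->tsn;
	struct frag *f;

	for (f = head->next; f; f = f->next) {
		freed += f->len;	/* count every fragment  */
		last_tsn = f->tsn;	/* remember the last TSN */
	}
	for (unsigned int tsn = head->tsn; tsn <= last_tsn; tsn++)
		printf("renege TSN %u\n", tsn);
	return freed;
}

int main(void)
{
	struct frag c = { 7, 100, NULL };
	struct frag b = { 6, 100, &c };
	struct frag a = { 5, 100, &b };

	/* Prints TSNs 5..7 and returns 300; the old code would have
	 * reneged only TSN 5 and reported 100 bytes freed. */
	printf("freed %u\n", renege_event(&a));
	return 0;
}

(The plain tsn <= last_tsn works here only because the sketch ignores 32-bit wraparound; the kernel uses TSN_lte() for that, as shown after the next hunk.)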
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
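TSN_lte() above is not a plain <=: TSNs are 32-bit sequence numbers that wrap, so the kernel compares them with serial-number arithmetic (helpers in include/net/sctp/sm.h). A user-space sketch of the idea, with a couple of sanity checks:

#include <assert.h>
#include <stdint.h>

/* s is at-or-before t if they are equal or if their unsigned
 * difference has the sign bit set (i.e. is negative as a signed
 * 32-bit value). */
static int tsn_lte(uint32_t s, uint32_t t)
{
	return s == t || ((s - t) & 0x80000000u);
}

int main(void)
{
	assert(tsn_lte(5, 7));			/* ordinary ordering */
	assert(tsn_lte(0xfffffffeu, 2));	/* across the wrap   */
	assert(!tsn_lte(2, 0xfffffffeu));	/* 2 is "after" here */
	return 0;
}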
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
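Taken together with the first hunk, the contract is: retval == 1 means a complete message (MSG_EOR) reached the socket queue, so the reassembly queue is re-checked for messages that can now be flushed; retval <= 0 means nothing complete was delivered, so partial delivery is the way to free receive space. Applications can influence when partial delivery starts via the SCTP_PARTIAL_DELIVERY_POINT socket option (RFC 6458); a brief sketch, with the descriptor and threshold chosen for illustration:

#include <netinet/sctp.h>
#include <stdint.h>
#include <sys/socket.h>

/* Ask for partial delivery once a partially reassembled message
 * reaches 4 KiB on this socket. */
static int set_pd_point(int fd)
{
	uint32_t point = 4096;

	return setsockopt(fd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
			  &point, sizeof(point));
}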