@@ -810,8 +810,7 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
-				     unsigned int limit)
+static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
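The first hunk drops the const struct sk_buff *skb parameter from sk_rcvqueues_full() because the helper never looks at it: as the comment above it spells out, the incoming packet's truesize is deliberately left out of the accounting so that even a single big packet can still be accepted, and the qsize it computes uses only sk->sk_backlog.len and sk->sk_rmem_alloc. The second hunk below updates sk_add_backlog(), the one caller visible in this range, to the new two-argument form; a combined sketch of both helpers follows it.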
@@ -822,7 +821,7 @@ static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff
 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
 					      unsigned int limit)
 {
-	if (sk_rcvqueues_full(sk, skb, limit))
+	if (sk_rcvqueues_full(sk, limit))
 		return -ENOBUFS;

 	__sk_add_backlog(sk, skb);
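sk_add_backlog() keeps its skb parameter, since the packet is still what __sk_add_backlog() queues; only the queue-full test in front of it stopped needing it, so the call simply loses one argument and the -ENOBUFS early return is unchanged. Below is a minimal userspace sketch of how the two helpers fit together after the patch. Everything in it is a simplified stand-in invented for illustration: struct sock and struct sk_buff are reduced to the few fields touched here, sk_rmem_alloc is a plain integer rather than the kernel's atomic counter, the backlog is a hand-rolled singly linked list, and the qsize > limit comparison and the truesize accounting are assumptions about code the excerpt cuts off. Only the new signatures and the -ENOBUFS path come directly from the hunks above.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for illustration only; not the kernel definitions. */
struct sk_buff {
	struct sk_buff *next;
	unsigned int truesize;          /* models skb->truesize */
};

struct sock {
	struct sk_buff *backlog_head;   /* models the sk->sk_backlog list */
	struct sk_buff *backlog_tail;
	unsigned int sk_backlog_len;    /* models sk->sk_backlog.len */
	unsigned int sk_rmem_alloc;     /* models atomic_read(&sk->sk_rmem_alloc) */
};

/* New two-argument form: the skb being delivered no longer appears here. */
static bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog_len + sk->sk_rmem_alloc;

	return qsize > limit;           /* assumed body; not shown in the hunk */
}

/* Models __sk_add_backlog(): link the skb onto the per-socket backlog. */
static void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	skb->next = NULL;
	if (sk->backlog_tail)
		sk->backlog_tail->next = skb;
	else
		sk->backlog_head = skb;
	sk->backlog_tail = skb;
}

/* Models sk_add_backlog(): the skb is still needed for the queuing itself,
 * only the admission check in front of it stopped using it. */
static int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
			  unsigned int limit)
{
	if (sk_rcvqueues_full(sk, limit))
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog_len += skb->truesize; /* accounting; placement is part of the sketch */
	return 0;
}

int main(void)
{
	struct sock sk = { .sk_rmem_alloc = 4000 };
	struct sk_buff big = { .truesize = 9000 };
	struct sk_buff next = { .truesize = 1500 };

	/* The oversized packet is admitted: its own truesize is not part of
	 * the check, which is exactly what the comment above wants. */
	printf("first:  %d\n", sk_add_backlog(&sk, &big, 8192));

	/* Now 9000 + 4000 > 8192, so the following packet is refused. */
	printf("second: %d\n", sk_add_backlog(&sk, &next, 8192));
	return 0;
}

Running the sketch shows the behaviour the comment promises: one oversized packet is admitted because its own truesize is not part of the admission test, and the next packet gets -ENOBUFS once the accounted backlog plus receive-queue memory exceed the limit.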