@ -16,6 +16,238 @@
# include <net/netfilter/nft_reject.h>
# include <net/netfilter/ipv4/nf_reject.h>
# include <net/netfilter/ipv6/nf_reject.h>
# include <linux/ip.h>
# include <net/ip.h>
# include <linux/netfilter_bridge.h>
# include "../br_private.h"
static void nft_reject_br_push_etherhdr ( struct sk_buff * oldskb ,
struct sk_buff * nskb )
{
struct ethhdr * eth ;
eth = ( struct ethhdr * ) skb_push ( nskb , ETH_HLEN ) ;
skb_reset_mac_header ( nskb ) ;
ether_addr_copy ( eth - > h_source , eth_hdr ( oldskb ) - > h_dest ) ;
ether_addr_copy ( eth - > h_dest , eth_hdr ( oldskb ) - > h_source ) ;
eth - > h_proto = eth_hdr ( oldskb ) - > h_proto ;
skb_pull ( nskb , ETH_HLEN ) ;
}
static int nft_reject_iphdr_validate ( struct sk_buff * oldskb )
{
struct iphdr * iph ;
u32 len ;
if ( ! pskb_may_pull ( oldskb , sizeof ( struct iphdr ) ) )
return 0 ;
iph = ip_hdr ( oldskb ) ;
if ( iph - > ihl < 5 | | iph - > version ! = 4 )
return 0 ;
len = ntohs ( iph - > tot_len ) ;
if ( oldskb - > len < len )
return 0 ;
else if ( len < ( iph - > ihl * 4 ) )
return 0 ;
if ( ! pskb_may_pull ( oldskb , iph - > ihl * 4 ) )
return 0 ;
return 1 ;
}
static void nft_reject_br_send_v4_tcp_reset ( struct sk_buff * oldskb , int hook )
{
struct sk_buff * nskb ;
struct iphdr * niph ;
const struct tcphdr * oth ;
struct tcphdr _oth ;
if ( ! nft_reject_iphdr_validate ( oldskb ) )
return ;
oth = nf_reject_ip_tcphdr_get ( oldskb , & _oth , hook ) ;
if ( ! oth )
return ;
nskb = alloc_skb ( sizeof ( struct iphdr ) + sizeof ( struct tcphdr ) +
LL_MAX_HEADER , GFP_ATOMIC ) ;
if ( ! nskb )
return ;
skb_reserve ( nskb , LL_MAX_HEADER ) ;
niph = nf_reject_iphdr_put ( nskb , oldskb , IPPROTO_TCP ,
sysctl_ip_default_ttl ) ;
nf_reject_ip_tcphdr_put ( nskb , oldskb , oth ) ;
niph - > ttl = sysctl_ip_default_ttl ;
niph - > tot_len = htons ( nskb - > len ) ;
ip_send_check ( niph ) ;
nft_reject_br_push_etherhdr ( oldskb , nskb ) ;
br_deliver ( br_port_get_rcu ( oldskb - > dev ) , nskb ) ;
}
/* Answer @oldskb with an ICMP destination-unreachable of the given
 * @code, delivered straight back out of the bridge port the original
 * frame arrived on.  @hook is the netfilter hook number.
 */
static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
					  u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int quoted_len;
	void *quoted;
	__wsum csum;

	if (!nft_reject_iphdr_validate(oldskb))
		return;

	/* IP header checks: never reply to a non-first fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	quoted_len = min_t(unsigned int, 536, oldskb->len);
	if (!pskb_may_pull(oldskb, quoted_len))
		return;

	/* Don't quote a packet whose checksum is already broken. */
	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + quoted_len, GFP_ATOMIC);
	if (nskb == NULL)
		return;

	/* Leave headroom for the link-layer header pushed later. */
	skb_reserve(nskb, LL_MAX_HEADER);

	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   sysctl_ip_default_ttl);

	skb_reset_transport_header(nskb);
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	memset(icmph, 0, sizeof(*icmph));
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = code;

	/* Quote the start of the offending datagram in the ICMP payload. */
	quoted = skb_put(nskb, quoted_len);
	memcpy(quoted, skb_network_header(oldskb), quoted_len);

	csum = csum_partial((void *)icmph,
			    quoted_len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
static int nft_reject_ip6hdr_validate ( struct sk_buff * oldskb )
{
struct ipv6hdr * hdr ;
u32 pkt_len ;
if ( ! pskb_may_pull ( oldskb , sizeof ( struct ipv6hdr ) ) )
return 0 ;
hdr = ipv6_hdr ( oldskb ) ;
if ( hdr - > version ! = 6 )
return 0 ;
pkt_len = ntohs ( hdr - > payload_len ) ;
if ( pkt_len + sizeof ( struct ipv6hdr ) > oldskb - > len )
return 0 ;
return 1 ;
}
static void nft_reject_br_send_v6_tcp_reset ( struct net * net ,
struct sk_buff * oldskb , int hook )
{
struct sk_buff * nskb ;
const struct tcphdr * oth ;
struct tcphdr _oth ;
unsigned int otcplen ;
struct ipv6hdr * nip6h ;
if ( ! nft_reject_ip6hdr_validate ( oldskb ) )
return ;
oth = nf_reject_ip6_tcphdr_get ( oldskb , & _oth , & otcplen , hook ) ;
if ( ! oth )
return ;
nskb = alloc_skb ( sizeof ( struct ipv6hdr ) + sizeof ( struct tcphdr ) +
LL_MAX_HEADER , GFP_ATOMIC ) ;
if ( ! nskb )
return ;
skb_reserve ( nskb , LL_MAX_HEADER ) ;
nip6h = nf_reject_ip6hdr_put ( nskb , oldskb , IPPROTO_TCP ,
net - > ipv6 . devconf_all - > hop_limit ) ;
nf_reject_ip6_tcphdr_put ( nskb , oldskb , oth , otcplen ) ;
nip6h - > payload_len = htons ( nskb - > len - sizeof ( struct ipv6hdr ) ) ;
nft_reject_br_push_etherhdr ( oldskb , nskb ) ;
br_deliver ( br_port_get_rcu ( oldskb - > dev ) , nskb ) ;
}
/* Answer @oldskb with an ICMPv6 destination-unreachable of the given
 * @code, delivered straight back out of the bridge port the original
 * frame arrived on.  @hook is the netfilter hook number.
 */
static void nft_reject_br_send_v6_unreach(struct net *net,
					  struct sk_buff *oldskb, int hook,
					  u8 code)
{
	struct sk_buff *nskb;
	struct ipv6hdr *nip6h;
	struct icmp6hdr *icmp6h;
	unsigned int len;
	void *payload;

	if (!nft_reject_ip6hdr_validate(oldskb))
		return;

	/* Include "As much of invoking packet as possible without the ICMPv6
	 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
	 */
	len = min_t(unsigned int, 1220, oldskb->len);
	if (!pskb_may_pull(oldskb, len))
		return;

	/* Fix: size the allocation for an IPv6 header, not an IPv4 one.
	 * Using sizeof(struct iphdr) here under-allocated by 20 bytes,
	 * which nf_reject_ip6hdr_put()'s 40-byte header put could then
	 * overrun.
	 */
	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (nskb == NULL)
		return;

	/* Leave headroom for the link-layer header pushed later. */
	skb_reserve(nskb, LL_MAX_HEADER);

	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
				     net->ipv6.devconf_all->hop_limit);

	skb_reset_transport_header(nskb);
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	memset(icmp6h, 0, sizeof(*icmp6h));
	icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
	icmp6h->icmp6_code = code;

	/* Quote the start of the offending packet in the ICMPv6 payload. */
	payload = skb_put(nskb, len);
	memcpy(payload, skb_network_header(oldskb), len);

	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
	icmp6h->icmp6_cksum =
		csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
				nskb->len - sizeof(struct ipv6hdr),
				IPPROTO_ICMPV6,
				csum_partial(icmp6h,
					     nskb->len - sizeof(struct ipv6hdr),
					     0));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
static void nft_reject_bridge_eval ( const struct nft_expr * expr ,
struct nft_data data [ NFT_REG_MAX + 1 ] ,
@ -23,35 +255,46 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
{
struct nft_reject * priv = nft_expr_priv ( expr ) ;
struct net * net = dev_net ( ( pkt - > in ! = NULL ) ? pkt - > in : pkt - > out ) ;
const unsigned char * dest = eth_hdr ( pkt - > skb ) - > h_dest ;
if ( is_broadcast_ether_addr ( dest ) | |
is_multicast_ether_addr ( dest ) )
goto out ;
switch ( eth_hdr ( pkt - > skb ) - > h_proto ) {
case htons ( ETH_P_IP ) :
switch ( priv - > type ) {
case NFT_REJECT_ICMP_UNREACH :
nf_send_unreach ( pkt - > skb , priv - > icmp_code ) ;
nft_reject_br_send_v4_unreach ( pkt - > skb ,
pkt - > ops - > hooknum ,
priv - > icmp_code ) ;
break ;
case NFT_REJECT_TCP_RST :
nf_send_reset ( pkt - > skb , pkt - > ops - > hooknum ) ;
nft_reject_br_send_v4_tcp_reset ( pkt - > skb ,
pkt - > ops - > hooknum ) ;
break ;
case NFT_REJECT_ICMPX_UNREACH :
nf_send_unreach ( pkt - > skb ,
nft_reject_icmp_code ( priv - > icmp_code ) ) ;
nft_reject_br_send_v4_unreach ( pkt - > skb ,
pkt - > ops - > hooknum ,
nft_reject_icmp_code ( priv - > icmp_code ) ) ;
break ;
}
break ;
case htons ( ETH_P_IPV6 ) :
switch ( priv - > type ) {
case NFT_REJECT_ICMP_UNREACH :
nf_send_unreach6 ( net , pkt - > skb , priv - > icmp_code ,
pkt - > ops - > hooknum ) ;
nft_reject_br_send_v6_unreach ( net , pkt - > skb ,
pkt - > ops - > hooknum ,
priv - > icmp_code ) ;
break ;
case NFT_REJECT_TCP_RST :
nf_send_reset6 ( net , pkt - > skb , pkt - > ops - > hooknum ) ;
nft_reject_br_send_v6_tcp_reset ( net , pkt - > skb ,
pkt - > ops - > hooknum ) ;
break ;
case NFT_REJECT_ICMPX_UNREACH :
nf_send_unreach6 ( net , pkt - > skb ,
nft_reject_icmpv6_code ( priv - > icmp_code ) ,
pkt - > ops - > hooknum ) ;
nft_reject_br _send_v6_ unreach ( net , pkt - > skb ,
pkt - > ops - > hooknum ,
nft_reject_icmpv6_code ( priv - > icmp_code ) ) ;
break ;
}
break ;
@ -59,15 +302,38 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
/* No explicit way to reject this protocol, drop it. */
break ;
}
out :
data [ NFT_REG_VERDICT ] . verdict = NF_DROP ;
}
static int nft_reject_bridge_validate_hooks ( const struct nft_chain * chain )
{
struct nft_base_chain * basechain ;
if ( chain - > flags & NFT_BASE_CHAIN ) {
basechain = nft_base_chain ( chain ) ;
switch ( basechain - > ops [ 0 ] . hooknum ) {
case NF_BR_PRE_ROUTING :
case NF_BR_LOCAL_IN :
break ;
default :
return - EOPNOTSUPP ;
}
}
return 0 ;
}
static int nft_reject_bridge_init ( const struct nft_ctx * ctx ,
const struct nft_expr * expr ,
const struct nlattr * const tb [ ] )
{
struct nft_reject * priv = nft_expr_priv ( expr ) ;
int icmp_code ;
int icmp_code , err ;
err = nft_reject_bridge_validate_hooks ( ctx - > chain ) ;
if ( err < 0 )
return err ;
if ( tb [ NFTA_REJECT_TYPE ] = = NULL )
return - EINVAL ;
@ -116,6 +382,13 @@ nla_put_failure:
return - 1 ;
}
static int nft_reject_bridge_validate ( const struct nft_ctx * ctx ,
const struct nft_expr * expr ,
const struct nft_data * * data )
{
return nft_reject_bridge_validate_hooks ( ctx - > chain ) ;
}
static struct nft_expr_type nft_reject_bridge_type ;
static const struct nft_expr_ops nft_reject_bridge_ops = {
. type = & nft_reject_bridge_type ,
@ -123,6 +396,7 @@ static const struct nft_expr_ops nft_reject_bridge_ops = {
. eval = nft_reject_bridge_eval ,
. init = nft_reject_bridge_init ,
. dump = nft_reject_bridge_dump ,
. validate = nft_reject_bridge_validate ,
} ;
static struct nft_expr_type nft_reject_bridge_type __read_mostly = {