Merge "net: qualcomm: rmnet: Add support for UL aggregation"

Branch: tirimbino
Committed by qctecmdr Service via Gerrit
commit f95967f731
Files changed (6):
  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c    (8 lines)
  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h    (13 lines)
  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c  (20 lines)
  drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h       (4 lines)
  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c  (193 lines)
  drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h   (3 lines)

drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -21,6 +21,7 @@
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
/* Locking scheme -
* The shared resource which needs to be protected is realdev->rx_handler_data.
@@ -66,6 +67,8 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
if (port->nr_rmnet_devs)
return -EINVAL;
rmnet_map_tx_aggregate_exit(port);
kfree(port);
netdev_rx_handler_unregister(real_dev);
@@ -104,6 +107,8 @@ static int rmnet_register_real_device(struct net_device *real_dev)
for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
INIT_HLIST_HEAD(&port->muxed_ep[entry]);
rmnet_map_tx_aggregate_init(port);
netdev_dbg(real_dev, "registered with rmnet\n");
return 0;
}
@@ -136,7 +141,8 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
+ u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION |
+       RMNET_EGRESS_FORMAT_AGGREGATION;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;

drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -37,6 +37,19 @@ struct rmnet_port {
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
struct net_device *bridge_ep;
u16 egress_agg_size;
u16 egress_agg_count;
/* Protect aggregation related elements */
spinlock_t agg_lock;
struct sk_buff *agg_skb;
int agg_state;
u8 agg_count;
struct timespec agg_time;
struct timespec agg_last;
struct hrtimer hrtimer;
};
extern struct rtnl_link_ops rmnet_link_ops;
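
For orientation, an editor's gloss (not part of the patch) on what each new rmnet_port field tracks, based on how the rmnet_map_data.c code below uses them:

/* Editor's gloss, hypothetical comments only:
 *   egress_agg_size   - byte budget for one aggregate skb (8192 by default)
 *   egress_agg_count  - maximum MAP frames per aggregate (20 by default)
 *   agg_lock          - protects all of the aggregation state below
 *   agg_skb           - the aggregate being built; NULL when none is pending
 *   agg_count         - MAP frames copied into agg_skb so far
 *   agg_time          - when agg_skb was started (age check for flushing)
 *   agg_last          - when the previous egress packet was seen (bypass check)
 *   agg_state         - -EINPROGRESS while the flush hrtimer is armed, else 0
 *   hrtimer           - one-shot timer that flushes a lingering agg_skb
 */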

drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -191,8 +191,26 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
map_header->mux_id = mux_id;
skb->protocol = htons(ETH_P_MAP);
if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
int non_linear_skb;
if (rmnet_map_tx_agg_skip(skb, required_headroom))
goto done;
non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
skb_is_nonlinear(skb);
if (non_linear_skb) {
if (unlikely(__skb_linearize(skb)))
goto done;
}
rmnet_map_tx_aggregate(skb, port);
return -EINPROGRESS;
}
done:
skb->protocol = htons(ETH_P_MAP);
return 0;
fail:
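
The -EINPROGRESS return is the key contract change: it tells the caller that the egress handler has taken ownership of the skb and queued it for aggregation, so the skb must be neither freed nor transmitted again. Below is a minimal sketch of a caller honoring that contract, assuming a function shaped like the driver's rmnet_egress_handler(); the name example_egress() and its error handling are illustrative, not the upstream code.

/* Editor's sketch, as it might appear inside rmnet_handlers.c, where the
 * static rmnet_map_egress_handler() above is visible. */
static void example_egress(struct sk_buff *skb, struct rmnet_port *port,
			   u8 mux_id, struct net_device *orig_dev)
{
	int rc = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);

	if (rc == -EINPROGRESS)
		return;			/* skb now lives in port->agg_skb; a later
					 * packet or the hrtimer will flush it */
	if (rc) {
		kfree_skb(skb);		/* handler failed; drop the packet */
		return;
	}

	dev_queue_xmit(skb);		/* normal, unaggregated MAP frame */
}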

drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -91,5 +91,9 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev);
int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
#endif /* _RMNET_MAP_H_ */

drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -400,3 +400,196 @@ sw_csum:
ul_header->csum_enabled = 0;
ul_header->udp_ip4_ind = 0;
}
struct rmnet_agg_work {
struct work_struct work;
struct rmnet_port *port;
};
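/* Editor's note: both tunables are in nanoseconds - flush an aggregate that
 * has been held for more than ~1 ms, and bypass aggregation entirely when
 * successive packets arrive more than ~10 ms apart. */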
long rmnet_agg_time_limit __read_mostly = 1000000L;
long rmnet_agg_bypass_time __read_mostly = 10000000L;
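/* Editor's note: returns nonzero for ICMP and ICMPv6 packets (including
 * ICMPv6 behind a fragment header) so that latency-sensitive probes are sent
 * immediately instead of being held in the aggregation buffer. */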
int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset)
{
u8 *packet_start = skb->data + offset;
int is_icmp = 0;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip4h = (struct iphdr *)(packet_start);
if (ip4h->protocol == IPPROTO_ICMP)
is_icmp = 1;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
if (ip6h->nexthdr == IPPROTO_ICMPV6) {
is_icmp = 1;
} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
struct frag_hdr *frag;
frag = (struct frag_hdr *)(packet_start
+ sizeof(struct ipv6hdr));
if (frag->nexthdr == IPPROTO_ICMPV6)
is_icmp = 1;
}
}
return is_icmp;
}
static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
{
struct rmnet_agg_work *real_work;
struct rmnet_port *port;
unsigned long flags;
struct sk_buff *skb;
int agg_count = 0;
real_work = (struct rmnet_agg_work *)work;
port = real_work->port;
skb = NULL;
spin_lock_irqsave(&port->agg_lock, flags);
if (likely(port->agg_state == -EINPROGRESS)) {
/* Buffer may have already been shipped out */
if (likely(port->agg_skb)) {
skb = port->agg_skb;
agg_count = port->agg_count;
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
}
port->agg_state = 0;
}
spin_unlock_irqrestore(&port->agg_lock, flags);
if (skb) {
skb->protocol = htons(ETH_P_MAP);
dev_queue_xmit(skb);
}
kfree(work);
}
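/* Editor's note: the hrtimer callback below runs in the timer's atomic
 * context, so the actual flush (dev_queue_xmit() on the pending aggregate)
 * is deferred to process context via the work item above. */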
enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
{
struct rmnet_agg_work *work;
struct rmnet_port *port;
port = container_of(t, struct rmnet_port, hrtimer);
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
port->agg_state = 0;
return HRTIMER_NORESTART;
}
INIT_WORK(&work->work, rmnet_map_flush_tx_packet_work);
work->port = port;
schedule_work((struct work_struct *)work);
return HRTIMER_NORESTART;
}
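/* Editor's note: core uplink aggregation path. Each MAP frame is copied into
 * an oversized agg_skb until the size, frame-count or age limit is reached,
 * at which point the whole buffer is transmitted; the 3 ms hrtimer flushes
 * any partially filled aggregate. */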
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
{
struct timespec diff, last;
int size, agg_count = 0;
struct sk_buff *agg_skb;
unsigned long flags;
u8 *dest_buff;
new_packet:
spin_lock_irqsave(&port->agg_lock, flags);
memcpy(&last, &port->agg_last, sizeof(struct timespec));
getnstimeofday(&port->agg_last);
if (!port->agg_skb) {
/* Check to see if we should agg first. If the traffic is very
* sparse, don't aggregate. We will need to tune this later
*/
diff = timespec_sub(port->agg_last, last);
if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time) {
spin_unlock_irqrestore(&port->agg_lock, flags);
skb->protocol = htons(ETH_P_MAP);
dev_queue_xmit(skb);
return;
}
size = port->egress_agg_size - skb->len;
port->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
if (!port->agg_skb) {
port->agg_skb = 0;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
spin_unlock_irqrestore(&port->agg_lock, flags);
skb->protocol = htons(ETH_P_MAP);
dev_queue_xmit(skb);
return;
}
port->agg_count = 1;
getnstimeofday(&port->agg_time);
dev_kfree_skb_any(skb);
goto schedule;
}
diff = timespec_sub(port->agg_last, port->agg_time);
if (skb->len > (port->egress_agg_size - port->agg_skb->len) ||
port->agg_count >= port->egress_agg_count ||
diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
agg_skb = port->agg_skb;
agg_count = port->agg_count;
port->agg_skb = 0;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
port->agg_state = 0;
spin_unlock_irqrestore(&port->agg_lock, flags);
hrtimer_cancel(&port->hrtimer);
agg_skb->protocol = htons(ETH_P_MAP);
dev_queue_xmit(agg_skb);
goto new_packet;
}
dest_buff = skb_put(port->agg_skb, skb->len);
memcpy(dest_buff, skb->data, skb->len);
port->agg_count++;
dev_kfree_skb_any(skb);
schedule:
if (port->agg_state != -EINPROGRESS) {
port->agg_state = -EINPROGRESS;
hrtimer_start(&port->hrtimer, ns_to_ktime(3000000),
HRTIMER_MODE_REL);
}
spin_unlock_irqrestore(&port->agg_lock, flags);
}
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
port->egress_agg_size = 8192;
port->egress_agg_count = 20;
spin_lock_init(&port->agg_lock);
}
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
{
unsigned long flags;
hrtimer_cancel(&port->hrtimer);
spin_lock_irqsave(&port->agg_lock, flags);
if (port->agg_state == -EINPROGRESS) {
if (port->agg_skb) {
kfree_skb(port->agg_skb);
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
}
port->agg_state = 0;
}
spin_unlock_irqrestore(&port->agg_lock, flags);
}
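
To make the thresholds concrete, here is a small stand-alone C sketch (an editor's addition, userspace, using simplified single-value nanosecond timestamps rather than struct timespec) of the bypass and flush decisions that rmnet_map_tx_aggregate() makes with the defaults set in rmnet_map_tx_aggregate_init():

#include <stdbool.h>
#include <stdio.h>

#define AGG_SIZE        8192        /* port->egress_agg_size          */
#define AGG_COUNT       20          /* port->egress_agg_count         */
#define AGG_TIME_LIMIT  1000000L    /* rmnet_agg_time_limit, ns       */
#define AGG_BYPASS_TIME 10000000L   /* rmnet_agg_bypass_time, ns      */

/* Send immediately (no aggregation) when traffic is sparse, i.e. the
 * previous egress packet was seen more than ~10 ms ago. */
static bool should_bypass(long gap_since_last_pkt_ns)
{
	return gap_since_last_pkt_ns > AGG_BYPASS_TIME;
}

/* Flush the pending aggregate before handling this packet when the buffer
 * cannot fit it, the frame count is reached, or the buffer is too old. */
static bool should_flush(int pkt_len, int agg_len, int agg_count,
			 long agg_age_ns)
{
	return pkt_len > (AGG_SIZE - agg_len) ||
	       agg_count >= AGG_COUNT ||
	       agg_age_ns > AGG_TIME_LIMIT;
}

int main(void)
{
	/* A 1400-byte packet arriving 0.2 ms after the previous one, with
	 * 7000 bytes / 5 frames already buffered for 0.5 ms: */
	printf("bypass: %d\n", should_bypass(200000L));               /* 0 */
	printf("flush:  %d\n", should_flush(1400, 7000, 5, 500000L)); /* 1 */
	return 0;
}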

drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -18,6 +18,9 @@
#define RMNET_NEEDED_HEADROOM 16
#define RMNET_TX_QUEUE_LEN 1000
/* Constants */
#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(31)
/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */
