From 4f41de941e2af4894da5b99768db1728ac9f277f Mon Sep 17 00:00:00 2001
From: Chris Lew
Date: Mon, 16 Dec 2019 17:02:13 -0800
Subject: [PATCH] net: qrtr: Add backup skb pool

Add a pool of SKBs that can be used when the system is in low memory
conditions. This pool will be shared between all nodes and replenished
by a worker function.

Change-Id: I639a9ac76db726dc8ad46b12d3b3d560c674939c
Signed-off-by: Chris Lew
Signed-off-by: Vivek Golani
---
 net/qrtr/qrtr.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 69 insertions(+), 2 deletions(-)

diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 514445b1741a..54f2d331d212 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -135,6 +135,15 @@ static DECLARE_RWSEM(qrtr_node_lock);
 static DEFINE_IDR(qrtr_ports);
 static DEFINE_MUTEX(qrtr_port_lock);
 
+/* backup buffers */
+#define QRTR_BACKUP_HI_NUM	5
+#define QRTR_BACKUP_HI_SIZE	SZ_16K
+#define QRTR_BACKUP_LO_NUM	20
+#define QRTR_BACKUP_LO_SIZE	SZ_1K
+static struct sk_buff_head qrtr_backup_lo;
+static struct sk_buff_head qrtr_backup_hi;
+static struct work_struct qrtr_backup_work;
+
 /**
  * struct qrtr_node - endpoint node
  * @ep_lock: lock for endpoint management and callbacks
@@ -694,6 +703,54 @@ int qrtr_peek_pkt_size(const void *data)
 }
 EXPORT_SYMBOL(qrtr_peek_pkt_size);
 
+static void qrtr_alloc_backup(struct work_struct *work)
+{
+	struct sk_buff *skb;
+
+	while (skb_queue_len(&qrtr_backup_lo) < QRTR_BACKUP_LO_NUM) {
+		skb = alloc_skb(QRTR_BACKUP_LO_SIZE, GFP_KERNEL);
+		if (!skb)
+			break;
+		skb_queue_tail(&qrtr_backup_lo, skb);
+	}
+	while (skb_queue_len(&qrtr_backup_hi) < QRTR_BACKUP_HI_NUM) {
+		skb = alloc_skb(QRTR_BACKUP_HI_SIZE, GFP_KERNEL);
+		if (!skb)
+			break;
+		skb_queue_tail(&qrtr_backup_hi, skb);
+	}
+}
+
+static struct sk_buff *qrtr_get_backup(size_t len)
+{
+	struct sk_buff *skb = NULL;
+
+	if (len < QRTR_BACKUP_LO_SIZE)
+		skb = skb_dequeue(&qrtr_backup_lo);
+	else if (len < QRTR_BACKUP_HI_SIZE)
+		skb = skb_dequeue(&qrtr_backup_hi);
+
+	if (skb)
+		queue_work(system_unbound_wq, &qrtr_backup_work);
+
+	return skb;
+}
+
+static void qrtr_backup_init(void)
+{
+	skb_queue_head_init(&qrtr_backup_lo);
+	skb_queue_head_init(&qrtr_backup_hi);
+	INIT_WORK(&qrtr_backup_work, qrtr_alloc_backup);
+	queue_work(system_unbound_wq, &qrtr_backup_work);
+}
+
+static void qrtr_backup_deinit(void)
+{
+	cancel_work_sync(&qrtr_backup_work);
+	skb_queue_purge(&qrtr_backup_lo);
+	skb_queue_purge(&qrtr_backup_hi);
+}
+
 /**
  * qrtr_endpoint_post() - post incoming data
  * @ep: endpoint handle
@@ -718,8 +775,13 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 		return -EINVAL;
 
 	skb = alloc_skb_with_frags(sizeof(*v1), len, 0, &errcode, GFP_ATOMIC);
-	if (!skb)
-		return -ENOMEM;
+	if (!skb) {
+		skb = qrtr_get_backup(len);
+		if (!skb) {
+			pr_err("qrtr: Unable to get skb with len:%lu\n", len);
+			return -ENOMEM;
+		}
+	}
 
 	skb_reserve(skb, sizeof(*v1));
 	cb = (struct qrtr_cb *)skb->cb;
@@ -1951,7 +2013,10 @@ static int __init qrtr_proto_init(void)
 
 	rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0);
 
+	qrtr_backup_init();
+
 	return 0;
 }
+
 postcore_initcall(qrtr_proto_init);
 
@@ -1960,6 +2025,8 @@ static void __exit qrtr_proto_fini(void)
 	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
 	sock_unregister(qrtr_family.family);
 	proto_unregister(&qrtr_proto);
+
+	qrtr_backup_deinit();
 }
 
 module_exit(qrtr_proto_fini);
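
Note (not part of the patch): the change above is an instance of a generic
"reserve pool with deferred refill" pattern. Below is a minimal userspace
sketch of the same idea, assuming only libc and POSIX threads; every name
here (reserve_refill, buf_alloc, RESERVE_*) is illustrative and does not
exist in the qrtr driver.

/*
 * Keep a small reserve of preallocated buffers, hand one out only when the
 * normal allocator fails, and refill the reserve asynchronously, mirroring
 * qrtr_get_backup()/qrtr_alloc_backup() in the patch.
 */
#include <pthread.h>
#include <stdlib.h>

#define RESERVE_NUM	8
#define RESERVE_SIZE	1024

static void *reserve[RESERVE_NUM];
static int reserve_len;
static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;

/* Deferred refill, analogous to the qrtr_backup_work worker. */
static void *reserve_refill(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&reserve_lock);
	while (reserve_len < RESERVE_NUM) {
		void *buf = malloc(RESERVE_SIZE);

		if (!buf)
			break;
		reserve[reserve_len++] = buf;
	}
	pthread_mutex_unlock(&reserve_lock);
	return NULL;
}

/* Allocation path: try the normal allocator first, then the reserve. */
static void *buf_alloc(size_t len)
{
	void *buf = malloc(len);
	pthread_t worker;

	if (buf || len > RESERVE_SIZE)
		return buf;

	pthread_mutex_lock(&reserve_lock);
	if (reserve_len)
		buf = reserve[--reserve_len];
	pthread_mutex_unlock(&reserve_lock);

	/* Kick an asynchronous refill, like queue_work() in the patch. */
	if (buf && pthread_create(&worker, NULL, reserve_refill, NULL) == 0)
		pthread_detach(worker);
	return buf;
}

The two-tier sizing in the patch (QRTR_BACKUP_LO/HI) follows the same logic,
just with separate reserves so a large control message cannot drain the pool
kept for small, frequent packets.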