[PATCH OLK-6.6] net/venetcls: Fix flow table init for rps_policy
hulk inclusion
category: bugfix
Link: https://gitee.com/openeuler/kernel/issues/ICBFCS
CVE: NA

--------------------------------

Initialize the flow table when rps_policy is enabled, and add a
vecls_flow_enabled static key to optimize performance.

Signed-off-by: Yue Haibing <yuehaibing@huawei.com>
---
 include/linux/venetcls.h     |  3 +--
 net/core/dev.c               | 14 ++++++++++----
 net/venetcls/venetcls_flow.c | 22 +++++++++++++---------
 net/venetcls/venetcls_main.c |  4 ++--
 4 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/include/linux/venetcls.h b/include/linux/venetcls.h
index acbffdb91ee8..fdafe47e8f9f 100644
--- a/include/linux/venetcls.h
+++ b/include/linux/venetcls.h
@@ -16,6 +16,7 @@ struct vecls_hook_ops {
 typedef int (*enqueue_f)(struct sk_buff *skb, int cpu, unsigned int *qtail);
 extern const struct vecls_hook_ops __rcu *vecls_ops;
 extern struct static_key_false vecls_localrps_needed;
+extern struct static_key_false vecls_flow_enabled;
 
 static inline void venetcls_cfg_rxcls(struct sock *sk, int is_del)
 {
@@ -75,8 +76,6 @@ venetcls_skb_set_localcpu(struct sk_buff *skb, enqueue_f enq_func, int *ret)
 	struct net_device *dev = skb->dev;
 	bool result = false;
 
-	if (!static_branch_unlikely(&vecls_localrps_needed))
-		return result;
 	if (!dev || !(dev->type == ARPHRD_LOOPBACK && dev->flags & IFF_LOOPBACK))
 		return result;
 
diff --git a/net/core/dev.c b/net/core/dev.c
index b62fcd0a6daf..10445e98c8a4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -166,6 +166,8 @@ const struct vecls_hook_ops __rcu *vecls_ops __read_mostly;
 EXPORT_SYMBOL_GPL(vecls_ops);
 struct static_key_false vecls_localrps_needed __read_mostly;
 EXPORT_SYMBOL(vecls_localrps_needed);
+struct static_key_false vecls_flow_enabled __read_mostly;
+EXPORT_SYMBOL(vecls_flow_enabled);
 #endif
 
 static DEFINE_SPINLOCK(ptype_lock);
@@ -5889,9 +5891,11 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 
 	rcu_read_lock();
 #if IS_ENABLED(CONFIG_VENETCLS)
-	if (venetcls_skb_set_cpu(skb, enqueue_to_backlog, &ret)) {
-		rcu_read_unlock();
-		return ret;
+	if (static_branch_unlikely(&vecls_flow_enabled)) {
+		if (venetcls_skb_set_cpu(skb, enqueue_to_backlog, &ret)) {
+			rcu_read_unlock();
+			return ret;
+		}
 	}
 #endif
 #ifdef CONFIG_RPS
@@ -5927,7 +5931,9 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 
 	rcu_read_lock();
 #if IS_ENABLED(CONFIG_VENETCLS)
-	venetcls_skblist_set_cpu(head, enqueue_to_backlog);
+	if (static_branch_unlikely(&vecls_flow_enabled)) {
+		venetcls_skblist_set_cpu(head, enqueue_to_backlog);
+	}
 #endif
 #ifdef CONFIG_RPS
 	if (static_branch_unlikely(&rps_needed)) {
diff --git a/net/venetcls/venetcls_flow.c b/net/venetcls/venetcls_flow.c
index 758067a7c6f1..9562dc9ae03c 100644
--- a/net/venetcls/venetcls_flow.c
+++ b/net/venetcls/venetcls_flow.c
@@ -122,13 +122,13 @@ void _vecls_flow_update(struct sock *sk, struct sk_buff *skb)
 	rcu_read_unlock();
 }
 
-static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *skb)
+static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *skb, u32 hash)
 {
 	struct vecls_numa_bound_dev_info *bound_dev = NULL;
 	struct vecls_netdev_info *vecls_dev;
 	struct vecls_numa_info *numa_info;
 	int i, devid, rxq_num, rxq_id;
-	u32 hash, index;
+	u32 index;
 
 	numa_info = get_vecls_numa_info(nid);
 	if (!numa_info)
@@ -154,7 +154,6 @@ static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *s
 	}
 	if (rxq_num == 0)
 		return -1;
 
-	hash = skb_get_hash(skb);
 	index = hash % rxq_num;
 	i = 0;
@@ -170,19 +169,19 @@ static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *s
 }
 
 static void set_vecls_cpu(struct net_device *dev, struct sk_buff *skb,
-			  struct vecls_dev_flow *old_rflow, int old_rxq_id, u16 next_cpu)
+			  struct vecls_dev_flow *old_rflow, int old_rxq_id, u16 next_cpu, u32 hash)
 {
 	struct netdev_rx_queue *rxqueue;
 	struct vecls_dev_flow_table *dtb;
 	struct vecls_dev_flow *rflow;
-	u32 flow_id, hash;
 	int rxq_index, rc;
+	u32 flow_id;
 
 	if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
 	    !(dev->features & NETIF_F_NTUPLE))
 		return;
 
-	rxq_index = flow_get_queue_idx(dev, cpu_to_node(next_cpu), skb);
+	rxq_index = flow_get_queue_idx(dev, cpu_to_node(next_cpu), skb, hash);
 	if (rxq_index == skb_get_rx_queue(skb) || rxq_index < 0) {
 		vecls_debug("%s skb:%p, old_rxq:%d, next_cpu:%d new_rxq:%d\n",
 			    __func__, skb, old_rxq_id, next_cpu, rxq_index);
@@ -194,7 +193,6 @@ static void set_vecls_cpu(struct net_device *dev, struct sk_buff *skb,
 	if (!dtb)
 		return;
 
-	hash = skb_get_hash(skb);
 	flow_id = hash & dtb->mask;
 	rflow = &dtb->flows[flow_id];
 
@@ -245,7 +243,6 @@ static void __vecls_set_cpu(struct sk_buff *skb, struct net_device *ndev,
 	u32 last_recv_cpu, hash, val, cpu, tcpu, newcpu;
 	struct vecls_dev_flow *rflow;
 
-	cpu = raw_smp_processor_id();
 	skb_reset_network_header(skb);
 	hash = skb_get_hash(skb);
 	if (!hash)
@@ -260,17 +257,22 @@ static void __vecls_set_cpu(struct sk_buff *skb, struct net_device *ndev,
 		return;
 
 	newcpu = get_rps_cpu(last_recv_cpu, hash, rps_policy);
+	if (rps_policy)
+		*rcpu = newcpu;
+	vecls_debug("last:%u curcpu:%d newcpu:%d rcpu:%d\n",
+		    last_recv_cpu, raw_smp_processor_id(), newcpu, *rcpu);
 
 	if (rflow->isvalid && cpu_to_node(rflow->cpu) == cpu_to_node(newcpu)) {
 		rflow->timeout = jiffies;
 		return;
 	}
 
+	cpu = raw_smp_processor_id();
 	if (cpu_to_node(cpu) == cpu_to_node(newcpu))
 		return;
 
 	if (tcpu >= nr_cpu_ids)
-		set_vecls_cpu(ndev, skb, rflow, old_rxq_id, newcpu);
+		set_vecls_cpu(ndev, skb, rflow, old_rxq_id, newcpu, hash);
 }
 
 static inline void do_loopback_rps(struct sk_buff *skb, int *rcpu)
@@ -618,6 +620,7 @@ int vecls_flow_res_init(void)
 	if (mode != 0) //for lo rps
 		RCU_INIT_POINTER(vecls_ops, &vecls_flow_ops);
 	synchronize_rcu();
+	static_branch_inc(&vecls_flow_enabled);
 	return 0;
 
 clean:
@@ -627,6 +630,7 @@ int vecls_flow_res_init(void)
 
 void vecls_flow_res_clean(void)
 {
+	static_branch_dec(&vecls_flow_enabled);
 	RCU_INIT_POINTER(vecls_ops, NULL);
 	synchronize_rcu();
 	vecls_sock_flow_table_release();
diff --git a/net/venetcls/venetcls_main.c b/net/venetcls/venetcls_main.c
index 00ec0b0e2498..d75f1fb9fff7 100644
--- a/net/venetcls/venetcls_main.c
+++ b/net/venetcls/venetcls_main.c
@@ -1125,7 +1125,7 @@ static __init int vecls_init(void)
 		err = vecls_ntuple_res_init();
 		if (err)
 			goto clean_rxq;
-		if (lo_rps_policy)
+		if (lo_rps_policy || rps_policy)
 			err = vecls_flow_res_init();
 	} else {
 		err = vecls_flow_res_init();
@@ -1163,7 +1163,7 @@ static __exit void vecls_exit(void)
 #endif
 	if (mode == 0) {
 		vecls_ntuple_res_clean();
-		if (lo_rps_policy)
+		if (lo_rps_policy || rps_policy)
 			vecls_flow_res_clean();
 	} else {
 		vecls_flow_res_clean();
-- 
2.34.1
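Note (not part of the patch): the vecls_flow_enabled key above uses the kernel's
static-key pattern, where the hot-path check compiles down to a patched branch
that costs essentially nothing until the feature is switched on. Below is a
minimal, self-contained sketch of that pattern; the demo_* names are
hypothetical and only illustrate how static_branch_unlikely(),
static_branch_inc() and static_branch_dec() fit together, they are not part of
venetcls.

/* demo_static_key.c -- illustrative sketch, assumes a standard kernel build */
#include <linux/jump_label.h>
#include <linux/module.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(demo_feature_enabled);

/* Hot path: while the key is disabled, this branch is a patched no-op. */
static void demo_hot_path(void)
{
	if (static_branch_unlikely(&demo_feature_enabled))
		pr_info("demo feature active on this path\n");
}

static int __init demo_init(void)
{
	/* Like vecls_flow_res_init(): flip the key only after resources exist. */
	static_branch_inc(&demo_feature_enabled);
	demo_hot_path();
	return 0;
}

static void __exit demo_exit(void)
{
	/* Like vecls_flow_res_clean(): disable the key before tearing down. */
	static_branch_dec(&demo_feature_enabled);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("static-key gating sketch");

Since static_branch_inc()/static_branch_dec() are reference counted, pairing
them in the init and clean paths keeps enable/disable balanced, which matches
how the patch pairs them in vecls_flow_res_init()/vecls_flow_res_clean().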
FeedBack: The patch(es) you sent to kernel@openeuler.org could not be converted to a PR!
Mailing list address: https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/5IV...
Failed Reason: applying the patch(es) failed, Patch failed at 0001 net/venetcls: Fix flow table init for rps_policy
Suggest Solution: please check the failure reason and confirm whether the patch(es) apply on top of the latest code of the expected branch