
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICS3XV

--------------------------------

Use a workqueue for ntuple configuration to avoid taking mutex_lock()
in RCU context.

Fixes: 4bed6ba0e88f ("net/oenetcls: introduce oenetcls for network optimization")
Signed-off-by: Wang Liang <wangliang74@huawei.com>
---
 net/oenetcls/oenetcls.h        |  10 +++
 net/oenetcls/oenetcls_ntuple.c | 128 ++++++++++++++++++---------------
 2 files changed, 82 insertions(+), 56 deletions(-)

diff --git a/net/oenetcls/oenetcls.h b/net/oenetcls/oenetcls.h
index 215ae3e7e153..2eef768e365e 100644
--- a/net/oenetcls/oenetcls.h
+++ b/net/oenetcls/oenetcls.h
@@ -121,6 +121,16 @@ struct rmgr_ctrl {
 	__u32 size;
 };
 
+struct cfg_param {
+	struct work_struct work;
+	struct cmd_context ctx;
+	struct oecls_sk_rule *rule;
+	struct sock *sk;
+	bool is_del;
+	int devid;
+	int nid;
+};
+
 extern int match_ip_flag;
 extern int debug;
 extern int oecls_netdev_num;
diff --git a/net/oenetcls/oenetcls_ntuple.c b/net/oenetcls/oenetcls_ntuple.c
index 3b69f927d25b..8b133ed42e4c 100644
--- a/net/oenetcls/oenetcls_ntuple.c
+++ b/net/oenetcls/oenetcls_ntuple.c
@@ -12,6 +12,7 @@
 #include "oenetcls.h"
 
 struct oecls_sk_rule_list oecls_sk_rules, oecls_sk_list;
+static struct workqueue_struct *do_cfg_workqueue;
 
 static void init_oecls_sk_rules(void)
 {
@@ -422,85 +423,94 @@ static int cfg_ethtool_rule(struct cmd_context *ctx, bool is_del)
 	return ret;
 }
 
-static void del_ntuple_rule(struct sock *sk)
+static void cfg_work(struct work_struct *work)
 {
+	struct cfg_param *ctx_p = container_of(work, struct cfg_param, work);
 	struct oecls_netdev_info *oecls_dev;
-	struct cmd_context ctx = { 0 };
 	struct oecls_sk_rule *rule;
-	int devid;
-	u16 dport;
-	u32 dip4;
+	int devid, rxq_id;
 	int err;
 
-	get_sk_rule_addr(sk, &dip4, &dport);
-
 	mutex_lock(&oecls_sk_rules.mutex);
 	for_each_oecls_netdev(devid, oecls_dev) {
-		strncpy(ctx.netdev, oecls_dev->dev_name, IFNAMSIZ);
-		rule = get_rule_from_sk(devid, sk);
-		if (!rule) {
-			oecls_debug("rule not found! sk:%p, devid:%d, dip4:%pI4, dport:%d\n",
-				    sk, devid, &dip4, ntohs(dport));
-			continue;
-		}
+		strncpy(ctx_p->ctx.netdev, oecls_dev->dev_name, IFNAMSIZ);
+		if (!ctx_p->is_del) {
+			if (reuseport_check(devid, ctx_p->ctx.dip4, ctx_p->ctx.dport)) {
+				oecls_error("dip4:%pI4, dport:%d reuse!\n", &ctx_p->ctx.dip4,
+					    ctx_p->ctx.dport);
+				continue;
+			}
 
-		// Config Ntuple rule to dev
-		ctx.del_ruleid = rule->ruleid;
-		err = cfg_ethtool_rule(&ctx, true);
-		if (err) {
-			oecls_error("del sk:%p, nid:%d, devid:%d, action:%d, ruleid:%d, err:%d\n",
-				    sk, rule->nid, devid, rule->action, rule->ruleid, err);
-		}
+			// Calculate the bound queue
+			rxq_id = alloc_rxq_id(ctx_p->nid, devid);
+			if (rxq_id < 0)
+				continue;
 
-		// Free the bound queue
-		free_rxq_id(rule->nid, devid, rule->action);
+			// Config Ntuple rule to dev
+			ctx_p->ctx.action = (u16)rxq_id;
+			err = cfg_ethtool_rule(&ctx_p->ctx, ctx_p->is_del);
+			// Add sk rule only on success
+			if (!err)
+				add_sk_rule(ctx_p->devid, ctx_p->ctx.dip4, ctx_p->ctx.dport,
+					    ctx_p->sk, ctx_p->ctx.action, ctx_p->ctx.ret_loc,
+					    ctx_p->nid);
+		} else {
+			rule = get_rule_from_sk(ctx_p->devid, ctx_p->sk);
+			if (!rule) {
+				oecls_debug("rule not found! sk:%p, devid:%d, dip4:%pI4, dport:%d\n",
+					    ctx_p->sk, ctx_p->devid, &ctx_p->ctx.dip4,
+					    ntohs(ctx_p->ctx.dport));
+				continue;
+			}
 
-		// Delete sk rule
-		del_sk_rule(rule);
+			// Config Ntuple rule to dev
+			ctx_p->ctx.del_ruleid = rule->ruleid;
+			ctx_p->rule = rule;
+			err = cfg_ethtool_rule(&ctx_p->ctx, ctx_p->is_del);
+			// Free the bound queue
+			free_rxq_id(ctx_p->rule->nid, ctx_p->devid, ctx_p->rule->action);
+			// Delete sk rule
+			del_sk_rule(ctx_p->rule);
+		}
 	}
 	mutex_unlock(&oecls_sk_rules.mutex);
+	kfree(ctx_p);
+}
+
+static void del_ntuple_rule(struct sock *sk)
+{
+	struct cfg_param *ctx_p;
+
+	ctx_p = kzalloc(sizeof(*ctx_p), GFP_ATOMIC);
+	if (!ctx_p)
+		return;
+	get_sk_rule_addr(sk, &ctx_p->ctx.dip4, &ctx_p->ctx.dport);
+
+	ctx_p->is_del = true;
+	ctx_p->sk = sk;
+	INIT_WORK(&ctx_p->work, cfg_work);
+	queue_work(do_cfg_workqueue, &ctx_p->work);
 }
 
 static void add_ntuple_rule(struct sock *sk)
 {
-	struct oecls_netdev_info *oecls_dev;
-	struct cmd_context ctx = { 0 };
+	struct cfg_param *ctx_p;
 	int cpu = raw_smp_processor_id();
 	int nid = cpu_to_node(cpu);
-	int rxq_id;
-	int devid;
-	int err;
 
 	if (check_appname(current->comm))
 		return;
-	get_sk_rule_addr(sk, &ctx.dip4, &ctx.dport);
-
-	mutex_lock(&oecls_sk_rules.mutex);
-	for_each_oecls_netdev(devid, oecls_dev) {
-		strncpy(ctx.netdev, oecls_dev->dev_name, IFNAMSIZ);
-		if (reuseport_check(devid, ctx.dip4, ctx.dport)) {
-			oecls_error("dip4:%pI4, dport:%d reuse!\n", &ctx.dip4, ctx.dport);
-			continue;
-		}
-
-		// Calculate the bound queue
-		rxq_id = alloc_rxq_id(nid, devid);
-		if (rxq_id < 0)
-			continue;
 
-		// Config Ntuple rule to dev
-		ctx.action = (u16)rxq_id;
-		err = cfg_ethtool_rule(&ctx, false);
-		if (err) {
-			oecls_error("add sk:%p, nid:%d, devid:%d, action:%d, ruleid:%d, err:%d\n",
-				    sk, nid, devid, ctx.action, ctx.ret_loc, err);
-			continue;
-		}
+	ctx_p = kzalloc(sizeof(*ctx_p), GFP_ATOMIC);
+	if (!ctx_p)
+		return;
+	get_sk_rule_addr(sk, &ctx_p->ctx.dip4, &ctx_p->ctx.dport);
 
-		// Add sk rule
-		add_sk_rule(devid, ctx.dip4, ctx.dport, sk, ctx.action, ctx.ret_loc, nid);
-	}
-	mutex_unlock(&oecls_sk_rules.mutex);
+	ctx_p->is_del = false;
+	ctx_p->sk = sk;
+	ctx_p->nid = nid;
+	INIT_WORK(&ctx_p->work, cfg_work);
+	queue_work(do_cfg_workqueue, &ctx_p->work);
 }
 
 static void ethtool_cfg_rxcls(struct sock *sk, int is_del)
@@ -561,6 +571,12 @@ static const struct oecls_hook_ops oecls_ntuple_ops = {
 
 void oecls_ntuple_res_init(void)
 {
+	do_cfg_workqueue = alloc_ordered_workqueue("oecls_cfg", 0);
+	if (!do_cfg_workqueue) {
+		oecls_debug("alloc_ordered_workqueue fails\n");
+		return;
+	}
+
 	init_oecls_sk_rules();
 	RCU_INIT_POINTER(oecls_ops, &oecls_ntuple_ops);
 }
-- 
2.34.1
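
Note for context (not part of the patch): the fix works because a work
item queued with queue_work() always executes in process context, where
taking a mutex is legal, while the oenetcls hooks can be invoked under
rcu_read_lock(), where sleeping is not. Below is a minimal,
self-contained sketch of the same defer-to-workqueue idiom; all names
here (my_wq, my_mutex, my_cfg, my_cfg_defer) are hypothetical and only
illustrate the pattern, they do not appear in the patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static DEFINE_MUTEX(my_mutex);

struct my_cfg {
	struct work_struct work;
	int arg;			/* state captured in atomic context */
};

static void my_cfg_work(struct work_struct *work)
{
	struct my_cfg *cfg = container_of(work, struct my_cfg, work);

	/* Runs in process context: sleeping locks are fine here. */
	mutex_lock(&my_mutex);
	/* ... apply the configuration using cfg->arg ... */
	mutex_unlock(&my_mutex);
	kfree(cfg);
}

/* May be called from RCU/atomic context: nothing here can sleep. */
static void my_cfg_defer(int arg)
{
	struct my_cfg *cfg;

	cfg = kzalloc(sizeof(*cfg), GFP_ATOMIC);	/* atomic allocation */
	if (!cfg)
		return;
	cfg->arg = arg;
	INIT_WORK(&cfg->work, my_cfg_work);
	queue_work(my_wq, &cfg->work);
}

static int __init my_init(void)
{
	/* Ordered queue: deferred items run one at a time, in order. */
	my_wq = alloc_ordered_workqueue("my_cfg", 0);
	return my_wq ? 0 : -ENOMEM;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Using an ordered workqueue, as the patch does via
alloc_ordered_workqueue(), additionally serializes the deferred work
items, so an add and a del for the same socket presumably cannot race
with each other.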