[PATCH OLK-5.10] net/oenetcls: Support cluster sched

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ICBFCS
CVE: NA

--------------------------------

Configure NIC IRQs in cluster or custom mode to support cluster scheduling.

Signed-off-by: Yue Haibing <yuehaibing@huawei.com>
---
 net/oenetcls/oenetcls.h        |  19 ++-
 net/oenetcls/oenetcls_main.c   | 241 +++++++++++++++++++++++----------
 net/oenetcls/oenetcls_ntuple.c |  84 ++++++++++--
 3 files changed, 260 insertions(+), 84 deletions(-)

diff --git a/net/oenetcls/oenetcls.h b/net/oenetcls/oenetcls.h
index 72aeedcaf0a0..b2c4a3c61956 100644
--- a/net/oenetcls/oenetcls.h
+++ b/net/oenetcls/oenetcls.h
@@ -7,7 +7,6 @@
 
 #define OECLS_MAX_NETDEV_NUM 8
 #define OECLS_MAX_RXQ_NUM_PER_DEV 256
-#define OECLS_MAX_NUMA_NUM 16
 #define OECLS_MAX_CPU_NUM 1024
 
 #define OECLS_TIMEOUT (5 * HZ)
@@ -27,8 +26,20 @@ struct oecls_netdev_info {
 	int old_filter_state;
 };
 
+struct oecls_rxq {
+	int rxq_id;
+	int status;
+};
+
+struct oecls_numa_clusterinfo {
+	int cluster_id;
+	int cur_freeidx;
+	struct oecls_rxq rxqs[OECLS_MAX_RXQ_NUM_PER_DEV];
+};
+
 struct oecls_numa_bound_dev_info {
 	DECLARE_BITMAP(bitmap_rxq, OECLS_MAX_RXQ_NUM_PER_DEV);
+	struct oecls_numa_clusterinfo *cluster_info;
 };
 
 struct oecls_numa_info {
@@ -66,6 +77,12 @@ struct oecls_sk_rule {
 	int nid;
 };
 
+struct oecls_sk_entry {
+	struct hlist_node node;
+	void *sk;
+	u32 sk_rule_hash;
+};
+
 struct oecls_dev_flow {
 	unsigned short cpu;
 	unsigned short filter;
diff --git a/net/oenetcls/oenetcls_main.c b/net/oenetcls/oenetcls_main.c
index 8cf5d02bb5b9..1c40357ac58b 100644
--- a/net/oenetcls/oenetcls_main.c
+++ b/net/oenetcls/oenetcls_main.c
@@ -12,7 +12,8 @@ int oecls_netdev_num;
 static struct oecls_netdev_info oecls_netdev_info_table[OECLS_MAX_NETDEV_NUM];
 
 int oecls_numa_num;
-static struct oecls_numa_info oecls_numa_info_table[OECLS_MAX_NUMA_NUM];
+static int oecls_cluster_cpu_num, oecls_cluster_per_numa;
+static struct oecls_numa_info *oecls_numa_info_table;
 
 int debug;
 module_param(debug, int, 0644);
@@ -51,8 +52,7 @@ static bool check_params(void)
 
 int check_appname(char *task_name)
 {
-	char *start = appname;
-	char *end;
+	char *start = appname, *end;
 
 	if (!strlen(appname))
 		return 0;
@@ -384,7 +384,7 @@ static void get_netdev_queue_info(struct oecls_netdev_info *oecls_dev)
 {
 	struct oecls_netdev_queue_info *rxq_info;
 	struct irq_desc *desc;
-	int irq;
+	int irq, cpu;
 
 	for_each_irq_desc(irq, desc) {
 		if (!desc->action)
 			continue;
 		if (!check_irq_name(desc->action->name, oecls_dev))
 			continue;
-		oecls_debug("irq=%d, [%s], rxq_id=%d\n", irq, desc->action->name,
-			    oecls_dev->rxq_num);
 		if (oecls_dev->rxq_num >= OECLS_MAX_RXQ_NUM_PER_DEV)
 			break;
 		rxq_info = &oecls_dev->rxq[oecls_dev->rxq_num++];
 		rxq_info->irq = irq;
+		cpu = cpumask_first(irq_data_get_effective_affinity_mask(&desc->irq_data));
+		rxq_info->affinity_cpu = cpu;
+		oecls_debug("irq=%d, [%s], rxq_id=%d affinity_cpu:%d\n",
+			    irq, desc->action->name, oecls_dev->rxq_num, cpu);
 	}
 }
@@ -555,8 +555,7 @@ static void clean_oecls_netdev_info(void)
 
 static int init_oecls_netdev_info(char *netdev_str)
 {
-	char *start = netdev_str;
-	char *end;
+	char *start = netdev_str, *end;
 	int err = -ENODEV;
 
 	while (*start != '\0') {
@@ -584,7 +583,7 @@
 
 struct oecls_numa_info *get_oecls_numa_info(unsigned int nid)
 {
-	if (nid >= OECLS_MAX_NUMA_NUM)
+	if (nid >= oecls_numa_num)
 		return NULL;
 	return &oecls_numa_info_table[nid];
 }
@@ -609,25 +608,52 @@ static void init_numa_avail_cpus(int nid, struct oecls_numa_info *numa_info)
 	}
 }
 
-static void init_numa_rxq_bitmap(int nid, struct oecls_numa_info *numa_info)
+static void clean_oecls_rxq(void)
 {
 	struct oecls_numa_bound_dev_info *bound_dev;
 	struct oecls_netdev_info *oecls_dev;
-	int bound_rxq_num;
-	int rxq_id;
-	int devid;
-	int cpu;
+	struct oecls_numa_info *numa_info;
+	int nid, devid;
+
+	for_each_oecls_numa(nid, numa_info) {
+		for_each_oecls_netdev(devid, oecls_dev) {
+			bound_dev = &numa_info->bound_dev[devid];
+			free_to_l0(bound_dev->cluster_info);
+		}
+	}
+}
+
+static int init_numa_rxq_bitmap(int nid, struct oecls_numa_info *numa_info)
+{
+	int bound_rxq_num, cluster_id, cluster_idx, cur_idx;
+	struct oecls_numa_bound_dev_info *bound_dev;
+	struct oecls_netdev_info *oecls_dev;
+	int rxq_id, devid, cpu, ret = 0;
 
 	for_each_oecls_netdev(devid, oecls_dev) {
 		bound_rxq_num = 0;
 		bound_dev = &numa_info->bound_dev[devid];
 		bitmap_zero(bound_dev->bitmap_rxq, OECLS_MAX_RXQ_NUM_PER_DEV);
+		bound_dev->cluster_info = alloc_from_l0(sizeof(struct oecls_numa_clusterinfo)
+							* oecls_cluster_per_numa);
+		if (!bound_dev->cluster_info) {
+			ret = -ENOMEM;
+			goto out;
+		}
 
 		for (rxq_id = 0; rxq_id < oecls_dev->rxq_num; rxq_id++) {
 			cpu = oecls_dev->rxq[rxq_id].affinity_cpu;
 			if (cpu_to_node(cpu) == nid) {
 				set_bit(rxq_id, bound_dev->bitmap_rxq);
+				cluster_id = cpu / oecls_cluster_cpu_num;
+				cluster_idx = cluster_id % oecls_cluster_per_numa;
+				bound_dev->cluster_info[cluster_idx].cluster_id = cluster_id;
+				cur_idx = bound_dev->cluster_info[cluster_idx].cur_freeidx++;
+				bound_dev->cluster_info[cluster_idx].rxqs[cur_idx].rxq_id = rxq_id;
+				bound_dev->cluster_info[cluster_idx].rxqs[cur_idx].status = 1;
 				bound_rxq_num++;
+				oecls_debug("cpu:%d cluster_id:%d cluster_idx:%d rxq_id:%d cur_idx:%d\n",
+					    cpu, cluster_id, cluster_idx, rxq_id, cur_idx);
 			}
 		}
@@ -635,6 +661,52 @@ static void init_numa_rxq_bitmap(int nid, struct oecls_numa_info *numa_info)
 			nid, devid, oecls_dev->dev_name, oecls_dev->rxq_num,
 			bound_rxq_num, OECLS_MAX_RXQ_NUM_PER_DEV, bound_dev->bitmap_rxq);
 	}
+	return ret;
+
+out:
+	clean_oecls_rxq();
+	return ret;
+}
+
+static int get_cluster_rxq(struct oecls_numa_bound_dev_info *bound_dev)
+{
+	int cpu = smp_processor_id();
+	int cluster_id = cpu / oecls_cluster_cpu_num;
+	int i, j, rxq_id;
+
+	for (i = 0; i < oecls_cluster_per_numa; i++) {
+		if (cluster_id != bound_dev->cluster_info[i].cluster_id)
+			continue;
+		for (j = 0; j < OECLS_MAX_RXQ_NUM_PER_DEV; j++) {
+			if (bound_dev->cluster_info[i].rxqs[j].status == 1) {
+				bound_dev->cluster_info[i].rxqs[j].status = 2;
+				rxq_id = bound_dev->cluster_info[i].rxqs[j].rxq_id;
+				oecls_debug("cluster:%d cpu:%d alloc rxq_id:%d\n",
+					    cluster_id, cpu, rxq_id);
+				return rxq_id;
+			}
+		}
+	}
+	oecls_debug("cluster:%d no free rxq for cpu:%d\n", cluster_id, cpu);
+	return -1;
+}
+
+static int put_cluster_rxq(struct oecls_numa_bound_dev_info *bound_dev, int rxq_id)
+{
+	int i, j;
+
+	for (i = 0; i < oecls_cluster_per_numa; i++) {
+		for (j = 0; j < OECLS_MAX_RXQ_NUM_PER_DEV; j++) {
+			if (bound_dev->cluster_info[i].rxqs[j].status == 2 &&
+			    bound_dev->cluster_info[i].rxqs[j].rxq_id == rxq_id) {
+				bound_dev->cluster_info[i].rxqs[j].status = 1;
+				oecls_debug("free rxq_id:%d\n", rxq_id);
+				return 0;
+			}
+		}
+	}
+	oecls_debug("no match malloced rxq_id:%d\n", rxq_id);
+	return -1;
 }
 
 int alloc_rxq_id(int nid, int devid)
@@ -655,12 +727,21 @@ int alloc_rxq_id(int nid, int devid)
 	}
 
 	bound_dev = &numa_info->bound_dev[devid];
+	if (strategy == 1) {
+		rxq_id = get_cluster_rxq(bound_dev);
+		if (rxq_id < 0 || rxq_id >= OECLS_MAX_RXQ_NUM_PER_DEV)
+			pr_info("failed to get rxq_id:%d in cluster, try numa\n", rxq_id);
+		else
+			goto found;
+	}
+
 	rxq_id = find_first_bit(bound_dev->bitmap_rxq, OECLS_MAX_RXQ_NUM_PER_DEV);
 	if (rxq_id >= OECLS_MAX_RXQ_NUM_PER_DEV) {
 		oecls_error("error rxq_id:%d\n", rxq_id);
 		return -EINVAL;
 	}
 
+found:
 	clear_bit(rxq_id, bound_dev->bitmap_rxq);
 	oecls_debug("alloc nid:%d, dev_id:%d, rxq_id:%d\n", nid, devid, rxq_id);
 	return rxq_id;
@@ -688,6 +769,9 @@ void free_rxq_id(int nid, int devid, int rxq_id)
 		return;
 	}
 
+	if (strategy == 1)
+		put_cluster_rxq(bound_dev, rxq_id);
+
 	if (test_bit(rxq_id, bound_dev->bitmap_rxq)) {
 		oecls_error("error nid:%d, devid:%d, rxq_id:%d\n", nid, devid, rxq_id);
 		return;
@@ -697,22 +781,28 @@ void free_rxq_id(int nid, int devid, int rxq_id)
 	oecls_debug("free nid:%d, dev_id:%d, rxq_id:%d\n", nid, devid, rxq_id);
 }
 
-static void init_oecls_numa_info(void)
+static int init_oecls_numa_info(void)
 {
 	struct oecls_numa_info *numa_info;
-	unsigned int numa_num;
-	int nid;
+	int nid, ret = 0;
 
-	numa_num = num_online_nodes();
-	if (numa_num > OECLS_MAX_NUMA_NUM) {
-		oecls_error("online numa num:%d is too much!\n", numa_num);
-		numa_num = OECLS_MAX_NUMA_NUM;
+	oecls_numa_num = num_online_nodes();
+	oecls_numa_info_table = alloc_from_l0(sizeof(struct oecls_numa_info) * oecls_numa_num);
+	if (!oecls_numa_info_table) {
+		ret = -ENOMEM;
+		oecls_error("oecls_numa_info_table alloc failed:%d\n", ret);
+		return ret;
 	}
-	oecls_numa_num = numa_num;
-	oecls_debug("set oecls_numa_num=%d\n", numa_num);
+
+	oecls_cluster_cpu_num = cpumask_weight(topology_cluster_cpumask(smp_processor_id()));
+	oecls_cluster_per_numa = (nr_cpu_ids / oecls_cluster_cpu_num) / oecls_numa_num;
+	oecls_debug("oecls_numa_num=%d cluster_per_numa:%d cluster_cpu_num:%d\n",
+		    oecls_numa_num, oecls_cluster_per_numa, oecls_cluster_cpu_num);
 
 	for_each_oecls_numa(nid, numa_info)
 		init_numa_avail_cpus(nid, numa_info);
+
+	return ret;
 }
 
 static int alloc_available_cpu(int nid, struct oecls_numa_info *numa_info)
@@ -746,10 +836,7 @@ static void config_affinity_strategy_default(struct oecls_netdev_info *oecls_dev)
 	int rxq_num = oecls_dev->rxq_num;
 	int rxq_per_numa = rxq_num / oecls_numa_num;
 	int remain = rxq_num - rxq_per_numa * oecls_numa_num;
-	int numa_rxq_id;
-	int rxq_id;
-	int nid;
-	int cpu;
+	int numa_rxq_id, rxq_id, nid, cpu;
 
 	oecls_debug("dev=%s, rxq_num=%d, rxq_per_numa=%d, remain=%d\n",
 		    oecls_dev->dev_name, rxq_num, rxq_per_numa, remain);
@@ -786,47 +873,62 @@ static void config_affinity_strategy_cluster(struct oecls_netdev_info *oecls_dev)
 {
-	int cluster_cpu_num = 8;
-	int cluster_num = num_online_cpus() / cluster_cpu_num;
-	int cluster_cpu_id = 0;
-	int rxq_id = 0;
-	int cluster;
-	int cpu;
+	int rxq_num = oecls_dev->rxq_num;
+	int rxq_per_numa = rxq_num / oecls_numa_num;
+	int remain = rxq_num - rxq_per_numa * oecls_numa_num;
+	int cpu_idx = oecls_cluster_cpu_num - 1;
+	int cluster, cpu, rxq_id = 0, round;
+
+	round = rxq_per_numa < oecls_cluster_per_numa ? rxq_per_numa : oecls_cluster_per_numa;
+	if (remain > 0)
+		round++;
+	oecls_debug("round=%d\n", round);
 
-	// average config rxq to every cluster
 	while (rxq_id < oecls_dev->rxq_num) {
-		for (cluster = 0; cluster < cluster_num; cluster++) {
-			cpu = cluster * cluster_cpu_num + cluster_cpu_id;
+		for (cluster = 0; cluster < oecls_cluster_per_numa * oecls_numa_num; cluster++) {
+			if (cluster % oecls_cluster_per_numa >= round)
+				continue;
+			cpu = cluster * oecls_cluster_cpu_num + cpu_idx;
 			if (rxq_id >= oecls_dev->rxq_num)
 				break;
 			add_netdev_irq_affinity_cpu(oecls_dev, rxq_id++, cpu);
 		}
-		cluster_cpu_id++;
+		cpu_idx--;
+		if (--cpu_idx < 0)
+			cpu_idx = oecls_cluster_cpu_num - 1;
 	}
 }
 
-static void config_affinity_strategy_16cores(struct oecls_netdev_info *oecls_dev)
+static void config_affinity_strategy_numa(struct oecls_netdev_info *oecls_dev)
 {
+	int rxq_num = oecls_dev->rxq_num;
+	int rxq_per_numa = rxq_num / oecls_numa_num;
+	int cpu_per_numa = nr_cpu_ids / oecls_numa_num;
+	int remain = rxq_num - rxq_per_numa * oecls_numa_num;
 	struct oecls_numa_info *numa_info;
-	int numa_start_cpu;
-	int numa_cpu_id;
-	int rxq_id = 0;
-	int nid;
-	int cpu;
+	int numa_start_cpu, numa_cpu_id;
+	int rxq_id = 0, nid, cpu;
 
-	// only use 16 cores of one numa
 	for_each_oecls_numa(nid, numa_info) {
 		numa_start_cpu = find_first_bit(numa_info->avail_cpus, OECLS_MAX_CPU_NUM);
-		for (numa_cpu_id = 0; numa_cpu_id < 16; numa_cpu_id++) {
-			cpu = numa_start_cpu + numa_cpu_id;
-
+		for (numa_cpu_id = 0; numa_cpu_id < rxq_per_numa; numa_cpu_id++) {
+			cpu = numa_start_cpu + (numa_cpu_id % cpu_per_numa);
 			if (rxq_id >= oecls_dev->rxq_num)
 				break;
 			add_netdev_irq_affinity_cpu(oecls_dev, rxq_id++, cpu);
 		}
+		if (remain-- > 0) {
+			cpu = numa_start_cpu + (numa_cpu_id % cpu_per_numa);
+			add_netdev_irq_affinity_cpu(oecls_dev, rxq_id++, cpu);
+		}
 	}
 }
 
+static void config_affinity_strategy_custom(struct oecls_netdev_info *oecls_dev)
+{
+	oecls_debug("dev=%s\n", oecls_dev->dev_name);
+}
+
 static void config_affinity_strategy(void)
 {
 	struct oecls_netdev_info *oecls_dev;
 	int devid;
@@ -834,15 +936,16 @@ static void config_affinity_strategy(void)
 
 	for_each_oecls_netdev(devid, oecls_dev) {
 		switch (strategy) {
-		case 0:
-			config_affinity_strategy_default(oecls_dev);
-			break;
 		case 1:
 			config_affinity_strategy_cluster(oecls_dev);
 			break;
 		case 2:
-			config_affinity_strategy_16cores(oecls_dev);
+			config_affinity_strategy_numa(oecls_dev);
+			break;
+		case 3:
+			config_affinity_strategy_custom(oecls_dev);
 			break;
+		case 0:
 		default:
 			config_affinity_strategy_default(oecls_dev);
 			break;
 		}
@@ -862,8 +965,7 @@ static void enable_affinity_strategy(void)
 {
 	struct oecls_netdev_queue_info *rxq_info;
 	struct oecls_netdev_info *oecls_dev;
-	int rxq_id;
-	int devid;
+	int rxq_id, devid;
 
 	for_each_oecls_netdev(devid, oecls_dev) {
 		for (rxq_id = 0; rxq_id < oecls_dev->rxq_num; rxq_id++) {
@@ -888,10 +990,7 @@ static void set_netdev_xps_queue(bool enable)
 	const struct cpumask clear_mask = { 0 };
 	struct oecls_netdev_info *oecls_dev;
 	const struct cpumask *cpu_mask;
-	int rxq_id;
-	int devid;
-	int cpu;
-	int nid;
+	int rxq_id, devid, cpu, nid;
 
 	for_each_oecls_netdev(devid, oecls_dev) {
 		for (rxq_id = 0; rxq_id < oecls_dev->rxq_num; rxq_id++) {
@@ -910,26 +1009,29 @@ static __init int oecls_init(void)
 {
 	struct oecls_numa_info *numa_info;
-	int nid;
-	int err;
+	int nid, err;
 
-	oecls_debug("[init] mode=%d, ifname=[%s]\n", mode, ifname);
 	if (!check_params())
 		return -EINVAL;
 
 	init_oecls_l0_cache();
-	init_oecls_numa_info();
+	err = init_oecls_numa_info();
+	if (err)
+		goto clean_l0;
 
 	err = init_oecls_netdev_info(ifname);
 	if (err)
-		goto out;
+		goto clean_numa;
 
 	// Set irq affinity
 	config_affinity_strategy();
 	enable_affinity_strategy();
 
 	// Calculate rxq bounded to one numa
-	for_each_oecls_numa(nid, numa_info)
-		init_numa_rxq_bitmap(nid, numa_info);
+	for_each_oecls_numa(nid, numa_info) {
+		err = init_numa_rxq_bitmap(nid, numa_info);
+		if (err)
+			goto clean_rxq;
+	}
 
 #ifdef CONFIG_XPS
 	set_netdev_xps_queue(true);
@@ -941,16 +1043,18 @@ static __init int oecls_init(void)
 		oecls_flow_res_init();
 
 	return 0;
-out:
+
+clean_rxq:
+clean_numa:
 	clean_oecls_netdev_info();
 	clean_oecls_numa_info();
+clean_l0:
 	clean_oecls_l0_cache();
 	return err;
 }
 
 static __exit void oecls_exit(void)
 {
-	oecls_debug("[exit] mode=%d\n", mode);
 	if (mode == 0)
 		oecls_ntuple_res_clean();
 	else
@@ -960,6 +1064,7 @@ static __exit void oecls_exit(void)
 	set_netdev_xps_queue(false);
 #endif
 
+	clean_oecls_rxq();
 	clean_oecls_netdev_info();
 	clean_oecls_numa_info();
 	clean_oecls_l0_cache();
diff --git a/net/oenetcls/oenetcls_ntuple.c b/net/oenetcls/oenetcls_ntuple.c
index 38d1f5df6ff1..0ac69eb08f24 100644
--- a/net/oenetcls/oenetcls_ntuple.c
+++ b/net/oenetcls/oenetcls_ntuple.c
@@ -10,7 +10,7 @@
 #include <trace/hooks/oenetcls.h>
 #include "oenetcls.h"
 
-struct oecls_sk_rule_list oecls_sk_rules;
+struct oecls_sk_rule_list oecls_sk_rules, oecls_sk_list;
 
 static void init_oecls_sk_rules(void)
 {
@@ -21,21 +21,28 @@ static void init_oecls_sk_rules(void)
 	mutex_init(&oecls_sk_rules.mutex);
 }
 
-static struct hlist_head *oecls_sk_rule_hash(u32 dip4, u16 dport)
+static inline struct hlist_head *get_rule_hashlist(u32 dip4, u16 dport)
 {
 	return oecls_sk_rules.hash + (jhash_2words(dip4, dport, 0) & OECLS_SK_RULE_HASHMASK);
 }
 
+static inline struct hlist_head *get_sk_hashlist(void *sk)
+{
+	return oecls_sk_list.hash + (jhash(sk, sizeof(sk), 0) & OECLS_SK_RULE_HASHMASK);
+}
+
 static void add_sk_rule(int devid, u32 dip4, u16 dport, void *sk, int action,
 			int ruleid, int nid)
 {
-	struct hlist_head *hlist = oecls_sk_rule_hash(dip4, dport);
+	struct hlist_head *hlist = get_rule_hashlist(dip4, dport);
+	struct hlist_head *sk_hlist = get_sk_hashlist(sk);
 	struct oecls_sk_rule *rule;
+	struct oecls_sk_entry *entry;
 
 	rule = alloc_from_l0(sizeof(struct oecls_sk_rule));
-	if (!rule)
-		return;
-	oecls_debug("alloc rule=%p\n", rule);
+	entry = alloc_from_l0(sizeof(struct oecls_sk_entry));
+	if (!rule || !entry)
+		goto out;
 
 	rule->sk = sk;
 	rule->dip4 = dip4;
@@ -45,18 +52,47 @@ static void add_sk_rule(int devid, u32 dip4, u16 dport, void *sk, int action,
 	rule->ruleid = ruleid;
 	rule->nid = nid;
 	hlist_add_head(&rule->node, hlist);
+
+	entry->sk = sk;
+	entry->sk_rule_hash = jhash_2words(dip4, dport, 0);
+	hlist_add_head(&entry->node, sk_hlist);
+	return;
+out:
+	oecls_debug("alloc failed rule:%p entry:%p\n", rule, entry);
+	free_to_l0(entry);
+	free_to_l0(rule);
+}
+
+static struct oecls_sk_entry *get_sk_entry(void *sk)
+{
+	struct hlist_head *sk_hlist = get_sk_hashlist(sk);
+	struct oecls_sk_entry *entry = NULL;
+
+	hlist_for_each_entry(entry, sk_hlist, node) {
+		if (entry->sk == sk)
+			break;
+	}
+	return entry;
 }
 
 static void del_sk_rule(struct oecls_sk_rule *rule)
 {
-	hlist_del_init(&rule->node);
+	struct oecls_sk_entry *entry;
+
+	entry = get_sk_entry(rule->sk);
+	if (!entry)
+		return;
+	hlist_del_init(&entry->node);
+	free_to_l0(entry);
+
 	oecls_debug("del rule=%p\n", rule);
+	hlist_del_init(&rule->node);
 	free_to_l0(rule);
 }
 
 static struct oecls_sk_rule *get_sk_rule(int devid, u32 dip4, u16 dport)
 {
-	struct hlist_head *hlist = oecls_sk_rule_hash(dip4, dport);
+	struct hlist_head *hlist = get_rule_hashlist(dip4, dport);
 	struct oecls_sk_rule *rule = NULL;
 
 	hlist_for_each_entry(rule, hlist, node) {
@@ -66,7 +102,25 @@ static struct oecls_sk_rule *get_sk_rule(int devid, u32 dip4, u16 dport)
 	return rule;
 }
 
-static bool reuseport_check(int devid, u32 dip4, u16 dport)
+static struct oecls_sk_rule *get_rule_from_sk(int devid, void *sk)
+{
+	struct oecls_sk_rule *rule = NULL;
+	struct oecls_sk_entry *entry;
+	struct hlist_head *hlist;
+
+	entry = get_sk_entry(sk);
+	if (!entry)
+		return NULL;
+
+	hlist = oecls_sk_rules.hash + (entry->sk_rule_hash & OECLS_SK_RULE_HASHMASK);
+	hlist_for_each_entry(rule, hlist, node) {
+		if (rule->devid == devid && rule->sk == sk)
+			break;
+	}
+	return rule;
+}
+
+static inline bool reuseport_check(int devid, u32 dip4, u16 dport)
 {
 	return !!get_sk_rule(devid, dip4, dport);
 }
@@ -90,7 +144,7 @@ static u32 get_first_ip4_addr(struct net *net)
 		in_dev_for_each_ifa_rcu(ifa, in_dev) {
 			if (!strcmp(dev->name, ifa->ifa_label)) {
 				dip4 = ifa->ifa_local;
-				oecls_debug("dev: %s, dip4: 0x%x\n", dev->name, dip4);
+				oecls_debug("dev: %s, dip4:%pI4\n", dev->name, &dip4);
 				goto out;
 			}
 		}
@@ -382,10 +436,10 @@ static void del_ntuple_rule(struct sock *sk)
 	mutex_lock(&oecls_sk_rules.mutex);
 	for_each_oecls_netdev(devid, oecls_dev) {
 		strncpy(ctx.netdev, oecls_dev->dev_name, IFNAMSIZ);
-		rule = get_sk_rule(devid, dip4, dport);
+		rule = get_rule_from_sk(devid, sk);
 		if (!rule) {
-			oecls_debug("rule not found! sk:%p, devid:%d, dip4:0x%x, dport:%d\n", sk,
-				    devid, dip4, dport);
+			oecls_debug("rule not found! sk:%p, devid:%d, dip4:%pI4, dport:%d\n",
+				    sk, devid, &dip4, ntohs(dport));
 			continue;
 		}
@@ -456,8 +510,8 @@ static void ethtool_cfg_rxcls(void *data, struct sock *sk, int is_del)
 	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
 		return;
 
-	oecls_debug("[cpu:%d] app:%s, sk:%p, is_del:%d, ip:0x%x, port:0x%x\n", smp_processor_id(),
-		    current->comm, sk, is_del, sk->sk_rcv_saddr, sk->sk_num);
+	oecls_debug("[cpu:%d] app:%s, sk:%p, is_del:%d, ip:%pI4, port:%d\n", smp_processor_id(),
+		    current->comm, sk, is_del, &sk->sk_rcv_saddr, (u16)sk->sk_num);
 
 	if (is_del)
 		del_ntuple_rule(sk);
-- 
2.34.1
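
Not part of the patch: a minimal, self-contained userspace sketch of the per-cluster rxq pool that get_cluster_rxq()/put_cluster_rxq() implement above. Each cluster owns a small array of queues where status 1 means free and status 2 means allocated; when the local cluster has no free queue, the caller falls back to the per-NUMA bitmap. Names and the pool size here are illustrative only.

	#include <stdio.h>

	#define NR_RXQ 4			/* arbitrary pool size for the demo */

	struct rxq {
		int rxq_id;
		int status;			/* 1 = free, 2 = allocated */
	};

	static struct rxq cluster_pool[NR_RXQ] = {
		{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 },
	};

	/* Mirrors get_cluster_rxq(): hand out the first free queue of the cluster. */
	static int get_rxq(void)
	{
		for (int i = 0; i < NR_RXQ; i++) {
			if (cluster_pool[i].status == 1) {
				cluster_pool[i].status = 2;
				return cluster_pool[i].rxq_id;
			}
		}
		return -1;	/* caller falls back to the NUMA-wide bitmap */
	}

	/* Mirrors put_cluster_rxq(): only a queue marked allocated can be freed. */
	static int put_rxq(int rxq_id)
	{
		for (int i = 0; i < NR_RXQ; i++) {
			if (cluster_pool[i].status == 2 &&
			    cluster_pool[i].rxq_id == rxq_id) {
				cluster_pool[i].status = 1;
				return 0;
			}
		}
		return -1;	/* not allocated from this cluster */
	}

	int main(void)
	{
		int q = get_rxq();

		printf("allocated rxq %d\n", q);	/* allocated rxq 0 */
		printf("freed: %d\n", put_rxq(q));	/* freed: 0 */
		return 0;
	}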

Feedback: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/16847
Mailing list address: https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/JAL...