From: Zhang Qiao <zhangqiao22@huawei.com>

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I64OUS
CVE: NA
-------------------------------
When a cfs_rq is throttled by qos, cfs_rq->throttled is set to 1, the same value CFS bandwidth control uses, so cfs bw may unthrottle this cfs_rq by mistake and cause a list_del_valid warning. Add the macro QOS_THROTTLED (= 2): when a cfs_rq is throttled by qos, mark cfs_rq->throttled as QOS_THROTTLED, and check the value of cfs_rq->throttled before unthrottling a cfs_rq.
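The idea in miniature, as a stand-alone sketch (hypothetical toy code, not the scheduler source): give each throttle owner a distinct marker value so one owner's unthrottle path can recognize and skip the other's queues.

#include <stdio.h>

#define CFS_BW_THROTTLED	1	/* set by cfs bandwidth control */
#define QOS_THROTTLED		2	/* set by qos scheduling */

struct toy_cfs_rq { int throttled; };

static void bw_unthrottle(struct toy_cfs_rq *rq)
{
	/* cfs bw must not touch a queue that qos throttled */
	if (rq->throttled == QOS_THROTTLED)
		return;
	rq->throttled = 0;
}

int main(void)
{
	struct toy_cfs_rq rq = { .throttled = QOS_THROTTLED };

	bw_unthrottle(&rq);			/* correctly skipped */
	printf("throttled=%d\n", rq.throttled);	/* still 2 */
	return 0;
}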
Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Reviewed-by: zheng zucheng <zhengzucheng@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 kernel/sched/fair.c | 102 +++++++++++++++++++++++++++++---------------
 1 file changed, 68 insertions(+), 34 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 654e964b5c31..d3c4b945c019 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -124,6 +124,13 @@ int __weak arch_asym_cpu_priority(int cpu)
 #endif
 
 #ifdef CONFIG_QOS_SCHED
+
+/*
+ * To distinguish cfs bw, use QOS_THROTTLED mark cfs_rq->throttled
+ * when qos throttled(and cfs bw throttle mark cfs_rq->throttled as 1).
+ */
+#define QOS_THROTTLED		2
+
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct list_head, qos_throttled_cfs_rq);
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct hrtimer, qos_overload_timer);
 static DEFINE_PER_CPU(int, qos_cpu_overload);
@@ -4932,6 +4939,14 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
 
+#ifdef CONFIG_QOS_SCHED
+	/*
+	 * if this cfs_rq throttled by qos, not need unthrottle it.
+	 */
+	if (cfs_rq->throttled == QOS_THROTTLED)
+		return;
+#endif
+
 	cfs_rq->throttled = 0;
 
 	update_rq_clock(rq);
@@ -7278,26 +7293,6 @@ static inline bool is_offline_task(struct task_struct *p)
 
 static void start_qos_hrtimer(int cpu);
 
-static int qos_tg_unthrottle_up(struct task_group *tg, void *data)
-{
-	struct rq *rq = data;
-	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
-
-	cfs_rq->throttle_count--;
-
-	return 0;
-}
-
-static int qos_tg_throttle_down(struct task_group *tg, void *data)
-{
-	struct rq *rq = data;
-	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
-
-	cfs_rq->throttle_count++;
-
-	return 0;
-}
-
 static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	struct rq *rq = rq_of(cfs_rq);
@@ -7309,7 +7304,7 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 
 	/* freeze hierarchy runnable averages while throttled */
 	rcu_read_lock();
-	walk_tg_tree_from(cfs_rq->tg, qos_tg_throttle_down, tg_nop, (void *)rq);
+	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
 	rcu_read_unlock();
 
 	task_delta = cfs_rq->h_nr_running;
@@ -7320,8 +7315,13 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 		if (!se->on_rq)
 			break;
 
-		if (dequeue)
+		if (dequeue) {
 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
+		} else {
+			update_load_avg(qcfs_rq, se, 0);
+			se_update_runnable(se);
+		}
+
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
 
@@ -7339,7 +7339,7 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 	if (list_empty(&per_cpu(qos_throttled_cfs_rq, cpu_of(rq))))
 		start_qos_hrtimer(cpu_of(rq));
 
-	cfs_rq->throttled = 1;
+	cfs_rq->throttled = QOS_THROTTLED;
 
 	list_add(&cfs_rq->qos_throttled_list,
 		 &per_cpu(qos_throttled_cfs_rq, cpu_of(rq)));
@@ -7349,12 +7349,14 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	struct rq *rq = rq_of(cfs_rq);
 	struct sched_entity *se;
-	int enqueue = 1;
 	unsigned int prev_nr = cfs_rq->h_nr_running;
 	long task_delta, idle_task_delta;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
 
+	if (cfs_rq->throttled != QOS_THROTTLED)
+		return;
+
 	cfs_rq->throttled = 0;
 
 	update_rq_clock(rq);
@@ -7362,7 +7364,7 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 
 	/* update hierarchical throttle state */
 	rcu_read_lock();
-	walk_tg_tree_from(cfs_rq->tg, tg_nop, qos_tg_unthrottle_up, (void *)rq);
+	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
 	rcu_read_unlock();
 
 	if (!cfs_rq->load.weight)
@@ -7372,26 +7374,58 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 	idle_task_delta = cfs_rq->idle_h_nr_running;
 	for_each_sched_entity(se) {
 		if (se->on_rq)
-			enqueue = 0;
+			break;
 
 		cfs_rq = cfs_rq_of(se);
-		if (enqueue)
-			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+		enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+
 		cfs_rq->h_nr_running += task_delta;
 		cfs_rq->idle_h_nr_running += idle_task_delta;
 
 		if (cfs_rq_throttled(cfs_rq))
-			break;
+			goto unthrottle_throttle;
 	}
 
-	assert_list_leaf_cfs_rq(rq);
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
 
-	if (!se) {
-		add_nr_running(rq, task_delta);
-		if (prev_nr < 2 && prev_nr + task_delta >= 2)
-			overload_set(rq);
+		update_load_avg(cfs_rq, se, UPDATE_TG);
+		se_update_runnable(se);
+
+		cfs_rq->h_nr_running += task_delta;
+		cfs_rq->idle_h_nr_running += idle_task_delta;
+
+		/* end evaluation on encountering a throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq))
+			goto unthrottle_throttle;
+
+		/*
+		 * One parent has been throttled and cfs_rq removed from the
+		 * list. Add it back to not break the leaf list.
+		 */
+		if (throttled_hierarchy(cfs_rq))
+			list_add_leaf_cfs_rq(cfs_rq);
+	}
+
+	add_nr_running(rq, task_delta);
+	if (prev_nr < 2 && prev_nr + task_delta >= 2)
+		overload_set(rq);
+
+unthrottle_throttle:
+	/*
+	 * The cfs_rq_throttled() breaks in the above iteration can result in
+	 * incomplete leaf list maintenance, resulting in triggering the
+	 * assertion below.
+	 */
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		if (list_add_leaf_cfs_rq(cfs_rq))
+			break;
 	}
 
+	assert_list_leaf_cfs_rq(rq);
+
 	/* Determine whether we need to wake up potentially idle CPU: */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
 		resched_curr(rq);
From: Long Li <leo.lilong@huawei.com>

stable inclusion
from stable-v5.10.157
commit 4aa32aaef6c1b5e39ae2508ec596bd7b67871043
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I65TQE
CVE: CVE-2022-4378
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
commit e6cfaf34be9fcd1a8285a294e18986bfc41a409c upstream.
proc_get_long() is passed a size_t, but then assigns it to an 'int' variable for the length. Let's not do that, even if our IO paths are limited to MAX_RW_COUNT (exactly because of these kinds of type errors).
So do the proper test in the right type.
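As a minimal user-space sketch of the bug class (illustrative only, not the kernel code, and assuming a 64-bit size_t): a size check done before the narrowing conversion passes, while the int value it produces is bogus.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t size = 0x100000000UL;	/* 4 GiB, assumes 64-bit size_t */
	int len = size;			/* silently truncates to 0 */

	if (!size)			/* old-style check: passes */
		return 1;

	printf("size=%zu but len=%d\n", size, len);	/* len == 0 */
	return 0;
}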
Reported-by: Kyle Zeng <zengyhkyle@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Long Li <leo.lilong@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 kernel/sysctl.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index eb8db15b5902..ae09d68ef788 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -471,13 +471,12 @@ static int proc_get_long(char **buf, size_t *size,
 			  unsigned long *val, bool *neg,
 			  const char *perm_tr, unsigned perm_tr_len, char *tr)
 {
-	int len;
 	char *p, tmp[TMPBUFLEN];
+	ssize_t len = *size;
 
-	if (!*size)
+	if (len <= 0)
 		return -EINVAL;
 
-	len = *size;
 	if (len > TMPBUFLEN - 1)
 		len = TMPBUFLEN - 1;
From: Long Li <leo.lilong@huawei.com>

stable inclusion
from stable-v5.10.157
commit 9ba389863ac63032d4b6ffad2c90a62cd78082ee
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I65TQE
CVE: CVE-2022-4378
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
commit bce9332220bd677d83b19d21502776ad555a0e73 upstream.
proc_skip_spaces() seems to think it is working on C strings, and ends up being just a wrapper around skip_spaces() with a really odd calling convention.
Instead of basing it on skip_spaces(), it should have looked more like proc_skip_char(), which really is the exact same function (except it skips a particular character, rather than whitespace). So use that as inspiration, odd coding and all.
Now the calling convention actually makes sense and works for the intended purpose.
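For illustration, a hypothetical user-space sketch (not the kernel code) of the bounded variant: it stops at the size limit even when the buffer has no NUL terminator, which is exactly what a skip_spaces()-based wrapper cannot guarantee.

#include <ctype.h>
#include <stddef.h>
#include <stdio.h>

static void skip_spaces_bounded(const char **buf, size_t *size)
{
	/* never advances past *size bytes, terminator or not */
	while (*size && isspace((unsigned char)**buf)) {
		(*size)--;
		(*buf)++;
	}
}

int main(void)
{
	char raw[4] = { ' ', ' ', '1', ' ' };	/* no trailing NUL */
	const char *p = raw;
	size_t left = sizeof(raw);

	skip_spaces_bounded(&p, &left);
	printf("left=%zu next=%c\n", left, *p);	/* left=2 next=1 */
	return 0;
}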
Reported-and-tested-by: Kyle Zeng <zengyhkyle@gmail.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Long Li <leo.lilong@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 kernel/sysctl.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ae09d68ef788..d290ba2d8fee 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -396,13 +396,14 @@ int proc_dostring(struct ctl_table *table, int write,
 			ppos);
 }
 
-static size_t proc_skip_spaces(char **buf)
+static void proc_skip_spaces(char **buf, size_t *size)
 {
-	size_t ret;
-	char *tmp = skip_spaces(*buf);
-	ret = tmp - *buf;
-	*buf = tmp;
-	return ret;
+	while (*size) {
+		if (!isspace(**buf))
+			break;
+		(*size)--;
+		(*buf)++;
+	}
 }
 
 static void proc_skip_char(char **buf, size_t *size, const char v)
@@ -634,7 +635,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
 		bool neg;
 
 		if (write) {
-			left -= proc_skip_spaces(&p);
+			proc_skip_spaces(&p, &left);
 
 			if (!left)
 				break;
@@ -661,7 +662,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
 	if (!write && !first && left && !err)
 		proc_put_char(&buffer, &left, '\n');
 	if (write && !err && left)
-		left -= proc_skip_spaces(&p);
+		proc_skip_spaces(&p, &left);
 	if (write && first)
 		return err ? : -EINVAL;
 	*lenp -= left;
@@ -703,7 +704,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
 	if (left > PAGE_SIZE - 1)
 		left = PAGE_SIZE - 1;
 
-	left -= proc_skip_spaces(&p);
+	proc_skip_spaces(&p, &left);
 	if (!left) {
 		err = -EINVAL;
 		goto out_free;
@@ -723,7 +724,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
 	}
 
 	if (!err && left)
-		left -= proc_skip_spaces(&p);
+		proc_skip_spaces(&p, &left);
 
 out_free:
 	if (err)
@@ -1181,7 +1182,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,
 		if (write) {
 			bool neg;
 
-			left -= proc_skip_spaces(&p);
+			proc_skip_spaces(&p, &left);
 			if (!left)
 				break;
 
@@ -1210,7 +1211,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,
 	if (!write && !first && left && !err)
 		proc_put_char(&buffer, &left, '\n');
 	if (write && !err)
-		left -= proc_skip_spaces(&p);
+		proc_skip_spaces(&p, &left);
 	if (write && first)
 		return err ? : -EINVAL;
 	*lenp -= left;
From: Yu Kuai <yukuai3@huawei.com>

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I65K8D
CVE: NA
--------------------------------
Using cmpxchg64() on some 32-bit platforms causes a compile error because it might not be implemented there, for example on PPC32. Hence only compile this code on 64-bit platforms.
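As an illustrative user-space sketch (stand-ins, not the block-layer code) of why the fix moves from a runtime IS_ENABLED() check to a preprocessor guard: IS_ENABLED() only makes the branch unreachable, so the compiler still has to parse and link the 64-bit primitive, while #ifdef removes it before the compiler ever sees it.

#include <stdint.h>
#include <stdio.h>

#if UINTPTR_MAX == UINT64_MAX		/* stand-in for CONFIG_64BIT */
static uint64_t account(uint64_t *stat, uint64_t now)
{
	/* 64-bit build: a 64-bit compare-and-swap primitive exists */
	__sync_val_compare_and_swap(stat, *stat, now);
	return *stat;
}
#else
static uint64_t account(uint64_t *stat, uint64_t now)
{
	*stat = now;			/* 32-bit fallback: no 64-bit cmpxchg */
	return *stat;
}
#endif

int main(void)
{
	uint64_t stat = 0;

	printf("%llu\n", (unsigned long long)account(&stat, 42));
	return 0;
}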
Fixes: 82327165da5c ("blk-mq: don't access request_wrapper if request is not allocated from block layer")
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 block/blk-core.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index df24a463f2ef..71c5cf508127 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1306,10 +1306,11 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 
 static void blk_account_io_latency(struct request *req, u64 now, const int sgrp)
 {
+#ifdef CONFIG_64BIT
 	u64 stat_time;
 	struct request_wrapper *rq_wrapper;
 
-	if (!IS_ENABLED(CONFIG_64BIT) || !(req->rq_flags & RQF_FROM_BLOCK)) {
+	if (!(req->rq_flags & RQF_FROM_BLOCK)) {
 		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
 		return;
 	}
@@ -1328,6 +1329,10 @@ static void blk_account_io_latency(struct request *req, u64 now, const int sgrp)
 
 		part_stat_add(req->part, nsecs[sgrp], duration);
 	}
+#else
+	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
+
+#endif
 }
 
 void blk_account_io_done(struct request *req, u64 now)
From: Ross Lagerwall <ross.lagerwall@citrix.com>

mainline inclusion
from mainline-v6.1
commit ad7f402ae4f466647c3a669b8a6f3e5d4271c84a
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I651EB
CVE: CVE-2022-3643
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
In some cases, the frontend may send a packet where the protocol headers are spread across multiple slots. This would result in netback creating an skb where the protocol headers spill over into the non-linear area. Some drivers and NICs don't handle this properly resulting in an interface reset or worse.
This issue was introduced by the removal of an unconditional skb pull in the tx path to improve performance. Fix this without reintroducing the pull by setting up grant copy ops for as many slots as needed to reach the XEN_NETBACK_TX_COPY_LEN size. Adjust the rest of the code to handle multiple copy operations per skb.
This is XSA-423 / CVE-2022-3643.
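The core idea in miniature, as a hypothetical sketch (names and sizes illustrative, not the netback code): keep consuming frontend slots until the linear header area reaches the copy threshold, emitting one copy op per slot touched instead of assuming a single slot suffices.

#include <stdio.h>

#define COPY_LEN 128	/* stand-in for XEN_NETBACK_TX_COPY_LEN */

struct slot { unsigned int size; };

static unsigned int setup_copy_ops(const struct slot *slots, int nr_slots,
				   unsigned int data_len)
{
	unsigned int ops = 0;
	int i = 0;

	while (data_len > 0 && i < nr_slots) {
		unsigned int amount = data_len > slots[i].size ?
				      slots[i].size : data_len;
		/* one grant-copy op per (possibly partial) slot */
		ops++;
		data_len -= amount;
		i++;
	}
	return ops;
}

int main(void)
{
	struct slot slots[] = { { 40 }, { 40 }, { 200 } };

	/* headers spread over three small slots -> three copy ops */
	printf("%u copy ops\n", setup_copy_ops(slots, 3, COPY_LEN));
	return 0;
}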
Fixes: 7e5d7753956b ("xen-netback: remove unconditional __pskb_pull_tail() in guest Tx path")
Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
Reviewed-by: Yue Haibing <yuehaibing@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/net/xen-netback/netback.c | 223 ++++++++++++++++--------------
 1 file changed, 123 insertions(+), 100 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index b0cbc7fead74..06fd61b71d37 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -330,10 +330,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 
 struct xenvif_tx_cb {
-	u16 pending_idx;
+	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
+	u8 copy_count;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
+#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
 
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 					   u16 pending_idx,
@@ -368,31 +371,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 	return skb;
 }
 
-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
-							struct sk_buff *skb,
-							struct xen_netif_tx_request *txp,
-							struct gnttab_map_grant_ref *gop,
-							unsigned int frag_overflow,
-							struct sk_buff *nskb)
+static void xenvif_get_requests(struct xenvif_queue *queue,
+				struct sk_buff *skb,
+				struct xen_netif_tx_request *first,
+				struct xen_netif_tx_request *txfrags,
+				unsigned *copy_ops,
+				unsigned *map_ops,
+				unsigned int frag_overflow,
+				struct sk_buff *nskb,
+				unsigned int extra_count,
+				unsigned int data_len)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
-	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
-	int start;
+	u16 pending_idx;
 	pending_ring_idx_t index;
 	unsigned int nr_slots;
+	struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
+	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
+	struct xen_netif_tx_request *txp = first;
+
+	nr_slots = shinfo->nr_frags + 1;
+
+	copy_count(skb) = 0;
+
+	/* Create copy ops for exactly data_len bytes into the skb head. */
+	__skb_put(skb, data_len);
+	while (data_len > 0) {
+		int amount = data_len > txp->size ? txp->size : data_len;
+
+		cop->source.u.ref = txp->gref;
+		cop->source.domid = queue->vif->domid;
+		cop->source.offset = txp->offset;
+
+		cop->dest.domid = DOMID_SELF;
+		cop->dest.offset = (offset_in_page(skb->data +
+						   skb_headlen(skb) -
+						   data_len)) & ~XEN_PAGE_MASK;
+		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
+				       - data_len);
+
+		cop->len = amount;
+		cop->flags = GNTCOPY_source_gref;
 
-	nr_slots = shinfo->nr_frags;
+		index = pending_index(queue->pending_cons);
+		pending_idx = queue->pending_ring[index];
+		callback_param(queue, pending_idx).ctx = NULL;
+		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+		copy_count(skb)++;
+
+		cop++;
+		data_len -= amount;
 
-	/* Skip first skb fragment if it is on same page as header fragment. */
-	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+		if (amount == txp->size) {
+			/* The copy op covered the full tx_request */
+
+			memcpy(&queue->pending_tx_info[pending_idx].req,
+			       txp, sizeof(*txp));
+			queue->pending_tx_info[pending_idx].extra_count =
+				(txp == first) ? extra_count : 0;
+
+			if (txp == first)
+				txp = txfrags;
+			else
+				txp++;
+			queue->pending_cons++;
+			nr_slots--;
+		} else {
+			/* The copy op partially covered the tx_request.
+			 * The remainder will be mapped.
+			 */
+			txp->offset += amount;
+			txp->size -= amount;
+		}
+	}
 
-	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
-	     shinfo->nr_frags++, txp++, gop++) {
+	for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+	     shinfo->nr_frags++, gop++) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
+		xenvif_tx_create_map_op(queue, pending_idx, txp,
+					txp == first ? extra_count : 0, gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+
+		if (txp == first)
+			txp = txfrags;
+		else
+			txp++;
 	}
 
 	if (frag_overflow) {
@@ -413,7 +478,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
 		skb_shinfo(skb)->frag_list = nskb;
 	}
 
-	return gop;
+	(*copy_ops) = cop - queue->tx_copy_ops;
+	(*map_ops) = gop - queue->tx_map_ops;
 }
 
 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
@@ -449,7 +515,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 			       struct gnttab_copy **gopp_copy)
 {
 	struct gnttab_map_grant_ref *gop_map = *gopp_map;
-	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+	u16 pending_idx;
 	/* This always points to the shinfo of the skb being checked, which
 	 * could be either the first or the one on the frag_list
 	 */
@@ -460,24 +526,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 	struct skb_shared_info *first_shinfo = NULL;
 	int nr_frags = shinfo->nr_frags;
 	const bool sharedslot = nr_frags &&
-				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
+				frag_get_pending_idx(&shinfo->frags[0]) ==
+				 copy_pending_idx(skb, copy_count(skb) - 1);
 	int i, err;
 
-	/* Check status of header. */
-	err = (*gopp_copy)->status;
-	if (unlikely(err)) {
-		if (net_ratelimit())
-			netdev_dbg(queue->vif->dev,
-				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
-				   (*gopp_copy)->status,
-				   pending_idx,
-				   (*gopp_copy)->source.u.ref);
-		/* The first frag might still have this slot mapped */
-		if (!sharedslot)
-			xenvif_idx_release(queue, pending_idx,
-					   XEN_NETIF_RSP_ERROR);
+	for (i = 0; i < copy_count(skb); i++) {
+		int newerr;
+
+		/* Check status of header. */
+		pending_idx = copy_pending_idx(skb, i);
+
+		newerr = (*gopp_copy)->status;
+		if (likely(!newerr)) {
+			/* The first frag might still have this slot mapped */
+			if (i < copy_count(skb) - 1 || !sharedslot)
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_OKAY);
+		} else {
+			err = newerr;
+			if (net_ratelimit())
+				netdev_dbg(queue->vif->dev,
+					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+					   (*gopp_copy)->status,
+					   pending_idx,
+					   (*gopp_copy)->source.u.ref);
+			/* The first frag might still have this slot mapped */
+			if (i < copy_count(skb) - 1 || !sharedslot)
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_ERROR);
+		}
+		(*gopp_copy)++;
 	}
-	(*gopp_copy)++;
 
 check_frags:
 	for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -524,14 +603,6 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 		if (err)
 			continue;
 
-		/* First error: if the header haven't shared a slot with the
-		 * first frag, release it as well.
-		 */
-		if (!sharedslot)
-			xenvif_idx_release(queue,
-					   XENVIF_TX_CB(skb)->pending_idx,
-					   XEN_NETIF_RSP_OKAY);
-
 		/* Invalidate preceding fragments of this skb. */
 		for (j = 0; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
@@ -801,7 +872,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				     unsigned *copy_ops,
 				     unsigned *map_ops)
 {
-	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
 	struct sk_buff *skb, *nskb;
 	int ret;
 	unsigned int frag_overflow;
@@ -883,8 +953,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			continue;
 		}
 
+		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
+			XEN_NETBACK_TX_COPY_LEN : txreq.size;
+
 		ret = xenvif_count_requests(queue, &txreq, extra_count,
 					    txfrags, work_to_do);
+
 		if (unlikely(ret < 0))
 			break;
 
@@ -910,9 +984,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];
 
-		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
-			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-			XEN_NETBACK_TX_COPY_LEN : txreq.size;
+		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
+			data_len = txreq.size;
 
 		skb = xenvif_alloc_skb(data_len);
 		if (unlikely(skb == NULL)) {
@@ -923,8 +996,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		}
 
 		skb_shinfo(skb)->nr_frags = ret;
-		if (data_len < txreq.size)
-			skb_shinfo(skb)->nr_frags++;
 		/* At this point shinfo->nr_frags is in fact the number of
 		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
 		 */
@@ -986,54 +1057,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			type);
 		}
 
-		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
-
-		__skb_put(skb, data_len);
-		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
-		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
-		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
-
-		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-			virt_to_gfn(skb->data);
-		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
-		queue->tx_copy_ops[*copy_ops].dest.offset =
-			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
-
-		queue->tx_copy_ops[*copy_ops].len = data_len;
-		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
-
-		(*copy_ops)++;
-
-		if (data_len < txreq.size) {
-			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-					     pending_idx);
-			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
-						extra_count, gop);
-			gop++;
-		} else {
-			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-					     INVALID_PENDING_IDX);
-			memcpy(&queue->pending_tx_info[pending_idx].req,
-			       &txreq, sizeof(txreq));
-			queue->pending_tx_info[pending_idx].extra_count =
-				extra_count;
-		}
-
-		queue->pending_cons++;
-
-		gop = xenvif_get_requests(queue, skb, txfrags, gop,
-				          frag_overflow, nskb);
+		xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
+				    map_ops, frag_overflow, nskb, extra_count,
+				    data_len);
 
 		__skb_queue_tail(&queue->tx_queue, skb);
 
 		queue->tx.req_cons = idx;
 
-		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+		if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
 		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
 			break;
 	}
 
-	(*map_ops) = gop - queue->tx_map_ops;
-
 	return;
 }
 
@@ -1112,9 +1148,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
 		struct xen_netif_tx_request *txp;
 		u16 pending_idx;
-		unsigned data_len;
 
-		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+		pending_idx = copy_pending_idx(skb, 0);
 		txp = &queue->pending_tx_info[pending_idx].req;
 
 		/* Check the remap error code. */
@@ -1133,18 +1168,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 			continue;
 		}
 
-		data_len = skb->len;
-		callback_param(queue, pending_idx).ctx = NULL;
-		if (data_len < txp->size) {
-			/* Append the packet payload as a fragment. */
-			txp->offset += data_len;
-			txp->size -= data_len;
-		} else {
-			/* Schedule a response immediately. */
-			xenvif_idx_release(queue, pending_idx,
-					   XEN_NETIF_RSP_OKAY);
-		}
-
 		if (txp->flags & XEN_NETTXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
 		else if (txp->flags & XEN_NETTXF_data_validated)
@@ -1330,7 +1353,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
 
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
 {
-	unsigned nr_mops, nr_cops = 0;
+	unsigned nr_mops = 0, nr_cops = 0;
 	int work_done, ret;
 
 	if (unlikely(!tx_work_todo(queue)))
From: Juergen Gross <jgross@suse.com>

mainline inclusion
from mainline-v6.1
commit 7dfa764e0223a324366a2a1fc056d4d9d4e95491
category: bugfix
bugzilla: 188138
CVE: CVE-2022-3643
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Commit ad7f402ae4f4 ("xen/netback: Ensure protocol headers don't fall in the non-linear area") introduced a (valid) build warning. There have even been reports of this problem breaking networking of Xen guests.
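The warning class being fixed, as a toy reproduction (illustrative only, not the driver code):

int sum_status(int n)
{
	int i, err;		/* -Wmaybe-uninitialized fires here */

	for (i = 0; i < n; i++)
		err = i;	/* only assigned when the loop runs */

	return err;		/* garbage for n == 0; fix: int i, err = 0; */
}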
Fixes: ad7f402ae4f4 ("xen/netback: Ensure protocol headers don't fall in the non-linear area")
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Tested-by: Jason Andryuk <jandryuk@gmail.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
Reviewed-by: Yue Haibing <yuehaibing@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/net/xen-netback/netback.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 06fd61b71d37..2898d74572b1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -528,7 +528,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 	const bool sharedslot = nr_frags &&
 				frag_get_pending_idx(&shinfo->frags[0]) ==
 				copy_pending_idx(skb, copy_count(skb) - 1);
-	int i, err;
+	int i, err = 0;
 
 	for (i = 0; i < copy_count(skb); i++) {
 		int newerr;
From: Dan Williams <dan.j.williams@intel.com>

stable inclusion
from stable-v5.10.137
commit 71042279b161ba329d730e5fe517adbc106c031b
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60PLB
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
[ Upstream commit b13a3e5fd40b7d1b394c5ecbb5eb301a4c38e7b2 ]
When a platform marks a memory range as "special purpose" it is not onlined as System RAM by default. However, it is still suitable for error injection. Add IORES_DESC_SOFT_RESERVED to einj_error_inject() as a permissible memory type in the sanity checking of the arguments to _EINJ.
Fixes: 262b45ae3ab4 ("x86/efi: EFI soft reservation to E820 enumeration")
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reported-by: Omar Avelar <omar.avelar@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>

Conflicts:
	drivers/acpi/apei/einj.c

Reviewed-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/acpi/apei/einj.c | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 4384269ad159..e905282ff7f9 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -545,6 +545,8 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
 	     != REGION_INTERSECTS) &&
 	    (region_intersects(base_addr, size, IORESOURCE_MEM,
 			       IORES_DESC_PERSISTENT_MEMORY)
 	     != REGION_INTERSECTS) &&
+	    (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED)
+	     != REGION_INTERSECTS) &&
 	    !arch_is_platform_page(base_addr)))
 		return -EINVAL;
From: Zeng Jingxiang <linuszeng@tencent.com>

mainline inclusion
from mainline-v6.1-rc1
commit 8b740c08eb8202817562c358e8d867db0f7d6565
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I664DZ
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...

--------------------------------
Coverity complains of a possible NULL dereference:

in of_select_probe_type():

1. returned_null: of_match_device() returns NULL.
2. var_assigned: match = NULL return value from of_match_device().
309        match = of_match_device(of_flash_match, &dev->dev);

3. dereference: Dereferencing the NULL pointer match.
310        probe_type = match->data;
Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20220727060302.1560325-1-zengjx95@gmail.co...
Signed-off-by: Xiang Yang <xiangyang3@huawei.com>
Reviewed-by: yiyang <yiyang13@huawei.com>
Reviewed-by: guozihua <guozihua@huawei.com>
Reviewed-by: GONG, Ruiqi <gongruiqi1@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Reviewed-by: Wang Weiyang <wangweiyang2@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/mtd/maps/physmap-core.c | 3 +++
 1 file changed, 3 insertions(+)
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 4f63b8430c71..69d0ab1f6f94 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -307,6 +307,9 @@ static const char *of_select_probe_type(struct platform_device *dev)
 	const char *probe_type;
 
 	match = of_match_device(of_flash_match, &dev->dev);
+	if (!match)
+		return NULL;
+
 	probe_type = match->data;
 	if (probe_type)
 		return probe_type;
From: Zhang Jian <zhangjian210@huawei.com>

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I66EW4
CVE: NA
----------------------------------------------------
When CONFIG_ASCEND_FEATURES and CONFIG_ASCEND_CHARGE_MIGRATE_HUGEPAGES are enabled and the kernel image is built, the following build warning appears:

arch/arm64/mm/init.c:736:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
  736 |  extern int enable_charge_mighp;
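A toy reproduction of the warning and of the fix (illustrative only, not the kernel code): an extern declaration that follows a statement inside a function trips -Wdeclaration-after-statement, so hoist it to file scope.

#include <stdio.h>

int feature_flag;

void warns(void)
{
	printf("enabling\n");
	extern int feature_flag;	/* warning: ISO C90 forbids mixed
					 * declarations and code */
	feature_flag = 1;
}

/* The fix: declare at file scope, before any statements. */
extern int feature_flag;

void clean(void)
{
	printf("enabling\n");
	feature_flag = 1;
}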
Signed-off-by: Zhang Jian <zhangjian210@huawei.com>
Reviewed-by: chenweilong <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/arm64/mm/init.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 1b2f55d3c195..be67a9c42628 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -727,14 +727,20 @@ void dump_mem_limit(void)
 	}
 }
 
+#ifdef CONFIG_ASCEND_CHARGE_MIGRATE_HUGEPAGES
+extern int enable_charge_mighp;
+#endif
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+extern bool enable_pseudo_nmi;
+#endif
+
 void ascend_enable_all_features(void)
 {
 	if (IS_ENABLED(CONFIG_ASCEND_DVPP_MMAP))
 		enable_mmap_dvpp = 1;
 
 #ifdef CONFIG_ASCEND_CHARGE_MIGRATE_HUGEPAGES
-	extern int enable_charge_mighp;
-
 	enable_charge_mighp = 1;
 #endif
 
@@ -743,8 +749,6 @@ void ascend_enable_all_features(void)
 #endif
 
 #ifdef CONFIG_ARM64_PSEUDO_NMI
-	extern bool enable_pseudo_nmi;
-
 	enable_pseudo_nmi = true;
 #endif
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I66G0M
-----------------------------------------------
Commit 36a1a8916de5 ("efi/loongarch: Add efistub booting support") introduced the following macro definition for riscv in file drivers/firmware/efi/libstub/efi-stub.c
#ifdef CONFIG_ARM64
# define EFI_RT_VIRTUAL_LIMIT	DEFAULT_MAP_WINDOW_64
#elif defined(CONFIG_RISCV) || defined(CONFIG_LOONGARCH)
# define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE_MIN
#else
# define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE
#endif
but the related riscv patches, such as e8a62cc26ddf ("riscv: Implement sv48 support") and 01abdfeac81b ("riscv: compat: Support TASK_SIZE for compat mode"), have not been merged, causing a build error on riscv:
build failed: riscv, allmodconfig
<line too long ...> n this function); did you mean ‘TASK_SIZE_MAX’?
 # define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE_MIN
                                ^
drivers/firmware/efi/libstub/efi-stub.c:289:31: note: in expansion of macro ‘EFI_RT_VIRTUAL_LIMIT’
  static const u64 headroom = EFI_RT_VIRTUAL_LIMIT -
                              ^~~~~~~~~~~~~~~~~~~~
drivers/firmware/efi/libstub/efi-stub.c:44:32: note: each undeclared identifier is reported only once for each function it appears in
 # define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE_MIN
                                ^
drivers/firmware/efi/libstub/efi-stub.c:289:31: note: in expansion of macro ‘EFI_RT_VIRTUAL_LIMIT’
  static const u64 headroom = EFI_RT_VIRTUAL_LIMIT -
                              ^~~~~~~~~~~~~~~~~~~~
make[4]: *** [drivers/firmware/efi/libstub/efi-stub.o] Error 1
make[3]: *** [drivers/firmware/efi/libstub] Error 2
make[2]: *** [drivers/firmware/efi] Error 2
make[1]: *** [drivers/firmware] Error 2
make[1]: *** Waiting for unfinished jobs....
make: *** [drivers] Error 2
Fix it by removing the CONFIG_RISCV condition for EFI_RT_VIRTUAL_LIMIT definition.
Fixes: 36a1a8916de5 ("efi/loongarch: Add efistub booting support")
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Wei Li <liwei391@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/firmware/efi/libstub/efi-stub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 8e0f64b3db69..96129f0fc60e 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -40,7 +40,7 @@
 
 #ifdef CONFIG_ARM64
 # define EFI_RT_VIRTUAL_LIMIT	DEFAULT_MAP_WINDOW_64
-#elif defined(CONFIG_RISCV) || defined(CONFIG_LOONGARCH)
+#elif defined(CONFIG_LOONGARCH)
 # define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE_MIN
 #else
 # define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE