kernel@openeuler.org

[PATCH OLK-5.10 V2] sched: More flexible use of CPU quota when CPU is idle
by Cheng Yu 23 Jun '25

From: Zheng Zucheng <zhengzucheng(a)huawei.com> hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ICE7WC -------------------------------- This feature allows users to use CPU quota more flexibly when CPU is idle and it will cause the CPU quota to be exceeded. So, it cannot be used in scenarios where there are strict restrictions on the use of the CPU quota, such as some commercial scenarios that charge based on the use of CPU quota. Signed-off-by: Zheng Zucheng <zhengzucheng(a)huawei.com> Signed-off-by: Liao Chang <liaochang1(a)huawei.com> Signed-off-by: Cheng Yu <serein.chengyu(a)huawei.com> --- arch/arm64/Kconfig | 1 + arch/arm64/configs/openeuler_defconfig | 1 + arch/arm64/kernel/topology.c | 32 ++++++ include/linux/sched/sysctl.h | 4 + init/Kconfig | 18 +++ kernel/sched/core.c | 35 ++++++ kernel/sched/fair.c | 147 ++++++++++++++++++++++++- kernel/sched/features.h | 4 + kernel/sched/idle.c | 15 +++ kernel/sched/sched.h | 12 ++ kernel/sysctl.c | 11 ++ 11 files changed, 276 insertions(+), 4 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index eb30ef59aca2..4ba485650d0a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -80,6 +80,7 @@ config ARM64 select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_SUPPORTS_SCHED_KEEP_ON_CORE select ARCH_SUPPORTS_SCHED_PARAL + select ARCH_SUPPORTS_SCHED_SOFT_QUOTA select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT select ARCH_WANT_DEFAULT_BPF_JIT diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index be1faf2da008..1e1e70a6736d 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -191,6 +191,7 @@ CONFIG_NET_NS=y CONFIG_SCHED_STEAL=y CONFIG_SCHED_KEEP_ON_CORE=y CONFIG_SCHED_PARAL=y +CONFIG_SCHED_SOFT_QUOTA=y CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y # CONFIG_SYSFS_DEPRECATED is not set diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 785de5b9696d..b3ae5c6de81e 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -363,6 +363,38 @@ void topology_scale_freq_tick(void) this_cpu_write(arch_const_cycles_prev, const_cnt); } +#ifdef CONFIG_SCHED_SOFT_QUOTA +static DEFINE_PER_CPU(int, sibling_idle) = 1; + +int is_sibling_idle(void) +{ + return this_cpu_read(sibling_idle); +} + +static void smt_measurement_begin(void) +{ + // TODO +} + +static void smt_measurement_done(void) +{ + // TODO +} +#else +static inline void smt_measurement_begin(void) { } +static inline void smt_measurement_done(void) { } +#endif + +void arch_cpu_idle_enter(void) +{ + smt_measurement_begin(); +} + +void arch_cpu_idle_exit(void) +{ + smt_measurement_done(); +} + #ifdef CONFIG_ACPI_CPPC_LIB #include <acpi/cppc_acpi.h> diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 9f998be56bdd..90021477ea4c 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -48,6 +48,10 @@ extern unsigned int sysctl_smart_grid_strategy_ctrl; extern int sysctl_affinity_adjust_delay_ms; #endif +#ifdef CONFIG_SCHED_SOFT_QUOTA +extern unsigned int sysctl_soft_runtime_ratio; +#endif + enum sched_tunable_scaling { SCHED_TUNABLESCALING_NONE, SCHED_TUNABLESCALING_LOG, diff --git a/init/Kconfig b/init/Kconfig index 5f88cce193e8..2ee50c638ca3 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1411,6 +1411,24 @@ config SCHED_PARAL 3. 
The existing "qos dynamic affinity" and "qos smart grid" features must not be used simultaneously. +# +# For architectures that want to enable the support for SCHED_SOFT_QUOTA +# +config ARCH_SUPPORTS_SCHED_SOFT_QUOTA + bool + +config SCHED_SOFT_QUOTA + bool "More flexible use of CPU quota" + depends on ARCH_SUPPORTS_SCHED_SOFT_QUOTA + depends on CFS_BANDWIDTH + default n + help + This option allows users to use CPU quota more flexibly when CPU + is idle. It is better for users to have some understanding of + CFS_BANDWIDTH. It cannot be used in scenarios where there are strict + restrictions on the use of the CPU quota, such as some commercial + scenarios that charge based on the use of CPU quota. + config CHECKPOINT_RESTORE bool "Checkpoint/restore support" select PROC_CHILDREN diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 457eeebc7b62..f14ba71f26ce 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9902,6 +9902,33 @@ static int cpu_steal_task_write(struct cgroup_subsys_state *css, } #endif +#ifdef CONFIG_SCHED_SOFT_QUOTA +static int cpu_soft_quota_write(struct cgroup_subsys_state *css, + struct cftype *cftype, s64 soft_quota) +{ + struct task_group *tg = css_tg(css); + + if (!sched_feat(SOFT_QUOTA)) + return -EINVAL; + + if (soft_quota != 1 && soft_quota != 0) + return -EINVAL; + + if (tg->soft_quota == soft_quota) + return 0; + + tg->soft_quota = soft_quota; + + return 0; +} + +static inline s64 cpu_soft_quota_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return css_tg(css)->soft_quota; +} +#endif + #ifdef CONFIG_BPF_SCHED void sched_settag(struct task_struct *tsk, s64 tag) { @@ -10064,6 +10091,14 @@ static struct cftype cpu_legacy_files[] = { .write_s64 = cpu_qos_write, }, #endif +#ifdef CONFIG_SCHED_SOFT_QUOTA + { + .name = "soft_quota", + .flags = CFTYPE_NOT_ON_ROOT, + .read_s64 = cpu_soft_quota_read, + .write_s64 = cpu_soft_quota_write, + }, +#endif #ifdef CONFIG_QOS_SCHED_SMT_EXPELLER { .name = "smt_expell", diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b7544a14225c..8c4993ec7059 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -140,6 +140,10 @@ static int unthrottle_qos_cfs_rqs(int cpu); static bool qos_smt_expelled(int this_cpu); #endif +#ifdef CONFIG_SCHED_SOFT_QUOTA +static DEFINE_PER_CPU_SHARED_ALIGNED(struct list_head, soft_quota_throttled_cfs_rq); +#endif + #ifdef CONFIG_QOS_SCHED_MULTILEVEL #define QOS_LEVEL_WEIGHT_OFFLINE_EX 1 #define QOS_LEVEL_WEIGHT_OFFLINE 10 @@ -439,10 +443,11 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se) return se->parent; } -static void +static bool find_matching_se(struct sched_entity **se, struct sched_entity **pse) { int se_depth, pse_depth; + bool ret = false; /* * preemption test can be made between sibling entities who are in the @@ -456,6 +461,11 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse) pse_depth = (*pse)->depth; while (se_depth > pse_depth) { +#ifdef CONFIG_SCHED_SOFT_QUOTA + if (sched_feat(SOFT_QUOTA) && !ret && + cfs_rq_of(*se)->soft_quota_enable == 1) + ret = true; +#endif se_depth--; *se = parent_entity(*se); } @@ -466,9 +476,16 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse) } while (!is_same_group(*se, *pse)) { +#ifdef CONFIG_SCHED_SOFT_QUOTA + if (sched_feat(SOFT_QUOTA) && !ret && + cfs_rq_of(*se)->soft_quota_enable == 1) + ret = true; +#endif *se = parent_entity(*se); *pse = parent_entity(*pse); } + + return ret; } #else /* !CONFIG_FAIR_GROUP_SCHED */ @@ -503,9 +520,10 @@ 
static inline struct sched_entity *parent_entity(struct sched_entity *se) return NULL; } -static inline void +static inline bool find_matching_se(struct sched_entity **se, struct sched_entity **pse) { + return false; } #endif /* CONFIG_FAIR_GROUP_SCHED */ @@ -5396,6 +5414,14 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) */ cfs_rq->throttled = 1; cfs_rq->throttled_clock = rq_clock(rq); + +#ifdef CONFIG_SCHED_SOFT_QUOTA + if (sched_feat(SOFT_QUOTA) && cfs_rq->tg->soft_quota == 1) { + list_add(&cfs_rq->soft_quota_throttled_list, + &per_cpu(soft_quota_throttled_cfs_rq, cpu_of(rq))); + } +#endif + return true; } @@ -5414,6 +5440,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) se = cfs_rq->tg->se[cpu_of(rq)]; +#ifdef CONFIG_SCHED_SOFT_QUOTA + if (sched_feat(SOFT_QUOTA)) + list_del_init(&cfs_rq->soft_quota_throttled_list); +#endif + #ifdef CONFIG_QOS_SCHED /* * if this cfs_rq throttled by qos, not need unthrottle it. @@ -5531,6 +5562,16 @@ static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b) struct rq_flags rf; rq_lock_irqsave(rq, &rf); + +#ifdef CONFIG_SCHED_SOFT_QUOTA + if (sched_feat(SOFT_QUOTA) && cfs_rq->soft_quota_enable == 1) { + if (cfs_rq->runtime_remaining > 0) + cfs_rq->runtime_remaining = 0; + + cfs_rq->soft_quota_enable = 0; + } +#endif + if (!cfs_rq_throttled(cfs_rq)) goto next; @@ -5573,6 +5614,17 @@ static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b) rcu_read_unlock(); } +#ifdef CONFIG_SCHED_SOFT_QUOTA +static inline void init_tg_sum_soft_runtime(struct cfs_bandwidth *cfs_b) +{ + unsigned int cpu; + struct task_group *tg = container_of(cfs_b, struct task_group, cfs_bandwidth); + + for_each_possible_cpu(cpu) + tg->cfs_rq[cpu]->sum_soft_runtime = 0; +} +#endif + /* * Responsible for refilling a task_group's bandwidth and unthrottling its * cfs_rqs as appropriate. 
If there has been no activity within the last @@ -5590,6 +5642,11 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u throttled = !list_empty(&cfs_b->throttled_cfs_rq); cfs_b->nr_periods += overrun; +#ifdef CONFIG_SCHED_SOFT_QUOTA + if (sched_feat(SOFT_QUOTA)) + init_tg_sum_soft_runtime(cfs_b); +#endif + /* Refill extra burst quota even if cfs_b->idle */ __refill_cfs_bandwidth_runtime(cfs_b); @@ -5898,6 +5955,9 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) #ifdef CONFIG_QOS_SCHED INIT_LIST_HEAD(&cfs_rq->qos_throttled_list); #endif +#ifdef CONFIG_SCHED_SOFT_QUOTA + INIT_LIST_HEAD(&cfs_rq->soft_quota_throttled_list); +#endif } void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) @@ -8536,6 +8596,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ struct cfs_rq *cfs_rq = task_cfs_rq(curr); int scale = cfs_rq->nr_running >= sched_nr_latency; int next_buddy_marked = 0; + bool ret = 0; if (unlikely(se == pse)) return; @@ -8590,7 +8651,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) return; - find_matching_se(&se, &pse); + ret = find_matching_se(&se, &pse); + +#ifdef CONFIG_SCHED_SOFT_QUOTA + if (sched_feat(SOFT_QUOTA) && ret) + goto preempt; +#endif + update_curr(cfs_rq_of(se)); BUG_ON(!pse); if (wakeup_preempt_entity(se, pse) == 1) { @@ -13823,6 +13890,9 @@ static void task_change_group_fair(struct task_struct *p, int type) void free_fair_sched_group(struct task_group *tg) { int i; +#ifdef CONFIG_SCHED_SOFT_QUOTA + struct cfs_rq *cfs_rq; +#endif destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); destroy_auto_affinity(tg); @@ -13831,6 +13901,12 @@ void free_fair_sched_group(struct task_group *tg) #ifdef CONFIG_QOS_SCHED if (tg->cfs_rq && tg->cfs_rq[i]) unthrottle_qos_sched_group(tg->cfs_rq[i]); +#endif +#ifdef CONFIG_SCHED_SOFT_QUOTA + if (tg->cfs_rq && tg->cfs_rq[i]) { + cfs_rq = tg->cfs_rq[i]; + list_del_init(&cfs_rq->soft_quota_throttled_list); + } #endif if (tg->cfs_rq) kfree(tg->cfs_rq[i]); @@ -14209,13 +14285,20 @@ void task_tick_relationship(struct rq *rq, struct task_struct *curr) __init void init_sched_fair_class(void) { -#ifdef CONFIG_QOS_SCHED +#if defined(CONFIG_QOS_SCHED) || defined(CONFIG_SCHED_SOFT_QUOTA) int i; +#endif +#ifdef CONFIG_QOS_SCHED for_each_possible_cpu(i) INIT_LIST_HEAD(&per_cpu(qos_throttled_cfs_rq, i)); #endif +#ifdef CONFIG_SCHED_SOFT_QUOTA + for_each_possible_cpu(i) + INIT_LIST_HEAD(&per_cpu(soft_quota_throttled_cfs_rq, i)); +#endif + init_sched_numa_icon(); #ifdef CONFIG_SMP @@ -14327,3 +14410,59 @@ int sched_trace_rq_nr_running(struct rq *rq) return rq ? 
rq->nr_running : -1; } EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running); + +#ifdef CONFIG_SCHED_SOFT_QUOTA +unsigned int sysctl_soft_runtime_ratio = 20; +static bool check_soft_runtime(struct task_group *tg, int slice) +{ + int cpu; + u64 sum_soft_runtime = slice; + struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; + + if (cfs_b->quota == RUNTIME_INF) + return true; + + for_each_possible_cpu(cpu) + sum_soft_runtime += tg->cfs_rq[cpu]->sum_soft_runtime; + + return sum_soft_runtime < sysctl_soft_runtime_ratio * cfs_b->quota / 100; +} + +bool unthrottle_cfs_rq_soft_quota(struct rq *rq) +{ + int max_cnt = 0; + bool ret = false; + struct cfs_rq *cfs_rq, *tmp_rq; + struct cfs_bandwidth *cfs_b; + int slice = sched_cfs_bandwidth_slice(); + + list_for_each_entry_safe(cfs_rq, tmp_rq, &per_cpu(soft_quota_throttled_cfs_rq, cpu_of(rq)), + soft_quota_throttled_list) { + if (max_cnt++ > 20) + break; + + if (cfs_rq->throttled) { + cfs_b = tg_cfs_bandwidth(cfs_rq->tg); + raw_spin_lock(&cfs_b->lock); + + if (!check_soft_runtime(cfs_rq->tg, slice)) { + raw_spin_unlock(&cfs_b->lock); + continue; + } + + raw_spin_unlock(&cfs_b->lock); + + if (cfs_rq->runtime_remaining + slice > 0) { + cfs_rq->runtime_remaining += slice; + cfs_rq->sum_soft_runtime += slice; + cfs_rq->soft_quota_enable = 1; + unthrottle_cfs_rq(cfs_rq); + ret = true; + break; + } + } + } + + return ret; +} +#endif diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 1fd89af55681..c887b0d384ae 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -78,6 +78,10 @@ SCHED_FEAT(KEEP_ON_CORE, false) SCHED_FEAT(PARAL, false) #endif +#ifdef CONFIG_SCHED_SOFT_QUOTA +SCHED_FEAT(SOFT_QUOTA, false) +#endif + /* * Issue a WARN when we do multiple update_rq_clock() calls * in a single rq->lock section. 
Default disabled because the diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 3c6396d61a04..7aa0a1653bd0 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -439,10 +439,25 @@ static struct task_struct *pick_task_idle(struct rq *rq) } #endif +#ifdef CONFIG_SCHED_SOFT_QUOTA +int __weak is_sibling_idle(void) +{ + return 0; +} +#endif + struct task_struct *pick_next_task_idle(struct rq *rq) { struct task_struct *next = rq->idle; +#ifdef CONFIG_SCHED_SOFT_QUOTA + if (sched_feat(SOFT_QUOTA)) { + if (unthrottle_cfs_rq_soft_quota(rq) && rq->cfs.nr_running && + is_sibling_idle()) + return pick_next_task_fair(rq, NULL, NULL); + } +#endif + set_next_task_idle(rq, next, true); return next; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index fe6342305b0f..9b2779e8fc91 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -508,6 +508,9 @@ struct task_group { #else KABI_RESERVE(4) #endif +#if defined(CONFIG_SCHED_SOFT_QUOTA) + KABI_EXTEND(u64 soft_quota) +#endif }; #ifdef CONFIG_SCHED_STEAL @@ -606,6 +609,10 @@ static inline int init_auto_affinity(struct task_group *tg) static inline void tg_update_affinity_domains(int cpu, int online) {} #endif +#ifdef CONFIG_SCHED_SOFT_QUOTA +extern bool unthrottle_cfs_rq_soft_quota(struct rq *rq); +#endif + #ifdef CONFIG_FAIR_GROUP_SCHED extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); @@ -734,6 +741,11 @@ struct cfs_rq { KABI_RESERVE(3) KABI_RESERVE(4) #endif +#if defined(CONFIG_SCHED_SOFT_QUOTA) + KABI_EXTEND(u64 soft_quota_enable) + KABI_EXTEND(u64 sum_soft_runtime) + KABI_EXTEND(struct list_head soft_quota_throttled_list) +#endif }; static inline int rt_bandwidth_enabled(void) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0b1c13a05332..738d9a4455c1 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2831,6 +2831,17 @@ static struct ctl_table kern_table[] = { .extra2 = &one_hundred, }, #endif +#ifdef CONFIG_SCHED_SOFT_QUOTA + { + .procname = "sched_soft_runtime_ratio", + .data = &sysctl_soft_runtime_ratio, + .maxlen = sizeof(sysctl_soft_runtime_ratio), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ONE, + .extra2 = &one_hundred, + }, +#endif #ifdef CONFIG_SCHED_STEAL { .procname = "sched_max_steal_count", -- 2.25.1
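For readers who want to exercise the feature, the diff above introduces three knobs: the SOFT_QUOTA scheduler feature (off by default), a per-group cpu.soft_quota file on the cgroup-v1 cpu controller (0 or 1, not available on the root group), and the kernel.sched_soft_runtime_ratio sysctl (1-100, default 20) that caps how much extra runtime a throttled group may borrow per period. The sketch below is illustrative only: the debugfs and cgroup mount points and the group name "mygroup" are assumptions about a typical setup, not part of the patch, and a group only benefits if it is actually throttled by its cpu.cfs_quota_us setting.

#include <stdio.h>

/* Minimal sketch: enable the soft-quota path for one bandwidth-limited group.
 * Paths below (/sys/kernel/debug, /sys/fs/cgroup/cpu) and the group name
 * "mygroup" are assumptions about a typical system, not taken from the patch. */
static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        /* 1. Turn on the scheduler feature added by this patch (default off). */
        write_str("/sys/kernel/debug/sched_features", "SOFT_QUOTA");

        /* 2. Let one throttled group borrow otherwise-idle CPU time. */
        write_str("/sys/fs/cgroup/cpu/mygroup/cpu.soft_quota", "1");

        /* 3. Cap the borrowed time at 20% of the group's quota per period (1-100). */
        write_str("/proc/sys/kernel/sched_soft_runtime_ratio", "20");

        return 0;
}

As the commit message stresses, a group with soft_quota enabled can exceed its nominal quota, so this setup is unsuitable wherever CPU quota consumption is billed.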
[PATCH OLK-6.10] ALSA: pcm: Fix race of buffer access at PCM OSS layer
by Luo Gengkun 23 Jun '25

From: Takashi Iwai <tiwai(a)suse.de> stable inclusion from stable-v6.6.93 commit 74d90875f3d43f3eff0e9861c4701418795d3455 category: bugfix bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICGACK Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id… -------------------------------- commit 93a81ca0657758b607c3f4ba889ae806be9beb73 upstream. The PCM OSS layer tries to clear the buffer with the silence data at initialization (or reconfiguration) of a stream with the explicit call of snd_pcm_format_set_silence() with runtime->dma_area. But this may lead to a UAF because the accessed runtime->dma_area might be freed concurrently, as it's performed outside the PCM ops. For avoiding it, move the code into the PCM core and perform it inside the buffer access lock, so that it won't be changed during the operation. Reported-by: syzbot+32d4647f551007595173(a)syzkaller.appspotmail.com Closes: https://lore.kernel.org/68164d8e.050a0220.11da1b.0019.GAE@google.com Cc: <stable(a)vger.kernel.org> Link: https://patch.msgid.link/20250516080817.20068-1-tiwai@suse.de Signed-off-by: Takashi Iwai <tiwai(a)suse.de> Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org> Signed-off-by: Luo Gengkun <luogengkun2(a)huawei.com> --- include/sound/pcm.h | 2 ++ sound/core/oss/pcm_oss.c | 3 +-- sound/core/pcm_native.c | 11 +++++++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 2a815373dac1..ed4449cbdf80 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1427,6 +1427,8 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s #define snd_pcm_lib_mmap_iomem NULL #endif +void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime); + /** * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer * @dma: DMA number diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 728c211142d1..471de2d1b37a 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1085,8 +1085,7 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) runtime->oss.params = 0; runtime->oss.prepare = 1; runtime->oss.buffer_used = 0; - if (runtime->dma_area) - snd_pcm_format_set_silence(runtime->format, runtime->dma_area, bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_runtime_buffer_set_silence(runtime); runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size); diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index e40de64ec85c..31fc20350fd9 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -703,6 +703,17 @@ static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime) atomic_inc(&runtime->buffer_accessing); } +/* fill the PCM buffer with the current silence format; called from pcm_oss.c */ +void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime) +{ + snd_pcm_buffer_access_lock(runtime); + if (runtime->dma_area) + snd_pcm_format_set_silence(runtime->format, runtime->dma_area, + bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_buffer_access_unlock(runtime); +} +EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence); + #if IS_ENABLED(CONFIG_SND_PCM_OSS) #define is_oss_stream(substream) ((substream)->oss.oss) #else -- 2.34.1
[PATCH OLK-5.10] ALSA: pcm: Fix race of buffer access at PCM OSS layer
by Luo Gengkun 23 Jun '25

From: Takashi Iwai <tiwai(a)suse.de> stable inclusion from stable-v5.10.238 commit 8170d8ec4efd0be352c14cb61f374e30fb0c2a25 category: bugfix bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICGACK Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id… -------------------------------- commit 93a81ca0657758b607c3f4ba889ae806be9beb73 upstream. The PCM OSS layer tries to clear the buffer with the silence data at initialization (or reconfiguration) of a stream with the explicit call of snd_pcm_format_set_silence() with runtime->dma_area. But this may lead to a UAF because the accessed runtime->dma_area might be freed concurrently, as it's performed outside the PCM ops. For avoiding it, move the code into the PCM core and perform it inside the buffer access lock, so that it won't be changed during the operation. Reported-by: syzbot+32d4647f551007595173(a)syzkaller.appspotmail.com Closes: https://lore.kernel.org/68164d8e.050a0220.11da1b.0019.GAE@google.com Cc: <stable(a)vger.kernel.org> Link: https://patch.msgid.link/20250516080817.20068-1-tiwai@suse.de Signed-off-by: Takashi Iwai <tiwai(a)suse.de> Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org> Signed-off-by: Luo Gengkun <luogengkun2(a)huawei.com> --- include/sound/pcm.h | 2 ++ sound/core/oss/pcm_oss.c | 3 +-- sound/core/pcm_native.c | 11 +++++++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 6554a9f71c62..c573c6a7da12 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1334,6 +1334,8 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s #define snd_pcm_lib_mmap_iomem NULL #endif +void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime); + /** * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer * @dma: DMA number diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index de6f94bee50b..8eb5fef41dbe 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1078,8 +1078,7 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) runtime->oss.params = 0; runtime->oss.prepare = 1; runtime->oss.buffer_used = 0; - if (runtime->dma_area) - snd_pcm_format_set_silence(runtime->format, runtime->dma_area, bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_runtime_buffer_set_silence(runtime); runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size); diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 9425fcd30c4c..98bd6fe850d3 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -685,6 +685,17 @@ static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime) atomic_inc(&runtime->buffer_accessing); } +/* fill the PCM buffer with the current silence format; called from pcm_oss.c */ +void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime) +{ + snd_pcm_buffer_access_lock(runtime); + if (runtime->dma_area) + snd_pcm_format_set_silence(runtime->format, runtime->dma_area, + bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_buffer_access_unlock(runtime); +} +EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence); + #if IS_ENABLED(CONFIG_SND_PCM_OSS) #define is_oss_stream(substream) ((substream)->oss.oss) #else -- 2.34.1
[openeuler:OLK-5.10 2978/2978] drivers/net/ethernet/huawei/hinic3/hinic3_irq.c:22:5: warning: no previous prototype for 'hinic3_poll'
by kernel test robot 23 Jun '25

tree: https://gitee.com/openeuler/kernel.git OLK-5.10 head: 04dbc107e40be138cc70f7d15d50779f5538f412 commit: ebcedbe6ddb7bfcb756769994b3a796c771b43f5 [2978/2978] net/hinic3: Add Huawei Intelligent Network Card Driver: hinic3 config: x86_64-buildonly-randconfig-002-20250623 (https://download.01.org/0day-ci/archive/20250623/202506231925.HQ2gIERd-lkp@…) compiler: gcc-12 (Debian 12.2.0-14) 12.2.0 reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250623/202506231925.HQ2gIERd-lkp@…) If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp(a)intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202506231925.HQ2gIERd-lkp@intel.com/ All warnings (new ones prefixed by >>): >> drivers/net/ethernet/huawei/hinic3/hinic3_irq.c:22:5: warning: no previous prototype for 'hinic3_poll' [-Wmissing-prototypes] 22 | int hinic3_poll(struct napi_struct *napi, int budget) | ^~~~~~~~~~~ -- >> drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c:76:6: warning: no previous prototype for 'hinic3_uld_lock_init' [-Wmissing-prototypes] 76 | void hinic3_uld_lock_init(void) | ^~~~~~~~~~~~~~~~~~~~ -- >> drivers/net/ethernet/huawei/hinic3/hinic3_rx.c:740:5: warning: no previous prototype for 'hinic3_run_xdp' [-Wmissing-prototypes] 740 | int hinic3_run_xdp(struct hinic3_rxq *rxq, u32 pkt_len) | ^~~~~~~~~~~~~~ >> drivers/net/ethernet/huawei/hinic3/hinic3_rx.c:1156:5: warning: no previous prototype for 'rxq_restore' [-Wmissing-prototypes] 1156 | int rxq_restore(struct hinic3_nic_dev *nic_dev, u16 q_id, u16 hw_ci) | ^~~~~~~~~~~ drivers/net/ethernet/huawei/hinic3/hinic3_rx.c:1254:6: warning: no previous prototype for 'rxq_is_normal' [-Wmissing-prototypes] 1254 | bool rxq_is_normal(struct hinic3_rxq *rxq, struct rxq_check_info rxq_info) | ^~~~~~~~~~~~~ -- drivers/net/ethernet/huawei/hinic3/hinic3_ntuple.c:346:6: warning: no previous prototype for 'hinic3_flush_rx_flow_rule' [-Wmissing-prototypes] 346 | void hinic3_flush_rx_flow_rule(struct hinic3_nic_dev *nic_dev) | ^~~~~~~~~~~~~~~~~~~~~~~~~ >> drivers/net/ethernet/huawei/hinic3/hinic3_ntuple.c:785:5: warning: no previous prototype for 'hinic3_ethtool_flow_replace' [-Wmissing-prototypes] 785 | int hinic3_ethtool_flow_replace(struct hinic3_nic_dev *nic_dev, | ^~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/net/ethernet/huawei/hinic3/hinic3_ntuple.c:830:5: warning: no previous prototype for 'hinic3_ethtool_flow_remove' [-Wmissing-prototypes] 830 | int hinic3_ethtool_flow_remove(struct hinic3_nic_dev *nic_dev, u32 location) | ^~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/net/ethernet/huawei/hinic3/hinic3_ntuple.c:852:5: warning: no previous prototype for 'hinic3_ethtool_get_flow' [-Wmissing-prototypes] 852 | int hinic3_ethtool_get_flow(const struct hinic3_nic_dev *nic_dev, | ^~~~~~~~~~~~~~~~~~~~~~~ drivers/net/ethernet/huawei/hinic3/hinic3_ntuple.c:875:5: warning: no previous prototype for 'hinic3_ethtool_get_all_flows' [-Wmissing-prototypes] 875 | int hinic3_ethtool_get_all_flows(const struct hinic3_nic_dev *nic_dev, | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/net/ethernet/huawei/hinic3/hinic3_ntuple.c:893:6: warning: no previous prototype for 'hinic3_validate_channel_setting_in_ntuple' [-Wmissing-prototypes] 893 | bool hinic3_validate_channel_setting_in_ntuple(const struct hinic3_nic_dev *nic_dev, u32 q_num) | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- >> drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c:19:5: warning: no previous prototype for 
'hinic3_dbg_get_wqe_info' [-Wmissing-prototypes] 19 | int hinic3_dbg_get_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, | ^~~~~~~~~~~~~~~~~~~~~~~ drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c:60:5: warning: no previous prototype for 'hinic3_dbg_get_sq_info' [-Wmissing-prototypes] 60 | int hinic3_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, | ^~~~~~~~~~~~~~~~~~~~~~ >> drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c:103:5: warning: no previous prototype for 'hinic3_dbg_get_rq_info' [-Wmissing-prototypes] 103 | int hinic3_dbg_get_rq_info(void *hwdev, u16 q_id, struct nic_rq_info *rq_info, | ^~~~~~~~~~~~~~~~~~~~~~ vim +/hinic3_poll +22 drivers/net/ethernet/huawei/hinic3/hinic3_irq.c 21 > 22 int hinic3_poll(struct napi_struct *napi, int budget) 23 { 24 int tx_pkts, rx_pkts; 25 struct hinic3_irq *irq_cfg = 26 container_of(napi, struct hinic3_irq, napi); 27 struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); 28 29 rx_pkts = hinic3_rx_poll(irq_cfg->rxq, budget); 30 31 tx_pkts = hinic3_tx_poll(irq_cfg->txq, budget); 32 if (tx_pkts >= budget || rx_pkts >= budget) 33 return budget; 34 35 napi_complete(napi); 36 37 hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, 38 HINIC3_MSIX_ENABLE); 39 40 return max(tx_pkts, rx_pkts); 41 } 42 -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
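All of these warnings have the same shape: a function with external linkage is defined without any prototype in scope, so W=1 builds flag it. The usual remedies are to declare the function in a header that the .c file includes, or to make it static when it is only used locally. A stand-alone illustration follows; it is generic code, not the hinic3 driver, and a real fix would put the declaration in a driver header (e.g. a hypothetical hinic3_irq.h).

/* missing_prototype_demo.c - build with: gcc -W -Wmissing-prototypes -c demo.c
 * Generic illustration of the warning class reported above. */

/* Remedy 1: a prototype visible at the definition (normally supplied by a
 * shared header) silences -Wmissing-prototypes for an exported function. */
int add_one(int x);

int add_one(int x)
{
        return x + 1;
}

/* Remedy 2: functions used only within one translation unit should simply
 * be static; internal linkage needs no prototype in any header. */
static int double_it(int x)
{
        return x * 2;
}

int use_helper(int x);

int use_helper(int x)
{
        return double_it(x);
}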
[PATCH OLK-6.6 v4 0/2] mm/mem_sampling: fix some mem sampling issues
by Ze Zuo 23 Jun '25

This patch set fixes issues in mem_sampling:

- Prevent mem_sampling from being enabled if SPE initialization fails.
- Fix inaccurate sampling during NUMA balancing and DAMON.

These changes improve memory sampling accuracy and stability on ARM systems.

Changes since v3:
- Fix bugzilla issue.

Ze Zuo (2):
  mm/mem_sampling: Prevent mem_sampling from being enabled if SPE init failed
  mm/mem_sampling: Fix inaccurate sampling for NUMA balancing and DAMON

 drivers/arm/mm_monitor/mm_spe.c | 6 +++---
 include/linux/mem_sampling.h    | 7 ++++---
 mm/mem_sampling.c               | 11 +++++++++--
 3 files changed, 16 insertions(+), 8 deletions(-)

--
2.25.1
[PATCH OLK-6.6 v2] mm/mem_sampling: add trace event for spe based damon record
by Ze Zuo 23 Jun '25

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ICH7AS
CVE: NA

--------------------------------

This patch adds a new DAMON access tracking mechanism using ARM
Statistical Profiling Extension (SPE). By parsing memory access samples
from SPE, DAMON can infer access patterns with low overhead and higher
precision on supported ARM platforms.

Signed-off-by: Ze Zuo <zuoze1(a)huawei.com>
---
changes since v1:
-- fix bugzilla issues.

 include/trace/events/kmem.h | 21 +++++++++++++++++++++
 mm/mem_sampling.c           |  1 +
 2 files changed, 22 insertions(+)

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 28b9679c474d..3e78e6bd6e18 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -523,6 +523,27 @@ TRACE_EVENT(mm_mem_sampling_access_record,
 		__entry->cpuid, __entry->pid)
 );
 #endif /* CONFIG_NUMABALANCING_MEM_SAMPLING */
+
+#ifdef CONFIG_DAMON_MEM_SAMPLING
+TRACE_EVENT(mm_mem_sampling_damon_record,
+
+	TP_PROTO(u64 vaddr, int pid),
+
+	TP_ARGS(vaddr, pid),
+
+	TP_STRUCT__entry(
+		__field(u64, vaddr)
+		__field(int, pid)
+	),
+
+	TP_fast_assign(
+		__entry->vaddr = vaddr;
+		__entry->pid = pid;
+	),
+
+	TP_printk("vaddr=%llx pid=%d", __entry->vaddr, __entry->pid)
+);
+#endif /* CONFIG_DAMON_MEM_SAMPLING */
 #endif /* _TRACE_KMEM_H */

 /* This part must be outside protection */
diff --git a/mm/mem_sampling.c b/mm/mem_sampling.c
index 9ee68e15d1f6..8d79e83e64f0 100644
--- a/mm/mem_sampling.c
+++ b/mm/mem_sampling.c
@@ -316,6 +316,7 @@ static void damon_mem_sampling_record_cb(struct mem_sampling_record *record)
 	mmput(mm);

 	domon_record.vaddr = record->virt_addr;
+	trace_mm_mem_sampling_damon_record(record->virt_addr, (pid_t)record->context_id);

 	/* only the proc under monitor now has damon_fifo */
 	if (damon_fifo) {
--
2.25.1
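Once a kernel carries this change, the event can be consumed like any other tracepoint. A minimal sketch, assuming tracefs is mounted at /sys/kernel/debug/tracing and that the event lands in the "kmem" group as its placement in include/trace/events/kmem.h suggests (both are assumptions, not verified against a running system):

#include <stdio.h>

#define TRACEFS "/sys/kernel/debug/tracing"

int main(void)
{
        char line[512];
        FILE *f;

        /* Enable the new tracepoint (requires root and CONFIG_DAMON_MEM_SAMPLING). */
        f = fopen(TRACEFS "/events/kmem/mm_mem_sampling_damon_record/enable", "w");
        if (!f) {
                perror("enable");
                return 1;
        }
        fputs("1", f);
        fclose(f);

        /* trace_pipe blocks until samples arrive; print the first few records. */
        f = fopen(TRACEFS "/trace_pipe", "r");
        if (!f) {
                perror("trace_pipe");
                return 1;
        }
        for (int i = 0; i < 5 && fgets(line, sizeof(line), f); i++)
                fputs(line, stdout);
        fclose(f);
        return 0;
}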
[PATCH OLK-6.6 v3 0/2] mm/mem_sampling: fix some mem sampling issues
by Ze Zuo 23 Jun '25

This patch set fixes issues in mem_sampling:

- Prevent mem_sampling from being enabled if SPE initialization fails.
- Fix inaccurate sampling during NUMA balancing and DAMON.

These changes improve memory sampling accuracy and stability on ARM systems.

Changes since v2:
- Remove the feature patch.
- Fix bugzilla number.

Ze Zuo (2):
  mm/mem_sampling: Prevent mem_sampling from being enabled if SPE init failed
  mm/mem_sampling: Fix inaccurate sampling for NUMA balancing and DAMON

 drivers/arm/mm_monitor/mm_spe.c | 6 +++---
 include/linux/mem_sampling.h    | 7 ++++---
 mm/mem_sampling.c               | 11 +++++++++--
 3 files changed, 16 insertions(+), 8 deletions(-)

--
2.25.1
[PATCH] mm/mem_sampling: add trace event for spe based damon record
by Ze Zuo 23 Jun '25

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/ICH7AS
CVE: NA

--------------------------------

This patch adds a new DAMON access tracking mechanism using ARM
Statistical Profiling Extension (SPE). By parsing memory access samples
from SPE, DAMON can infer access patterns with low overhead and higher
precision on supported ARM platforms.

Signed-off-by: Ze Zuo <zuoze1(a)huawei.com>
---
 include/trace/events/kmem.h | 21 +++++++++++++++++++++
 mm/mem_sampling.c           |  1 +
 2 files changed, 22 insertions(+)

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 28b9679c474d..3e78e6bd6e18 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -523,6 +523,27 @@ TRACE_EVENT(mm_mem_sampling_access_record,
 		__entry->cpuid, __entry->pid)
 );
 #endif /* CONFIG_NUMABALANCING_MEM_SAMPLING */
+
+#ifdef CONFIG_DAMON_MEM_SAMPLING
+TRACE_EVENT(mm_mem_sampling_damon_record,
+
+	TP_PROTO(u64 vaddr, int pid),
+
+	TP_ARGS(vaddr, pid),
+
+	TP_STRUCT__entry(
+		__field(u64, vaddr)
+		__field(int, pid)
+	),
+
+	TP_fast_assign(
+		__entry->vaddr = vaddr;
+		__entry->pid = pid;
+	),
+
+	TP_printk("vaddr=%llx pid=%d", __entry->vaddr, __entry->pid)
+);
+#endif /* CONFIG_DAMON_MEM_SAMPLING */
 #endif /* _TRACE_KMEM_H */

 /* This part must be outside protection */
diff --git a/mm/mem_sampling.c b/mm/mem_sampling.c
index 9ee68e15d1f6..8d79e83e64f0 100644
--- a/mm/mem_sampling.c
+++ b/mm/mem_sampling.c
@@ -316,6 +316,7 @@ static void damon_mem_sampling_record_cb(struct mem_sampling_record *record)
 	mmput(mm);

 	domon_record.vaddr = record->virt_addr;
+	trace_mm_mem_sampling_damon_record(record->virt_addr, (pid_t)record->context_id);

 	/* only the proc under monitor now has damon_fifo */
 	if (damon_fifo) {
--
2.25.1
[PATCH OLK-6.6] drm/radeon: fix uninitialized size issue in radeon_vce_cs_parse()
by Zicheng Qu 23 Jun '25

From: Nikita Zhandarovich <n.zhandarovich(a)fintech.ru>

stable inclusion
from stable-v6.6.85
commit 3ce08215cad55c10a6eeeb33d3583b6cfffe3ab8
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IBYOFP
CVE: CVE-2025-21996

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…

--------------------------------

commit dd8689b52a24807c2d5ce0a17cb26dc87f75235c upstream.

On the off chance that command stream passed from userspace via ioctl()
call to radeon_vce_cs_parse() is weirdly crafted and first command to
execute is to encode (case 0x03000001), the function in question will
attempt to call radeon_vce_cs_reloc() with size argument that has not
been properly initialized. Specifically, 'size' will point to 'tmp'
variable before the latter had a chance to be assigned any value.

Play it safe and init 'tmp' with 0, thus ensuring that
radeon_vce_cs_reloc() will catch an early error in cases like these.

Found by Linux Verification Center (linuxtesting.org) with static
analysis tool SVACE.

Fixes: 2fc5703abda2 ("drm/radeon: check VCE relocation buffer range v3")
Signed-off-by: Nikita Zhandarovich <n.zhandarovich(a)fintech.ru>
Signed-off-by: Alex Deucher <alexander.deucher(a)amd.com>
(cherry picked from commit 2d52de55f9ee7aaee0e09ac443f77855989c6b68)
Cc: stable(a)vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Zicheng Qu <quzicheng(a)huawei.com>
---
 drivers/gpu/drm/radeon/radeon_vce.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index d84b780e318c..2eb1636c560e 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -561,7 +561,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 {
 	int session_idx = -1;
 	bool destroyed = false, created = false, allocated = false;
-	uint32_t tmp, handle = 0;
+	uint32_t tmp = 0, handle = 0;
 	uint32_t *size = &tmp;
 	int i, r = 0;

--
2.34.1
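The underlying hazard is generic: a callee dereferences a pointer to a caller's local that may never have been assigned, which is undefined behaviour. A small stand-alone illustration of why initializing the local restores deterministic behaviour (illustrative code, not the radeon driver itself):

#include <stdio.h>

/* 'size' aliases a caller local; the callee reads it before the caller's
 * normal control flow would have assigned it. */
static int within_limit(const unsigned int *size)
{
        /* Reading an uninitialized *size here would be undefined behaviour;
         * a deterministic 0 keeps the check well-defined. */
        return *size <= 4096;
}

int main(void)
{
        unsigned int tmp = 0;      /* the one-character fix: initialize   */
        unsigned int *size = &tmp; /* the callee now sees a defined value */

        printf("limit check: %d\n", within_limit(size));
        return 0;
}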
[PATCH OLK-5.10] cachefiles: Fix the potential ABBA deadlock issue
by Zizhi Wo 23 Jun '25

From: Zizhi Wo <wozizhi(a)huaweicloud.com> hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ICFXTV CVE: NA -------------------------------- The current calling process may trigger a potential ABBA deadlock. The calling stack is as follows: do_mkdirat user_path_create filename_create mnt_want_write --lock a inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT) --lock b vfs_write cachefiles_daemon_write cachefiles_daemon_cull cachefiles_cull cachefiles_check_active inode_lock_nested(d_inode(dir), I_MUTEX_PARENT) --lock b cachefiles_remove_object_xattr mnt_want_write --lock a vfs_removexattr This is because there is a problem with the lock order. mnt_want_write() should be called first, and then inode_lock_nested(). Fix the lock order. And delete the redundant code in cachefiles_check_old_object_xattr(), because this part of the process cannot be triggered. Fixes: 2a0beff2d223 ("cachefiles: Fix non-taking of sb_writers around set/removexattr") Signed-off-by: Zizhi Wo <wozizhi(a)huaweicloud.com> --- fs/cachefiles/namei.c | 9 ++++++++- fs/cachefiles/xattr.c | 20 ++++---------------- 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 6eeef666c609..6fc8c504ae1b 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -990,9 +990,15 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, _enter(",%pd/,%s", dir, filename); + ret = mnt_want_write(cache->mnt); + if (ret < 0) + return ret; + victim = cachefiles_check_active(cache, dir, filename); - if (IS_ERR(victim)) + if (IS_ERR(victim)) { + mnt_drop_write(cache->mnt); return PTR_ERR(victim); + } _debug("victim -> %p %s", victim, d_backing_inode(victim) ? "positive" : "negative"); @@ -1003,6 +1009,7 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, _debug("victim is cullable"); ret = cachefiles_remove_object_xattr(cache, victim); + mnt_drop_write(cache->mnt); if (ret < 0) goto error_unlock; diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c index bac55fc7359e..646a8002d0cf 100644 --- a/fs/cachefiles/xattr.c +++ b/fs/cachefiles/xattr.c @@ -243,7 +243,6 @@ int cachefiles_check_old_object_xattr(struct cachefiles_object *object, struct cachefiles_xattr *auxdata) { struct cachefiles_xattr *auxbuf; - struct cachefiles_cache *cache; unsigned int len = sizeof(struct cachefiles_xattr) + 512; struct dentry *dentry = object->dentry; int ret; @@ -301,17 +300,10 @@ int cachefiles_check_old_object_xattr(struct cachefiles_object *object, BUG(); } - cache = container_of(object->fscache.cache, - struct cachefiles_cache, cache); - /* update the current label */ - ret = mnt_want_write(cache->mnt); - if (ret == 0) { - ret = vfs_setxattr(dentry, cachefiles_xattr_cache, - &auxdata->type, auxdata->len, - XATTR_REPLACE); - mnt_drop_write(cache->mnt); - } + ret = vfs_setxattr(dentry, cachefiles_xattr_cache, + &auxdata->type, auxdata->len, + XATTR_REPLACE); if (ret < 0) { cachefiles_io_error_obj(object, "Can't update xattr on %lu" @@ -393,11 +385,7 @@ int cachefiles_remove_object_xattr(struct cachefiles_cache *cache, { int ret; - ret = mnt_want_write(cache->mnt); - if (ret == 0) { - ret = vfs_removexattr(dentry, cachefiles_xattr_cache); - mnt_drop_write(cache->mnt); - } + ret = vfs_removexattr(dentry, cachefiles_xattr_cache); if (ret < 0) { if (ret == -ENOENT || ret == -ENODATA) ret = 0; -- 2.39.2
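The rule this fix restores is a general one: sb_writers, taken by mnt_want_write(), must be acquired before the directory inode lock on every path, because the mkdir path already takes them in that order. A compressed user-space illustration of the corrected ordering, with pthread mutexes standing in for the kernel locks (they are not equivalent, this only shows the ordering discipline):

#include <pthread.h>
#include <stdio.h>

/* lock_a plays sb_writers (mnt_want_write), lock_b the directory inode lock
 * (inode_lock_nested). Every path must agree on the order A -> B; before the
 * fix, the cull path took B first and grabbed A inside the xattr helper,
 * the mirror image of the mkdir path - a classic ABBA. */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void cull_path_after_fix(void)
{
        pthread_mutex_lock(&lock_a);   /* mnt_want_write() first ...      */
        pthread_mutex_lock(&lock_b);   /* ... then inode_lock_nested()    */
        /* check the victim and remove its xattr here */
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
}

int main(void)
{
        cull_path_after_fix();
        puts("lock order sb_writers -> inode respected");
        return 0;
}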