[PATCH OLK-5.10] pinmux: Use sequential access to access desc->pinmux data

From: Mukesh Ojha <quic_mojha@quicinc.com> stable inclusion from stable-v6.6.66 commit 2da32aed4a97ca1d70fb8b77926f72f30ce5fb4b category: bugfix bugzilla: https://gitee.com/src-openeuler/kernel/issues/IBIFR8 CVE: CVE-2024-47141 Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=... -------------------------------- [ Upstream commit 5a3e85c3c397c781393ea5fb2f45b1f60f8a4e6e ] When two clients of the same gpio call pinctrl_select_state() for the same functionality, we are seeing a NULL pointer issue while accessing desc->mux_owner. Let's say two processes A and B are executing in pin_request() for the same pin: process A updates desc->mux_usecount but has not yet updated desc->mux_owner, while process B sees the desc->mux_usecount which got updated by process A's path and further executes strcmp; while accessing desc->mux_owner it crashes with a NULL pointer. Serialize the access to mux-related settings with a mutex lock. cpu0 (process A) cpu1(process B) pinctrl_select_state() { pinctrl_select_state() { pin_request() { pin_request() { ... .... 
} else { desc->mux_usecount++; desc->mux_usecount && strcmp(desc->mux_owner, owner)) { if (desc->mux_usecount > 1) return 0; desc->mux_owner = owner; } } Signed-off-by: Mukesh Ojha <quic_mojha@quicinc.com> Link: https://lore.kernel.org/20241014192930.1539673-1-quic_mojha@quicinc.com Signed-off-by: Linus Walleij <linus.walleij@linaro.org> Signed-off-by: Sasha Levin <sashal@kernel.org> Conflicts: drivers/pinctrl/pinmux.c [ Scope-based resource management is not merged yet (54da6a092431), so we lock the data-race region by hand ] Signed-off-by: Zhang Kunbo <zhangkunbo@huawei.com> --- drivers/pinctrl/core.c | 3 +++ drivers/pinctrl/core.h | 1 + drivers/pinctrl/pinmux.c | 49 ++++++++++++++++++++++++++++++++++------ 3 files changed, 46 insertions(+), 7 deletions(-) diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index b7a6bab4ac01..118c4975c777 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -220,6 +220,9 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev, /* Set owner */ pindesc->pctldev = pctldev; +#ifdef CONFIG_PINMUX + mutex_init(&pindesc->mux_lock); +#endif /* Copy basic pin info */ if (pin->name) { diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h index 840103c40c14..f5e4ca19c17b 100644 --- a/drivers/pinctrl/core.h +++ b/drivers/pinctrl/core.h @@ -167,6 +167,7 @@ struct pin_desc { const char *mux_owner; const struct pinctrl_setting_mux *mux_setting; const char *gpio_owner; + struct mutex mux_lock; #endif }; diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index bab888fe3f8e..53fe25e9e864 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -27,6 +27,7 @@ #include <linux/pinctrl/pinmux.h> #include "core.h" #include "pinmux.h" +#include <linux/mutex.h> int pinmux_check_ops(struct pinctrl_dev *pctldev) { @@ -84,15 +85,21 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin) { struct pin_desc *desc = pin_desc_get(pctldev, pin); const struct pinmux_ops 
*ops = pctldev->desc->pmxops; + bool can; /* Can't inspect pin, assume it can be used */ if (!desc || !ops) return true; - if (ops->strict && desc->mux_usecount) + mutex_lock(&desc->mux_lock); + if (ops->strict && desc->mux_usecount) { + mutex_unlock(&desc->mux_lock); return false; + } - return !(ops->strict && !!desc->gpio_owner); + can = !(ops->strict && !!desc->gpio_owner); + mutex_unlock(&desc->mux_lock); + return can; } /** @@ -123,11 +130,13 @@ static int pin_request(struct pinctrl_dev *pctldev, dev_dbg(pctldev->dev, "request pin %d (%s) for %s\n", pin, desc->name, owner); + mutex_lock(&desc->mux_lock); if ((!gpio_range || ops->strict) && desc->mux_usecount && strcmp(desc->mux_owner, owner)) { dev_err(pctldev->dev, "pin %s already requested by %s; cannot claim for %s\n", desc->name, desc->mux_owner, owner); + mutex_unlock(&desc->mux_lock); goto out; } @@ -135,6 +144,7 @@ static int pin_request(struct pinctrl_dev *pctldev, dev_err(pctldev->dev, "pin %s already requested by %s; cannot claim for %s\n", desc->name, desc->gpio_owner, owner); + mutex_unlock(&desc->mux_lock); goto out; } @@ -142,11 +152,14 @@ static int pin_request(struct pinctrl_dev *pctldev, desc->gpio_owner = owner; } else { desc->mux_usecount++; - if (desc->mux_usecount > 1) + if (desc->mux_usecount > 1) { + mutex_unlock(&desc->mux_lock); return 0; + } desc->mux_owner = owner; } + mutex_unlock(&desc->mux_lock); /* Let each pin increase references to this module */ if (!try_module_get(pctldev->owner)) { @@ -176,6 +189,7 @@ static int pin_request(struct pinctrl_dev *pctldev, out_free_pin: if (status) { + mutex_lock(&desc->mux_lock); if (gpio_range) { desc->gpio_owner = NULL; } else { @@ -183,6 +197,7 @@ static int pin_request(struct pinctrl_dev *pctldev, if (!desc->mux_usecount) desc->mux_owner = NULL; } + mutex_unlock(&desc->mux_lock); } out: if (status) @@ -217,16 +232,22 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin, return NULL; } + mutex_lock(&desc->mux_lock); if 
(!gpio_range) { /* * A pin should not be freed more times than allocated. */ - if (WARN_ON(!desc->mux_usecount)) + if (WARN_ON(!desc->mux_usecount)) { + mutex_unlock(&desc->mux_lock); return NULL; + } desc->mux_usecount--; - if (desc->mux_usecount) + if (desc->mux_usecount) { + mutex_unlock(&desc->mux_lock); return NULL; + } } + mutex_unlock(&desc->mux_lock); /* * If there is no kind of request function for the pin we just assume @@ -237,6 +258,7 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin, else if (ops->free) ops->free(pctldev, pin); + mutex_lock(&desc->mux_lock); if (gpio_range) { owner = desc->gpio_owner; desc->gpio_owner = NULL; @@ -245,6 +267,7 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin, desc->mux_owner = NULL; desc->mux_setting = NULL; } + mutex_unlock(&desc->mux_lock); module_put(pctldev->owner); @@ -457,7 +480,9 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting) pins[i]); continue; } + mutex_lock(&desc->mux_lock); desc->mux_setting = &(setting->data.mux); + mutex_unlock(&desc->mux_lock); } ret = ops->set_mux(pctldev, setting->data.mux.func, @@ -471,8 +496,11 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting) err_set_mux: for (i = 0; i < num_pins; i++) { desc = pin_desc_get(pctldev, pins[i]); - if (desc) + if (desc) { + mutex_lock(&desc->mux_lock); desc->mux_setting = NULL; + mutex_unlock(&desc->mux_lock); + } } err_pin_request: /* On error release all taken pins */ @@ -491,6 +519,7 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting) unsigned num_pins = 0; int i; struct pin_desc *desc; + bool is_equal; if (pctlops->get_group_pins) ret = pctlops->get_group_pins(pctldev, setting->data.mux.group, @@ -516,7 +545,11 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting) pins[i]); continue; } - if (desc->mux_setting == &(setting->data.mux)) { + mutex_lock(&desc->mux_lock); + is_equal = (desc->mux_setting == &(setting->data.mux)); + 
mutex_unlock(&desc->mux_lock); + + if (is_equal) { pin_free(pctldev, pins[i], NULL); } else { const char *gname; @@ -608,6 +641,7 @@ static int pinmux_pins_show(struct seq_file *s, void *what) if (desc == NULL) continue; + mutex_lock(&desc->mux_lock); if (desc->mux_owner && !strcmp(desc->mux_owner, pinctrl_dev_get_name(pctldev))) is_hog = true; @@ -642,6 +676,7 @@ static int pinmux_pins_show(struct seq_file *s, void *what) desc->mux_setting->group)); else seq_putc(s, '\n'); + mutex_unlock(&desc->mux_lock); } mutex_unlock(&pctldev->mutex); -- 2.34.1

反馈: 您发送到kernel@openeuler.org的补丁/补丁集,已成功转换为PR! PR链接地址: https://gitee.com/openeuler/kernel/pulls/15046 邮件列表地址:https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/D... FeedBack: The patch(es) which you have sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/15046 Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/D...
participants (2)
-
patchwork bot
-
Zhang Kunbo