kernel@openeuler.org

June 2024

  • 84 participants
  • 1085 discussions
[PATCH OLK-5.10] cvm_tsi: enhance security for cvm tsi
by Shengjie Li 09 Jun '24

virtcca inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9Q85J

--------------------------------

enhance security for Confidential cVM tsi

Signed-off-by: Shengjie Li <lishengjie12(a)huawei.com>
---
 arch/arm64/include/asm/cvm_smc.h      | 22 +++----
 arch/arm64/include/uapi/asm/cvm_tsi.h | 11 ++--
 arch/arm64/kernel/cvm_tsi.c           | 89 ++++++++++++++-------------
 3 files changed, 64 insertions(+), 58 deletions(-)

diff --git a/arch/arm64/include/asm/cvm_smc.h b/arch/arm64/include/asm/cvm_smc.h
index c4d56333e3..91688bc169 100644
--- a/arch/arm64/include/asm/cvm_smc.h
+++ b/arch/arm64/include/asm/cvm_smc.h
@@ -124,30 +124,30 @@ static inline unsigned long tsi_measurement_read(struct cvm_measurement *cvm_mea
     return res.a0;
 }
 
-static inline unsigned long tsi_attestation_token_init(struct cvm_attestation_cmd *attest_cmd)
+static inline unsigned long tsi_attestation_token_init(unsigned char *challenge)
 {
     struct arm_smccc_res res;
-    unsigned char *challenge;
+    unsigned char *buf;
 
-    challenge = kmalloc(CHALLENGE_SIZE, GFP_KERNEL);
-    if (!challenge)
+    buf = kmalloc(CHALLENGE_SIZE, GFP_KERNEL);
+    if (!buf)
         return -ENOMEM;
-    memcpy(challenge, attest_cmd->challenge, CHALLENGE_SIZE);
+    memcpy(buf, challenge, CHALLENGE_SIZE);
 
-    arm_smccc_1_1_smc(SMC_TSI_ATTESTATION_TOKEN_INIT, virt_to_phys(challenge), &res);
-    kfree(challenge);
+    arm_smccc_1_1_smc(SMC_TSI_ATTESTATION_TOKEN_INIT, virt_to_phys(buf), &res);
+    kfree(buf);
 
     return res.a0;
 }
 
-static inline unsigned long tsi_attestation_token_continue(struct cvm_attestation_cmd *attest_cmd)
+static inline unsigned long tsi_attestation_token_continue(struct cvm_token_granule *token_granule)
 {
     struct arm_smccc_res res;
 
-    arm_smccc_1_1_smc(SMC_TSI_ATTESTATION_TOKEN_CONTINUE, virt_to_phys(attest_cmd->granule_ipa),
-                      attest_cmd->offset, attest_cmd->size, &res);
+    arm_smccc_1_1_smc(SMC_TSI_ATTESTATION_TOKEN_CONTINUE, virt_to_phys(token_granule->ipa),
+                      token_granule->offset, token_granule->size, &res);
 
-    attest_cmd->num_wr_bytes = res.a1;
+    token_granule->num_wr_bytes = res.a1;
 
     return res.a0;
 }
diff --git a/arch/arm64/include/uapi/asm/cvm_tsi.h b/arch/arm64/include/uapi/asm/cvm_tsi.h
index 43e17a0da0..1ed4386db2 100644
--- a/arch/arm64/include/uapi/asm/cvm_tsi.h
+++ b/arch/arm64/include/uapi/asm/cvm_tsi.h
@@ -55,10 +55,13 @@ struct cvm_measurement_extend {
 struct cvm_attestation_cmd {
     unsigned char challenge[CHALLENGE_SIZE]; /* input: challenge value */
-    unsigned long token_size; /* return: challenge value */
-    void *granule_head;
-    void *granule_ipa; /* IPA of the Granule to which the token will be written */
-    unsigned long granule_count;
+    unsigned long token_size; /* return: token size */
+};
+
+struct cvm_token_granule {
+    void *head;
+    void *ipa; /* IPA of the Granule to which the token will be written */
+    unsigned long count;
     unsigned long offset; /* Offset within Granule to start of buffer in bytes */
     unsigned long size; /* Size of buffer in bytes */
     unsigned long num_wr_bytes; /* Number of bytes written to buffer */
diff --git a/arch/arm64/kernel/cvm_tsi.c b/arch/arm64/kernel/cvm_tsi.c
index b48e5b17f4..252fe5352a 100644
--- a/arch/arm64/kernel/cvm_tsi.c
+++ b/arch/arm64/kernel/cvm_tsi.c
@@ -4,6 +4,7 @@
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
 #include <linux/preempt.h>
+#include <asm/cvm_guest.h>
 #include <asm/cvm_smc.h>
 #include <asm/cvm_tsi.h>
@@ -16,20 +17,19 @@ struct attestation_token {
 
 static struct attestation_token token;
 
+static DEFINE_SPINLOCK(lock);
+
 static long tmm_tsi_ioctl(struct file *file, unsigned int cmd,
                           unsigned long arg);
-static int tmm_tsi_release(struct inode *inode, struct file *file);
 static ssize_t tmm_token_read(struct file *file, char __user *user_buffer,
                               size_t size, loff_t *offset);
 static int tmm_get_tsi_version(struct cvm_tsi_version __user *arg);
-static int tmm_get_attestation_token(struct cvm_attestation_cmd __user *arg,
-                                     struct attestation_token *attest_token);
+static int tmm_get_attestation_token(struct cvm_attestation_cmd __user *arg);
 static int tmm_get_device_cert(struct cca_device_cert __user *arg);
 
 static const struct file_operations tmm_tsi_fops = {
     .owner = THIS_MODULE,
     .read = tmm_token_read,
-    .release = tmm_tsi_release,
     .unlocked_ioctl = tmm_tsi_ioctl
 };
@@ -44,6 +44,9 @@ static int __init tmm_tsi_init(void)
     unsigned long ver;
     int ret;
 
+    if (!is_cvm_world())
+        return -EIO;
+
     ver = tsi_get_version();
     if (ver == SMCCC_RET_NOT_SUPPORTED) {
         pr_err("tmm_tsi: SMC return not supported!\n");
@@ -56,6 +59,11 @@ static int __init tmm_tsi_init(void)
         return ret;
     }
 
+    /* Allocate a large memory */
+    token.buf = kmalloc(GRANULE_SIZE * MAX_TOKEN_GRANULE_PAGE, GFP_KERNEL);
+    if (!token.buf)
+        return -ENOMEM;
+
     pr_warn("tmm_tsi: module loaded (version %lu.%lu).\n",
             TSI_ABI_VERSION_GET_MAJOR(ver),
             TSI_ABI_VERSION_GET_MINOR(ver));
@@ -65,8 +73,10 @@ static int __init tmm_tsi_init(void)
 
 static void __exit tmm_tsi_exit(void)
 {
-    if (token.buf != NULL)
+    if (token.buf != NULL) {
+        memset(token.buf, 0, GRANULE_SIZE * MAX_TOKEN_GRANULE_PAGE);
         kfree(token.buf);
+    }
     misc_deregister(&ioctl_dev);
     pr_warn("tmm_tsi: module unloaded.\n");
 }
@@ -80,7 +90,7 @@ static long tmm_tsi_ioctl(struct file *file, unsigned int cmd, unsigned long arg
         ret = tmm_get_tsi_version((struct cvm_tsi_version *)arg);
         break;
     case TMM_GET_ATTESTATION_TOKEN:
-        ret = tmm_get_attestation_token((struct cvm_attestation_cmd *)arg, &token);
+        ret = tmm_get_attestation_token((struct cvm_attestation_cmd *)arg);
         break;
     case TMM_GET_DEVICE_CERT:
         ret = tmm_get_device_cert((struct cca_device_cert *)arg);
@@ -99,28 +109,25 @@ static ssize_t tmm_token_read(struct file *file, char __user *user_buffer,
     int ret;
     int to_copy;
 
-    if (*offset >= token.size)
+    spin_lock(&lock);
+    if (*offset >= token.size) {
+        spin_unlock(&lock);
         return 0;
+    }
 
     to_copy = min((int)size, (int)(token.size - *offset));
     ret = copy_to_user(user_buffer, token.buf + *offset, to_copy);
     if (ret) {
         pr_err("tmm_tsi: copy token to user failed (%d)!\n", ret);
+        spin_unlock(&lock);
         return -1;
     }
 
     *offset += to_copy;
+    spin_unlock(&lock);
 
     return to_copy;
 }
 
-static int tmm_tsi_release(struct inode *inode, struct file *file)
-{
-    if (token.buf != NULL) {
-        memset(token.buf, 0, GRANULE_SIZE * MAX_TOKEN_GRANULE_PAGE);
-        kfree(token.buf);
-    }
-    return 0;
-}
 
 static int tmm_get_tsi_version(struct cvm_tsi_version __user *arg)
 {
@@ -141,65 +148,61 @@ static int tmm_get_tsi_version(struct cvm_tsi_version __user *arg)
     return 0;
 }
 
-static int tmm_get_attestation_token(struct cvm_attestation_cmd __user *arg,
-                                     struct attestation_token *attest_token)
+static int tmm_get_attestation_token(struct cvm_attestation_cmd __user *arg)
 {
     unsigned long ret;
-    struct cvm_attestation_cmd cmd = {0};
+    struct cvm_token_granule token_granule = {0};
+    unsigned char challenge[CHALLENGE_SIZE];
 
-    ret = copy_from_user(&(cmd.challenge), &(arg->challenge), sizeof(cmd.challenge));
+    ret = copy_from_user(challenge, &(arg->challenge), CHALLENGE_SIZE);
     if (ret) {
         pr_err("tmm_tsi: copy data from user failed (%lu)!\n", ret);
         return -EFAULT;
     }
 
-    /* Allocate a large memory */
-    attest_token->buf = kmalloc(GRANULE_SIZE * MAX_TOKEN_GRANULE_PAGE, GFP_KERNEL);
-    if (!attest_token->buf)
-        return -ENOMEM;
-    cmd.granule_head = attest_token->buf;
-    cmd.granule_ipa = cmd.granule_head;
+    spin_lock(&lock);
+    token_granule.head = token.buf;
+    token_granule.ipa = token_granule.head;
 
-    /* preempt_disable(); */
-
-    ret = tsi_attestation_token_init(&cmd);
+    ret = tsi_attestation_token_init(challenge);
     if (ret) {
         pr_err("tmm_tsi: tsi call tsi_attestation_token_init failed (%lu)!\n", ret);
+        spin_unlock(&lock);
         return -EIO;
     }
 
     do { /* Retrieve one Granule of data per loop iteration */
-        cmd.granule_ipa = cmd.granule_head +
-                          (unsigned long)(cmd.granule_count * GRANULE_SIZE);
-        cmd.offset = 0;
+        token_granule.ipa = token_granule.head +
+                            (unsigned long)(token_granule.count * GRANULE_SIZE);
+        token_granule.offset = 0;
         do { /* Retrieve sub-Granule chunk of data per loop iteration */
-            cmd.size = GRANULE_SIZE - cmd.offset;
-            ret = tsi_attestation_token_continue(&cmd);
-            cmd.offset += cmd.num_wr_bytes;
-        } while (ret == TSI_INCOMPLETE && cmd.offset < GRANULE_SIZE);
+            token_granule.size = GRANULE_SIZE - token_granule.offset;
+            ret = tsi_attestation_token_continue(&token_granule);
+            token_granule.offset += token_granule.num_wr_bytes;
+        } while (ret == TSI_INCOMPLETE && token_granule.offset < GRANULE_SIZE);
 
-        cmd.granule_count += 1;
-        if (cmd.granule_count >= MAX_TOKEN_GRANULE_PAGE && ret == TSI_INCOMPLETE) {
+        token_granule.count += 1;
+        if (token_granule.count >= MAX_TOKEN_GRANULE_PAGE && ret == TSI_INCOMPLETE) {
            pr_err("tmm_tsi: macro MAX_TOKEN_GRANULE_PAGE (%d) is too small!\n",
                   MAX_TOKEN_GRANULE_PAGE);
+           spin_unlock(&lock);
           return -ENOMEM;
        }
     } while (ret == TSI_INCOMPLETE);
 
-    /* preempt_enable(); */
-
     /* Send to user space the total size of the token */
-    cmd.granule_count = cmd.granule_count - 1;
-    cmd.token_size = (unsigned long)(GRANULE_SIZE * cmd.granule_count) + cmd.offset;
-    attest_token->size = cmd.token_size;
+    token_granule.count = token_granule.count - 1;
+    token.size = (unsigned long)(GRANULE_SIZE * token_granule.count) + token_granule.offset;
 
-    ret = copy_to_user(&(arg->token_size), &(cmd.token_size), sizeof(cmd.token_size));
+    ret = copy_to_user(&(arg->token_size), &(token.size), sizeof(token.size));
     if (ret) {
         pr_err("tmm_tsi: copy data to user failed (%lu)!\n", ret);
+        spin_unlock(&lock);
         return -EFAULT;
     }
+    spin_unlock(&lock);
 
     return 0;
 }
--
2.37.2.windows.2
[PATCH openEuler-22.03-LTS-SP1] net: core: reject skb_copy(_expand) for fraglist GSO skbs
by Ziyang Xuan 09 Jun '24

From: Felix Fietkau <nbd(a)nbd.name>

stable inclusion
from stable-v5.10.217
commit faa83a7797f06cefed86731ba4baa3b4dfdc06c1
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9U9P7
CVE: CVE-2024-36929

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…

--------------------------------

[ Upstream commit d091e579b864fa790dd6a0cd537a22c383126681 ]

SKB_GSO_FRAGLIST skbs must not be linearized, otherwise they become
invalid. Return NULL if such an skb is passed to skb_copy or
skb_copy_expand, in order to prevent a crash on a potential later
call to skb_gso_segment.

Fixes: 3a1296a38d0c ("net: Support GRO/GSO fraglist chaining.")
Signed-off-by: Felix Fietkau <nbd(a)nbd.name>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Ziyang Xuan <william.xuanziyang(a)huawei.com>
---
 net/core/skbuff.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5197b286ef18d..120290a9d2078 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1534,11 +1534,17 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
 
 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 {
-    int headerlen = skb_headroom(skb);
-    unsigned int size = skb_end_offset(skb) + skb->data_len;
-    struct sk_buff *n = __alloc_skb(size, gfp_mask,
-                                    skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+    struct sk_buff *n;
+    unsigned int size;
+    int headerlen;
+
+    if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+        return NULL;
+
+    headerlen = skb_headroom(skb);
+    size = skb_end_offset(skb) + skb->data_len;
+    n = __alloc_skb(size, gfp_mask,
+                    skb_alloc_rx_flag(skb), NUMA_NO_NODE);
     if (!n)
         return NULL;
@@ -1799,12 +1805,17 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
     /*
      *    Allocate the copy buffer
      */
-    struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
-                                    gfp_mask, skb_alloc_rx_flag(skb),
-                                    NUMA_NO_NODE);
-    int oldheadroom = skb_headroom(skb);
     int head_copy_len, head_copy_off;
+    struct sk_buff *n;
+    int oldheadroom;
+
+    if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+        return NULL;
+
+    oldheadroom = skb_headroom(skb);
+    n = __alloc_skb(newheadroom + skb->len + newtailroom,
+                    gfp_mask, skb_alloc_rx_flag(skb),
+                    NUMA_NO_NODE);
     if (!n)
         return NULL;
--
2.25.1
[PATCH OLK-5.10] net: core: reject skb_copy(_expand) for fraglist GSO skbs
by Ziyang Xuan 09 Jun '24

From: Felix Fietkau <nbd(a)nbd.name>

stable inclusion
from stable-v5.10.217
commit faa83a7797f06cefed86731ba4baa3b4dfdc06c1
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9U9P7
CVE: CVE-2024-36929

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…

--------------------------------

[ Upstream commit d091e579b864fa790dd6a0cd537a22c383126681 ]

SKB_GSO_FRAGLIST skbs must not be linearized, otherwise they become
invalid. Return NULL if such an skb is passed to skb_copy or
skb_copy_expand, in order to prevent a crash on a potential later
call to skb_gso_segment.

Fixes: 3a1296a38d0c ("net: Support GRO/GSO fraglist chaining.")
Signed-off-by: Felix Fietkau <nbd(a)nbd.name>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Ziyang Xuan <william.xuanziyang(a)huawei.com>
---
 net/core/skbuff.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 779a860e00731..de0229b8a920c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1540,11 +1540,17 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
 
 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 {
-    int headerlen = skb_headroom(skb);
-    unsigned int size = skb_end_offset(skb) + skb->data_len;
-    struct sk_buff *n = __alloc_skb(size, gfp_mask,
-                                    skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+    struct sk_buff *n;
+    unsigned int size;
+    int headerlen;
+
+    if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+        return NULL;
+
+    headerlen = skb_headroom(skb);
+    size = skb_end_offset(skb) + skb->data_len;
+    n = __alloc_skb(size, gfp_mask,
+                    skb_alloc_rx_flag(skb), NUMA_NO_NODE);
     if (!n)
         return NULL;
@@ -1856,12 +1862,17 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
     /*
      *    Allocate the copy buffer
      */
-    struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
-                                    gfp_mask, skb_alloc_rx_flag(skb),
-                                    NUMA_NO_NODE);
-    int oldheadroom = skb_headroom(skb);
     int head_copy_len, head_copy_off;
+    struct sk_buff *n;
+    int oldheadroom;
+
+    if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+        return NULL;
+
+    oldheadroom = skb_headroom(skb);
+    n = __alloc_skb(newheadroom + skb->len + newtailroom,
+                    gfp_mask, skb_alloc_rx_flag(skb),
+                    NUMA_NO_NODE);
     if (!n)
         return NULL;
--
2.25.1
[PATCH OLK-6.6] net: core: reject skb_copy(_expand) for fraglist GSO skbs
by Ziyang Xuan 09 Jun '24

From: Felix Fietkau <nbd(a)nbd.name>

stable inclusion
from stable-v6.6.31
commit cfe34d86ef9765c388f145039006bb79b6c81ac6
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9U9P7
CVE: CVE-2024-36929

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…

--------------------------------

[ Upstream commit d091e579b864fa790dd6a0cd537a22c383126681 ]

SKB_GSO_FRAGLIST skbs must not be linearized, otherwise they become
invalid. Return NULL if such an skb is passed to skb_copy or
skb_copy_expand, in order to prevent a crash on a potential later
call to skb_gso_segment.

Fixes: 3a1296a38d0c ("net: Support GRO/GSO fraglist chaining.")
Signed-off-by: Felix Fietkau <nbd(a)nbd.name>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Ziyang Xuan <william.xuanziyang(a)huawei.com>
---
 net/core/skbuff.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 60876262b3fb3..f0a9ef1aeaa29 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1971,11 +1971,17 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
 
 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 {
-    int headerlen = skb_headroom(skb);
-    unsigned int size = skb_end_offset(skb) + skb->data_len;
-    struct sk_buff *n = __alloc_skb(size, gfp_mask,
-                                    skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+    struct sk_buff *n;
+    unsigned int size;
+    int headerlen;
+
+    if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+        return NULL;
+
+    headerlen = skb_headroom(skb);
+    size = skb_end_offset(skb) + skb->data_len;
+    n = __alloc_skb(size, gfp_mask,
+                    skb_alloc_rx_flag(skb), NUMA_NO_NODE);
     if (!n)
         return NULL;
@@ -2303,12 +2309,17 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
     /*
      *    Allocate the copy buffer
      */
-    struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
-                                    gfp_mask, skb_alloc_rx_flag(skb),
-                                    NUMA_NO_NODE);
-    int oldheadroom = skb_headroom(skb);
     int head_copy_len, head_copy_off;
+    struct sk_buff *n;
+    int oldheadroom;
+
+    if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+        return NULL;
+
+    oldheadroom = skb_headroom(skb);
+    n = __alloc_skb(newheadroom + skb->len + newtailroom,
+                    gfp_mask, skb_alloc_rx_flag(skb),
+                    NUMA_NO_NODE);
     if (!n)
         return NULL;
--
2.25.1
[openeuler:OLK-5.10] BUILD SUCCESS 9e1339f3705f9326a4594db2783ca5d7522d1a9c
by kernel test robot 09 Jun '24

tree/branch: https://gitee.com/openeuler/kernel.git OLK-5.10
branch HEAD: 9e1339f3705f9326a4594db2783ca5d7522d1a9c  !8684 tcp: properly terminate timers for kernel sockets

Warning ids grouped by kconfigs:

clang_recent_errors
`-- x86_64-allnoconfig
    |-- drivers-arm-spe-spe.c:linux-perf-arm_pmu.h-is-included-more-than-once.
    |-- drivers-net-ethernet-yunsilicon-xsc-net-main.c:common-qp.h-is-included-more-than-once.
    |-- drivers-ub-urma-ubcore-ubcore_cdev_file.c:linux-version.h-not-needed.
    |-- drivers-ub-urma-ubcore-ubcore_device.c:linux-version.h-not-needed.
    |-- drivers-ub-urma-ubcore-ubcore_genl.c:linux-version.h-not-needed.
    |-- drivers-ub-urma-ubcore-ubcore_genl_admin.c:linux-version.h-not-needed.
    |-- drivers-ub-urma-ubcore-ubcore_uvs_cmd.c:ubcore_device.h-is-included-more-than-once.
    `-- drivers-ub-urma-uburma-uburma_mmap.c:linux-version.h-not-needed.

elapsed time: 1237m
configs tested: 35
configs skipped: 131

The following configs have been built successfully.
More configs may be tested in the coming days.

tested configs:
    arm64    allmodconfig                        clang
    arm64    allnoconfig                         gcc
    arm64    defconfig                           gcc
    arm64    randconfig-001-20240609             gcc
    arm64    randconfig-002-20240609             clang
    arm64    randconfig-003-20240609             gcc
    arm64    randconfig-004-20240609             gcc
    x86_64   allnoconfig                         clang
    x86_64   allyesconfig                        clang
    x86_64   buildonly-randconfig-001-20240609   clang
    x86_64   buildonly-randconfig-002-20240609   clang
    x86_64   buildonly-randconfig-003-20240609   clang
    x86_64   buildonly-randconfig-004-20240609   gcc
    x86_64   buildonly-randconfig-005-20240609   clang
    x86_64   buildonly-randconfig-006-20240609   gcc
    x86_64   defconfig                           gcc
    x86_64   randconfig-001-20240609             gcc
    x86_64   randconfig-002-20240609             clang
    x86_64   randconfig-003-20240609             gcc
    x86_64   randconfig-004-20240609             gcc
    x86_64   randconfig-005-20240609             clang
    x86_64   randconfig-006-20240609             clang
    x86_64   randconfig-011-20240609             gcc
    x86_64   randconfig-012-20240609             gcc
    x86_64   randconfig-013-20240609             clang
    x86_64   randconfig-014-20240609             gcc
    x86_64   randconfig-015-20240609             clang
    x86_64   randconfig-016-20240609             gcc
    x86_64   randconfig-071-20240609             clang
    x86_64   randconfig-072-20240609             clang
    x86_64   randconfig-073-20240609             gcc
    x86_64   randconfig-074-20240609             clang
    x86_64   randconfig-075-20240609             gcc
    x86_64   randconfig-076-20240609             gcc
    x86_64   rhel-8.3-rust                       clang

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
[PATCH OLK-6.6 V1] drm/vmwgfx: Fix invalid reads in fence signaled events
by Cheng Yu 09 Jun '24

From: Zack Rusin <zack.rusin(a)broadcom.com>

mainline inclusion
from mainline-v6.9-rc7
commit a37ef7613c00f2d72c8fc08bd83fb6cc76926c8c
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9UNUO
CVE: CVE-2024-36960

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

--------------------------------

Correctly set the length of the drm_event to the size of the structure
that's actually used.

The length of the drm_event was set to the parent structure instead of
to the drm_vmw_event_fence which is supposed to be read. drm_read uses
the length parameter to copy the event to the user space thus resulting
in oob reads.

Signed-off-by: Zack Rusin <zack.rusin(a)broadcom.com>
Fixes: 8b7de6aa8468 ("vmwgfx: Rework fence event action")
Reported-by: zdi-disclosures(a)trendmicro.com # ZDI-CAN-23566
Cc: David Airlie <airlied(a)gmail.com>
CC: Daniel Vetter <daniel(a)ffwll.ch>
Cc: Zack Rusin <zack.rusin(a)broadcom.com>
Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list(a)broadcom.com>
Cc: dri-devel(a)lists.freedesktop.org
Cc: linux-kernel(a)vger.kernel.org
Cc: <stable(a)vger.kernel.org> # v3.4+
Reviewed-by: Maaz Mombasawala <maaz.mombasawala(a)broadcom.com>
Reviewed-by: Martin Krastev <martin.krastev(a)broadcom.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425192748.1761522-1-zack…
Signed-off-by: Cheng Yu <serein.chengyu(a)huawei.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 2a0cda324703..5efc6a766f64 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -991,7 +991,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
     }
 
     event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
-    event->event.base.length = sizeof(*event);
+    event->event.base.length = sizeof(event->event);
     event->event.user_data = user_data;
 
     ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
--
2.25.1
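As a side note on the oob read described above, the following stand-alone sketch (illustrative stand-in types only, not the vmwgfx driver's real structures) shows why taking sizeof of the wrapper struct over-states the event length that the read path later trusts:

#include <stdio.h>

/* Stand-in types: "bookkeeping" plays the role of the kernel-only
 * pending-event header, "user_event" the fence event that is actually
 * copied to user space.
 */
struct bookkeeping { void *link; void *file_priv; };
struct user_event { unsigned int type; unsigned int length; unsigned long long user_data; };

struct wrapper {
    struct bookkeeping base;  /* kernel-side state, never copied out */
    struct user_event event;  /* what the read() path copies to user space */
};

int main(void)
{
    struct wrapper w;
    /* Advertising sizeof(w) instead of sizeof(w.event) makes the copy run
     * past the embedded event, which is the out-of-bounds read the patch
     * removes by switching to sizeof(event->event).
     */
    printf("event size: %zu, wrapper size: %zu\n", sizeof(w.event), sizeof(w));
    return 0;
}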
[PATCH openEuler-22.03-LTS-SP1 V1] drm/vmwgfx: Fix invalid reads in fence signaled events
by Cheng Yu 09 Jun '24

From: Zack Rusin <zack.rusin(a)broadcom.com>

mainline inclusion
from mainline-v6.9-rc7
commit a37ef7613c00f2d72c8fc08bd83fb6cc76926c8c
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9UNUO
CVE: CVE-2024-36960

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

--------------------------------

Correctly set the length of the drm_event to the size of the structure
that's actually used.

The length of the drm_event was set to the parent structure instead of
to the drm_vmw_event_fence which is supposed to be read. drm_read uses
the length parameter to copy the event to the user space thus resulting
in oob reads.

Signed-off-by: Zack Rusin <zack.rusin(a)broadcom.com>
Fixes: 8b7de6aa8468 ("vmwgfx: Rework fence event action")
Reported-by: zdi-disclosures(a)trendmicro.com # ZDI-CAN-23566
Cc: David Airlie <airlied(a)gmail.com>
CC: Daniel Vetter <daniel(a)ffwll.ch>
Cc: Zack Rusin <zack.rusin(a)broadcom.com>
Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list(a)broadcom.com>
Cc: dri-devel(a)lists.freedesktop.org
Cc: linux-kernel(a)vger.kernel.org
Cc: <stable(a)vger.kernel.org> # v3.4+
Reviewed-by: Maaz Mombasawala <maaz.mombasawala(a)broadcom.com>
Reviewed-by: Martin Krastev <martin.krastev(a)broadcom.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425192748.1761522-1-zack…
Signed-off-by: Cheng Yu <serein.chengyu(a)huawei.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 8bc41ec97d71..6bacdb7583df 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1066,7 +1066,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
     }
 
     event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
-    event->event.base.length = sizeof(*event);
+    event->event.base.length = sizeof(event->event);
     event->event.user_data = user_data;
 
     ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
--
2.25.1
[PATCH openEuler-1.0-LTS V1] drm/vmwgfx: Fix invalid reads in fence signaled events
by Cheng Yu 09 Jun '24

From: Zack Rusin <zack.rusin(a)broadcom.com>

mainline inclusion
from mainline-v6.9-rc7
commit a37ef7613c00f2d72c8fc08bd83fb6cc76926c8c
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9UNUO
CVE: CVE-2024-36960

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

--------------------------------

Correctly set the length of the drm_event to the size of the structure
that's actually used.

The length of the drm_event was set to the parent structure instead of
to the drm_vmw_event_fence which is supposed to be read. drm_read uses
the length parameter to copy the event to the user space thus resulting
in oob reads.

Signed-off-by: Zack Rusin <zack.rusin(a)broadcom.com>
Fixes: 8b7de6aa8468 ("vmwgfx: Rework fence event action")
Reported-by: zdi-disclosures(a)trendmicro.com # ZDI-CAN-23566
Cc: David Airlie <airlied(a)gmail.com>
CC: Daniel Vetter <daniel(a)ffwll.ch>
Cc: Zack Rusin <zack.rusin(a)broadcom.com>
Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list(a)broadcom.com>
Cc: dri-devel(a)lists.freedesktop.org
Cc: linux-kernel(a)vger.kernel.org
Cc: <stable(a)vger.kernel.org> # v3.4+
Reviewed-by: Maaz Mombasawala <maaz.mombasawala(a)broadcom.com>
Reviewed-by: Martin Krastev <martin.krastev(a)broadcom.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425192748.1761522-1-zack…
Signed-off-by: Cheng Yu <serein.chengyu(a)huawei.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 72a75316d472..e1b4f9612f5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1064,7 +1064,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
     }
 
     event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
-    event->event.base.length = sizeof(*event);
+    event->event.base.length = sizeof(event->event);
     event->event.user_data = user_data;
 
     ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
--
2.25.1
[PATCH OLK-6.6] xdp: use flags field to disambiguate broadcast redirect
by Ziyang Xuan 09 Jun '24

From: Toke Høiland-Jørgensen <toke(a)redhat.com>

stable inclusion
from stable-v6.6.31
commit e22e25820fa04ea5eaac4ef7ee200e9923f466a4
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9UM46
CVE: CVE-2024-36937

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…

--------------------------------

[ Upstream commit 5bcf0dcbf9066348058b88a510c57f70f384c92c ]

When redirecting a packet using XDP, the bpf_redirect_map() helper will set
up the redirect destination information in struct bpf_redirect_info (using
the __bpf_xdp_redirect_map() helper function), and the xdp_do_redirect()
function will read this information after the XDP program returns and pass
the frame on to the right redirect destination.

When using the BPF_F_BROADCAST flag to do multicast redirect to a whole
map, __bpf_xdp_redirect_map() sets the 'map' pointer in struct
bpf_redirect_info to point to the destination map to be broadcast. And
xdp_do_redirect() reacts to the value of this map pointer to decide whether
it's dealing with a broadcast or a single-value redirect. However, if the
destination map is being destroyed before xdp_do_redirect() is called, the
map pointer will be cleared out (by bpf_clear_redirect_map()) without
waiting for any XDP programs to stop running. This causes xdp_do_redirect()
to think that the redirect was to a single target, but the target pointer
is also NULL (since broadcast redirects don't have a single target), so
this causes a crash when a NULL pointer is passed to dev_map_enqueue().

To fix this, change xdp_do_redirect() to react directly to the presence of
the BPF_F_BROADCAST flag in the 'flags' value in struct bpf_redirect_info
to disambiguate between a single-target and a broadcast redirect. And only
read the 'map' pointer if the broadcast flag is set, aborting if that has
been cleared out in the meantime. This prevents the crash, while keeping
the atomic (cmpxchg-based) clearing of the map pointer itself, and without
adding any more checks in the non-broadcast fast path.

Fixes: e624d4ed4aa8 ("xdp: Extend xdp_redirect_map with broadcast support")
Reported-and-tested-by: syzbot+af9492708df9797198d6(a)syzkaller.appspotmail.com
Signed-off-by: Toke Høiland-Jørgensen <toke(a)redhat.com>
Acked-by: Stanislav Fomichev <sdf(a)google.com>
Reviewed-by: Hangbin Liu <liuhangbin(a)gmail.com>
Acked-by: Jesper Dangaard Brouer <hawk(a)kernel.org>
Link: https://lore.kernel.org/r/20240418071840.156411-1-toke@redhat.com
Signed-off-by: Martin KaFai Lau <martin.lau(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Ziyang Xuan <william.xuanziyang(a)huawei.com>
---
 net/core/filter.c | 42 ++++++++++++++++++++++++++++++------------
 1 file changed, 32 insertions(+), 10 deletions(-)

diff --git a/net/core/filter.c b/net/core/filter.c
index 81cd6493c7d10..6ec353bf36f38 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4334,10 +4334,12 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
     enum bpf_map_type map_type = ri->map_type;
     void *fwd = ri->tgt_value;
     u32 map_id = ri->map_id;
+    u32 flags = ri->flags;
     struct bpf_map *map;
     int err;
 
     ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+    ri->flags = 0;
     ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
     if (unlikely(!xdpf)) {
@@ -4349,11 +4351,20 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
     case BPF_MAP_TYPE_DEVMAP:
         fallthrough;
     case BPF_MAP_TYPE_DEVMAP_HASH:
-        map = READ_ONCE(ri->map);
-        if (unlikely(map)) {
+        if (unlikely(flags & BPF_F_BROADCAST)) {
+            map = READ_ONCE(ri->map);
+
+            /* The map pointer is cleared when the map is being torn
+             * down by bpf_clear_redirect_map()
+             */
+            if (unlikely(!map)) {
+                err = -ENOENT;
+                break;
+            }
+
             WRITE_ONCE(ri->map, NULL);
             err = dev_map_enqueue_multi(xdpf, dev, map,
-                                        ri->flags & BPF_F_EXCLUDE_INGRESS);
+                                        flags & BPF_F_EXCLUDE_INGRESS);
         } else {
             err = dev_map_enqueue(fwd, xdpf, dev);
         }
@@ -4416,9 +4427,9 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                        struct sk_buff *skb,
                                        struct xdp_buff *xdp,
-                                       struct bpf_prog *xdp_prog,
-                                       void *fwd,
-                                       enum bpf_map_type map_type, u32 map_id)
+                                       struct bpf_prog *xdp_prog, void *fwd,
+                                       enum bpf_map_type map_type, u32 map_id,
+                                       u32 flags)
 {
     struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
     struct bpf_map *map;
@@ -4428,11 +4439,20 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
     case BPF_MAP_TYPE_DEVMAP:
         fallthrough;
     case BPF_MAP_TYPE_DEVMAP_HASH:
-        map = READ_ONCE(ri->map);
-        if (unlikely(map)) {
+        if (unlikely(flags & BPF_F_BROADCAST)) {
+            map = READ_ONCE(ri->map);
+
+            /* The map pointer is cleared when the map is being torn
+             * down by bpf_clear_redirect_map()
+             */
+            if (unlikely(!map)) {
+                err = -ENOENT;
+                break;
+            }
+
             WRITE_ONCE(ri->map, NULL);
             err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
-                                         ri->flags & BPF_F_EXCLUDE_INGRESS);
+                                         flags & BPF_F_EXCLUDE_INGRESS);
         } else {
             err = dev_map_generic_redirect(fwd, skb, xdp_prog);
         }
@@ -4469,9 +4489,11 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
     enum bpf_map_type map_type = ri->map_type;
     void *fwd = ri->tgt_value;
     u32 map_id = ri->map_id;
+    u32 flags = ri->flags;
     int err;
 
     ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+    ri->flags = 0;
     ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
     if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
@@ -4491,7 +4513,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
         return 0;
     }
 
-    return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
+    return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id, flags);
 err:
     _trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
     return err;
--
2.25.1
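For readers less familiar with the broadcast feature this fix hardens, here is a minimal, self-contained XDP program sketch that requests a broadcast redirect. The devmap name tx_ports, its sizing, and the build command are assumptions made for the example, not part of the patch:

/* Illustrative only; build roughly as: clang -O2 -g -target bpf -c xdp_broadcast.c -o xdp_broadcast.o */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Devmap holding the egress interfaces to broadcast to (example sizing). */
struct {
    __uint(type, BPF_MAP_TYPE_DEVMAP);
    __uint(max_entries, 32);
    __type(key, __u32);
    __type(value, __u32);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_broadcast(struct xdp_md *ctx)
{
    /* Clone the frame to every interface in tx_ports except the ingress one.
     * With the fix above, the kernel keys the broadcast case off this
     * BPF_F_BROADCAST flag rather than the (possibly already cleared)
     * ri->map pointer.
     */
    return bpf_redirect_map(&tx_ports, 0, BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
}

char _license[] SEC("license") = "GPL";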