From: Toke Høiland-Jørgensen <toke@redhat.com> mainline inclusion from mainline-v6.18-rc1 commit 95920c2ed02bde551ab654e9749c2ca7bc3100e0 category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ICPKD5 Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=... ------------------------------------------ Helge reported that the introduction of PP_MAGIC_MASK led to crashes on boot on his 32-bit parisc machine. The cause of this is the mask is set too wide, so the page_pool_page_is_pp() incurs false positives which crashes the machine. Just disabling the check in page_pool_is_pp() will lead to the page_pool code itself malfunctioning; so instead of doing this, this patch changes the define for PP_DMA_INDEX_BITS to avoid mistaking arbitrary kernel pointers for page_pool-tagged pages. The fix relies on the kernel pointers that alias with the pp_magic field always being above PAGE_OFFSET. With this assumption, we can use the lowest bit of the value of PAGE_OFFSET as the upper bound of the PP_DMA_INDEX_MASK, which should avoid the false positives. Because we cannot rely on PAGE_OFFSET always being a compile-time constant, nor on it always being >0, we fall back to disabling the dma_index storage when there are not enough bits available. This leaves us in the situation we were in before the patch in the Fixes tag, but only on a subset of architecture configurations. This seems to be the best we can do until the transition to page types is complete for page_pool pages. 
v2: - Make sure there's at least 8 bits available and that the PAGE_OFFSET bit calculation doesn't wrap Link: https://lore.kernel.org/all/aMNJMFa5fDalFmtn@p100/ Fixes: ee62ce7a1d90 ("page_pool: Track DMA-mapped pages and unmap them when destroying the pool") Cc: stable@vger.kernel.org # 6.15+ Tested-by: Helge Deller <deller@gmx.de> Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com> Reviewed-by: Mina Almasry <almasrymina@google.com> Tested-by: Helge Deller <deller@gmx.de> Link: https://patch.msgid.link/20250930114331.675412-1-toke@redhat.com Signed-off-by: Jakub Kicinski <kuba@kernel.org> Conflicts: include/linux/mm.h net/core/page_pool.c [commit 1a251f52cfdc add min and commit 8ab79ed50cf1, 57afb4830157, 4dec64c52e24 introduce netmem, which not merged lead to conflict] Signed-off-by: Dong Chenchen <dongchenchen2@huawei.com> --- include/linux/mm.h | 22 ++++++++------ net/core/page_pool.c | 72 ++++++++++++++++++++++++++++++-------------- 2 files changed, 62 insertions(+), 32 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 346a19e9749d..86909d01bf2c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -4370,14 +4370,13 @@ static inline bool vma_is_peer_shared(struct vm_area_struct *vma) * since this value becomes part of PP_SIGNATURE; meaning we can just use the * space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the * lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is - * 0, we make sure that we leave the two topmost bits empty, as that guarantees - * we won't mistake a valid kernel pointer for a value we set, regardless of the - * VMSPLIT setting. + * 0, we use the lowest bit of PAGE_OFFSET as the boundary if that value is + * known at compile-time. 
* - * Altogether, this means that the number of bits available is constrained by - * the size of an unsigned long (at the upper end, subtracting two bits per the - * above), and the definition of PP_SIGNATURE (with or without - * POISON_POINTER_DELTA). + * If the value of PAGE_OFFSET is not known at compile time, or if it is too + * small to leave at least 8 bits available above PP_SIGNATURE, we define the + * number of bits to be 0, which turns off the DMA index tracking altogether + * (see page_pool_register_dma_index()). */ #define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA)) @@ -4392,8 +4391,13 @@ static inline bool vma_is_peer_shared(struct vm_area_struct *vma) */ #define PP_DMA_INDEX_BITS PP_MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT) #else -/* Always leave out the topmost two; see above. */ -#define PP_DMA_INDEX_BITS PP_MIN(32, BITS_PER_LONG - PP_DMA_INDEX_SHIFT - 2) +/* Use the lowest bit of PAGE_OFFSET if there's at least 8 bits available; see above */ +#define PP_DMA_INDEX_MIN_OFFSET (1 << (PP_DMA_INDEX_SHIFT + 8)) +#define PP_DMA_INDEX_BITS ((__builtin_constant_p(PAGE_OFFSET) && \ + PAGE_OFFSET >= PP_DMA_INDEX_MIN_OFFSET && \ + !(PAGE_OFFSET & (PP_DMA_INDEX_MIN_OFFSET - 1))) ? 
\ + PP_MIN(32, __ffs(PAGE_OFFSET) - PP_DMA_INDEX_SHIFT) : 0) + #endif #define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \ diff --git a/net/core/page_pool.c b/net/core/page_pool.c index f55aa48f528b..f63687fcd40e 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -359,11 +359,57 @@ static void page_set_dma_index(struct page *page, unsigned long id) page->pp_magic = magic; } +static int page_pool_register_dma_index(struct page_pool *pool, + struct page *page, gfp_t gfp) +{ + int err = 0; + u32 id; + + if (unlikely(!PP_DMA_INDEX_BITS)) + goto out; + + if (in_softirq()) + err = xa_alloc(&pool->dma_mapped, &id, page, PP_DMA_INDEX_LIMIT, gfp); + else + err = xa_alloc_bh(&pool->dma_mapped, &id, page, PP_DMA_INDEX_LIMIT, gfp); + if (err) { + WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@"); + goto out; + } + + page_set_dma_index(page, id); +out: + return err; +} + +static int page_pool_release_dma_index(struct page_pool *pool, + struct page *page) +{ + struct page *old; + unsigned long id; + + if (unlikely(!PP_DMA_INDEX_BITS)) + return 0; + + id = page_get_dma_index(page); + if (!id) + return -1; + + if (in_softirq()) + old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0); + else + old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0); + if (old != page) + return -1; + + page_set_dma_index(page, 0); + return 0; +} + static bool page_pool_dma_map(struct page_pool *pool, struct page *page, gfp_t gfp) { dma_addr_t dma; int err; - u32 id; /* Setup DMA mapping: use 'struct page' area for storing DMA-addr * since dma_addr_t can be either 32 or 64 bits and does not always fit @@ -377,15 +423,8 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page, gfp_t g if (dma_mapping_error(pool->p.dev, dma)) return false; - if (in_softirq()) - err = xa_alloc(&pool->dma_mapped, &id, page, PP_DMA_INDEX_LIMIT, - gfp); - else - err = xa_alloc_bh(&pool->dma_mapped, &id, page, - PP_DMA_INDEX_LIMIT, 
gfp); - + err = page_pool_register_dma_index(pool, page, gfp); if (err) { - WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@"); dma_unmap_page_attrs(pool->p.dev, dma, PAGE_SIZE << pool->p.order, pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); @@ -393,7 +432,6 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page, gfp_t g } page_pool_set_dma_addr(page, dma); - page_set_dma_index(page, id); if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) page_pool_dma_sync_for_device(pool, page, pool->p.max_len); @@ -539,8 +577,6 @@ static s32 page_pool_inflight(struct page_pool *pool) static __always_inline void __page_pool_release_page_dma(struct page_pool *pool, struct page *page) { - struct page *old; - unsigned long id; dma_addr_t dma; if (!(pool->p.flags & PP_FLAG_DMA_MAP)) @@ -549,16 +585,7 @@ void __page_pool_release_page_dma(struct page_pool *pool, struct page *page) */ return; - id = page_get_dma_index(page); - if (!id) - return; - - if (in_softirq()) - old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0); - else - old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0); - - if (old != page) + if (page_pool_release_dma_index(pool, page)) return; dma = page_pool_get_dma_addr(page); @@ -568,7 +595,6 @@ void __page_pool_release_page_dma(struct page_pool *pool, struct page *page) PAGE_SIZE << pool->p.order, pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); page_pool_set_dma_addr(page, 0); - page_set_dma_index(page, 0); } /* Disconnects a page (from a page_pool). API users can have a need -- 2.25.1