tree:   https://gitee.com/openeuler/kernel.git openEuler-1.0-LTS
head:   bacbf9a9f0df34d979a8029d5a7a7e5dc91460b9
commit: 18f49509eef01d1ee6ed81899298994f2f88dd2a [18192/23720] ascend: share_pool: Use remap_pfn_range to share kva to uva
config: x86_64-allnoconfig (https://download.01.org/0day-ci/archive/20240920/202409201222.m2Vt8TXh-lkp@i...)
compiler: clang version 18.1.8 (https://github.com/llvm/llvm-project 3b5b5c1ec4a3095ab096dd780e84d7ab81f3d7ff)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240920/202409201222.m2Vt8TXh-lkp@i...)
If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202409201222.m2Vt8TXh-lkp@intel.com/
All errors (new ones prefixed by >>):
   In file included from mm/memory.c:51:
   include/linux/pagemap.h:425:21: warning: cast from 'int (*)(struct file *, struct page *)' to 'filler_t *' (aka 'int (*)(void *, struct page *)') converts to incompatible function type [-Wcast-function-type-strict]
     425 |         filler_t *filler = (filler_t *)mapping->a_ops->readpage;
         |                            ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>> mm/memory.c:1546:10: error: implicit declaration of function 'hugetlb_insert_hugepage_pte_by_pa' [-Werror,-Wimplicit-function-declaration]
    1546 |                 return hugetlb_insert_hugepage_pte_by_pa(vma->vm_mm, addr,
         |                        ^
   mm/memory.c:1546:10: note: did you mean 'hugetlb_insert__hugepage_pte_by_pa'?
   include/linux/hugetlb.h:658:19: note: 'hugetlb_insert__hugepage_pte_by_pa' declared here
     658 | static inline int hugetlb_insert__hugepage_pte_by_pa(struct mm_struct *mm,
         |                   ^
   1 warning and 1 error generated.
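The pagemap.h warning is pre-existing context rather than something this commit introduces. The usual way to avoid -Wcast-function-type-strict is to call ->readpage through a wrapper whose type matches filler_t exactly instead of casting the function pointer. A minimal sketch of that technique (the wrapper name is illustrative, not an actual upstream helper):

    /*
     * Illustrative wrapper: its type matches filler_t
     * (int (*)(void *, struct page *)) exactly, so no function-pointer
     * cast is needed. The struct file is passed through the opaque
     * data argument, as read_cache_page() callers already do.
     */
    static int readpage_filler(void *data, struct page *page)
    {
    	struct file *file = data;

    	return page->mapping->a_ops->readpage(file, page);
    }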
vim +/hugetlb_insert_hugepage_pte_by_pa +1546 mm/memory.c
  1504	
  1505	/**
  1506	 * vm_insert_page - insert single page into user vma
  1507	 * @vma: user vma to map to
  1508	 * @addr: target user address of this page
  1509	 * @page: source kernel page
  1510	 *
  1511	 * This allows drivers to insert individual pages they've allocated
  1512	 * into a user vma.
  1513	 *
  1514	 * The page has to be a nice clean _individual_ kernel allocation.
  1515	 * If you allocate a compound page, you need to have marked it as
  1516	 * such (__GFP_COMP), or manually just split the page up yourself
  1517	 * (see split_page()).
  1518	 *
  1519	 * NOTE! Traditionally this was done with "remap_pfn_range()" which
  1520	 * took an arbitrary page protection parameter. This doesn't allow
  1521	 * that. Your vma protection will have to be set up correctly, which
  1522	 * means that if you want a shared writable mapping, you'd better
  1523	 * ask for a shared writable mapping!
  1524	 *
  1525	 * The page does not need to be reserved.
  1526	 *
  1527	 * Usually this function is called from f_op->mmap() handler
  1528	 * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
  1529	 * Caller must set VM_MIXEDMAP on vma if it wants to call this
  1530	 * function from other places, for example from page-fault handler.
  1531	 */
  1532	int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
  1533			struct page *page)
  1534	{
  1535		if (addr < vma->vm_start || addr >= vma->vm_end)
  1536			return -EFAULT;
  1537		if (!page_count(page))
  1538			return -EINVAL;
  1539		if (!(vma->vm_flags & VM_MIXEDMAP)) {
  1540			BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
  1541			BUG_ON(vma->vm_flags & VM_PFNMAP);
  1542			vma->vm_flags |= VM_MIXEDMAP;
  1543		}
  1544	
  1545		if (sp_check_hugepage(page))
> 1546			return hugetlb_insert_hugepage_pte_by_pa(vma->vm_mm, addr,
  1547					vma->vm_page_prot, page_to_phys(page));
  1548		else
  1549			return insert_page(vma, addr, page, vma->vm_page_prot);
  1550	}
  1551	EXPORT_SYMBOL(vm_insert_page);
  1552	
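The clang note points at the likely root cause: the fallback declaration at include/linux/hugetlb.h:658 spells the name with a doubled underscore (hugetlb_insert__hugepage_pte_by_pa), so the spelling used in mm/memory.c has no declaration in this allnoconfig build. Assuming that static inline is the stub for configs where the share_pool hugepage path is compiled out, a minimal sketch of one possible fix is to rename the stub to match the call site. The parameter types after mm and the return value are inferred from the caller, not taken from the actual patch:

    /*
     * Sketch for include/linux/hugetlb.h: rename the stub so its name
     * matches the caller in vm_insert_page(). The -EPERM return for
     * the compiled-out case is a guess.
     */
    static inline int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
    		unsigned long addr, pgprot_t prot, phys_addr_t phys_addr)
    {
    	return -EPERM;
    }

With the names aligned, the vm_insert_page() hugepage branch compiles on configs that only see the stub, while configs providing the real implementation are unaffected.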