Hi Wang,
FYI, the warning reported below still remains.
tree:   https://gitee.com/openeuler/kernel.git OLK-6.6
head:   254b433462607f511039b75693d9314822538f20
commit: 9b1283f2bec2134030e1e099b900579f1f03840e [1924/1924] mm/vmalloc: Extend vmalloc usage about hugepage
config: x86_64-allnoconfig (https://download.01.org/0day-ci/archive/20250214/202502141816.L1NkVPgB-lkp@i...)
compiler: clang version 19.1.3 (https://github.com/llvm/llvm-project ab51eccf88f5321e7c60591c5546b254b6afab99)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250214/202502141816.L1NkVPgB-lkp@i...)
If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202502141816.L1NkVPgB-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> mm/vmalloc.c:4443: warning: Function parameter or member 'pgoff' not described in 'remap_vmalloc_hugepage_range_partial'
vim +4443 mm/vmalloc.c
  4423
  4424  /**
  4425   * remap_vmalloc_hugepage_range_partial - map vmalloc hugepages
  4426   * to userspace
  4427   * @vma: vma to cover
  4428   * @uaddr: target user address to start at
  4429   * @kaddr: virtual address of vmalloc hugepage kernel memory
  4430   * @size: size of map area
  4431   *
  4432   * Returns: 0 for success, -Exxx on failure
  4433   *
  4434   * This function checks that @kaddr is a valid vmalloc'ed area,
  4435   * and that it is big enough to cover the range starting at
  4436   * @uaddr in @vma. Will return failure if that criteria isn't
  4437   * met.
  4438   *
  4439   * Similar to remap_pfn_range() (see mm/memory.c)
  4440   */
  4441  int remap_vmalloc_hugepage_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
  4442                                           void *kaddr, unsigned long pgoff, unsigned long size)
> 4443  {
  4444          struct vm_struct *area;
  4445          unsigned long off;
  4446          unsigned long end_index;
  4447
  4448          if (check_shl_overflow(pgoff, PMD_SHIFT, &off))
  4449                  return -EINVAL;
  4450
  4451          size = ALIGN(size, PMD_SIZE);
  4452
  4453          if (!IS_ALIGNED(uaddr, PMD_SIZE) || !IS_ALIGNED((unsigned long)kaddr, PMD_SIZE))
  4454                  return -EINVAL;
  4455
  4456          area = find_vm_area(kaddr);
  4457          if (!area)
  4458                  return -EINVAL;
  4459
  4460          if (!(area->flags & VM_USERMAP))
  4461                  return -EINVAL;
  4462
  4463          if (check_add_overflow(size, off, &end_index) ||
  4464              end_index > get_vm_area_size(area))
  4465                  return -EINVAL;
  4466          kaddr += off;
  4467
  4468          do {
  4469                  struct page *page = vmalloc_to_page(kaddr);
  4470                  int ret;
  4471
  4472                  ret = hugetlb_insert_hugepage_pte_by_pa(vma->vm_mm, uaddr,
  4473                                  vma->vm_page_prot, page_to_phys(page));
  4474                  if (ret)
  4475                          return ret;
  4476
  4477                  uaddr += PMD_SIZE;
  4478                  kaddr += PMD_SIZE;
  4479                  size -= PMD_SIZE;
  4480          } while (size > 0);
  4481
  4482          vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
  4483
  4484          return 0;
  4485  }
  4486  EXPORT_SYMBOL(remap_vmalloc_hugepage_range_partial);
  4487
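For reference, kernel-doc flags any parameter present in the function prototype that has no matching @-line in the comment block; 'pgoff' was added to the signature at line 4441/4442 without one. An untested one-line fix along these lines should silence the warning; the @pgoff description is my inference from the check_shl_overflow(pgoff, PMD_SHIFT, &off) conversion, not wording taken from the patch:

  * @vma: vma to cover
  * @uaddr: target user address to start at
  * @kaddr: virtual address of vmalloc hugepage kernel memory
 + * @pgoff: offset into the kernel memory, in PMD_SIZE (hugepage) units
 + *         (description inferred from the PMD_SHIFT conversion; adjust as needed)
  * @size: size of map area

After the change, ./scripts/kernel-doc -none mm/vmalloc.c should no longer warn about remap_vmalloc_hugepage_range_partial().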