
tree:   https://gitee.com/openeuler/kernel.git OLK-5.10
head:   26e6aab08db562ec2a74b2125bddcc9a68cb6d5c
commit: 9bc24f753631ce5a80c2c6c36372981726746cf4 [3218/3218] dma: pswiotlb: Move pswiotlb dma functions behind dma_map_ops
config: arm64-randconfig-003-20250926 (https://download.01.org/0day-ci/archive/20250926/202509260644.BcnW2VXb-lkp@i...)
compiler: aarch64-linux-gcc (GCC) 9.5.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250926/202509260644.BcnW2VXb-lkp@i...)

If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202509260644.BcnW2VXb-lkp@intel.com/

All warnings (new ones prefixed by >>):
>> kernel/dma/phytium/pswiotlb-mapping.c:308:30: warning: no previous prototype for 'pswiotlb_clone_orig_dma_ops' [-Wmissing-prototypes]
     308 | struct pswiotlb_dma_map_ops *pswiotlb_clone_orig_dma_ops(struct device *dev,
         |                              ^~~~~~~~~~~~~~~~~~~~~~~~~~~
vim +/pswiotlb_clone_orig_dma_ops +308 kernel/dma/phytium/pswiotlb-mapping.c

   283	
   284	static const struct dma_map_ops pswiotlb_noiommu_dma_ops = {
   285		.alloc = pswiotlb_dma_alloc_distribute,
   286		.free = pswiotlb_dma_free_distribute,
   287		.alloc_pages = pswiotlb_dma_common_alloc_pages_distribute,
   288		.free_pages = pswiotlb_dma_common_free_pages_distribute,
   289		.alloc_noncoherent = pswiotlb_dma_alloc_noncoherent_distribute,
   290		.free_noncoherent = pswiotlb_dma_free_noncoherent_distribute,
   291		.mmap = pswiotlb_dma_mmap_distribute,
   292		.get_sgtable = pswiotlb_dma_get_sgtable_distribute,
   293		.map_page = pswiotlb_dma_map_page_attrs_distribute,
   294		.unmap_page = pswiotlb_dma_unmap_page_attrs_distribute,
   295		.map_sg = pswiotlb_dma_map_sg_attrs_distribute,
   296		.unmap_sg = pswiotlb_dma_unmap_sg_attrs_distribute,
   297		.sync_single_for_cpu = pswiotlb_dma_sync_single_for_cpu_distribute,
   298		.sync_single_for_device = pswiotlb_dma_sync_single_for_device_distribute,
   299		.sync_sg_for_cpu = pswiotlb_dma_sync_sg_for_cpu_distribute,
   300		.sync_sg_for_device = pswiotlb_dma_sync_sg_for_device_distribute,
   301		.map_resource = pswiotlb_dma_map_resource_distribute,
   302		.unmap_resource = NULL,
   303		.get_merge_boundary = pswiotlb_dma_get_merge_boundary_distribute,
   304		.get_required_mask = pswiotlb_dma_get_required_mask_distribute,
   305		.dma_supported = pswiotlb_dma_supported_distribute,
   306		.max_mapping_size = pswiotlb_dma_max_mapping_size_distribute,
   307	};
 > 308	struct pswiotlb_dma_map_ops *pswiotlb_clone_orig_dma_ops(struct device *dev,
   309							const struct dma_map_ops *ops)
   310	{
   311		struct pswiotlb_dma_map_ops *new_dma_ops = kmalloc(sizeof(struct pswiotlb_dma_map_ops),
   312								    GFP_KERNEL);
   313		if (!new_dma_ops)
   314			return NULL;
   315	
   316		memcpy(new_dma_ops, ops, sizeof(struct pswiotlb_dma_map_ops));
   317	
   318		return new_dma_ops;
   319	}
   320	
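The warning comes from the W=1 -Wmissing-prototypes check: pswiotlb_clone_orig_dma_ops() has external linkage but no prior declaration is visible where it is defined. A minimal sketch of the usual fix, assuming the pswiotlb patch set has (or adds) a shared header already included by pswiotlb-mapping.c (the header name here is an assumption, not taken from the patch):

	/* In a pswiotlb header included by pswiotlb-mapping.c (assumed name, e.g. "pswiotlb.h"):
	 * declare the prototype so it is in scope before the definition.
	 */
	struct pswiotlb_dma_map_ops *pswiotlb_clone_orig_dma_ops(struct device *dev,
						const struct dma_map_ops *ops);

Alternatively, if the function is only called from within pswiotlb-mapping.c, marking the definition static would also satisfy -Wmissing-prototypes without touching any header.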
-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki