
From: Xi Wang <wangxi11@huawei.com>

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

This helper does the same as rdma_for_each_block(), except it works on a
umem. This simplifies most of the call sites.

Link: https://lore.kernel.org/r/4-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Shunfeng Yang <yangshunfeng2@huawei.com>
Reviewed-by: chunzhi hu <huchunzhi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/rdma/ib_umem.h | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index a1fd63871d172..c9fd050e74f4d 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -36,6 +36,7 @@
 #include <linux/list.h>
 #include <linux/scatterlist.h>
 #include <linux/workqueue.h>
+#include <rdma/ib_verbs.h>
 
 struct ib_ucontext;
 struct ib_umem_odp;
@@ -79,6 +80,28 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
 	return (ib_umem_end(umem) - ib_umem_start(umem)) >> umem->page_shift;
 }
 
+static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+						struct ib_umem *umem,
+						unsigned long pgsz)
+{
+	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
+}
+
+/**
+ * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
+ * @umem: umem to iterate over
+ * @pgsz: Page size to split the list into
+ *
+ * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
+ * returned DMA blocks will be aligned to pgsz and span the range:
+ * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
+ *
+ * Performs exactly ib_umem_num_dma_blocks() iterations.
+ */
+#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
+	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
+	     __rdma_block_iter_next(biter);)
+
 #ifdef CONFIG_INFINIBAND_USER_MEM
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-- 
2.25.1
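
For reference, a driver call site could use the new macro roughly as in the
minimal sketch below. The function and variable names (fill_page_list,
page_list) are illustrative only and not part of this patch; it assumes
rdma_block_iter_dma_address() from <rdma/ib_verbs.h> is available in this
tree, as it is upstream alongside __rdma_block_iter_start()/_next().

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Illustrative sketch: record the DMA address of every pgsz-sized block
 * covered by the umem. rdma_umem_for_each_dma_block() hides the
 * scatterlist walk that call sites previously open-coded with
 * rdma_for_each_block().
 */
static void fill_page_list(u64 *page_list, struct ib_umem *umem,
			   unsigned long pgsz)
{
	struct ib_block_iter biter;
	unsigned int i = 0;

	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
		page_list[i++] = rdma_block_iter_dma_address(&biter);
}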