From: Hui Tang <tanghui20@huawei.com>
stable inclusion
from stable-v5.10.150
commit a91af50850270554a4ff032a12eaa4c2fed0dd44
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6D0XA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
[ Upstream commit 7cc05071f930a631040fea16a41f9d78771edc49 ]
DMA_TO_DEVICE synchronisation must be done after the last modification of the memory region by the software and before it is handed off to the device.
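As an illustration of that ordering rule, here is a minimal sketch of the pattern the fix enforces (hypothetical driver code with made-up names, not taken from this patch): every CPU store into the region happens first, and dma_map_single() with DMA_TO_DEVICE is the last step before the handle is given to the hardware.

  #include <linux/types.h>
  #include <linux/errno.h>
  #include <linux/dma-mapping.h>

  /* Hypothetical descriptor handed to a device; names are illustrative. */
  struct demo_desc {
  	u32 nbufs;
  	u64 addr[16];
  };

  static int demo_submit(struct device *dev, struct demo_desc *desc)
  {
  	dma_addr_t handle;

  	/* 1. Finish every CPU write to the region first. */
  	desc->nbufs = 1;
  	desc->addr[0] = 0x1000;

  	/*
  	 * 2. Only then map it for the device: dma_map_single() with
  	 * DMA_TO_DEVICE may flush the CPU caches and hands ownership
  	 * of the region to the device, so CPU stores made after this
  	 * point are not guaranteed to be visible to the hardware.
  	 */
  	handle = dma_map_single(dev, desc, sizeof(*desc), DMA_TO_DEVICE);
  	if (unlikely(dma_mapping_error(dev, handle)))
  		return -ENOMEM;

  	/* 3. Pass 'handle' to the hardware here. */

  	dma_unmap_single(dev, handle, sizeof(*desc), DMA_TO_DEVICE);
  	return 0;
  }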
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Stable-dep-of: cf5bb835b7c8 ("crypto: qat - fix DMA transfer direction")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
---
 drivers/crypto/qat/qat_common/qat_algs.c | 27 ++++++++++++------------
 1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 8fe7b8935d38..cedf9aa0961f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -670,8 +670,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	int n = sg_nents(sgl);
 	struct qat_alg_buf_list *bufl;
 	struct qat_alg_buf_list *buflout = NULL;
-	dma_addr_t blp;
-	dma_addr_t bloutp;
+	dma_addr_t blp = DMA_MAPPING_ERROR;
+	dma_addr_t bloutp = DMA_MAPPING_ERROR;
 	struct scatterlist *sg;
 	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
@@ -686,10 +686,6 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	for_each_sg(sgl, sg, n, i)
 		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
 
-	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev, blp)))
-		goto err_in;
-
 	for_each_sg(sgl, sg, n, i) {
 		int y = sg_nctr;
 
@@ -705,6 +701,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		sg_nctr++;
 	}
 	bufl->num_bufs = sg_nctr;
+	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, blp)))
+		goto err_in;
 	qat_req->buf.bl = bufl;
 	qat_req->buf.blp = blp;
 	qat_req->buf.sz = sz;
@@ -724,9 +723,6 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		for_each_sg(sglout, sg, n, i)
 			bufers[i].addr = DMA_MAPPING_ERROR;
 
-		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, bloutp)))
-			goto err_out;
 		for_each_sg(sglout, sg, n, i) {
 			int y = sg_nctr;
 
@@ -743,6 +739,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		}
 		buflout->num_bufs = sg_nctr;
 		buflout->num_mapped_bufs = sg_nctr;
+		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, bloutp)))
+			goto err_out;
 		qat_req->buf.blout = buflout;
 		qat_req->buf.bloutp = bloutp;
 		qat_req->buf.sz_out = sz_out;
@@ -754,17 +753,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	return 0;
 
 err_out:
+	if (!dma_mapping_error(dev, bloutp))
+		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
 	n = sg_nents(sglout);
 	for (i = 0; i < n; i++)
 		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
 			dma_unmap_single(dev, buflout->bufers[i].addr,
 					 buflout->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
-	if (!dma_mapping_error(dev, bloutp))
-		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
 	kfree(buflout);
 
 err_in:
+	if (!dma_mapping_error(dev, blp))
+		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
 	n = sg_nents(sgl);
 	for (i = 0; i < n; i++)
 		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
@@ -772,8 +775,6 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 					 bufl->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
 
-	if (!dma_mapping_error(dev, blp))
-		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
 	kfree(bufl);
 
 	dev_err(dev, "Failed to map buf for dma\n");
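The error-path reshuffle in the two labels follows from the same reordering: the list mapping is now created last, so it is the first thing undone, and the new DMA_MAPPING_ERROR initialisers keep the checks in the shared labels safe even when the goto fires before that mapping exists. A minimal sketch of the unwind idiom (hypothetical names, not the driver's code):

  #include <linux/errno.h>
  #include <linux/dma-mapping.h>

  /* Hypothetical two-stage mapping with a goto-based unwind. */
  static int demo_map_two(struct device *dev, void *a, size_t alen,
  			void *b, size_t blen)
  {
  	dma_addr_t da = DMA_MAPPING_ERROR;	/* "not mapped yet" */
  	dma_addr_t db = DMA_MAPPING_ERROR;

  	da = dma_map_single(dev, a, alen, DMA_TO_DEVICE);
  	if (unlikely(dma_mapping_error(dev, da)))
  		goto err;

  	db = dma_map_single(dev, b, blen, DMA_TO_DEVICE);
  	if (unlikely(dma_mapping_error(dev, db)))
  		goto err;

  	return 0;

  err:
  	/*
  	 * Unmap in reverse order of mapping; the DMA_MAPPING_ERROR
  	 * defaults make both checks safe no matter which goto ran.
  	 */
  	if (!dma_mapping_error(dev, db))
  		dma_unmap_single(dev, db, blen, DMA_TO_DEVICE);
  	if (!dma_mapping_error(dev, da))
  		dma_unmap_single(dev, da, alen, DMA_TO_DEVICE);
  	return -ENOMEM;
  }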