[OLK-6.6 0/9] migration: adapt to new migration configuration

From: JiangShui Yang <yangjiangshui@h-partners.com>

Longfang Liu (9):
  hisi_acc_vfio_pci: fix XQE dma address error
  hisi_acc_vfio_pci: add eq and aeq interruption restore
  hisi_acc_vfio_pci: bugfix cache write-back issue
  hisi_acc_vfio_pci: bugfix the problem of uninstalling driver
  hisi_acc_vfio_pci: bugfix live migration function without VF device driver
  hisi_acc_vfio_pci: update function return values.
  migration: update BAR space size
  migration: qm updates BAR configuration
  migration: adapt to new migration configuration

 drivers/crypto/hisilicon/qm.c                  |  29 ++
 .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c     | 312 +++++++++++++-----
 .../vfio/pci/hisilicon/hisi_acc_vfio_pci.h     |  21 +-
 3 files changed, 272 insertions(+), 90 deletions(-)

-- 
2.43.0

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC0BWY
CVE: NA

----------------------------------------------------------------------

The DMA addresses of the EQE and AEQE queues are wrong after migration,
which causes guest kernel-mode encryption services to fail. Comparing
the code against the hardware register definitions shows that the two
32-bit values read from the registers were combined into the 64-bit
address in the wrong order. Correct the combination sequence.

Even with that fixed, a guest saved by an old kernel can still be
migrated to a new kernel and end up with wrong data. To make sure the
addresses are correct after such a migration, detect the old magic
number and rebuild the DMA addresses from the saved register values.

Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live migration")
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Reviewed-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 41 ++++++++++++++++---
 .../vfio/pci/hisilicon/hisi_acc_vfio_pci.h | 14 ++++++-
 2 files changed, 47 insertions(+), 8 deletions(-)

diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 597c5b6d8633..e1f1ca1a10b6 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -350,6 +350,32 @@ static int vf_qm_func_stop(struct hisi_qm *qm)
         return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
 }
 
+static int vf_qm_version_check(struct acc_vf_data *vf_data, struct device *dev)
+{
+        switch (vf_data->acc_magic) {
+        case ACC_DEV_MAGIC_V2:
+                if (vf_data->major_ver != ACC_DRV_MAJOR_VER) {
+                        dev_info(dev, "migration driver version<%u.%u> not match!\n",
+                                 vf_data->major_ver, vf_data->minor_ver);
+                        return -EINVAL;
+                }
+                break;
+        case ACC_DEV_MAGIC_V1:
+                /* Correct dma address */
+                vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
+                vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
+                vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
+                vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
+                vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
+                vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
 static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
                              struct hisi_acc_vf_migration_file *migf)
 {
@@ -363,7 +389,8 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
         if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done)
                 return 0;
 
-        if (vf_data->acc_magic != ACC_DEV_MAGIC) {
+        ret = vf_qm_version_check(vf_data, dev);
+        if (ret) {
                 dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
                 return -EINVAL;
         }
@@ -418,7 +445,9 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
         int vf_id = hisi_acc_vdev->vf_id;
         int ret;
 
-        vf_data->acc_magic = ACC_DEV_MAGIC;
+        vf_data->acc_magic = ACC_DEV_MAGIC_V2;
+        vf_data->major_ver = ACC_DRV_MAJOR_VER;
+        vf_data->minor_ver = ACC_DRV_MINOR_VER;
         /* Save device id */
         vf_data->dev_id = hisi_acc_vdev->vf_dev->device;
 
@@ -496,12 +525,12 @@ static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data)
                 return -EINVAL;
 
         /* Every reg is 32 bit, the dma address is 64 bit. */
-        vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
+        vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
         vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
-        vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
-        vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
+        vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
+        vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
         vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
-        vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];
+        vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
 
         /* Through SQC_BT/CQC_BT to get sqc and cqc address */
         ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
index 245d7537b2bc..91002ceeebc1 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
@@ -39,6 +39,9 @@
 #define QM_REG_ADDR_OFFSET	0x0004
 
 #define QM_XQC_ADDR_OFFSET	32U
+#define QM_XQC_ADDR_LOW	0x1
+#define QM_XQC_ADDR_HIGH	0x2
+
 #define QM_VF_AEQ_INT_MASK	0x0004
 #define QM_VF_EQ_INT_MASK	0x000c
 #define QM_IFC_INT_SOURCE_V	0x0020
@@ -50,10 +53,15 @@
 #define QM_EQC_DW0		0X8000
 #define QM_AEQC_DW0		0X8020
 
+#define ACC_DRV_MAJOR_VER	1
+#define ACC_DRV_MINOR_VER	0
+
+#define ACC_DEV_MAGIC_V1	0XCDCDCDCDFEEDAACC
+#define ACC_DEV_MAGIC_V2	0xAACCFEEDDECADEDE
+
 struct acc_vf_data {
 #define QM_MATCH_SIZE offsetofend(struct acc_vf_data, qm_rsv_state)
 	/* QM match information */
-#define ACC_DEV_MAGIC	0XCDCDCDCDFEEDAACC
 	u64 acc_magic;
 	u32 qp_num;
 	u32 dev_id;
@@ -61,7 +69,9 @@ struct acc_vf_data {
 	u32 qp_base;
 	u32 vf_qm_state;
 	/* QM reserved match information */
-	u32 qm_rsv_state[3];
+	u16 major_ver;
+	u16 minor_ver;
+	u32 qm_rsv_state[2];
 
 	/* QM RW regs */
 	u32 aeq_int_mask;
-- 
2.43.0
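Not part of the patch: a minimal sketch of the corrected address assembly, using the macros this patch adds. Per those defines, the low and high halves of the queue DMA address sit at indexes 1 and 2 of the saved EQC/AEQC dword array, while the old code read indexes 0 and 1. The helper name xqc_to_dma is made up for illustration.

    #include <linux/types.h>

    #define QM_XQC_ADDR_OFFSET      32U
    #define QM_XQC_ADDR_LOW         0x1
    #define QM_XQC_ADDR_HIGH        0x2

    /* Combine the two saved 32-bit XQC words into one 64-bit DMA address. */
    static u64 xqc_to_dma(const u32 *xqc_dw)
    {
            u64 dma = xqc_dw[QM_XQC_ADDR_HIGH];     /* upper 32 bits */

            dma <<= QM_XQC_ADDR_OFFSET;             /* shift into the high half */
            dma |= xqc_dw[QM_XQC_ADDR_LOW];         /* lower 32 bits */
            return dma;
    }

vf_qm_version_check() applies the same combination to a stream that still carries ACC_DEV_MAGIC_V1, so data saved by an old kernel is restored with usable addresses.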

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC0BWY
CVE: NA

----------------------------------------------------------------------

To ensure that no accelerator task packets are lost during migration,
send EQ and AEQ doorbell commands to the device after live migration
completes, updating the completion position of the task queues. The
device then re-checks the completed task data and, if any packets have
not yet been collected, resends a task completion interrupt to the
software.

Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live migration")
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Reviewed-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index e1f1ca1a10b6..deb702d85951 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -470,6 +470,19 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
         return 0;
 }
 
+static void vf_qm_xeqc_save(struct hisi_qm *qm,
+                            struct hisi_acc_vf_migration_file *migf)
+{
+        struct acc_vf_data *vf_data = &migf->vf_data;
+        u16 eq_head, aeq_head;
+
+        eq_head = vf_data->qm_eqc_dw[0] & 0xFFFF;
+        qm_db(qm, 0, QM_DOORBELL_CMD_EQ, eq_head, 0);
+
+        aeq_head = vf_data->qm_aeqc_dw[0] & 0xFFFF;
+        qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, aeq_head, 0);
+}
+
 static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
                            struct hisi_acc_vf_migration_file *migf)
 {
@@ -578,6 +591,9 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
                 return -EINVAL;
         migf->total_length = sizeof(struct acc_vf_data);
 
+        /* Save eqc and aeqc interrupt information */
+        vf_qm_xeqc_save(vf_qm, migf);
+
         return 0;
 }
 
-- 
2.43.0
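Not part of the patch: an annotated restatement of what vf_qm_xeqc_save() does, assuming (as the 0xFFFF mask implies) that the low 16 bits of the saved EQC/AEQC DW0 hold the current event queue head.

    /*
     * Ring the EQ/AEQ doorbells with the heads captured in the saved
     * context, so the device re-walks the event queues and re-raises a
     * completion interrupt for any packets software has not collected.
     */
    u16 eq_head = vf_data->qm_eqc_dw[0] & 0xFFFF;
    u16 aeq_head = vf_data->qm_aeqc_dw[0] & 0xFFFF;

    qm_db(qm, 0, QM_DOORBELL_CMD_EQ, eq_head, 0);
    qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, aeq_head, 0);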

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC0BWY
CVE: NA

----------------------------------------------------------------------

At present, the QM cache write-back is performed in the device data
copy stage, after the device has already been stopped. Issuing the
write-back at that point means the data written back from the cache is
empty. To make sure the cache contents are actually written back, move
the write-back into the stop-device stage.

Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live migration")
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Reviewed-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index deb702d85951..fed5bd9571df 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -566,7 +566,6 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
 {
         struct acc_vf_data *vf_data = &migf->vf_data;
         struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
-        struct device *dev = &vf_qm->pdev->dev;
         int ret;
 
         if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
@@ -580,12 +579,6 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
         vf_data->vf_qm_state = QM_READY;
         hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
 
-        ret = vf_qm_cache_wb(vf_qm);
-        if (ret) {
-                dev_err(dev, "failed to writeback QM Cache!\n");
-                return ret;
-        }
-
         ret = vf_qm_read_data(vf_qm, vf_data);
         if (ret)
                 return -EINVAL;
@@ -1014,6 +1007,13 @@ static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev
                 dev_err(dev, "failed to check QM INT state!\n");
                 return ret;
         }
+
+        ret = vf_qm_cache_wb(vf_qm);
+        if (ret) {
+                dev_err(dev, "failed to writeback QM cache!\n");
+                return ret;
+        }
+
         return 0;
 }
 
-- 
2.43.0
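Not part of the patch: a condensed sketch of the ordering this change establishes. In the driver the two halves live in hisi_acc_vf_stop_device() and vf_qm_state_save(); they are combined into one hypothetical function here only to show the sequence.

    /* Quiesce, flush caches while they still hold valid data, then read state. */
    static int stop_flush_then_save(struct hisi_qm *vf_qm,
                                    struct acc_vf_data *vf_data)
    {
            int ret;

            ret = vf_qm_func_stop(vf_qm);           /* stop-device stage */
            if (ret)
                    return ret;

            ret = vf_qm_cache_wb(vf_qm);            /* write-back now happens here */
            if (ret)
                    return ret;

            return vf_qm_read_data(vf_qm, vf_data); /* data copy stage */
    }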

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC0BWY
CVE: NA

----------------------------------------------------------------------

In a live migration scenario, if the destination has more VFs than the
source, the recovery operation fails and QEMU exits after closing the
device FD without completing the normal teardown. The abnormal close
leaves the live migration driver's reference counts unbalanced, so the
driver can no longer be unloaded.

Therefore, make sure the migration file descriptor references are
always released when the device is closed.

Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live migration")
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Reviewed-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index fed5bd9571df..1d6c4aaa5e91 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -1510,6 +1510,7 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
         struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
         struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
 
+        hisi_acc_vf_disable_fds(hisi_acc_vdev);
         mutex_lock(&hisi_acc_vdev->open_mutex);
         hisi_acc_vdev->dev_opened = false;
         iounmap(vf_qm->io_base);
-- 
2.43.0

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC0BWY
CVE: NA

----------------------------------------------------------------------

If the VF device driver is not loaded in the guest OS and device data
migration is attempted anyway, the migrated queue addresses are NULL.
The recovery operation on the destination side would then access a NULL
address and fail. Live migration of a VM that has no VF device driver
loaded therefore does not require device data migration.

In addition, when the queue address data received by the destination is
empty, skip the device queue recovery processing.

Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live migration")
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Reviewed-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 22 +++++++++++++------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 1d6c4aaa5e91..b378f3479e58 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -426,13 +426,6 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
                 return -EINVAL;
         }
 
-        ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
-        if (ret) {
-                dev_err(dev, "failed to write QM_VF_STATE\n");
-                return ret;
-        }
-
-        hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
         hisi_acc_vdev->match_done = true;
         return 0;
 }
@@ -498,6 +491,20 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
         if (migf->total_length < sizeof(struct acc_vf_data))
                 return -EINVAL;
 
+        if (!vf_data->eqe_dma || !vf_data->aeqe_dma ||
+            !vf_data->sqc_dma || !vf_data->cqc_dma) {
+                dev_info(dev, "resume dma addr is NULL!\n");
+                hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
+                return 0;
+        }
+
+        ret = qm_write_regs(qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
+        if (ret) {
+                dev_err(dev, "failed to write QM_VF_STATE\n");
+                return -EINVAL;
+        }
+        hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
+
         qm->eqe_dma = vf_data->eqe_dma;
         qm->aeqe_dma = vf_data->aeqe_dma;
         qm->sqc_dma = vf_data->sqc_dma;
@@ -1533,6 +1540,7 @@ static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
         hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
         hisi_acc_vdev->pf_qm = pf_qm;
         hisi_acc_vdev->vf_dev = pdev;
+        hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
         mutex_init(&hisi_acc_vdev->state_mutex);
         mutex_init(&hisi_acc_vdev->open_mutex);
 
-- 
2.43.0
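Not part of the patch: the guard condition expressed as a hypothetical helper, to make the intent explicit. If the guest never loaded a VF driver, none of the queue DMA addresses were populated, so the destination marks the VF QM_NOT_READY and skips queue recovery instead of dereferencing NULL addresses.

    /* True when the received state carries no queue context to restore. */
    static bool vf_state_is_empty(const struct acc_vf_data *vf_data)
    {
            return !vf_data->eqe_dma || !vf_data->aeqe_dma ||
                   !vf_data->sqc_dma || !vf_data->cqc_dma;
    }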

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC0BWY
CVE: NA

----------------------------------------------------------------------

Many functions in this driver store the error code of a sub-function in
ret but then return a converted error code to the caller, which hides
the root cause of the failure from the end user. Return the
sub-function's error code directly instead.

Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index b378f3479e58..6bca52c286a3 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -392,7 +392,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
         ret = vf_qm_version_check(vf_data, dev);
         if (ret) {
                 dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
-                return -EINVAL;
+                return ret;
         }
 
         if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
@@ -404,7 +404,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
         ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
         if (ret <= 0) {
                 dev_err(dev, "failed to get vft qp nums\n");
-                return -EINVAL;
+                return ret;
         }
 
         if (ret != vf_data->qp_num) {
@@ -501,7 +501,7 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
         ret = qm_write_regs(qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
         if (ret) {
                 dev_err(dev, "failed to write QM_VF_STATE\n");
-                return -EINVAL;
+                return ret;
         }
         hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
 
@@ -542,7 +542,7 @@ static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data)
 
         ret = qm_get_regs(vf_qm, vf_data);
         if (ret)
-                return -EINVAL;
+                return ret;
 
         /* Every reg is 32 bit, the dma address is 64 bit. */
         vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
@@ -556,13 +556,13 @@ static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data)
         ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
         if (ret) {
                 dev_err(dev, "failed to read SQC addr!\n");
-                return -EINVAL;
+                return ret;
         }
 
         ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
         if (ret) {
                 dev_err(dev, "failed to read CQC addr!\n");
-                return -EINVAL;
+                return ret;
         }
 
         return 0;
@@ -588,7 +588,7 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
         ret = vf_qm_read_data(vf_qm, vf_data);
         if (ret)
-                return -EINVAL;
+                return ret;
         migf->total_length = sizeof(struct acc_vf_data);
 
         /* Save eqc and aeqc interrupt information */
         vf_qm_xeqc_save(vf_qm, migf);
@@ -1381,7 +1381,7 @@ static int hisi_acc_vf_debug_check(struct seq_file *seq, struct vfio_device *vde
         ret = qm_wait_dev_not_ready(vf_qm);
         if (ret) {
                 seq_puts(seq, "VF device not ready!\n");
-                return -EBUSY;
+                return ret;
         }
 
         return 0;
-- 
2.43.0
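Not part of the patch: the general pattern being applied, shown on one of the call sites it touches. Whatever error code the sub-function produced now reaches the caller instead of being collapsed to -EINVAL.

    /* Before: every failure is reported to the caller as -EINVAL. */
    ret = qm_get_regs(vf_qm, vf_data);
    if (ret)
            return -EINVAL;

    /* After: the sub-function's own error code is propagated. */
    ret = qm_get_regs(vf_qm, vf_data);
    if (ret)
            return ret;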

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC0BWY
CVE: NA

----------------------------------------------------------------------

On the new hardware platform, the live migration configuration region
is moved from the VF to the PF. The VF's own BAR2 configuration space
is restored to the full 64KB, so there is no longer any need to split
the BAR space in half when reporting it.

Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 41 +++++++++++++++----
 1 file changed, 32 insertions(+), 9 deletions(-)

diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 6bca52c286a3..35a16f31c88c 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -1249,6 +1249,33 @@ static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
         return !IS_ERR(pf_qm) ? pf_qm : NULL;
 }
 
+static size_t hisi_acc_get_resource_len(struct vfio_pci_core_device *vdev,
+                                        unsigned int index)
+{
+        struct hisi_acc_vf_core_device *hisi_acc_vdev =
+                hisi_acc_drvdata(vdev->pdev);
+
+        /*
+         * ACC VF dev 64KB BAR2 region consists of both functional
+         * register space and migration control register space, each
+         * uses 32KB BAR2 region, on the system with more than 64KB
+         * page size, even if the migration control register space
+         * is written by VM, it will only affects the VF.
+         *
+         * In order to support the live migration function in the
+         * system with a page size above 64KB, the driver needs
+         * to ensure that the VF region size is aligned with the
+         * system page size.
+         *
+         * On the new hardware platform, the live migration control register
+         * has been moved from VF to PF.
+         */
+        if (hisi_acc_vdev->pf_qm->ver == QM_HW_V3)
+                return (pci_resource_len(vdev->pdev, index) >> 1);
+
+        return pci_resource_len(vdev->pdev, index);
+}
+
 static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
                                         size_t count, loff_t *ppos,
                                         size_t *new_count)
@@ -1259,8 +1286,9 @@ static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
 
         if (index == VFIO_PCI_BAR2_REGION_INDEX) {
                 loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
-                resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
+                resource_size_t end;
 
+                end = hisi_acc_get_resource_len(vdev, index);
                 /* Check if access is for migration control region */
                 if (pos >= end)
                         return -EINVAL;
@@ -1281,8 +1309,9 @@ static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
         index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
         if (index == VFIO_PCI_BAR2_REGION_INDEX) {
                 u64 req_len, pgoff, req_start;
-                resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
+                resource_size_t end;
 
+                end = PAGE_ALIGN(hisi_acc_get_resource_len(vdev, index));
                 req_len = vma->vm_end - vma->vm_start;
                 pgoff = vma->vm_pgoff &
                         ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
@@ -1329,7 +1358,6 @@ static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int
         if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                 struct vfio_pci_core_device *vdev =
                         container_of(core_vdev, struct vfio_pci_core_device, vdev);
-                struct pci_dev *pdev = vdev->pdev;
                 struct vfio_region_info info;
                 unsigned long minsz;
 
@@ -1344,12 +1372,7 @@ static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int
 
                 if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
                         info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
-                        /*
-                         * ACC VF dev BAR2 region consists of both functional
-                         * register space and migration control register space.
-                         * Report only the functional region to Guest.
-                         */
-                        info.size = pci_resource_len(pdev, info.index) / 2;
+                        info.size = hisi_acc_get_resource_len(vdev, info.index);
 
                         info.flags = VFIO_REGION_INFO_FLAG_READ |
                                         VFIO_REGION_INFO_FLAG_WRITE |
-- 
2.43.0
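Not part of the patch: a minimal sketch of the size calculation hisi_acc_get_resource_len() performs, assuming the 64KB VF BAR2 described in the comment above; the helper name is made up for illustration.

    /* Size of BAR2 reported to the guest. */
    static size_t reported_bar2_size(size_t bar_len, bool hw_v3)
    {
            /*
             * HW V3: the upper half of the VF BAR2 is the migration
             * control space and stays hidden, so only half is exposed.
             * Newer hardware keeps the migration space in the PF, so
             * the whole VF BAR2 can be exposed.
             */
            return hw_v3 ? bar_len / 2 : bar_len;
    }

With a 0x10000 BAR2 this reports 0x8000 on HW V3 and 0x10000 on newer parts; the mmap path additionally applies PAGE_ALIGN() to the result so the exposed region size stays aligned with the system page size.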

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC0BWY
CVE: NA

----------------------------------------------------------------------

On the new hardware platform, the live migration configuration region
of the accelerator device is no longer located in the VF but in the PF.
The QM driver therefore needs to enable this configuration region when
it is loaded and clear the configuration again when it is unloaded.

Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/crypto/hisilicon/qm.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index b2036cedfcfd..cb00a14b9843 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -243,6 +243,9 @@
 #define QM_QOS_MAX_CIR_U		6
 #define QM_AUTOSUSPEND_DELAY		3000
 
+#define QM_MIG_REGION_SEL		0x100198
+#define QM_MIG_REGION_EN		0x1
+
 /* abnormal status value for stopping queue */
 #define QM_STOP_QUEUE_FAIL		1
 #define QM_DUMP_SQC_FAIL		3
@@ -3088,11 +3091,36 @@ static void qm_put_pci_res(struct hisi_qm *qm)
         pci_release_mem_regions(pdev);
 }
 
+static void hisi_mig_region_clear(struct hisi_qm *qm)
+{
+        u32 val;
+
+        /* Clear migration region set of PF */
+        if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) {
+                val = readl(qm->io_base + QM_MIG_REGION_SEL);
+                val &= ~BIT(0);
+                writel(val, qm->io_base + QM_MIG_REGION_SEL);
+        }
+}
+
+static void hisi_mig_region_enable(struct hisi_qm *qm)
+{
+        u32 val;
+
+        /* Select migration region of PF */
+        if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) {
+                val = readl(qm->io_base + QM_MIG_REGION_SEL);
+                val |= QM_MIG_REGION_EN;
+                writel(val, qm->io_base + QM_MIG_REGION_SEL);
+        }
+}
+
 static void hisi_qm_pci_uninit(struct hisi_qm *qm)
 {
         struct pci_dev *pdev = qm->pdev;
 
         pci_free_irq_vectors(pdev);
+        hisi_mig_region_clear(qm);
         qm_put_pci_res(qm);
         pci_disable_device(pdev);
 }
@@ -5717,6 +5745,7 @@ int hisi_qm_init(struct hisi_qm *qm)
                 goto err_free_qm_memory;
 
         qm_cmd_init(qm);
+        hisi_mig_region_enable(qm);
 
         return 0;
 
-- 
2.43.0
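Not part of the patch: the enable and clear helpers are symmetric read-modify-write accesses to bit 0 of the same PF register, condensed below into one hypothetical helper. The driver only touches the register on a PF whose hardware version is newer than QM_HW_V3, enables it at the end of hisi_qm_init(), and clears it again in hisi_qm_pci_uninit().

    /* Route the migration region to the PF (enable) or release it (clear). */
    static void mig_region_set(void __iomem *io_base, bool enable)
    {
            u32 val = readl(io_base + QM_MIG_REGION_SEL);

            if (enable)
                    val |= QM_MIG_REGION_EN;        /* set bit 0 */
            else
                    val &= ~QM_MIG_REGION_EN;       /* clear bit 0 */
            writel(val, io_base + QM_MIG_REGION_SEL);
    }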

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC0BWY
CVE: NA

----------------------------------------------------------------------

On the new hardware platform, the migration region is moved from the VF
to the PF, and the driver needs to be adapted to the new hardware
accordingly. On the new platform the migration driver uses the PF's io
base directly and no separate mapping of the VF BAR is required. On the
old platform the driver keeps the existing scheme for compatibility.

Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 165 ++++++++++++------
 .../vfio/pci/hisilicon/hisi_acc_vfio_pci.h |   7 +
 2 files changed, 119 insertions(+), 53 deletions(-)

diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 35a16f31c88c..6e3ebd3adc9e 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -125,6 +125,72 @@ static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
         return 0;
 }
 
+static int qm_get_xqc_regs(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+                           struct acc_vf_data *vf_data)
+{
+        struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
+        struct device *dev = &qm->pdev->dev;
+        u32 eqc_addr, aeqc_addr;
+        int ret;
+
+        if (qm->ver == QM_HW_V3) {
+                eqc_addr = QM_EQC_DW0;
+                aeqc_addr = QM_AEQC_DW0;
+        } else {
+                eqc_addr = QM_EQC_PF_DW0;
+                aeqc_addr = QM_AEQC_PF_DW0;
+        }
+
+        /* QM_EQC_DW has 7 regs */
+        ret = qm_read_regs(qm, eqc_addr, vf_data->qm_eqc_dw, 7);
+        if (ret) {
+                dev_err(dev, "failed to read QM_EQC_DW\n");
+                return ret;
+        }
+
+        /* QM_AEQC_DW has 7 regs */
+        ret = qm_read_regs(qm, aeqc_addr, vf_data->qm_aeqc_dw, 7);
+        if (ret) {
+                dev_err(dev, "failed to read QM_AEQC_DW\n");
+                return ret;
+        }
+
+        return 0;
+}
+
+static int qm_set_xqc_regs(struct hisi_acc_vf_core_device *hisi_acc_vdev,
+                           struct acc_vf_data *vf_data)
+{
+        struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
+        struct device *dev = &qm->pdev->dev;
+        u32 eqc_addr, aeqc_addr;
+        int ret;
+
+        if (qm->ver == QM_HW_V3) {
+                eqc_addr = QM_EQC_DW0;
+                aeqc_addr = QM_AEQC_DW0;
+        } else {
+                eqc_addr = QM_EQC_PF_DW0;
+                aeqc_addr = QM_AEQC_PF_DW0;
+        }
+
+        /* QM_EQC_DW has 7 regs */
+        ret = qm_write_regs(qm, eqc_addr, vf_data->qm_eqc_dw, 7);
+        if (ret) {
+                dev_err(dev, "failed to write QM_EQC_DW\n");
+                return ret;
+        }
+
+        /* QM_AEQC_DW has 7 regs */
+        ret = qm_write_regs(qm, aeqc_addr, vf_data->qm_aeqc_dw, 7);
+        if (ret) {
+                dev_err(dev, "failed to write QM_AEQC_DW\n");
+                return ret;
+        }
+
+        return 0;
+}
+
 static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
 {
         struct device *dev = &qm->pdev->dev;
@@ -167,20 +233,6 @@ static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
                 return ret;
         }
 
-        /* QM_EQC_DW has 7 regs */
-        ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
-        if (ret) {
-                dev_err(dev, "failed to read QM_EQC_DW\n");
-                return ret;
-        }
-
-        /* QM_AEQC_DW has 7 regs */
-        ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
-        if (ret) {
-                dev_err(dev, "failed to read QM_AEQC_DW\n");
-                return ret;
-        }
-
         return 0;
 }
 
@@ -238,20 +290,6 @@ static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
                 return ret;
         }
 
-        /* QM_EQC_DW has 7 regs */
-        ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
-        if (ret) {
-                dev_err(dev, "failed to write QM_EQC_DW\n");
-                return ret;
-        }
-
-        /* QM_AEQC_DW has 7 regs */
-        ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
-        if (ret) {
-                dev_err(dev, "failed to write QM_AEQC_DW\n");
-                return ret;
-        }
-
         return 0;
 }
 
@@ -519,6 +557,10 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
                 return ret;
         }
 
+        ret = qm_set_xqc_regs(hisi_acc_vdev, vf_data);
+        if (ret)
+                return ret;
+
         ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
         if (ret) {
                 dev_err(dev, "set sqc failed\n");
@@ -586,6 +628,10 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
         vf_data->vf_qm_state = QM_READY;
         hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
 
+        ret = qm_get_xqc_regs(hisi_acc_vdev, vf_data);
+        if (ret)
+                return -EINVAL;
+
         ret = vf_qm_read_data(vf_qm, vf_data);
         if (ret)
                 return ret;
@@ -1185,34 +1231,46 @@ static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
 {
         struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
         struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+        struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
         struct pci_dev *vf_dev = vdev->pdev;
 
-        /*
-         * ACC VF dev BAR2 region consists of both functional register space
-         * and migration control register space. For migration to work, we
-         * need access to both. Hence, we map the entire BAR2 region here.
-         * But unnecessarily exposing the migration BAR region to the Guest
-         * has the potential to prevent/corrupt the Guest migration. Hence,
-         * we restrict access to the migration control space from
-         * Guest(Please see mmap/ioctl/read/write override functions).
-         *
-         * Please note that it is OK to expose the entire VF BAR if migration
-         * is not supported or required as this cannot affect the ACC PF
-         * configurations.
-         *
-         * Also the HiSilicon ACC VF devices supported by this driver on
-         * HiSilicon hardware platforms are integrated end point devices
-         * and the platform lacks the capability to perform any PCIe P2P
-         * between these devices.
-         */
+        if (pf_qm->ver == QM_HW_V3) {
+                /*
+                 * ACC VF dev BAR2 region consists of both functional register space
+                 * and migration control register space. For migration to work, we
+                 * need access to both. Hence, we map the entire BAR2 region here.
+                 * But unnecessarily exposing the migration BAR region to the Guest
+                 * has the potential to prevent/corrupt the Guest migration. Hence,
+                 * we restrict access to the migration control space from
+                 * Guest(Please see mmap/ioctl/read/write override functions).
+                 *
+                 * Please note that it is OK to expose the entire VF BAR if migration
+                 * is not supported or required as this cannot affect the ACC PF
+                 * configurations.
+                 *
+                 * Also the HiSilicon ACC VF devices supported by this driver on
+                 * HiSilicon hardware platforms are integrated end point devices
+                 * and the platform lacks the capability to perform any PCIe P2P
+                 * between these devices.
+                 */
 
-        vf_qm->io_base =
-                ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
-                        pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
-        if (!vf_qm->io_base)
-                return -EIO;
+                vf_qm->io_base =
+                        ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
+                                pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
+                if (!vf_qm->io_base)
+                        return -EIO;
 
-        vf_qm->fun_type = QM_HW_VF;
+                vf_qm->fun_type = QM_HW_VF;
+                vf_qm->ver = pf_qm->ver;
+        } else {
+                /*
+                 * In the new HW platform, the migration function register space is in BAR2 of PF,
+                 * and each VF occupies 8KB address space.
+                 */
+                vf_qm->io_base = pf_qm->io_base + QM_MIG_REGION_OFFSET +
+                                 hisi_acc_vdev->vf_id * QM_MIG_REGION_SIZE;
+                vf_qm->fun_type = QM_HW_PF;
+        }
         vf_qm->pdev = vf_dev;
         mutex_init(&vf_qm->mailbox_lock);
 
@@ -1543,7 +1601,8 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
         hisi_acc_vf_disable_fds(hisi_acc_vdev);
         mutex_lock(&hisi_acc_vdev->open_mutex);
         hisi_acc_vdev->dev_opened = false;
-        iounmap(vf_qm->io_base);
+        if (vf_qm->ver == QM_HW_V3)
+                iounmap(vf_qm->io_base);
         mutex_unlock(&hisi_acc_vdev->open_mutex);
         vfio_pci_core_close_device(core_vdev);
 }
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
index 91002ceeebc1..01a4cd7e8d9f 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
@@ -59,6 +59,13 @@
 #define ACC_DEV_MAGIC_V1	0XCDCDCDCDFEEDAACC
 #define ACC_DEV_MAGIC_V2	0xAACCFEEDDECADEDE
 
+#define QM_MIG_REGION_OFFSET	0x180000
+#define QM_MIG_REGION_SIZE	0x2000
+
+#define QM_SUB_VERSION_ID	0x100210
+#define QM_EQC_PF_DW0		0x1c00
+#define QM_AEQC_PF_DW0		0x1c20
+
 struct acc_vf_data {
 #define QM_MATCH_SIZE offsetofend(struct acc_vf_data, qm_rsv_state)
 	/* QM match information */
-- 
2.43.0
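Not part of the patch: a worked example of where a VF's migration control space now lives, using the defines added above; the helper name is made up for illustration.

    /* PF BAR2 + 0x180000, then one 8KB (0x2000) slice per VF. */
    static void __iomem *vf_mig_io_base(void __iomem *pf_io_base, int vf_id)
    {
            return pf_io_base + QM_MIG_REGION_OFFSET +
                   vf_id * QM_MIG_REGION_SIZE;
    }

Since the driver sets vf_id to pci_iov_vf_id(pdev) + 1, the first VF's slice starts at PF BAR2 offset 0x182000, the second at 0x184000, and so on. Because this pointer aliases the PF's existing mapping, there is nothing to unmap for these devices at close time, which is why the iounmap() call is now gated on QM_HW_V3.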