From: Lijun Fang <fanglijun3@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4JMM0
CVE: NA
--------
Add an ioctl to get the physical address of a user virtual address for the TS core, and store it in a slot of the reserved memory.
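
For reference, below is a minimal userspace sketch of the expected calling convention (hypothetical, not part of this patch; the device node path is an assumption, and only the SVM_IOCTL_GET_PHYS value comes from the driver). The caller passes a pointer to an unsigned long holding a virtual address of its own address space; on success the kernel overwrites that value with the byte offset of the filled slot inside the reserved va2pa trunk.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define SVM_IOCTL_GET_PHYS	0xfff9	/* same value as the define added below */

int main(void)
{
	unsigned long arg;
	int fd;

	/* The misc device path is an assumption for illustration. */
	fd = open("/dev/svm", O_RDWR);
	if (fd < 0)
		return 1;

	/* Any address mapped in this process; here, the variable itself. */
	arg = (unsigned long)&arg;

	/* On success the kernel replaces 'arg' with the slot offset. */
	if (ioctl(fd, SVM_IOCTL_GET_PHYS, &arg) == 0)
		printf("slot offset in va2pa trunk: 0x%lx\n", arg);

	close(fd);
	return 0;
}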
Signed-off-by: Lijun Fang <fanglijun3@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/char/svm.c | 313 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 312 insertions(+), 1 deletion(-)
diff --git a/drivers/char/svm.c b/drivers/char/svm.c
index bc31724fb730..531c765e4415 100644
--- a/drivers/char/svm.c
+++ b/drivers/char/svm.c
@@ -41,6 +41,7 @@
 #define SVM_IOCTL_GETHUGEINFO		0xfff6
 #define SVM_IOCTL_PIN_MEMORY		0xfff7
 #define SVM_IOCTL_GET_PHYMEMINFO	0xfff8
+#define SVM_IOCTL_GET_PHYS		0xfff9
 #define SVM_IOCTL_LOAD_FLAG		0xfffa
 #define SVM_IOCTL_SET_RC		0xfffc
 #define SVM_IOCTL_PROCESS_BIND		0xffff
@@ -141,6 +142,8 @@ static char *svm_cmd_to_string(unsigned int cmd)
 	switch (cmd) {
 	case SVM_IOCTL_PROCESS_BIND:
 		return "bind";
+	case SVM_IOCTL_GET_PHYS:
+		return "get phys";
 	case SVM_IOCTL_SET_RC:
 		return "set rc";
 	case SVM_IOCTL_PIN_MEMORY:
@@ -164,6 +167,231 @@ static char *svm_cmd_to_string(unsigned int cmd)
 	return NULL;
 }
 
+/*
+ * image word of slot
+ * SVM_IMAGE_WORD_INIT: initial value, indicating that the slot is not used.
+ * SVM_IMAGE_WORD_VALID: valid data is filled in the slot
+ * SVM_IMAGE_WORD_DONE: the DMA operation is complete when the TS uses this address,
+ * so, this slot can be freed.
+ */
+#define SVM_IMAGE_WORD_INIT	0x0
+#define SVM_IMAGE_WORD_VALID	0xaa55aa55
+#define SVM_IMAGE_WORD_DONE	0x55ff55ff
+
+/*
+ * The length of this structure must be 64 bytes, which is the agreement with the TS.
+ * And the data type and sequence cannot be changed, because the TS core reads data
+ * based on the data type and sequence.
+ * image_word: slot status. For details, see SVM_IMAGE_WORD_xxx
+ * pid: pid of process which ioctl svm device to get physical addr, it is used for
+ * verification by TS.
+ * data_type: used to determine the data type by TS. Currently, data type must be
+ * SVM_VA2PA_TYPE_DMA.
+ * char data[48]: for the data type SVM_VA2PA_TYPE_DMA, the DMA address is stored.
+ */
+struct svm_va2pa_slot {
+	int image_word;
+	int resv;
+	int pid;
+	int data_type;
+	union {
+		char user_defined_data[48];
+		struct {
+			unsigned long phys;
+			unsigned long len;
+			char reserved[32];
+		};
+	};
+};
+
+struct svm_va2pa_trunk {
+	struct svm_va2pa_slot *slots;
+	int slot_total;
+	int slot_used;
+	unsigned long *bitmap;
+	struct mutex mutex;
+};
+
+struct svm_va2pa_trunk va2pa_trunk;
+
+#define SVM_VA2PA_TRUNK_SIZE_MAX	0x3200000
+#define SVM_VA2PA_MEMORY_ALIGN		64
+#define SVM_VA2PA_SLOT_SIZE		sizeof(struct svm_va2pa_slot)
+#define SVM_VA2PA_TYPE_DMA		0x1
+#define SVM_MEM_REG			"va2pa trunk"
+#define SVM_VA2PA_CLEAN_BATCH_NUM	0x80
+
+struct device_node *svm_find_mem_reg_node(struct device *dev, const char *compat)
+{
+	int index = 0;
+	struct device_node *tmp = NULL;
+	struct device_node *np = dev->of_node;
+
+	for (; ; index++) {
+		tmp = of_parse_phandle(np, "memory-region", index);
+		if (!tmp)
+			break;
+
+		if (of_device_is_compatible(tmp, compat))
+			return tmp;
+
+		of_node_put(tmp);
+	}
+
+	return NULL;
+}
+
+static int svm_parse_trunk_memory(struct device *dev, phys_addr_t *base, unsigned long *size)
+{
+	int err;
+	struct resource r;
+	struct device_node *trunk = NULL;
+
+	trunk = svm_find_mem_reg_node(dev, SVM_MEM_REG);
+	if (!trunk) {
+		dev_err(dev, "Didn't find reserved memory\n");
+		return -EINVAL;
+	}
+
+	err = of_address_to_resource(trunk, 0, &r);
+	of_node_put(trunk);
+	if (err) {
+		dev_err(dev, "Couldn't address to resource for reserved memory\n");
+		return -ENOMEM;
+	}
+
+	*base = r.start;
+	*size = resource_size(&r);
+
+	return 0;
+}
+
+static int svm_setup_trunk(struct device *dev, phys_addr_t base, unsigned long size)
+{
+	int slot_total;
+	unsigned long *bitmap = NULL;
+	struct svm_va2pa_slot *slot = NULL;
+
+	if (!IS_ALIGNED(base, SVM_VA2PA_MEMORY_ALIGN)) {
+		dev_err(dev, "Didn't aligned to %u\n", SVM_VA2PA_MEMORY_ALIGN);
+		return -EINVAL;
+	}
+
+	if ((size == 0) || (size > SVM_VA2PA_TRUNK_SIZE_MAX)) {
+		dev_err(dev, "Size of reserved memory is not right\n");
+		return -EINVAL;
+	}
+
+	slot_total = size / SVM_VA2PA_SLOT_SIZE;
+	if (slot_total < BITS_PER_LONG)
+		return -EINVAL;
+
+	bitmap = kvcalloc(slot_total / BITS_PER_LONG, sizeof(unsigned long), GFP_KERNEL);
+	if (!bitmap) {
+		dev_err(dev, "alloc memory failed\n");
+		return -ENOMEM;
+	}
+
+	slot = ioremap(base, size);
+	if (!slot) {
+		kvfree(bitmap);
+		dev_err(dev, "Ioremap trunk failed\n");
+		return -ENXIO;
+	}
+
+	va2pa_trunk.slots = slot;
+	va2pa_trunk.slot_used = 0;
+	va2pa_trunk.slot_total = slot_total;
+	va2pa_trunk.bitmap = bitmap;
+	mutex_init(&va2pa_trunk.mutex);
+
+	return 0;
+}
+
+static void svm_remove_trunk(struct device *dev)
+{
+	iounmap(va2pa_trunk.slots);
+	kvfree(va2pa_trunk.bitmap);
+
+	va2pa_trunk.slots = NULL;
+	va2pa_trunk.bitmap = NULL;
+}
+
+static void svm_set_slot_valid(unsigned long index, unsigned long phys, unsigned long len)
+{
+	struct svm_va2pa_slot *slot = &va2pa_trunk.slots[index];
+
+	slot->phys = phys;
+	slot->len = len;
+	slot->image_word = SVM_IMAGE_WORD_VALID;
+	slot->pid = current->tgid;
+	slot->data_type = SVM_VA2PA_TYPE_DMA;
+	__bitmap_set(va2pa_trunk.bitmap, index, 1);
+	va2pa_trunk.slot_used++;
+}
+
+static void svm_set_slot_init(unsigned long index)
+{
+	struct svm_va2pa_slot *slot = &va2pa_trunk.slots[index];
+
+	slot->image_word = SVM_IMAGE_WORD_INIT;
+	__bitmap_clear(va2pa_trunk.bitmap, index, 1);
+	va2pa_trunk.slot_used--;
+}
+
+static void svm_clean_done_slots(void)
+{
+	int used = va2pa_trunk.slot_used;
+	int count = 0;
+	long temp = -1;
+	phys_addr_t addr;
+	unsigned long *bitmap = va2pa_trunk.bitmap;
+
+	for (; count < used && count < SVM_VA2PA_CLEAN_BATCH_NUM;) {
+		temp = find_next_bit(bitmap, va2pa_trunk.slot_total, temp + 1);
+		if (temp == va2pa_trunk.slot_total)
+			break;
+
+		count++;
+		if (va2pa_trunk.slots[temp].image_word != SVM_IMAGE_WORD_DONE)
+			continue;
+
+		addr = (phys_addr_t)va2pa_trunk.slots[temp].phys;
+		put_page(pfn_to_page(PHYS_PFN(addr)));
+		svm_set_slot_init(temp);
+	}
+}
+
+static int svm_find_slot_init(unsigned long *index)
+{
+	int temp;
+	unsigned long *bitmap = va2pa_trunk.bitmap;
+
+	temp = find_first_zero_bit(bitmap, va2pa_trunk.slot_total);
+	if (temp == va2pa_trunk.slot_total)
+		return -ENOSPC;
+
+	*index = temp;
+	return 0;
+}
+
+static int svm_va2pa_trunk_init(struct device *dev)
+{
+	int err;
+	phys_addr_t base;
+	unsigned long size;
+
+	err = svm_parse_trunk_memory(dev, &base, &size);
+	if (err)
+		return err;
+
+	err = svm_setup_trunk(dev, base, size);
+	if (err)
+		return err;
+
+	return 0;
+}
+
 static struct svm_process *find_svm_process(unsigned long asid)
 {
 	struct rb_node *node = svm_process_root.rb_node;
@@ -805,6 +1033,78 @@ static pte_t *svm_walk_pt(unsigned long addr, unsigned long *page_size,
 	return svm_get_pte(vma, pud, addr, page_size, offset);
 }
 
+static int svm_get_phys(unsigned long __user *arg)
+{
+	int err;
+	pte_t *ptep = NULL;
+	pte_t pte;
+	unsigned long index = 0;
+	struct page *page;
+	unsigned long addr, phys, offset;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = NULL;
+	unsigned long len;
+
+	if (!acpi_disabled)
+		return -EPERM;
+
+	if (get_user(addr, arg))
+		return -EFAULT;
+
+	down_read(&mm->mmap_lock);
+	ptep = svm_walk_pt(addr, NULL, &offset);
+	if (!ptep) {
+		up_read(&mm->mmap_lock);
+		return -EINVAL;
+	}
+
+	pte = READ_ONCE(*ptep);
+	if (!pte_present(pte) || !(pfn_in_present_section(pte_pfn(pte)))) {
+		up_read(&mm->mmap_lock);
+		return -EINVAL;
+	}
+
+	page = pte_page(pte);
+	get_page(page);
+
+	phys = PFN_PHYS(pte_pfn(pte)) + offset;
+
+	/* fix ts problem, which need the len to check out memory */
+	len = 0;
+	vma = find_vma(mm, addr);
+	if (vma)
+		len = vma->vm_end - addr;
+
+	up_read(&mm->mmap_lock);
+
+	mutex_lock(&va2pa_trunk.mutex);
+	svm_clean_done_slots();
+	if (va2pa_trunk.slot_used == va2pa_trunk.slot_total) {
+		err = -ENOSPC;
+		goto err_mutex_unlock;
+	}
+
+	err = svm_find_slot_init(&index);
+	if (err)
+		goto err_mutex_unlock;
+
+	svm_set_slot_valid(index, phys, len);
+
+	err = put_user(index * SVM_VA2PA_SLOT_SIZE, (unsigned long __user *)arg);
+	if (err)
+		goto err_slot_init;
+
+	mutex_unlock(&va2pa_trunk.mutex);
+	return 0;
+
+err_slot_init:
+	svm_set_slot_init(index);
+err_mutex_unlock:
+	mutex_unlock(&va2pa_trunk.mutex);
+	put_page(page);
+	return err;
+}
+
 static struct bus_type svm_bus_type = {
 	.name = "svm_bus",
 };
@@ -1303,6 +1603,9 @@ static long svm_ioctl(struct file *file, unsigned int cmd,
 			return -EFAULT;
 		}
 		break;
+	case SVM_IOCTL_GET_PHYS:
+		err = svm_get_phys((unsigned long __user *)arg);
+		break;
 	case SVM_IOCTL_SET_RC:
 		err = svm_set_rc((unsigned long __user *)arg);
 		break;
@@ -1767,10 +2070,15 @@ static int svm_device_probe(struct platform_device *pdev)
 	if (err)
 		dev_warn(dev, "Cannot get l2buff\n");
 
+	if (svm_va2pa_trunk_init(dev)) {
+		dev_err(dev, "failed to init va2pa trunk\n");
+		goto err_unregister_misc;
+	}
+
 	err = svm_dt_init_core(sdev, np);
 	if (err) {
 		dev_err(dev, "failed to init dt cores\n");
-		goto err_unregister_misc;
+		goto err_remove_trunk;
 	}
 
 	probe_index++;
@@ -1780,6 +2088,9 @@ static int svm_device_probe(struct platform_device *pdev)
 
 	return err;
 
+err_remove_trunk:
+	svm_remove_trunk(dev);
+
 err_unregister_misc:
 	misc_deregister(&sdev->miscdev);