From: Wang Wensheng <wangwensheng4@huawei.com>

ascend inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SON8
CVE: NA
-------------------------------------------------
MAP_SHARE_POOL and MAP_FIXED_NOREPLACE have the same value, so the kernel cannot tell the two mmap flags apart. Redefine MAP_SHARE_POOL to an unused bit to fix the conflict.
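For reference, mainline defines MAP_FIXED_NOREPLACE as 0x100000 in include/uapi/asm-generic/mman-common.h, which is exactly the bit the old MAP_SHARE_POOL definition used (illustrative side-by-side, not part of the diff below):

	#define MAP_FIXED_NOREPLACE	0x100000	/* mainline mmap flag */
	#define MAP_SHARE_POOL		0x100000	/* old value: conflict */
	#define MAP_SHARE_POOL		0x200000	/* new value: distinct bit */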
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/share_pool.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h
index cd4c305449dd9..88ef96ac0bfb3 100644
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -170,7 +170,7 @@ struct sp_walk_data {
 	pmd_t *pmd;
 };
 
-#define MAP_SHARE_POOL		0x100000
+#define MAP_SHARE_POOL		0x200000
 
 #define MMAP_TOP_4G_SIZE	0x100000000UL
From: Wang Wensheng <wangwensheng4@huawei.com>

ascend inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SON8
CVE: NA
-------------------------------------------------
The maximum number of devices supported by share_pool is static. Make it extendable for later use.
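As an illustrative sketch (the base address below is an arbitrary example, not taken from this patch), per-device DVPP ranges can now be configured independently instead of being gated by one global enable_sp_dev_addr flag:

	/* configure device 1's DVPP range; fails if already configured */
	if (!sp_config_dvpp_range(0x100000000000UL, MMAP_SHARE_POOL_16G_SIZE,
				  1, current->tgid))
		pr_err("failed to config dvpp range for device 1\n");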
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/share_pool.h |  2 +-
 mm/share_pool.c            | 42 +++++++++++++++++++++++++++-----------
 2 files changed, 31 insertions(+), 13 deletions(-)

diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h
index 88ef96ac0bfb3..04cadd9ae5f9d 100644
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -24,7 +24,7 @@
 #define SPG_ID_AUTO_MAX	199999
 #define SPG_ID_AUTO	200000	/* generate group id automatically */
 
-#define MAX_DEVID 2	/* the max num of Da-vinci devices */
+#define MAX_DEVID 8	/* the max num of Da-vinci devices */
 
 extern int sysctl_share_pool_hugepage_enable;
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 0ba516e16ef29..ec59475ea4a91 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -97,10 +97,15 @@ static int share_pool_group_mode = SINGLE_GROUP_MODE;
 
 static int system_group_count;
 
-static bool enable_sp_dev_addr;
+static unsigned int sp_device_number;
 static unsigned long sp_dev_va_start[MAX_DEVID];
 static unsigned long sp_dev_va_size[MAX_DEVID];
 
+static bool is_sp_dev_addr_enabled(int device_id)
+{
+	return sp_dev_va_size[device_id];
+}
+
 /* idr of all sp_groups */
 static DEFINE_IDR(sp_group_idr);
 /* rw semaphore for sp_group_idr and mm->sp_group_master */
@@ -942,7 +947,7 @@ static bool is_device_addr(unsigned long addr)
 {
 	int i;
 
-	for (i = 0; i < MAX_DEVID; i++) {
+	for (i = 0; i < sp_device_number; i++) {
 		if (addr >= sp_dev_va_start[i] &&
 		    addr < sp_dev_va_start[i] + sp_dev_va_size[i])
 			return true;
@@ -960,7 +965,7 @@ static loff_t addr_offset(struct sp_area *spa)
 	}
 
 	addr = spa->va_start;
-	if (!enable_sp_dev_addr || !is_device_addr(addr))
+	if (!is_device_addr(addr))
 		return (loff_t)(addr - MMAP_SHARE_POOL_START);
 
 	return (loff_t)(addr - sp_dev_va_start[spa->node_id]);
@@ -1621,13 +1626,13 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
 	int node_id = (flags >> DEVICE_ID_SHIFT) & DEVICE_ID_MASK;
 
 	if (!is_online_node_id(node_id) ||
-	    node_id < 0 || node_id >= MAX_DEVID) {
+	    node_id < 0 || node_id >= sp_device_number) {
 		pr_err_ratelimited("invalid numa node id %d\n", node_id);
 		return ERR_PTR(-EINVAL);
 	}
 
 	if ((flags & SP_DVPP)) {
-		if (!enable_sp_dev_addr) {
+		if (!is_sp_dev_addr_enabled(node_id)) {
 			vstart = MMAP_SHARE_POOL_16G_START +
 				 node_id * MMAP_SHARE_POOL_16G_SIZE;
 			vend = vstart + MMAP_SHARE_POOL_16G_SIZE;
@@ -3635,13 +3640,15 @@ EXPORT_SYMBOL_GPL(sp_unregister_notifier);
  */
 bool sp_config_dvpp_range(size_t start, size_t size, int device_id, int pid)
 {
-	if (!is_online_node_id(device_id) || device_id < 0 || device_id >= MAX_DEVID ||
-	    pid < 0 || size <= 0 || size > MMAP_SHARE_POOL_16G_SIZE || enable_sp_dev_addr)
+	if (pid < 0 ||
+	    size <= 0 || size > MMAP_SHARE_POOL_16G_SIZE ||
+	    device_id < 0 || device_id >= sp_device_number ||
+	    !is_online_node_id(device_id) ||
+	    is_sp_dev_addr_enabled(device_id))
 		return false;
 
 	sp_dev_va_start[device_id] = start;
 	sp_dev_va_size[device_id] = size;
-	enable_sp_dev_addr = true;
 	return true;
 }
 EXPORT_SYMBOL_GPL(sp_config_dvpp_range);
@@ -3656,7 +3663,7 @@ static bool is_sp_normal_addr(unsigned long addr)
 {
 	return addr >= MMAP_SHARE_POOL_START &&
 		addr < MMAP_SHARE_POOL_16G_START +
-			MAX_DEVID * MMAP_SHARE_POOL_16G_SIZE;
+			sp_device_number * MMAP_SHARE_POOL_16G_SIZE;
 }
 
 /**
@@ -3667,9 +3674,6 @@ static bool is_sp_normal_addr(unsigned long addr)
  */
 bool is_sharepool_addr(unsigned long addr)
 {
-	if (!enable_sp_dev_addr)
-		return is_sp_normal_addr(addr);
-
 	return is_sp_normal_addr(addr) || is_device_addr(addr);
 }
 EXPORT_SYMBOL_GPL(is_sharepool_addr);
@@ -4504,6 +4508,18 @@ static int __init enable_share_pool(char *s)
 }
 __setup("enable_ascend_share_pool", enable_share_pool);
 
+static void __init sp_device_number_detect(void)
+{
+	/* NOTE: TO BE COMPLETED */
+	sp_device_number = 4;
+
+	if (sp_device_number > MAX_DEVID) {
+		pr_warn("sp_device_number %d exceed, truncate it to %d\n",
+			sp_device_number, MAX_DEVID);
+		sp_device_number = MAX_DEVID;
+	}
+}
+
 static int __init share_pool_init(void)
 {
 	/* lockless, as init kthread has no sp operation else */
@@ -4512,6 +4528,8 @@ static int __init share_pool_init(void)
 	if (IS_ERR(spg_none) || !spg_none)
 		goto fail;
 
+	sp_device_number_detect();
+
 	return 0;
 fail:
 	pr_err("Ascend share pool initialization failed\n");
From: Wang Wensheng <wangwensheng4@huawei.com>

ascend inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SON8
CVE: NA
-------------------------------------------------
device_id is used by DVPP to select the correct virtual address space, while node_id specifies the NUMA node to allocate physical memory from. In theory the two do not have to be the same.

In practice, a process always runs on the NUMA nodes corresponding to the device it uses, and the node with the same id as the device always belongs to that device, so using device_id as node_id to allocate memory works.

However, a device does not always own exactly one NUMA node, and currently there is no way to use the device's other nodes.

Introduce a new flag SP_SPEC_NODE_ID and a node_id bit-region in sp_flags for callers that want to use another node belonging to a device, as sketched below. To specify the node, add both the new flag and the node_id to sp_flags when calling sp_alloc() or sp_make_share_k2u(); otherwise the node with the same id as the device is used.
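A hypothetical caller would build sp_flags like this (using the DEVICE_ID_SHIFT/NODE_ID_SHIFT layout added below; the ids are example values only):

	/* allocate from node 3, which belongs to device 1 */
	unsigned long sp_flags = SP_DVPP | SP_SPEC_NODE_ID |
				 (1UL << DEVICE_ID_SHIFT) |	/* device_id = 1 */
				 (3UL << NODE_ID_SHIFT);	/* node_id = 3 */
	void *addr = sp_alloc(size, sp_flags, spg_id);

Without SP_SPEC_NODE_ID, the node_id bits are ignored and device_id (1) is used as the memory node, as before.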
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/share_pool.h | 19 ++++++++++++++++---
 mm/share_pool.c            | 28 +++++++++++++---------------
 2 files changed, 29 insertions(+), 18 deletions(-)

diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h
index 04cadd9ae5f9d..669d32f9a0926 100644
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -7,14 +7,27 @@
 #include <linux/vmalloc.h>
 #include <linux/printk.h>
 #include <linux/hashtable.h>
+#include <linux/numa.h>
 
 #define SP_HUGEPAGE		(1 << 0)
 #define SP_HUGEPAGE_ONLY	(1 << 1)
 #define SP_DVPP			(1 << 2)
-#define DEVICE_ID_MASK		0x3ff
-#define DEVICE_ID_SHIFT		32
+#define SP_SPEC_NODE_ID		(1 << 3)
+
+#define DEVICE_ID_BITS		4UL
+#define DEVICE_ID_MASK		((1UL << DEVICE_ID_BITS) - 1UL)
+#define DEVICE_ID_SHIFT		32UL
+#define NODE_ID_BITS		NODES_SHIFT
+#define NODE_ID_MASK		((1UL << NODE_ID_BITS) - 1UL)
+#define NODE_ID_SHIFT		(DEVICE_ID_SHIFT + DEVICE_ID_BITS)
+
 #define SP_FLAG_MASK		(SP_HUGEPAGE | SP_HUGEPAGE_ONLY | SP_DVPP | \
-				 (_AC(DEVICE_ID_MASK, UL) << DEVICE_ID_SHIFT))
+				 SP_SPEC_NODE_ID | \
+				 (DEVICE_ID_MASK << DEVICE_ID_SHIFT) | \
+				 (NODE_ID_MASK << NODE_ID_SHIFT))
+
+#define sp_flags_device_id(flags) (((flags) >> DEVICE_ID_SHIFT) & DEVICE_ID_MASK)
+#define sp_flags_node_id(flags) (((flags) >> NODE_ID_SHIFT) & NODE_ID_MASK)
 
 #define SPG_ID_NONE	(-1)	/* not associated with sp_group, only for specified thread */
 #define SPG_ID_DEFAULT	0	/* use the spg id of current thread */
diff --git a/mm/share_pool.c b/mm/share_pool.c
index ec59475ea4a91..400a702b30eb5 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -522,6 +522,7 @@ struct sp_area {
 	unsigned long kva;	/* shared kva */
 	pid_t applier;		/* the original applier process */
 	int node_id;		/* memory node */
+	int device_id;
 };
 static DEFINE_SPINLOCK(sp_area_lock);
 static struct rb_root sp_area_root = RB_ROOT;
@@ -934,13 +935,7 @@ EXPORT_SYMBOL_GPL(mg_sp_group_id_by_pid);
 
 static bool is_online_node_id(int node_id)
 {
-	pg_data_t *pgdat;
-
-	for_each_online_pgdat(pgdat) {
-		if (node_id == pgdat->node_id)
-			return true;
-	}
-	return false;
+	return node_id >= 0 && node_id < MAX_NUMNODES && node_online(node_id);
 }
 
 static bool is_device_addr(unsigned long addr)
@@ -968,7 +963,7 @@ static loff_t addr_offset(struct sp_area *spa)
 	if (!is_device_addr(addr))
 		return (loff_t)(addr - MMAP_SHARE_POOL_START);
 
-	return (loff_t)(addr - sp_dev_va_start[spa->node_id]);
+	return (loff_t)(addr - sp_dev_va_start[spa->device_id]);
 }
 
 static struct sp_group *create_spg(int spg_id)
@@ -1623,22 +1618,24 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
 	unsigned long vend = MMAP_SHARE_POOL_16G_START;
 	unsigned long addr;
 	unsigned long size_align = PMD_ALIGN(size); /* va aligned to 2M */
-	int node_id = (flags >> DEVICE_ID_SHIFT) & DEVICE_ID_MASK;
+	int device_id, node_id;
+
+	device_id = sp_flags_device_id(flags);
+	node_id = flags & SP_SPEC_NODE_ID ? sp_flags_node_id(flags) : device_id;
 
-	if (!is_online_node_id(node_id) ||
-	    node_id < 0 || node_id >= sp_device_number) {
+	if (!is_online_node_id(node_id)) {
 		pr_err_ratelimited("invalid numa node id %d\n", node_id);
 		return ERR_PTR(-EINVAL);
 	}
 
 	if ((flags & SP_DVPP)) {
-		if (!is_sp_dev_addr_enabled(node_id)) {
+		if (!is_sp_dev_addr_enabled(device_id)) {
 			vstart = MMAP_SHARE_POOL_16G_START +
-				 node_id * MMAP_SHARE_POOL_16G_SIZE;
+				 device_id * MMAP_SHARE_POOL_16G_SIZE;
 			vend = vstart + MMAP_SHARE_POOL_16G_SIZE;
 		} else {
-			vstart = sp_dev_va_start[node_id];
-			vend = vstart + sp_dev_va_size[node_id];
+			vstart = sp_dev_va_start[device_id];
+			vend = vstart + sp_dev_va_size[device_id];
 		}
 	}
 
@@ -1737,6 +1734,7 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
 	spa->kva = 0;	/* NULL pointer */
 	spa->applier = applier;
 	spa->node_id = node_id;
+	spa->device_id = device_id;
 
 	spa_inc_usage(spa);
 	__insert_sp_area(spa);
From: Wang Wensheng <wangwensheng4@huawei.com>

ascend inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SON8
CVE: NA
-------------------------------------------------
Use the device_id in sp_flags to select the correct DVPP vspace range when the SP_DVPP flag is specified. To let the device-id bits through, sp_k2u_prepare() now accepts the full SP_FLAG_MASK instead of only SP_DVPP, and clears SP_HUGEPAGE before proceeding.
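A hypothetical k2u caller could then pass the device id in sp_flags (example values; the sp_make_share_k2u() arguments are otherwise unchanged):

	/* share a kernel buffer to user space in device 1's DVPP range */
	unsigned long sp_flags = SP_DVPP | (1UL << DEVICE_ID_SHIFT);
	void *uva = sp_make_share_k2u(kva, size, sp_flags, pid, spg_id);

Note that sp_k2u_prepare() clears SP_HUGEPAGE from the flags, so requesting hugepages has no effect on the k2u path.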
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/share_pool.c b/mm/share_pool.c
index 400a702b30eb5..ef74de39053b2 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -2856,10 +2856,11 @@ static int sp_k2u_prepare(unsigned long kva, unsigned long size,
 
 	trace_sp_k2u_begin(kc);
 
-	if (sp_flags & ~SP_DVPP) {
+	if (sp_flags & ~SP_FLAG_MASK) {
 		pr_err_ratelimited("k2u sp_flags %lx error\n", sp_flags);
 		return -EINVAL;
 	}
+	sp_flags &= ~SP_HUGEPAGE;
 
 	if (!current->mm) {
 		pr_err_ratelimited("k2u: kthread is not allowed\n");
From: Yuan Can <yuancan@huawei.com>

ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4S786
CVE: NA
-------------------------------------------------------
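Support multiple va2pa trunks, one per device: the reserved trunk memory is parsed as an array of resources, and svm_get_phys() now takes a struct addr_trans_args carrying the virtual address to translate, a user pointer that receives the slot offset, and the device_id that selects the trunk.

From user space, the new calling convention looks roughly like this (the ioctl request name SVM_GET_PHYS and the fd are illustrative; the actual request code is not defined in this patch):

	struct addr_trans_args args = {
		.vptr = (unsigned long)buf,	/* VA to translate */
		.pptr = &slot_offset,		/* receives slot offset */
		.device_id = 1,			/* selects va2pa trunk 1 */
	};

	if (ioctl(svm_fd, SVM_GET_PHYS, &args))	/* hypothetical request */
		perror("svm get phys");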
Signed-off-by: Yuan Can <yuancan@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/char/svm.c | 157 +++++++++++++++++++++++++++++----------------
 1 file changed, 102 insertions(+), 55 deletions(-)

diff --git a/drivers/char/svm.c b/drivers/char/svm.c
index dd0a9babcbee2..098b84529d0e5 100644
--- a/drivers/char/svm.c
+++ b/drivers/char/svm.c
@@ -146,6 +146,12 @@ struct spalloc {
 	unsigned long flag;
 };
 
+struct addr_trans_args {
+	unsigned long vptr;
+	unsigned long *pptr;
+	unsigned int device_id;
+};
+
 static struct bus_type svm_bus_type = {
 	.name = "svm_bus",
 };
@@ -225,16 +231,19 @@ struct svm_va2pa_trunk {
 	int slot_used;
 	unsigned long *bitmap;
 	struct mutex mutex;
+	phys_addr_t base;
+	unsigned long size;
 };
 
-struct svm_va2pa_trunk va2pa_trunk;
-
 #define SVM_VA2PA_TRUNK_SIZE_MAX	0x3200000
 #define SVM_VA2PA_MEMORY_ALIGN		64
 #define SVM_VA2PA_SLOT_SIZE		sizeof(struct svm_va2pa_slot)
 #define SVM_VA2PA_TYPE_DMA		0x1
 #define SVM_MEM_REG			"va2pa trunk"
 #define SVM_VA2PA_CLEAN_BATCH_NUM	0x80
+#define SVM_VA2PA_TRUNK_COUNT_MAX	0x8
+
+static struct svm_va2pa_trunk va2pa_trunk[SVM_VA2PA_TRUNK_COUNT_MAX];
 
 struct device_node *svm_find_mem_reg_node(struct device *dev, const char *compat)
 {
@@ -256,9 +265,9 @@ struct device_node *svm_find_mem_reg_node(struct device *dev, const char *compat)
 	return NULL;
 }
 
-static int svm_parse_trunk_memory(struct device *dev, phys_addr_t *base, unsigned long *size)
+static int svm_parse_trunk_memory(struct device *dev)
 {
-	int err;
+	int err, count;
 	struct resource r;
 	struct device_node *trunk = NULL;
 
@@ -268,24 +277,30 @@ static int svm_parse_trunk_memory(struct device *dev, phys_addr_t *base, unsigned long *size)
 		return -EINVAL;
 	}
 
-	err = of_address_to_resource(trunk, 0, &r);
-	of_node_put(trunk);
-	if (err) {
-		dev_err(dev, "Couldn't address to resource for reserved memory\n");
-		return -ENOMEM;
+	for (count = 0; count < SVM_VA2PA_TRUNK_COUNT_MAX; count++) {
+		err = of_address_to_resource(trunk, count, &r);
+		if (err)
+			break;
+
+		va2pa_trunk[count].base = r.start;
+		va2pa_trunk[count].size = resource_size(&r);
 	}
 
-	*base = r.start;
-	*size = resource_size(&r);
+	if (!count) {
+		dev_err(dev, "Couldn't address to resource for reserved memory\n");
+		return -ENODEV;
+	}
 
 	return 0;
 }
 
-static int svm_setup_trunk(struct device *dev, phys_addr_t base, unsigned long size)
+static int __svm_setup_trunk(struct device *dev, struct svm_va2pa_trunk *trunk)
 {
 	int slot_total;
 	unsigned long *bitmap = NULL;
 	struct svm_va2pa_slot *slot = NULL;
+	phys_addr_t base = trunk->base;
+	unsigned long size = trunk->size;
 
 	if (!IS_ALIGNED(base, SVM_VA2PA_MEMORY_ALIGN)) {
 		dev_err(dev, "Didn't aligned to %u\n", SVM_VA2PA_MEMORY_ALIGN);
@@ -314,76 +329,100 @@ static int svm_setup_trunk(struct device *dev, phys_addr_t base, unsigned long size)
 		return -ENXIO;
 	}
 
-	va2pa_trunk.slots = slot;
-	va2pa_trunk.slot_used = 0;
-	va2pa_trunk.slot_total = slot_total;
-	va2pa_trunk.bitmap = bitmap;
-	mutex_init(&va2pa_trunk.mutex);
+	trunk->slots = slot;
+	trunk->slot_used = 0;
+	trunk->slot_total = slot_total;
+	trunk->bitmap = bitmap;
+	mutex_init(&trunk->mutex);
 
 	return 0;
 }
 
+static int svm_setup_trunk(struct device *dev)
+{
+	int err = 0;
+	int count;
+
+	for (count = 0; count < SVM_VA2PA_TRUNK_COUNT_MAX; count++) {
+		if (!va2pa_trunk[count].base)
+			break;
+
+		err = __svm_setup_trunk(dev, &va2pa_trunk[count]);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
 static void svm_remove_trunk(struct device *dev)
 {
-	iounmap(va2pa_trunk.slots);
-	kvfree(va2pa_trunk.bitmap);
+	int count;
+
+	for (count = 0; count < SVM_VA2PA_TRUNK_COUNT_MAX; count++) {
+		if (!va2pa_trunk[count].base)
+			break;
 
-	va2pa_trunk.slots = NULL;
-	va2pa_trunk.bitmap = NULL;
+		iounmap(va2pa_trunk[count].slots);
+		kvfree(va2pa_trunk[count].bitmap);
+		va2pa_trunk[count].slots = NULL;
+		va2pa_trunk[count].bitmap = NULL;
+	}
 }
 
-static void svm_set_slot_valid(unsigned long index, unsigned long phys, unsigned long len)
+static void svm_set_slot_valid(struct svm_va2pa_trunk *trunk, unsigned long index,
+			       unsigned long phys, unsigned long len)
 {
-	struct svm_va2pa_slot *slot = &va2pa_trunk.slots[index];
+	struct svm_va2pa_slot *slot = &trunk->slots[index];
 
 	slot->phys = phys;
 	slot->len = len;
 	slot->image_word = SVM_IMAGE_WORD_VALID;
 	slot->pid = current->tgid;
 	slot->data_type = SVM_VA2PA_TYPE_DMA;
-	__bitmap_set(va2pa_trunk.bitmap, index, 1);
-	va2pa_trunk.slot_used++;
+	__bitmap_set(trunk->bitmap, index, 1);
+	trunk->slot_used++;
 }
 
-static void svm_set_slot_init(unsigned long index)
+static void svm_set_slot_init(struct svm_va2pa_trunk *trunk, unsigned long index)
 {
-	struct svm_va2pa_slot *slot = &va2pa_trunk.slots[index];
+	struct svm_va2pa_slot *slot = &trunk->slots[index];
 
 	slot->image_word = SVM_IMAGE_WORD_INIT;
-	__bitmap_clear(va2pa_trunk.bitmap, index, 1);
-	va2pa_trunk.slot_used--;
+	__bitmap_clear(trunk->bitmap, index, 1);
+	trunk->slot_used--;
 }
 
-static void svm_clean_done_slots(void)
+static void svm_clean_done_slots(struct svm_va2pa_trunk *trunk)
 {
-	int used = va2pa_trunk.slot_used;
+	int used = trunk->slot_used;
 	int count = 0;
 	long temp = -1;
 	phys_addr_t addr;
-	unsigned long *bitmap = va2pa_trunk.bitmap;
+	unsigned long *bitmap = trunk->bitmap;
 
 	for (; count < used && count < SVM_VA2PA_CLEAN_BATCH_NUM;) {
-		temp = find_next_bit(bitmap, va2pa_trunk.slot_total, temp + 1);
-		if (temp == va2pa_trunk.slot_total)
+		temp = find_next_bit(bitmap, trunk->slot_total, temp + 1);
+		if (temp == trunk->slot_total)
 			break;
 
 		count++;
-		if (va2pa_trunk.slots[temp].image_word != SVM_IMAGE_WORD_DONE)
+		if (trunk->slots[temp].image_word != SVM_IMAGE_WORD_DONE)
 			continue;
 
-		addr = (phys_addr_t)va2pa_trunk.slots[temp].phys;
+		addr = (phys_addr_t)trunk->slots[temp].phys;
 		put_page(pfn_to_page(PHYS_PFN(addr)));
-		svm_set_slot_init(temp);
+		svm_set_slot_init(trunk, temp);
 	}
 }
 
-static int svm_find_slot_init(unsigned long *index)
+static int svm_find_slot_init(struct svm_va2pa_trunk *trunk, unsigned long *index)
 {
 	int temp;
-	unsigned long *bitmap = va2pa_trunk.bitmap;
+	unsigned long *bitmap = trunk->bitmap;
 
-	temp = find_first_zero_bit(bitmap, va2pa_trunk.slot_total);
-	if (temp == va2pa_trunk.slot_total)
+	temp = find_first_zero_bit(bitmap, trunk->slot_total);
+	if (temp == trunk->slot_total)
 		return -ENOSPC;
 
 	*index = temp;
@@ -393,14 +432,14 @@ static int svm_find_slot_init(unsigned long *index)
 static int svm_va2pa_trunk_init(struct device *dev)
 {
 	int err;
-	phys_addr_t base;
-	unsigned long size;
 
-	err = svm_parse_trunk_memory(dev, &base, &size);
+	memset(va2pa_trunk, 0, sizeof(va2pa_trunk));
+
+	err = svm_parse_trunk_memory(dev);
 	if (err)
 		return err;
 
-	err = svm_setup_trunk(dev, base, size);
+	err = svm_setup_trunk(dev);
 	if (err)
 		return err;
 
@@ -1361,17 +1400,21 @@ static int svm_get_phys(unsigned long __user *arg)
 	pte_t pte;
 	unsigned long index = 0;
 	struct page *page;
+	struct addr_trans_args args;
 	unsigned long addr, phys, offset;
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma = NULL;
 	unsigned long len;
+	unsigned int trunk_id;
+	struct svm_va2pa_trunk *trunk;
 
 	if (!acpi_disabled)
 		return -EPERM;
 
-	if (get_user(addr, arg))
+	if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
 		return -EFAULT;
 
+	addr = args.vptr;
 	down_read(&mm->mmap_sem);
 	ptep = svm_walk_pt(addr, NULL, &offset);
 	if (!ptep) {
@@ -1398,30 +1441,34 @@ static int svm_get_phys(unsigned long __user *arg)
 
 	up_read(&mm->mmap_sem);
 
-	mutex_lock(&va2pa_trunk.mutex);
-	svm_clean_done_slots();
-	if (va2pa_trunk.slot_used == va2pa_trunk.slot_total) {
+	trunk_id = args.device_id;
+	if (trunk_id >= SVM_VA2PA_TRUNK_COUNT_MAX)
+		return -EINVAL;
+
+	trunk = &va2pa_trunk[trunk_id];
+	mutex_lock(&trunk->mutex);
+	svm_clean_done_slots(trunk);
+	if (trunk->slot_used == trunk->slot_total) {
 		err = -ENOSPC;
 		goto err_mutex_unlock;
 	}
 
-	err = svm_find_slot_init(&index);
+	err = svm_find_slot_init(trunk, &index);
 	if (err)
 		goto err_mutex_unlock;
 
-	svm_set_slot_valid(index, phys, len);
+	svm_set_slot_valid(trunk, index, phys, len);
 
-	err = put_user(index * SVM_VA2PA_SLOT_SIZE, (unsigned long __user *)arg);
+	err = put_user(index * SVM_VA2PA_SLOT_SIZE, (unsigned long __user *)args.pptr);
 	if (err)
 		goto err_slot_init;
 
-	mutex_unlock(&va2pa_trunk.mutex);
+	mutex_unlock(&trunk->mutex);
 	return 0;
 
 err_slot_init:
-	svm_set_slot_init(index);
+	svm_set_slot_init(trunk, index);
 err_mutex_unlock:
-	mutex_unlock(&va2pa_trunk.mutex);
+	mutex_unlock(&trunk->mutex);
 	put_page(page);
 	return err;
 }