From: Jason Gunthorpe <jgg@mellanox.com>
stable inclusion
from linux-4.19.115
commit abc4ea7f1345398261295345fd9b30243e4f4f8e
prepare for fixing CVE-2020-36385

--------------------------------
commit 7c11910783a1ea17e88777552ef146cace607b3c upstream.
The rdma_cm must be used single threaded.
This appears to be a bug in the design, as it does have lots of locking that seems like it should allow concurrency. However, when all is said and done, every single place that uses the cma_exch() scheme is broken, and all the unlocked reads of the cm_id data from the ucma are wrong too.
syzkaller has been finding endless bugs related to this.
Fixing this in any elegant way would be an enormous amount of work. Instead, take a very big hammer and put a mutex around everything to do with the ucma_context at the top of every syscall.
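For readers unfamiliar with the pattern, the following standalone userspace sketch (pthreads, invented names, not the kernel code itself) shows the shape of the fix: every entry point takes a per-context mutex before calling into a layer that is only safe for single-threaded use per object.

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-in for the rdma_cm: safe only for single-threaded use per ctx. */
    struct demo_ctx {
        pthread_mutex_t mutex;  /* serializes every call into the lower layer */
        int state;
    };

    static void lower_layer_op(struct demo_ctx *ctx, int new_state)
    {
        ctx->state = new_state; /* would race if entered concurrently */
    }

    /* Each "syscall" wraps its lower-layer call in ctx->mutex (the big hammer). */
    static int demo_bind(struct demo_ctx *ctx, int addr)
    {
        pthread_mutex_lock(&ctx->mutex);
        lower_layer_op(ctx, addr);
        pthread_mutex_unlock(&ctx->mutex);
        return 0;
    }

    static int demo_connect(struct demo_ctx *ctx, int peer)
    {
        pthread_mutex_lock(&ctx->mutex);
        lower_layer_op(ctx, peer);
        pthread_mutex_unlock(&ctx->mutex);
        return 0;
    }

    int main(void)
    {
        struct demo_ctx ctx = { .mutex = PTHREAD_MUTEX_INITIALIZER, .state = 0 };

        demo_bind(&ctx, 1);
        demo_connect(&ctx, 2);
        printf("state = %d\n", ctx.state);
        return 0;
    }

The cost is that operations on the same context serialize, which is acceptable here since the rdma_cm already assumes single-threaded use.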
Fixes: 75216638572f ("RDMA/cma: Export rdma cm interface to userspace")
Link: https://lore.kernel.org/r/20200218210432.GA31966@ziepe.ca
Reported-by: syzbot+adb15cf8c2798e4e0db4@syzkaller.appspotmail.com
Reported-by: syzbot+e5579222b6a3edd96522@syzkaller.appspotmail.com
Reported-by: syzbot+4b628fcc748474003457@syzkaller.appspotmail.com
Reported-by: syzbot+29ee8f76017ce6cf03da@syzkaller.appspotmail.com
Reported-by: syzbot+6956235342b7317ec564@syzkaller.appspotmail.com
Reported-by: syzbot+b358909d8d01556b790b@syzkaller.appspotmail.com
Reported-by: syzbot+6b46b135602a3f3ac99e@syzkaller.appspotmail.com
Reported-by: syzbot+8458d13b13562abf6b77@syzkaller.appspotmail.com
Reported-by: syzbot+bd034f3fdc0402e942ed@syzkaller.appspotmail.com
Reported-by: syzbot+c92378b32760a4eef756@syzkaller.appspotmail.com
Reported-by: syzbot+68b44a1597636e0b342c@syzkaller.appspotmail.com
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/infiniband/core/ucma.c | 49 ++++++++++++++++++++++++++++++++--
 1 file changed, 47 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 01d68ed46c1b6..2acc30c3d5b2d 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -89,6 +89,7 @@ struct ucma_context {
 	struct ucma_file *file;
 	struct rdma_cm_id *cm_id;
+	struct mutex mutex;
 	u64 uid;
 	struct list_head list;
@@ -215,6 +216,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
 	init_completion(&ctx->comp);
 	INIT_LIST_HEAD(&ctx->mc_list);
 	ctx->file = file;
+	mutex_init(&ctx->mutex);
 	mutex_lock(&mut);
 	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
@@ -596,6 +598,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
 	}
 	events_reported = ctx->events_reported;
+	mutex_destroy(&ctx->mutex);
 	kfree(ctx);
 	return events_reported;
 }
@@ -665,7 +668,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+	mutex_unlock(&ctx->mutex);
+
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -688,7 +694,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -712,8 +720,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
 				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -738,8 +748,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
 				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -759,7 +771,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -848,6 +862,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
+	mutex_lock(&ctx->mutex);
 	memset(&resp, 0, sizeof resp);
 	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
@@ -871,6 +886,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
 out:
+	mutex_unlock(&ctx->mutex);
 	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
 		ret = -EFAULT;
@@ -1022,6 +1038,7 @@ static ssize_t ucma_query(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
+	mutex_lock(&ctx->mutex);
 	switch (cmd.option) {
 	case RDMA_USER_CM_QUERY_ADDR:
 		ret = ucma_query_addr(ctx, response, out_len);
@@ -1036,6 +1053,7 @@ static ssize_t ucma_query(struct ucma_file *file,
 		ret = -ENOSYS;
 		break;
 	}
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
@@ -1076,7 +1094,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
 		return PTR_ERR(ctx);
 	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_connect(ctx->cm_id, &conn_param);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1097,7 +1117,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
 	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
 		       cmd.backlog : max_backlog;
+	mutex_lock(&ctx->mutex);
 	ret = rdma_listen(ctx->cm_id, ctx->backlog);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1120,13 +1142,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
 	if (cmd.conn_param.valid) {
 		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
 		mutex_lock(&file->mut);
+		mutex_lock(&ctx->mutex);
 		ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
+		mutex_unlock(&ctx->mutex);
 		if (!ret)
 			ctx->uid = cmd.uid;
 		mutex_unlock(&file->mut);
-	} else
+	} else {
+		mutex_lock(&ctx->mutex);
 		ret = __rdma_accept(ctx->cm_id, NULL, NULL);
-
+		mutex_unlock(&ctx->mutex);
+	}
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1145,7 +1171,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1164,7 +1192,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_disconnect(ctx->cm_id);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1195,7 +1225,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
 	resp.qp_attr_mask = 0;
 	memset(&qp_attr, 0, sizeof qp_attr);
 	qp_attr.qp_state = cmd.qp_state;
+	mutex_lock(&ctx->mutex);
 	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
+	mutex_unlock(&ctx->mutex);
 	if (ret)
 		goto out;
@@ -1274,9 +1306,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
 		struct sa_path_rec opa;
 		sa_convert_path_ib_to_opa(&opa, &sa_path);
+		mutex_lock(&ctx->mutex);
 		ret = rdma_set_ib_path(ctx->cm_id, &opa);
+		mutex_unlock(&ctx->mutex);
 	} else {
+		mutex_lock(&ctx->mutex);
 		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
+		mutex_unlock(&ctx->mutex);
 	}
 	if (ret)
 		return ret;
@@ -1309,7 +1345,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,
 	switch (level) {
 	case RDMA_OPTION_ID:
+		mutex_lock(&ctx->mutex);
 		ret = ucma_set_option_id(ctx, optname, optval, optlen);
+		mutex_unlock(&ctx->mutex);
 		break;
 	case RDMA_OPTION_IB:
 		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
@@ -1369,8 +1407,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
+	mutex_lock(&ctx->mutex);
 	if (ctx->cm_id->device)
 		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
@@ -1413,8 +1453,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
 	mc->join_state = join_state;
 	mc->uid = cmd->uid;
 	memcpy(&mc->addr, addr, cmd->addr_size);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
 				  join_state, mc);
+	mutex_unlock(&ctx->mutex);
 	if (ret)
 		goto err2;
@@ -1518,7 +1560,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
 		goto out;
 	}
+	mutex_lock(&mc->ctx->mutex);
 	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
+	mutex_unlock(&mc->ctx->mutex);
+
 	mutex_lock(&mc->ctx->file->mut);
 	ucma_cleanup_mc_events(mc);
 	list_del(&mc->list);
From: Jason Gunthorpe <jgg@nvidia.com>
stable inclusion
from linux-4.19.153
commit c62998f154356a6708070538ed4221f18ff6be02
prepare for fixing CVE-2020-36385

--------------------------------
[ Upstream commit 98837c6c3d7285f6eca86480b6f7fac6880e27a8 ]
This value is locked under the file->mut; ensure it is held whenever touching it.
The case in ucma_migrate_id() is a race, while in ucma_free_ctx() it is already not possible for the write side to run; the movement is just for clarity.
Fixes: 88314e4dda1e ("RDMA/cma: add support for rdma_migrate_id()")
Link: https://lore.kernel.org/r/20200818120526.702120-10-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/infiniband/core/ucma.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2acc30c3d5b2d..0c095c8c0ac5b 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -588,6 +588,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
 			list_move_tail(&uevent->list, &list);
 	}
 	list_del(&ctx->list);
+	events_reported = ctx->events_reported;
 	mutex_unlock(&ctx->file->mut);
 	list_for_each_entry_safe(uevent, tmp, &list, list) {
@@ -597,7 +598,6 @@ static int ucma_free_ctx(struct ucma_context *ctx)
 		kfree(uevent);
 	}
-	events_reported = ctx->events_reported;
 	mutex_destroy(&ctx->mutex);
 	kfree(ctx);
 	return events_reported;
@@ -1644,7 +1644,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
 	cur_file = ctx->file;
 	if (cur_file == new_file) {
+		mutex_lock(&cur_file->mut);
 		resp.events_reported = ctx->events_reported;
+		mutex_unlock(&cur_file->mut);
 		goto response;
 	}
From: Jason Gunthorpe <jgg@nvidia.com>
stable inclusion
from linux-4.19.153
commit 0beef934d50822b1cb884f1e7bb115fe0a36a974
prepare for fixing CVE-2020-36385

--------------------------------
[ Upstream commit 38e03d092699891c3237b5aee9e8029d4ede0956 ]
All entry points to the rdma_cm from a ULP must be single threaded, even on this error unwind. Add the missing locking.
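As an aside, here is a minimal userspace sketch of the rule being enforced (illustrative names only, not the kernel API): the error-unwind path re-enters the single-threaded layer, so it must take the same per-context lock as the success path.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct demo_ctx {
        pthread_mutex_t mutex;  /* serializes calls into the lower layer */
        int joined;
    };

    static int  lower_join(struct demo_ctx *ctx)  { ctx->joined = 1; return 0; }
    static void lower_leave(struct demo_ctx *ctx) { ctx->joined = 0; }

    static int demo_join(struct demo_ctx *ctx, int fail_later)
    {
        int ret;

        pthread_mutex_lock(&ctx->mutex);
        ret = lower_join(ctx);
        pthread_mutex_unlock(&ctx->mutex);
        if (ret)
            return ret;

        if (fail_later) {       /* e.g. copying the reply to userspace failed */
            ret = -EFAULT;
            goto err;
        }
        return 0;

    err:
        /* The unwind also calls into the lower layer, so take the lock here too. */
        pthread_mutex_lock(&ctx->mutex);
        lower_leave(ctx);
        pthread_mutex_unlock(&ctx->mutex);
        return ret;
    }

    int main(void)
    {
        struct demo_ctx ctx = { .mutex = PTHREAD_MUTEX_INITIALIZER, .joined = 0 };

        printf("join: %d, joined=%d\n", demo_join(&ctx, 1), ctx.joined);
        return 0;
    }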
Fixes: 7c11910783a1 ("RDMA/ucma: Put a lock around every call to the rdma_cm layer")
Link: https://lore.kernel.org/r/20200818120526.702120-11-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/infiniband/core/ucma.c | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 0c095c8c0ac5b..01052de6bedbf 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1476,7 +1476,9 @@ static ssize_t ucma_process_join(struct ucma_file *file,
 	return 0;
 err3:
+	mutex_lock(&ctx->mutex);
 	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
+	mutex_unlock(&ctx->mutex);
 	ucma_cleanup_mc_events(mc);
 err2:
 	mutex_lock(&mut);
From: Jason Gunthorpe <jgg@nvidia.com>
mainline inclusion
from mainline-v5.10-rc1
commit f5449e74802c1112dea984aec8af7a33c4516af1
category: bugfix
bugzilla: NA
CVE: CVE-2020-36385
--------------------------------
ucma_destroy_id() assumes that all things accessing the ctx will do so via the xarray. This assumption is violated only in the case that the FD is being closed; then the ctx is reached via the ctx_list. Normally this is OK since ucma_destroy_id() cannot run concurrently with release(), however when ucma_migrate_id() is involved this can be violated as the close of the 2nd FD can run concurrently with destroy on the first:
            CPU0                                CPU1
    ucma_destroy_id(fda)
                                        ucma_migrate_id(fda -> fdb)
                                          ucma_get_ctx()
    xa_lock()
     _ucma_find_context()
     xa_erase()
    xa_unlock()
                                          xa_lock()
                                           ctx->file = new_file
                                           list_move()
                                          xa_unlock()
                                          ucma_put_ctx()

                                        ucma_close(fdb)
                                         _destroy_id()
                                         kfree(ctx)

    _destroy_id()
      wait_for_completion()
      // boom, ctx was freed
The ctx->file must be modified under the handler and xa_lock, and prior to modification the ID must be rechecked to ensure it is still reachable from cur_file, i.e. there is no parallel destroy or migrate.
To make this work remove the double locking and streamline the control flow. The double locking was obsoleted by the handler lock now directly preventing new uevents from being created, and the ctx_list cannot be read while holding fgets on both files. Removing the double locking also removes the need to check for the same file.
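As an illustration of the recheck-under-lock idea described above, here is a userspace sketch with invented names (not the kernel code): migrate may only re-parent the object after taking the table lock and confirming the object is still present in the table, i.e. that a concurrent destroy has not already erased it.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    #define TABLE_SIZE 16

    struct obj {
        int id;
        int owner;              /* stands in for ctx->file */
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct obj *table[TABLE_SIZE];   /* protected by table_lock */

    /* Destroy path: erase the object from the table under the lock. */
    static struct obj *obj_erase(int id)
    {
        struct obj *o;

        pthread_mutex_lock(&table_lock);
        o = table[id];
        table[id] = NULL;
        pthread_mutex_unlock(&table_lock);
        return o;
    }

    /* Migrate path: re-parent only if the object is still reachable. */
    static int obj_migrate(struct obj *o, int new_owner)
    {
        int ret = 0;

        pthread_mutex_lock(&table_lock);
        if (table[o->id] != o)          /* lost the race with obj_erase() */
            ret = -ENOENT;
        else
            o->owner = new_owner;
        pthread_mutex_unlock(&table_lock);
        return ret;
    }

    int main(void)
    {
        struct obj o = { .id = 3, .owner = 1 };

        table[o.id] = &o;
        printf("migrate: %d\n", obj_migrate(&o, 2));    /* 0: still in table */
        obj_erase(o.id);
        printf("migrate: %d\n", obj_migrate(&o, 1));    /* -ENOENT: raced */
        return 0;
    }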
Fixes: 88314e4dda1e ("RDMA/cma: add support for rdma_migrate_id()")
Link: https://lore.kernel.org/r/0-v1-05c5a4090305+3a872-ucma_syz_migrate_jgg@nvidi...
Reported-and-tested-by: syzbot+cc6fc752b3819e082d0c@syzkaller.appspotmail.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/infiniband/core/ucma.c | 78 +++++++++++++---------------------
 1 file changed, 29 insertions(+), 49 deletions(-)
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 01052de6bedbf..74086abd39316 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1582,45 +1582,15 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
 	return ret;
 }
-static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
-{
-	/* Acquire mutex's based on pointer comparison to prevent deadlock. */
-	if (file1 < file2) {
-		mutex_lock(&file1->mut);
-		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
-	} else {
-		mutex_lock(&file2->mut);
-		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
-	}
-}
-
-static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
-{
-	if (file1 < file2) {
-		mutex_unlock(&file2->mut);
-		mutex_unlock(&file1->mut);
-	} else {
-		mutex_unlock(&file1->mut);
-		mutex_unlock(&file2->mut);
-	}
-}
-
-static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
-{
-	struct ucma_event *uevent, *tmp;
-
-	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
-		if (uevent->ctx == ctx)
-			list_move_tail(&uevent->list, &file->event_list);
-}
-
 static ssize_t ucma_migrate_id(struct ucma_file *new_file,
 			       const char __user *inbuf,
 			       int in_len, int out_len)
 {
 	struct rdma_ucm_migrate_id cmd;
 	struct rdma_ucm_migrate_resp resp;
+	struct ucma_event *uevent, *tmp;
 	struct ucma_context *ctx;
+	LIST_HEAD(event_list);
 	struct fd f;
 	struct ucma_file *cur_file;
 	int ret = 0;
@@ -1636,42 +1606,52 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
 		ret = -EINVAL;
 		goto file_put;
 	}
+	cur_file = f.file->private_data;
 	/* Validate current fd and prevent destruction of id. */
-	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
+	ctx = ucma_get_ctx(cur_file, cmd.id);
 	if (IS_ERR(ctx)) {
 		ret = PTR_ERR(ctx);
 		goto file_put;
 	}
-	cur_file = ctx->file;
-	if (cur_file == new_file) {
-		mutex_lock(&cur_file->mut);
-		resp.events_reported = ctx->events_reported;
-		mutex_unlock(&cur_file->mut);
-		goto response;
-	}
-
 	/*
-	 * Migrate events between fd's, maintaining order, and avoiding new
-	 * events being added before existing events.
+	 * ctx->file can only be changed under the handler & xa_lock. xa_load()
+	 * must be checked again to ensure the ctx hasn't begun destruction
+	 * since the ucma_get_ctx().
 	 */
-	ucma_lock_files(cur_file, new_file);
 	mutex_lock(&mut);
-	list_move_tail(&ctx->list, &new_file->ctx_list);
-	ucma_move_events(ctx, new_file);
+	if (_ucma_find_context(cmd.id, cur_file) != ctx) {
+		mutex_unlock(&mut);
+		ret = -ENOENT;
+		goto err_unlock;
+	}
 	ctx->file = new_file;
+	mutex_unlock(&mut);
+
+	mutex_lock(&cur_file->mut);
+	list_del(&ctx->list);
+	/*
+	 * At this point lock_handler() prevents addition of new uevents for
+	 * this ctx.
+	 */
+	list_for_each_entry_safe(uevent, tmp, &cur_file->event_list, list)
+		if (uevent->ctx == ctx)
+			list_move_tail(&uevent->list, &event_list);
 	resp.events_reported = ctx->events_reported;
+	mutex_unlock(&cur_file->mut);
-	mutex_unlock(&mut);
-	ucma_unlock_files(cur_file, new_file);
+	mutex_lock(&new_file->mut);
+	list_add_tail(&ctx->list, &new_file->ctx_list);
+	list_splice_tail(&event_list, &new_file->event_list);
+	mutex_unlock(&new_file->mut);
-response:
 	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
 		ret = -EFAULT;
+err_unlock:
 	ucma_put_ctx(ctx);
 file_put:
 	fdput(f);