From: Jan Kara <jack@suse.cz>
mainline inclusion
from mainline-v5.0-rc1
commit a5789b07b35aa56569dff762bfc063303a9ccb95
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8X7IV
CVE: NA
Reference: https://lore.kernel.org/all/20181017101505.25881-3-jack@suse.cz/
--------------------------------
When an inode is tagged with a tree, tag_chunk() checks whether there is an audit_tree_group mark attached to the inode and adds one if not. However, nothing prevents another tag_chunk() from adding the mark between the check and our attempt to add the fsnotify mark, which results in an error from fsnotify_add_mark() and consequently an ENOSPC error from tag_chunk().
Fix the problem by holding mark_mutex over the whole check-insert code sequence.
Reviewed-by: Richard Guy Briggs <rgb@redhat.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Paul Moore <paul@paul-moore.com>
Conflicts:
	kernel/audit_tree.c
Signed-off-by: GUO Zihua <guozihua@huawei.com>
---
 kernel/audit_tree.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
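
Note (illustration only, not part of the patch): the race closed here is the usual check-then-insert pattern. In the sketch below, add_mark() and tag_chunk_fixed() are hypothetical userspace stand-ins for fsnotify_add_mark() and tag_chunk(), not the real kernel APIs; holding mark_mutex across both the check and the insert means a concurrent caller either sees the mark or is the one adding it, so neither gets a spurious failure.

/*
 * Illustrative userspace sketch only -- not kernel code and not part of
 * this patch.  add_mark() is a hypothetical stand-in for
 * fsnotify_add_mark(): it fails if a mark of the group is already
 * attached.  Before the fix, two callers could both see "no mark", both
 * try to add one, and the loser got a spurious error.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool mark_attached;	/* the audit_tree_group mark on the inode */

/* Caller must hold mark_mutex.  Fails on a duplicate mark. */
static int add_mark(void)
{
	if (mark_attached)
		return -1;	/* second mark of the same group -> error */
	mark_attached = true;
	return 0;
}

/* Fixed sequence: check and insert under one critical section. */
static void *tag_chunk_fixed(void *arg)
{
	long *err = arg;

	pthread_mutex_lock(&mark_mutex);
	if (mark_attached)
		*err = 0;		/* mark already there: just reuse it */
	else
		*err = add_mark();	/* cannot race: we hold mark_mutex */
	pthread_mutex_unlock(&mark_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	long e1 = 0, e2 = 0;

	pthread_create(&t1, NULL, tag_chunk_fixed, &e1);
	pthread_create(&t2, NULL, tag_chunk_fixed, &e2);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("results: %ld %ld\n", e1, e2);	/* always "0 0" with the fix */
	return 0;
}

Built with gcc -pthread, this always prints "results: 0 0"; moving the locking inside add_mark() alone (i.e. checking mark_attached outside the lock) reopens the window in which both threads pass the check and one insert fails.
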
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 92c34f55f27f..7a252b986524 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -329,25 +329,29 @@ static void untag_chunk(struct node *p)
 	spin_lock(&hash_lock);
 }
 
+/* Call with group->mark_mutex held, releases it */
 static int create_chunk(struct inode *inode, struct audit_tree *tree)
 {
 	struct fsnotify_mark *entry;
 	struct audit_chunk *chunk = alloc_chunk(1);
-	if (!chunk)
+
+	if (!chunk) {
+		mutex_unlock(&audit_tree_group->mark_mutex);
 		return -ENOMEM;
+	}
 
 	entry = &chunk->mark;
-	if (fsnotify_add_inode_mark(entry, inode, 0)) {
+	if (fsnotify_add_inode_mark_locked(entry, inode, 0)) {
+		mutex_unlock(&audit_tree_group->mark_mutex);
 		fsnotify_put_mark(entry);
 		return -ENOSPC;
 	}
 
-	mutex_lock(&entry->group->mark_mutex);
 	spin_lock(&hash_lock);
 	if (tree->goner) {
 		spin_unlock(&hash_lock);
 		chunk->dead = 1;
-		mutex_unlock(&entry->group->mark_mutex);
+		mutex_unlock(&audit_tree_group->mark_mutex);
 		fsnotify_destroy_mark(entry, audit_tree_group);
 		fsnotify_put_mark(entry);
 		return 0;
@@ -363,7 +367,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
 	chunk->key = inode_to_key(inode);
 	insert_hash(chunk);
 	spin_unlock(&hash_lock);
-	mutex_unlock(&entry->group->mark_mutex);
+	mutex_unlock(&audit_tree_group->mark_mutex);
 	fsnotify_put_mark(entry);	/* drop initial reference */
 	return 0;
 }
@@ -377,6 +381,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	struct node *p;
 	int n;
 
+	mutex_lock(&audit_tree_group->mark_mutex);
 	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
 				       audit_tree_group);
 	if (!old_entry)
@@ -389,6 +394,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	for (n = 0; n < old->count; n++) {
 		if (old->owners[n].owner == tree) {
 			spin_unlock(&hash_lock);
+			mutex_unlock(&audit_tree_group->mark_mutex);
 			fsnotify_put_mark(old_entry);
 			return 0;
 		}
@@ -397,20 +403,20 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 
 	chunk = alloc_chunk(old->count + 1);
 	if (!chunk) {
+		mutex_unlock(&audit_tree_group->mark_mutex);
 		fsnotify_put_mark(old_entry);
 		return -ENOMEM;
 	}
 
 	chunk_entry = &chunk->mark;
 
-	mutex_lock(&old_entry->group->mark_mutex);
 	/*
 	 * mark_mutex protects mark from getting detached and thus also from
 	 * mark->connector->obj getting NULL.
 	 */
 	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
 		/* old_entry is being shot, lets just lie */
-		mutex_unlock(&old_entry->group->mark_mutex);
+		mutex_unlock(&audit_tree_group->mark_mutex);
 		fsnotify_put_mark(old_entry);
 		fsnotify_put_mark(&chunk->mark);
 		return -ENOENT;
@@ -418,7 +424,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 
 	if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
 				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
-		mutex_unlock(&old_entry->group->mark_mutex);
+		mutex_unlock(&audit_tree_group->mark_mutex);
 		fsnotify_put_mark(chunk_entry);
 		fsnotify_put_mark(old_entry);
 		return -ENOSPC;
@@ -428,7 +434,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	if (tree->goner) {
 		spin_unlock(&hash_lock);
 		chunk->dead = 1;
-		mutex_unlock(&old_entry->group->mark_mutex);
+		mutex_unlock(&audit_tree_group->mark_mutex);
 
 		fsnotify_destroy_mark(chunk_entry, audit_tree_group);
 
@@ -460,7 +466,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 		list_add(&tree->same_root, &chunk->trees);
 	}
 	spin_unlock(&hash_lock);
-	mutex_unlock(&old_entry->group->mark_mutex);
+	mutex_unlock(&audit_tree_group->mark_mutex);
 	fsnotify_destroy_mark(old_entry, audit_tree_group);
 	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
 	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */