From: Jan Kara <jack@suse.cz>
mainline inclusion from mainline-v5.0-rc1 commit 9f16d2e6241b2fc664523f17d74adda7489f496b category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I8X7IV CVE: NA
Reference: https://lore.kernel.org/all/20181017101505.25881-2-jack@suse.cz/
--------------------------------
Currently, audit_tree code uses mark->lock to protect against detaching of mark from an inode. In most places it however also uses mark->group->mark_mutex (as we need to atomically replace attached marks) and this provides protection against mark detaching as well. So just remove protection with mark->lock from audit tree code and replace it with mark->group->mark_mutex protection in all the places. It simplifies the code and gets rid of some ugly catches like calling fsnotify_add_mark_locked() with mark->lock held (which cannot sleep only because we hold a reference to another mark attached to the same inode).
Reviewed-by: Richard Guy Briggs <rgb@redhat.com> Signed-off-by: Jan Kara <jack@suse.cz> Signed-off-by: Paul Moore <paul@paul-moore.com> Conflicts: kernel/audit_tree.c Signed-off-by: GUO Zihua <guozihua@huawei.com> --- kernel/audit_tree.c | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 04f59dfd3e71..92c34f55f27f 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -179,7 +179,7 @@ static inline struct list_head *chunk_hash(unsigned long key) return chunk_hash_heads + n % HASH_SIZE; }
-/* hash_lock & entry->lock is held by caller */ +/* hash_lock & entry->group->mark_mutex is held by caller */ static void insert_hash(struct audit_chunk *chunk) { struct list_head *list; @@ -242,13 +242,11 @@ static void untag_chunk(struct node *p) new = alloc_chunk(size);
mutex_lock(&entry->group->mark_mutex); - spin_lock(&entry->lock); /* * mark_mutex protects mark from getting detached and thus also from * mark->connector->obj getting NULL. */ if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) { - spin_unlock(&entry->lock); mutex_unlock(&entry->group->mark_mutex); if (new) fsnotify_put_mark(&new->mark); @@ -266,7 +264,6 @@ static void untag_chunk(struct node *p) list_del_init(&p->list); list_del_rcu(&chunk->hash); spin_unlock(&hash_lock); - spin_unlock(&entry->lock); mutex_unlock(&entry->group->mark_mutex); fsnotify_destroy_mark(entry, audit_tree_group); goto out; @@ -310,7 +307,6 @@ static void untag_chunk(struct node *p) list_for_each_entry(owner, &new->trees, same_root) owner->root = new; spin_unlock(&hash_lock); - spin_unlock(&entry->lock); mutex_unlock(&entry->group->mark_mutex); fsnotify_destroy_mark(entry, audit_tree_group); fsnotify_put_mark(&new->mark); /* drop initial reference */ @@ -327,7 +323,6 @@ static void untag_chunk(struct node *p) p->owner = NULL; put_tree(owner); spin_unlock(&hash_lock); - spin_unlock(&entry->lock); mutex_unlock(&entry->group->mark_mutex); out: fsnotify_put_mark(entry); @@ -347,12 +342,12 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) return -ENOSPC; }
- spin_lock(&entry->lock); + mutex_lock(&entry->group->mark_mutex); spin_lock(&hash_lock); if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; - spin_unlock(&entry->lock); + mutex_unlock(&entry->group->mark_mutex); fsnotify_destroy_mark(entry, audit_tree_group); fsnotify_put_mark(entry); return 0; @@ -368,7 +363,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) chunk->key = inode_to_key(inode); insert_hash(chunk); spin_unlock(&hash_lock); - spin_unlock(&entry->lock); + mutex_unlock(&entry->group->mark_mutex); fsnotify_put_mark(entry); /* drop initial reference */ return 0; } @@ -409,14 +404,12 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) chunk_entry = &chunk->mark;
mutex_lock(&old_entry->group->mark_mutex); - spin_lock(&old_entry->lock); /* * mark_mutex protects mark from getting detached and thus also from * mark->connector->obj getting NULL. */ if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) { /* old_entry is being shot, lets just lie */ - spin_unlock(&old_entry->lock); mutex_unlock(&old_entry->group->mark_mutex); fsnotify_put_mark(old_entry); fsnotify_put_mark(&chunk->mark); @@ -425,23 +418,16 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj, FSNOTIFY_OBJ_TYPE_INODE, 1)) { - spin_unlock(&old_entry->lock); mutex_unlock(&old_entry->group->mark_mutex); fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); return -ENOSPC; }
- /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */ - spin_lock(&chunk_entry->lock); spin_lock(&hash_lock); - - /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */ if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; - spin_unlock(&chunk_entry->lock); - spin_unlock(&old_entry->lock); mutex_unlock(&old_entry->group->mark_mutex);
fsnotify_destroy_mark(chunk_entry, audit_tree_group); @@ -474,8 +460,6 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) list_add(&tree->same_root, &chunk->trees); } spin_unlock(&hash_lock); - spin_unlock(&chunk_entry->lock); - spin_unlock(&old_entry->lock); mutex_unlock(&old_entry->group->mark_mutex); fsnotify_destroy_mark(old_entry, audit_tree_group); fsnotify_put_mark(chunk_entry); /* drop initial reference */