This series backports mainline bugfixes for audit.
Jan Kara (3): audit_tree: Remove mark->lock locking audit: Fix possible spurious -ENOSPC error audit: Fix possible tagging failures
kernel/audit_tree.c | 61 ++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 34 deletions(-)
From: Jan Kara jack@suse.cz
mainline inclusion from mainline-v5.0-rc1 commit 9f16d2e6241b2fc664523f17d74adda7489f496b category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I8X7IV CVE: NA
Reference: https://lore.kernel.org/all/20181017101505.25881-2-jack@suse.cz/
--------------------------------
Currently, audit_tree code uses mark->lock to protect against detaching of mark from an inode. In most places it however also uses mark->group->mark_mutex (as we need to atomically replace attached marks) and this provides protection against mark detaching as well. So just remove protection with mark->lock from audit tree code and replace it with mark->group->mark_mutex protection in all the places. It simplifies the code and gets rid of some ugly catches like calling fsnotify_add_mark_locked() with mark->lock held (which cannot sleep only because we hold a reference to another mark attached to the same inode).
Reviewed-by: Richard Guy Briggs rgb@redhat.com Signed-off-by: Jan Kara jack@suse.cz Signed-off-by: Paul Moore paul@paul-moore.com Conflicts: kernel/audit_tree.c Signed-off-by: GUO Zihua guozihua@huawei.com --- kernel/audit_tree.c | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 04f59dfd3e71..92c34f55f27f 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -179,7 +179,7 @@ static inline struct list_head *chunk_hash(unsigned long key) return chunk_hash_heads + n % HASH_SIZE; }
-/* hash_lock & entry->lock is held by caller */ +/* hash_lock & entry->group->mark_mutex is held by caller */ static void insert_hash(struct audit_chunk *chunk) { struct list_head *list; @@ -242,13 +242,11 @@ static void untag_chunk(struct node *p) new = alloc_chunk(size);
mutex_lock(&entry->group->mark_mutex); - spin_lock(&entry->lock); /* * mark_mutex protects mark from getting detached and thus also from * mark->connector->obj getting NULL. */ if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) { - spin_unlock(&entry->lock); mutex_unlock(&entry->group->mark_mutex); if (new) fsnotify_put_mark(&new->mark); @@ -266,7 +264,6 @@ static void untag_chunk(struct node *p) list_del_init(&p->list); list_del_rcu(&chunk->hash); spin_unlock(&hash_lock); - spin_unlock(&entry->lock); mutex_unlock(&entry->group->mark_mutex); fsnotify_destroy_mark(entry, audit_tree_group); goto out; @@ -310,7 +307,6 @@ static void untag_chunk(struct node *p) list_for_each_entry(owner, &new->trees, same_root) owner->root = new; spin_unlock(&hash_lock); - spin_unlock(&entry->lock); mutex_unlock(&entry->group->mark_mutex); fsnotify_destroy_mark(entry, audit_tree_group); fsnotify_put_mark(&new->mark); /* drop initial reference */ @@ -327,7 +323,6 @@ static void untag_chunk(struct node *p) p->owner = NULL; put_tree(owner); spin_unlock(&hash_lock); - spin_unlock(&entry->lock); mutex_unlock(&entry->group->mark_mutex); out: fsnotify_put_mark(entry); @@ -347,12 +342,12 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) return -ENOSPC; }
- spin_lock(&entry->lock); + mutex_lock(&entry->group->mark_mutex); spin_lock(&hash_lock); if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; - spin_unlock(&entry->lock); + mutex_unlock(&entry->group->mark_mutex); fsnotify_destroy_mark(entry, audit_tree_group); fsnotify_put_mark(entry); return 0; @@ -368,7 +363,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) chunk->key = inode_to_key(inode); insert_hash(chunk); spin_unlock(&hash_lock); - spin_unlock(&entry->lock); + mutex_unlock(&entry->group->mark_mutex); fsnotify_put_mark(entry); /* drop initial reference */ return 0; } @@ -409,14 +404,12 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) chunk_entry = &chunk->mark;
mutex_lock(&old_entry->group->mark_mutex); - spin_lock(&old_entry->lock); /* * mark_mutex protects mark from getting detached and thus also from * mark->connector->obj getting NULL. */ if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) { /* old_entry is being shot, lets just lie */ - spin_unlock(&old_entry->lock); mutex_unlock(&old_entry->group->mark_mutex); fsnotify_put_mark(old_entry); fsnotify_put_mark(&chunk->mark); @@ -425,23 +418,16 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj, FSNOTIFY_OBJ_TYPE_INODE, 1)) { - spin_unlock(&old_entry->lock); mutex_unlock(&old_entry->group->mark_mutex); fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); return -ENOSPC; }
- /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */ - spin_lock(&chunk_entry->lock); spin_lock(&hash_lock); - - /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */ if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; - spin_unlock(&chunk_entry->lock); - spin_unlock(&old_entry->lock); mutex_unlock(&old_entry->group->mark_mutex);
fsnotify_destroy_mark(chunk_entry, audit_tree_group); @@ -474,8 +460,6 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) list_add(&tree->same_root, &chunk->trees); } spin_unlock(&hash_lock); - spin_unlock(&chunk_entry->lock); - spin_unlock(&old_entry->lock); mutex_unlock(&old_entry->group->mark_mutex); fsnotify_destroy_mark(old_entry, audit_tree_group); fsnotify_put_mark(chunk_entry); /* drop initial reference */
From: Jan Kara jack@suse.cz
mainline inclusion from mainline-v5.0-rc1 commit a5789b07b35aa56569dff762bfc063303a9ccb95 category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I8X7IV CVE: NA
Reference: https://lore.kernel.org/all/20181017101505.25881-3-jack@suse.cz/
--------------------------------
When an inode is tagged with a tree, tag_chunk() checks whether there is audit_tree_group mark attached to the inode and adds one if not. However nothing protects another tag_chunk() to add the mark between we've checked and try to add the fsnotify mark thus resulting in an error from fsnotify_add_mark() and consequently an ENOSPC error from tag_chunk().
Fix the problem by holding mark_mutex over the whole check-insert code sequence.
Reviewed-by: Richard Guy Briggs rgb@redhat.com Signed-off-by: Jan Kara jack@suse.cz Signed-off-by: Paul Moore paul@paul-moore.com Conflicts: kernel/audit_tree.c Signed-off-by: GUO Zihua guozihua@huawei.com --- kernel/audit_tree.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 92c34f55f27f..7a252b986524 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -329,25 +329,29 @@ static void untag_chunk(struct node *p) spin_lock(&hash_lock); }
+/* Call with group->mark_mutex held, releases it */ static int create_chunk(struct inode *inode, struct audit_tree *tree) { struct fsnotify_mark *entry; struct audit_chunk *chunk = alloc_chunk(1); - if (!chunk) + + if (!chunk) { + mutex_unlock(&audit_tree_group->mark_mutex); return -ENOMEM; + }
entry = &chunk->mark; - if (fsnotify_add_inode_mark(entry, inode, 0)) { + if (fsnotify_add_inode_mark_locked(entry, inode, 0)) { + mutex_unlock(&audit_tree_group->mark_mutex); fsnotify_put_mark(entry); return -ENOSPC; }
- mutex_lock(&entry->group->mark_mutex); spin_lock(&hash_lock); if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; - mutex_unlock(&entry->group->mark_mutex); + mutex_unlock(&audit_tree_group->mark_mutex); fsnotify_destroy_mark(entry, audit_tree_group); fsnotify_put_mark(entry); return 0; @@ -363,7 +367,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) chunk->key = inode_to_key(inode); insert_hash(chunk); spin_unlock(&hash_lock); - mutex_unlock(&entry->group->mark_mutex); + mutex_unlock(&audit_tree_group->mark_mutex); fsnotify_put_mark(entry); /* drop initial reference */ return 0; } @@ -377,6 +381,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) struct node *p; int n;
+ mutex_lock(&audit_tree_group->mark_mutex); old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group); if (!old_entry) @@ -389,6 +394,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) for (n = 0; n < old->count; n++) { if (old->owners[n].owner == tree) { spin_unlock(&hash_lock); + mutex_unlock(&audit_tree_group->mark_mutex); fsnotify_put_mark(old_entry); return 0; } @@ -397,20 +403,20 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk = alloc_chunk(old->count + 1); if (!chunk) { + mutex_unlock(&audit_tree_group->mark_mutex); fsnotify_put_mark(old_entry); return -ENOMEM; }
chunk_entry = &chunk->mark;
- mutex_lock(&old_entry->group->mark_mutex); /* * mark_mutex protects mark from getting detached and thus also from * mark->connector->obj getting NULL. */ if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) { /* old_entry is being shot, lets just lie */ - mutex_unlock(&old_entry->group->mark_mutex); + mutex_unlock(&audit_tree_group->mark_mutex); fsnotify_put_mark(old_entry); fsnotify_put_mark(&chunk->mark); return -ENOENT; @@ -418,7 +424,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj, FSNOTIFY_OBJ_TYPE_INODE, 1)) { - mutex_unlock(&old_entry->group->mark_mutex); + mutex_unlock(&audit_tree_group->mark_mutex); fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); return -ENOSPC; @@ -428,7 +434,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; - mutex_unlock(&old_entry->group->mark_mutex); + mutex_unlock(&audit_tree_group->mark_mutex);
fsnotify_destroy_mark(chunk_entry, audit_tree_group);
@@ -460,7 +466,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) list_add(&tree->same_root, &chunk->trees); } spin_unlock(&hash_lock); - mutex_unlock(&old_entry->group->mark_mutex); + mutex_unlock(&audit_tree_group->mark_mutex); fsnotify_destroy_mark(old_entry, audit_tree_group); fsnotify_put_mark(chunk_entry); /* drop initial reference */ fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
From: Jan Kara jack@suse.cz
mainline inclusion from mainline-v5.0-rc1 commit b1e4603b92d8aef8776e5673dc13fedb68d32ea4 category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I8X8BT CVE: NA
Reference: https://lore.kernel.org/all/20181017101505.25881-4-jack@suse.cz/
--------------------------------
Audit tree code is replacing marks attached to inodes in non-atomic way. Thus fsnotify_find_mark() in tag_chunk() may find a mark that belongs to a chunk that is no longer valid one and will soon be destroyed. Tags added to such chunk will be simply lost.
Fix the problem by making sure old mark is marked as going away (through fsnotify_detach_mark()) before dropping mark_mutex and thus in an atomic way wrt tag_chunk(). Note that this does not fix the problem completely as if tag_chunk() finds a mark that is going away, it fails with -ENOENT. But at least the failure is not silent and currently there's no way to search for another fsnotify mark attached to the inode. We'll fix this problem in later patch.
Reviewed-by: Richard Guy Briggs rgb@redhat.com Signed-off-by: Jan Kara jack@suse.cz Signed-off-by: Paul Moore paul@paul-moore.com Signed-off-by: GUO Zihua guozihua@huawei.com --- kernel/audit_tree.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 7a252b986524..bac5dd90c629 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -264,8 +264,9 @@ static void untag_chunk(struct node *p) list_del_init(&p->list); list_del_rcu(&chunk->hash); spin_unlock(&hash_lock); + fsnotify_detach_mark(entry); mutex_unlock(&entry->group->mark_mutex); - fsnotify_destroy_mark(entry, audit_tree_group); + fsnotify_free_mark(entry); goto out; }
@@ -307,8 +308,9 @@ static void untag_chunk(struct node *p) list_for_each_entry(owner, &new->trees, same_root) owner->root = new; spin_unlock(&hash_lock); + fsnotify_detach_mark(entry); mutex_unlock(&entry->group->mark_mutex); - fsnotify_destroy_mark(entry, audit_tree_group); + fsnotify_free_mark(entry); fsnotify_put_mark(&new->mark); /* drop initial reference */ goto out;
@@ -351,8 +353,9 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; + fsnotify_detach_mark(entry); mutex_unlock(&audit_tree_group->mark_mutex); - fsnotify_destroy_mark(entry, audit_tree_group); + fsnotify_free_mark(entry); fsnotify_put_mark(entry); return 0; } @@ -434,10 +437,9 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; + fsnotify_detach_mark(chunk_entry); mutex_unlock(&audit_tree_group->mark_mutex); - - fsnotify_destroy_mark(chunk_entry, audit_tree_group); - + fsnotify_free_mark(chunk_entry); fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); return 0; @@ -466,8 +468,9 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) list_add(&tree->same_root, &chunk->trees); } spin_unlock(&hash_lock); + fsnotify_detach_mark(old_entry); mutex_unlock(&audit_tree_group->mark_mutex); - fsnotify_destroy_mark(old_entry, audit_tree_group); + fsnotify_free_mark(old_entry); fsnotify_put_mark(chunk_entry); /* drop initial reference */ fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ return 0;
反馈: 您发送到kernel@openeuler.org的补丁/补丁集,转换为PR失败! 邮件列表地址:https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/F... 失败原因:应用补丁/补丁集失败,Patch failed at 0001 audit_tree: Remove mark->lock locking 建议解决方法:请查看失败原因, 确认补丁是否可以应用在当前期望分支的最新代码上
Feedback: The patch(es) you sent to kernel@openeuler.org could not be converted into a PR! Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/F... Failure Reason: applying the patch(es) failed — Patch failed at 0001 audit_tree: Remove mark->lock locking. Suggested Solution: please review the failure reason and confirm whether the patch(es) apply cleanly on top of the latest code of the intended branch.