From: Yu Kuai <yukuai3@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 185863
CVE: NA

---------------------------
Our test reported a use-after-free (UAF) problem:
[ 154.237639] ==================================================================
[ 154.239896] BUG: KASAN: use-after-free in __bfq_deactivate_entity+0x25/0x290
[ 154.241910] Read of size 1 at addr ffff88824501f7b8 by task rmmod/2447

[ 154.244248] CPU: 7 PID: 2447 Comm: rmmod Not tainted 4.19.90+ #1
[ 154.245962] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
[ 154.248184] Call Trace:
[ 154.248532]  dump_stack+0x7a/0xac
[ 154.248995]  print_address_description+0x6c/0x237
[ 154.249649]  ? __bfq_deactivate_entity+0x25/0x290
[ 154.250297]  kasan_report.cold+0x88/0x29c
[ 154.250853]  __bfq_deactivate_entity+0x25/0x290
[ 154.251483]  bfq_pd_offline+0x13e/0x790
[ 154.252017]  ? blk_mq_freeze_queue_wait+0x165/0x180
[ 154.252687]  ? bfq_reparent_leaf_entity+0xa0/0xa0
[ 154.253333]  ? bfq_put_queue+0x12c/0x1e0
[ 154.253877]  ? kmem_cache_free+0x8e/0x1e0
[ 154.254433]  ? hrtimer_active+0x53/0xa0
[ 154.254966]  ? hrtimer_try_to_cancel+0x6d/0x1c0
[ 154.255576]  ? __hrtimer_get_remaining+0xf0/0xf0
[ 154.256197]  ? __bfq_deactivate_entity+0x11b/0x290
[ 154.256843]  blkcg_deactivate_policy+0x106/0x1f0
[ 154.257464]  bfq_exit_queue+0xf1/0x110
[ 154.257975]  blk_mq_exit_sched+0x114/0x140
[ 154.258530]  elevator_exit+0x9a/0xa0
[ 154.259023]  blk_exit_queue+0x3d/0x70
[ 154.259523]  blk_cleanup_queue+0x160/0x1e0
[ 154.260099]  null_del_dev+0xda/0x1f0 [null_blk]
[ 154.260723]  null_exit+0x5f/0xab [null_blk]
[ 154.261298]  __x64_sys_delete_module+0x20e/0x2f0
[ 154.261931]  ? __ia32_sys_delete_module+0x2f0/0x2f0
[ 154.262597]  ? exit_to_usermode_loop+0x45/0xe0
[ 154.263219]  do_syscall_64+0x73/0x280
[ 154.263731]  ? page_fault+0x8/0x30
[ 154.264197]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
[ 154.264882] RIP: 0033:0x7f033bf63acb
[ 154.265370] Code: 73 01 c3 48 8b 0d bd 33 0c 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa b8 b0 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 8d 33 0c 00 f7 d8 64 89 01 48
[ 154.267880] RSP: 002b:00007ffc7fe52548 EFLAGS: 00000206 ORIG_RAX: 00000000000000b0
[ 154.268900] RAX: ffffffffffffffda RBX: 00005583e2b8e530 RCX: 00007f033bf63acb
[ 154.269865] RDX: 000000000000000a RSI: 0000000000000800 RDI: 00005583e2b8e598
[ 154.270837] RBP: 00007ffc7fe525a8 R08: 0000000000000000 R09: 0000000000000000
[ 154.271802] R10: 00007f033bfd7ac0 R11: 0000000000000206 R12: 00007ffc7fe52770
[ 154.272763] R13: 00007ffc7fe536f8 R14: 00005583e2b8d2a0 R15: 00005583e2b8e530

[ 154.273939] Allocated by task 2350:
[ 154.274419]  kasan_kmalloc+0xc6/0xe0
[ 154.274916]  kmem_cache_alloc_node_trace+0x119/0x240
[ 154.275594]  bfq_pd_alloc+0x50/0x510
[ 154.276081]  blkg_alloc+0x237/0x310
[ 154.276557]  blkg_create+0x48a/0x5e0
[ 154.277044]  blkg_lookup_create+0x144/0x1c0
[ 154.277614]  generic_make_request_checks+0x5cf/0xad0
[ 154.278290]  generic_make_request+0xdd/0x6c0
[ 154.278877]  submit_bio+0xaa/0x250
[ 154.279342]  mpage_readpages+0x2a2/0x3b0
[ 154.279878]  read_pages+0xdf/0x3a0
[ 154.280343]  __do_page_cache_readahead+0x27c/0x2a0
[ 154.280989]  ondemand_readahead+0x275/0x460
[ 154.281556]  generic_file_read_iter+0xc4e/0x1790
[ 154.282182]  aio_read+0x174/0x260
[ 154.282635]  io_submit_one+0x7d4/0x14b0
[ 154.283164]  __x64_sys_io_submit+0x102/0x230
[ 154.283749]  do_syscall_64+0x73/0x280
[ 154.284250]  entry_SYSCALL_64_after_hwframe+0x44/0xa9

[ 154.285159] Freed by task 2315:
[ 154.285588]  __kasan_slab_free+0x12f/0x180
[ 154.286150]  kfree+0xab/0x1d0
[ 154.286561]  blkg_free.part.0+0x4a/0xe0
[ 154.287089]  rcu_process_callbacks+0x424/0x6d0
[ 154.287689]  __do_softirq+0x10d/0x370

[ 154.288395] The buggy address belongs to the object at ffff88824501f700
               which belongs to the cache kmalloc-2048 of size 2048
[ 154.290083] The buggy address is located 184 bytes inside of
               2048-byte region [ffff88824501f700, ffff88824501ff00)
[ 154.291661] The buggy address belongs to the page:
[ 154.292306] page:ffffea0009140600 count:1 mapcount:0 mapping:ffff88824bc0e800 index:0x0 compound_mapcount: 0
[ 154.293610] flags: 0x17ffffc0008100(slab|head)
[ 154.294211] raw: 0017ffffc0008100 ffffea000896da00 0000000200000002 ffff88824bc0e800
[ 154.295247] raw: 0000000000000000 00000000800f000f 00000001ffffffff 0000000000000000
[ 154.296294] page dumped because: kasan: bad access detected
[ 154.297261] Memory state around the buggy address:
[ 154.297913]  ffff88824501f680: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 154.298884]  ffff88824501f700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 154.299858] >ffff88824501f780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 154.300824]                                         ^
[ 154.301505]  ffff88824501f800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 154.302479]  ffff88824501f880: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 154.303459] ==================================================================
This is because when a bfq_group is offlined, bfq_queues that are not on the active tree keep their parent pointer (bfqq->entity.parent) pointing at the offlined bfq_group. Once IOs are later issued to such bfq_queues, the offlined (and by then freed) bfq_group is reinserted into the service tree.
Fix the problem by moving the bfq_queue to the root_group if its parent is found to be offlined.
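The core of the fix is a sentinel value: bfq_pd_offline() overwrites pd->plid with BLKCG_MAX_POLS, which can never be a valid policy id, and bfq_add_bfqq_busy() checks for that mark before activating the queue. Below is a minimal userspace sketch of the sentinel idea, with stub types and an arbitrary BLKCG_MAX_POLS value; it is an illustration, not the kernel code itself:

  #include <stdbool.h>
  #include <stdio.h>

  #define BLKCG_MAX_POLS 5  /* arbitrary for this sketch; valid plids are below it */

  struct pd_stub { int plid; };

  /* offline path: tag the policy data so later users can detect it */
  static void pd_offline(struct pd_stub *pd)
  {
          pd->plid = BLKCG_MAX_POLS;
  }

  /* activation path: an offlined parent must not be reused */
  static bool parent_offlined(const struct pd_stub *pd)
  {
          return pd->plid >= BLKCG_MAX_POLS;
  }

  int main(void)
  {
          struct pd_stub parent = { .plid = 0 };

          pd_offline(&parent);
          if (parent_offlined(&parent))
                  printf("move bfqq to root_group instead of the stale parent\n");
          return 0;
  }

Reusing pd->plid as the offline mark avoids adding a new field: a live policy data always carries a plid below BLKCG_MAX_POLS, so the check cannot misfire on an online parent.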
Fixes: e21b7a0b9887 ("block, bfq: add full hierarchical scheduling and cgroups support")
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 block/bfq-cgroup.c | 14 ++++++++++----
 block/bfq-wf2q.c   |  9 +++++++++
 2 files changed, 19 insertions(+), 4 deletions(-)
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 78cfd008b89d7..73b82a5c03717 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -556,6 +556,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		   struct bfq_group *bfqg)
 {
 	struct bfq_entity *entity = &bfqq->entity;
+	struct bfq_group *old_parent = bfqq_group(bfqq);
 
 	/*
 	 * Get extra reference to prevent bfqq from being freed in
@@ -577,17 +578,21 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
 	else if (entity->on_st)
 		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
-	bfqg_and_blkg_put(bfqq_group(bfqq));
 
 	entity->parent = bfqg->my_entity;
 	entity->sched_data = &bfqg->sched_data;
 	/* pin down bfqg and its associated blkg  */
 	bfqg_and_blkg_get(bfqg);
 
-	if (bfq_bfqq_busy(bfqq)) {
-		bfq_pos_tree_add_move(bfqd, bfqq);
+	/*
+	 * Don't leave the bfqq->pos_root to old bfqg, since the ref to old
+	 * bfqg will be released and the bfqg might be freed.
+	 */
+	bfq_pos_tree_add_move(bfqd, bfqq);
+	bfqg_and_blkg_put(old_parent);
+
+	if (bfq_bfqq_busy(bfqq))
 		bfq_activate_bfqq(bfqd, bfqq);
-	}
 
 	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
 		bfq_schedule_dispatch(bfqd);
@@ -860,6 +865,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
 
 put_async_queues:
 	bfq_put_async_queues(bfqd, bfqg);
+	pd->plid = BLKCG_MAX_POLS;
 
 	spin_unlock_irqrestore(&bfqd->lock, flags);
 	/*
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 316a1c2d1b610..e830715fe15d6 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1684,6 +1684,15 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  */
 void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+	/* If parent group is offlined, move the bfqq to root group */
+	if (bfqq->entity.parent) {
+		struct bfq_group *bfqg = bfq_bfqq_to_bfqg(bfqq);
+
+		if (bfqg->pd.plid >= BLKCG_MAX_POLS)
+			bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
+	}
+#endif
 	bfq_log_bfqq(bfqd, bfqq, "add to busy");
 
 	bfq_activate_bfqq(bfqd, bfqq);