From: Yu Kuai <yukuai3@huawei.com>
hulk inclusion
category: performance
bugzilla: https://gitee.com/openeuler/kernel/issues/I5QK5M
CVE: NA
--------------------------------
Add a new queue flag QUEUE_FLAG_DISPATCH_ASYNC and two new request_queue fields, 'dispatch_async_cpus' and 'last_dispatch_cpu', in preparation for dispatching bios asynchronously on specified CPUs. This patch also adds the corresponding sysfs APIs.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 block/blk-core.c       | 28 +++++++++++++++++++++++++++-
 block/blk-sysfs.c      | 39 +++++++++++++++++++++++++++++++++++++++
 block/blk.h            |  2 ++
 include/linux/blkdev.h |  6 ++++++
 4 files changed, 74 insertions(+), 1 deletion(-)
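Note (not part of the commit): a minimal sketch of how a later patch or driver could opt a queue into asynchronous dispatch once this flag exists. The helper name example_enable_dispatch_async is hypothetical; blk_queue_flag_set() is the existing helper whose kernel-doc is visible in the first hunk below. With the flag set, the two new read-only sysfs attributes report the flag value and the dispatch CPU mask, which defaults to all possible CPUs.

#include <linux/blkdev.h>

/*
 * Sketch only: enable asynchronous bio dispatch on a queue.
 * QUEUE_FLAG_DISPATCH_ASYNC is introduced by this patch; this
 * wrapper is illustrative and not part of the patch itself.
 */
static void example_enable_dispatch_async(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DISPATCH_ASYNC, q);
}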
diff --git a/block/blk-core.c b/block/blk-core.c
index e98827d25ef8..bffecc437fbc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -85,6 +85,27 @@ struct kmem_cache *blk_requestq_cachep;
  */
 static struct workqueue_struct *kblockd_workqueue;
 
+static int blk_alloc_queue_dispatch_async(struct request_queue *q)
+{
+	int cpu;
+
+	q->last_dispatch_cpu = alloc_percpu(int);
+	if (!q->last_dispatch_cpu)
+		return -ENOMEM;
+
+	cpumask_setall(&q->dispatch_async_cpus);
+	for_each_possible_cpu(cpu) {
+		*per_cpu_ptr(q->last_dispatch_cpu, cpu) = cpu;
+	}
+
+	return 0;
+}
+
+void blk_free_queue_dispatch_async(struct request_queue *q)
+{
+	free_percpu(q->last_dispatch_cpu);
+}
+
 /**
  * blk_queue_flag_set - atomically set a queue flag
  * @flag: flag to be set
@@ -1049,9 +1070,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 	q->end_sector = 0;
 	q->boundary_rq = NULL;
 
+	if (blk_alloc_queue_dispatch_async(q))
+		goto fail_q;
+
 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
 	if (q->id < 0)
-		goto fail_q;
+		goto fail_dispatch_async;
 
 	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 	if (ret)
@@ -1130,6 +1154,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 	bioset_exit(&q->bio_split);
 fail_id:
 	ida_simple_remove(&blk_queue_ida, q->id);
+fail_dispatch_async:
+	blk_free_queue_dispatch_async(q);
 fail_q:
 	kmem_cache_free(blk_requestq_cachep, q_wrapper);
 	return NULL;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 30898a7855d7..60daf9b53a97 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -696,6 +696,42 @@ static struct queue_sysfs_entry queue_wb_lat_entry = {
 	.store = queue_wb_lat_store,
 };
 
+static ssize_t queue_dispatch_async_cpus_show(struct request_queue *q,
+					      char *page)
+{
+	int cpu;
+	ssize_t ret = 0;
+
+	if (!test_bit(QUEUE_FLAG_DISPATCH_ASYNC, &q->queue_flags))
+		return -EOPNOTSUPP;
+
+	for_each_cpu(cpu, &q->dispatch_async_cpus) {
+		ret += sprintf(page + ret, "%d ", cpu);
+	}
+
+	ret += sprintf(page + ret, "\n");
+	return ret;
+}
+
+static struct queue_sysfs_entry queue_dispatch_async_cpus_entry = {
+	.attr = {.name = "dispatch_async_cpus", .mode = 0444 },
+	.show = queue_dispatch_async_cpus_show,
+};
+
+static ssize_t queue_show_dispatch_async(struct request_queue *q,
+					 char *page)
+{
+	if (test_bit(QUEUE_FLAG_DISPATCH_ASYNC, &q->queue_flags))
+		return sprintf(page, "1\n");
+	else
+		return sprintf(page, "0\n");
+}
+
+static struct queue_sysfs_entry queue_dispatch_async_entry = {
+	.attr = {.name = "dispatch_async", .mode = 0444 },
+	.show = queue_show_dispatch_async,
+};
+
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 static struct queue_sysfs_entry throtl_sample_time_entry = {
 	.attr = {.name = "throttle_sample_time", .mode = 0644 },
@@ -738,6 +774,8 @@ static struct attribute *default_attrs[] = {
 	&queue_dax_entry.attr,
 	&queue_wb_lat_entry.attr,
 	&queue_poll_delay_entry.attr,
+	&queue_dispatch_async_cpus_entry.attr,
+	&queue_dispatch_async_entry.attr,
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 	&throtl_sample_time_entry.attr,
 #endif
@@ -819,6 +857,7 @@ static void __blk_release_queue(struct work_struct *work)
 	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
 		blk_stat_remove_callback(q, q->poll_cb);
 	blk_stat_free_callback(q->poll_cb);
+	blk_free_queue_dispatch_async(q);
 
 	if (!blk_queue_dead(q)) {
 		/*
diff --git a/block/blk.h b/block/blk.h
index dde2141a32dd..f3094e18f89e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -460,4 +460,6 @@ extern int blk_iolatency_init(struct request_queue *q);
 static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
 #endif
 
+extern void blk_free_queue_dispatch_async(struct request_queue *q);
+
 #endif /* BLK_INTERNAL_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1deaf36eb237..fd1fc4670f31 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -701,6 +701,10 @@ struct request_queue {
 
 	struct work_struct	release_work;
 
+	/* used when QUEUE_FLAG_DISPATCH_ASYNC is set */
+	struct cpumask dispatch_async_cpus;
+	int __percpu *last_dispatch_cpu;
+
 #define BLK_MAX_WRITE_HINTS	5
 	u64			write_hints[BLK_MAX_WRITE_HINTS];
 };
@@ -739,6 +743,8 @@ struct request_queue {
 #define QUEUE_FLAG_FORECE_QUIESCE	29 /* force quiesce when cleanup queue */
 /* queue has bee quiesced, used in block layer */
 #define QUEUE_FLAG_QUIESCED_INTERNAL	30
+/* bio will be dispatched asynchronously */
+#define QUEUE_FLAG_DISPATCH_ASYNC	31
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\