On 2022/9/16 16:33, Yu Kuai wrote:
From: Yu Kuai yukuai3@huawei.com
hulk inclusion
category: performance
bugzilla: https://gitee.com/openeuler/kernel/issues/I5QK5M
CVE: NA

Add a new flag QUEUE_FLAG_DISPATCH_ASYNC and two new fields
'dispatch_async_cpus' and 'last_dispatch_cpu' for request_queue, preparing
to support dispatching bio asynchronously on specified cpus. This patch
also adds the sysfs APIs.
Add a new flag QUEUE_FLAG_DISPATCH_ASYNC and two new fields 'dispatch_cpumask' and 'last_dispatch_cpu' for request_queue, prepare to support dispatch bio asynchronous in specified cpus. This patch also add sysfs apis.
Signed-off-by: Yu Kuai yukuai3@huawei.com
 block/blk-core.c       | 19 +++++++++++++++++++
 block/blk-sysfs.c      | 40 ++++++++++++++++++++++++++++++++++++++++
 include/linux/blkdev.h |  6 ++++++
 3 files changed, 65 insertions(+)
diff --git a/block/blk-core.c b/block/blk-core.c
index e98827d25ef8..fc81dff50a34 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1018,6 +1018,22 @@ static void blk_timeout_work_empty(struct work_struct *work)
 {
 }
+static int blk_alloc_queue_dispatch_async(struct request_queue *q) +{
- int cpu;
- q->last_dispatch_cpu = alloc_percpu(int);
- if (!q->last_dispatch_cpu)
return -ENOMEM;
- cpumask_setall(&q->dispatch_async_cpus);
- for_each_possible_cpu(cpu) {
*per_cpu_ptr(q->last_dispatch_cpu, cpu) = cpu;
- }
- return 0;
+}
 /**
  * blk_alloc_queue_node - allocate a request queue
  * @gfp_mask: memory allocation flags
@@ -1049,6 +1065,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 	q->end_sector = 0;
 	q->boundary_rq = NULL;

+	if (blk_alloc_queue_dispatch_async(q))
+		goto fail_q;
+
 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
 	if (q->id < 0)
 		goto fail_q;
上面多加了内存分配,这里失败了以后需要释放上面的percpu内存。其他错误路径 也一样。
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 30898a7855d7..55b22d66672c 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -696,6 +696,42 @@ static struct queue_sysfs_entry queue_wb_lat_entry = {
 	.store = queue_wb_lat_store,
 };
+static ssize_t queue_dispatch_async_cpus_show(struct request_queue *q,
char *page)
+{
- int cpu;
- ssize_t ret = 0;
- if (!test_bit(QUEUE_FLAG_DISPATCH_ASYNC, &q->queue_flags))
return -EOPNOTSUPP;
- for_each_cpu(cpu, &q->dispatch_async_cpus) {
ret += sprintf(page + ret, "%d ", cpu);
- }
- ret += sprintf(page + ret, "\n");
- return ret;
+}
+static struct queue_sysfs_entry queue_dispatch_async_cpus_entry = {
- .attr = {.name = "dispatch_async_cpus", .mode = 0444 },
- .show = queue_dispatch_async_cpus_show,
+};
+static ssize_t queue_show_dispatch_async(struct request_queue *q,
char *page)
+{
- if (test_bit(QUEUE_FLAG_DISPATCH_ASYNC, &q->queue_flags))
return sprintf(page, "1\n");
- else
return sprintf(page, "0\n");
+}
+static struct queue_sysfs_entry queue_dispatch_async_entry = {
- .attr = {.name = "dispatch_async", .mode = 0444 },
- .show = queue_show_dispatch_async,
+};
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 static struct queue_sysfs_entry throtl_sample_time_entry = {
 	.attr = {.name = "throttle_sample_time", .mode = 0644 },
@@ -738,6 +774,8 @@ static struct attribute *default_attrs[] = {
 	&queue_dax_entry.attr,
 	&queue_wb_lat_entry.attr,
 	&queue_poll_delay_entry.attr,
+	&queue_dispatch_async_cpus_entry.attr,
+	&queue_dispatch_async_entry.attr,
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 	&throtl_sample_time_entry.attr,
 #endif
@@ -820,6 +858,8 @@ static void __blk_release_queue(struct work_struct *work)
 	blk_stat_remove_callback(q, q->poll_cb);
 	blk_stat_free_callback(q->poll_cb);

+	free_percpu(q->last_dispatch_cpu);
+
 	if (!blk_queue_dead(q)) {
 		/*
 		 * Last reference was dropped without having called
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1deaf36eb237..fd1fc4670f31 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -701,6 +701,10 @@ struct request_queue {
 	struct work_struct	release_work;

+	/* used when QUEUE_FLAG_DISPATCH_ASYNC is set */
+	struct cpumask		dispatch_async_cpus;
+	int __percpu		*last_dispatch_cpu;
+
这里不用管kabi吗
 #define BLK_MAX_WRITE_HINTS	5
 	u64			write_hints[BLK_MAX_WRITE_HINTS];
 };
@@ -739,6 +743,8 @@ struct request_queue {
 #define QUEUE_FLAG_FORECE_QUIESCE	29	/* force quiesce when cleanup queue */
 /* queue has bee quiesced, used in block layer */
 #define QUEUE_FLAG_QUIESCED_INTERNAL	30
+/* bio will be dispatched asynchronous */
+#define QUEUE_FLAG_DISPATCH_ASYNC	31
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_SAME_COMP) | \