tree:   https://gitee.com/openeuler/kernel.git openEuler-1.0-LTS
head:   97f98c391a1fd153ae010a2faf73f9e03270da6e
commit: c1ea82c0ddde70d5998c2f547520afbee10bea2a [1402/1402] block: Limit number of items taken from the I/O scheduler in one go
config: x86_64-buildonly-randconfig-006-20250109 (https://download.01.org/0day-ci/archive/20250110/202501100428.rcoZkFtm-lkp@i...)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250110/202501100428.rcoZkFtm-lkp@i...)
If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501100428.rcoZkFtm-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> block/blk-mq-sched.c:219:5: warning: no previous prototype for '__blk_mq_sched_dispatch_requests' [-Wmissing-prototypes]
     219 | int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
         |     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
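The warning itself is straightforward: gcc's -Wmissing-prototypes fires because the function has external linkage but no prior declaration is visible at its definition. A minimal sketch of the two usual fixes, assuming struct blk_mq_hw_ctx is already declared in scope; whether block/blk-mq-sched.h is the right header for this tree is an assumption:

	struct blk_mq_hw_ctx;

	/*
	 * Option A (sketch): if the helper has no callers outside
	 * blk-mq-sched.c, give it internal linkage, which appears to be
	 * what the upstream version of this commit does.
	 */
	static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

	/*
	 * Option B (sketch): if other files do call it, put the prototype
	 * in a header included by blk-mq-sched.c, e.g. block/blk-mq-sched.h
	 * (assumed location).
	 */
	int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

Either way the definition at line 219 then sees a prototype (or gets internal linkage) and the warning goes away.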
vim +/__blk_mq_sched_dispatch_requests +219 block/blk-mq-sched.c
   218	
 > 219	int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
   220	{
   221		struct request_queue *q = hctx->queue;
   222		struct elevator_queue *e = q->elevator;
   223		const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
   224		int ret = 0;
   225		LIST_HEAD(rq_list);
   226	
   227		/*
   228		 * If we have previous entries on our dispatch list, grab them first for
   229		 * more fair dispatch.
   230		 */
   231		if (!list_empty_careful(&hctx->dispatch)) {
   232			spin_lock(&hctx->lock);
   233			if (!list_empty(&hctx->dispatch))
   234				list_splice_init(&hctx->dispatch, &rq_list);
   235			spin_unlock(&hctx->lock);
   236		}
   237	
   238		/*
   239		 * Only ask the scheduler for requests, if we didn't have residual
   240		 * requests from the dispatch list. This is to avoid the case where
   241		 * we only ever dispatch a fraction of the requests available because
   242		 * of low device queue depth. Once we pull requests out of the IO
   243		 * scheduler, we can no longer merge or sort them. So it's best to
   244		 * leave them there for as long as we can. Mark the hw queue as
   245		 * needing a restart in that case.
   246		 *
   247		 * We want to dispatch from the scheduler if there was nothing
   248		 * on the dispatch list or we were able to dispatch from the
   249		 * dispatch list.
   250		 */
   251		if (!list_empty(&rq_list)) {
   252			blk_mq_sched_mark_restart_hctx(hctx);
   253			if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
   254				if (has_sched_dispatch)
   255					ret = blk_mq_do_dispatch_sched(hctx);
   256				else
   257					ret = blk_mq_do_dispatch_ctx(hctx);
   258			}
   259		} else if (has_sched_dispatch) {
   260			ret = blk_mq_do_dispatch_sched(hctx);
   261		} else if (hctx->dispatch_busy) {
   262			/* dequeue request one by one from sw queue if queue is busy */
   263			ret = blk_mq_do_dispatch_ctx(hctx);
   264		} else {
   265			blk_mq_flush_busy_ctxs(hctx, &rq_list);
   266			blk_mq_dispatch_rq_list(q, &rq_list, false);
   267		}
   268	
   269		return ret;
   270	}
   271	
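For context on the int return value, here is a sketch of how the caller is expected to consume it, modeled on the upstream version of this commit (where -EAGAIN signals that hctx->dispatch was left non-empty); the exact shape of the openEuler backport's wrapper is an assumption:

	void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
	{
		struct request_queue *q = hctx->queue;

		/* RCU or SRCU read lock is needed before checking quiesced flag */
		if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
			return;

		hctx->run++;

		/*
		 * A return of -EAGAIN means hctx->dispatch was left non-empty,
		 * so run the helper once more to avoid starving flushes; if it
		 * still reports -EAGAIN, punt to an async queue run instead of
		 * looping here.
		 */
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
			if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
				blk_mq_run_hw_queue(hctx, true);
		}
	}

With the helper given internal linkage (Option A above), this wrapper arrangement also resolves the -Wmissing-prototypes warning in one place.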