@@ -568,13 +568,13 @@ static void blk_mq_requeue_work(struct work_struct *work)
rq - > rq_flags & = ~ RQF_SOFTBARRIER ;
list_del_init ( & rq - > queuelist ) ;
blk_mq_sched_insert_request ( rq , true , false , false ) ;
blk_mq_sched_insert_request ( rq , true , false , false , true ) ;
}
while ( ! list_empty ( & rq_list ) ) {
rq = list_entry ( rq_list . next , struct request , queuelist ) ;
list_del_init ( & rq - > queuelist ) ;
blk_mq_sched_insert_request ( rq , false , false , false ) ;
blk_mq_sched_insert_request ( rq , false , false , false , true ) ;
}
blk_mq_run_hw_queues ( q , false ) ;
@@ -847,12 +847,11 @@ static inline unsigned int queued_to_index(unsigned int queued)
return min ( BLK_MQ_MAX_DISPATCH_ORDER - 1 , ilog2 ( queued ) + 1 ) ;
}
static bool blk_mq_get_driver_tag ( struct request * rq ,
struct blk_mq_hw_ctx * * hctx , bool wait )
bool blk_mq_get_driver_tag ( struct request * rq , struct blk_mq_hw_ctx * * hctx ,
bool wait )
{
struct blk_mq_alloc_data data = {
. q = rq - > q ,
. ctx = rq - > mq_ctx ,
. hctx = blk_mq_map_queue ( rq - > q , rq - > mq_ctx - > cpu ) ,
. flags = wait ? 0 : BLK_MQ_REQ_NOWAIT ,
} ;
@@ -1395,7 +1394,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
}
insert :
blk_mq_sched_insert_request ( rq , false , true , true ) ;
blk_mq_sched_insert_request ( rq , false , true , true , false ) ;
}
/*
@@ -1446,10 +1445,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
cookie = request_to_qc_t ( data . hctx , rq ) ;
if ( unlikely ( is_flush_fua ) ) {
blk_mq_put_ctx ( data . ctx ) ;
blk_mq_bio_to_request ( rq , bio ) ;
blk_mq_get_driver_tag ( rq , NULL , true ) ;
blk_insert_flush ( rq ) ;
goto run_queue ;
blk_mq_run_hw_queue ( data . hctx , true ) ;
goto done ;
}
plug = current - > plug ;
@@ -1502,7 +1503,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_put_ctx ( data . ctx ) ;
blk_mq_bio_to_request ( rq , bio ) ;
blk_mq_sched_insert_request ( rq , false , true ,
! is_sync | | is_flush_fua ) ;
! is_sync | | is_flush_fua , true ) ;
goto done ;
}
if ( ! blk_mq_merge_queue_io ( data . hctx , data . ctx , rq , bio ) ) {
@@ -1512,7 +1513,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
* latter allows for merging opportunities and more efficient
* dispatching .
*/
run_queue :
blk_mq_run_hw_queue ( data . hctx , ! is_sync | | is_flush_fua ) ;
}
blk_mq_put_ctx ( data . ctx ) ;
@@ -1568,10 +1568,12 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
cookie = request_to_qc_t ( data . hctx , rq ) ;
if ( unlikely ( is_flush_fua ) ) {
blk_mq_put_ctx ( data . ctx ) ;
blk_mq_bio_to_request ( rq , bio ) ;
blk_mq_get_driver_tag ( rq , NULL , true ) ;
blk_insert_flush ( rq ) ;
goto run_queue ;
blk_mq_run_hw_queue ( data . hctx , true ) ;
goto done ;
}
/*
@@ -1612,7 +1614,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_put_ctx ( data . ctx ) ;
blk_mq_bio_to_request ( rq , bio ) ;
blk_mq_sched_insert_request ( rq , false , true ,
! is_sync | | is_flush_fua ) ;
! is_sync | | is_flush_fua , true ) ;
goto done ;
}
if ( ! blk_mq_merge_queue_io ( data . hctx , data . ctx , rq , bio ) ) {
@@ -1622,7 +1624,6 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
* latter allows for merging opportunities and more efficient
* dispatching .
*/
run_queue :
blk_mq_run_hw_queue ( data . hctx , ! is_sync | | is_flush_fua ) ;
}