@@ -4462,7 +4462,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	unsigned long i, j;
 
 	/* protect against switching io scheduler */
-	mutex_lock(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_lock);
+
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
@@ -4495,7 +4496,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 	xa_for_each_start(&q->hctx_table, j, hctx, j)
 		blk_mq_exit_hctx(q, set, hctx, j);
-	mutex_unlock(&q->sysfs_lock);
 
 	/* unregister cpuhp callbacks for exited hctxs */
 	blk_mq_remove_hw_queues_cpuhp(q);
@@ -4527,10 +4527,14 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
 	xa_init(&q->hctx_table);
 
+	mutex_lock(&q->sysfs_lock);
+
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
 
+	mutex_unlock(&q->sysfs_lock);
+
 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -4549,6 +4553,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	return 0;
 
 err_hctxs:
+	mutex_unlock(&q->sysfs_lock);
 	blk_mq_release(q);
 err_exit:
 	q->mq_ops = NULL;
@@ -4929,12 +4934,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 		return false;
 
 	/* q->elevator needs protection from ->sysfs_lock */
-	mutex_lock(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_lock);
 
 	/* the check has to be done with holding sysfs_lock */
 	if (!q->elevator) {
 		kfree(qe);
-		goto unlock;
+		goto out;
 	}
 
 	INIT_LIST_HEAD(&qe->node);
@@ -4944,9 +4949,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 	__elevator_get(qe->type);
 	list_add(&qe->node, head);
 	elevator_disable(q);
-unlock:
-	mutex_unlock(&q->sysfs_lock);
-
+out:
 	return true;
 }
 
@@ -4975,11 +4978,9 @@ static void blk_mq_elv_switch_back(struct list_head *head,
 	list_del(&qe->node);
 	kfree(qe);
 
-	mutex_lock(&q->sysfs_lock);
 	elevator_switch(q, t);
 	/* drop the reference acquired in blk_mq_elv_switch_none */
 	elevator_put(t);
-	mutex_unlock(&q->sysfs_lock);
 }
 
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
@@ -4999,8 +5000,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
 		return;
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		mutex_lock(&q->sysfs_dir_lock);
+		mutex_lock(&q->sysfs_lock);
 		blk_mq_freeze_queue(q);
+	}
 	/*
 	 * Switch IO scheduler to 'none', cleaning up the data associated
 	 * with the previous scheduler. We will switch back once we are done
@@ -5056,8 +5060,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_elv_switch_back(&head, q);
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_unfreeze_queue(q);
+		mutex_unlock(&q->sysfs_lock);
+		mutex_unlock(&q->sysfs_dir_lock);
+	}
 
 	/* Free the excess tags when nr_hw_queues shrink. */
 	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
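
For reference, the last two hunks give __blk_mq_update_nr_hw_queues() a fixed per-queue ordering: take sysfs_dir_lock, then sysfs_lock, then freeze the queue, and undo the three in reverse on the way out. A small sketch that spells out the ordering (the example_lock_queue()/example_unlock_queue() helpers are hypothetical and not part of the patch):

static void example_lock_queue(struct request_queue *q)
{
	mutex_lock(&q->sysfs_dir_lock);	/* outer: sysfs directory lock */
	mutex_lock(&q->sysfs_lock);	/* inner: protects q->elevator et al. */
	blk_mq_freeze_queue(q);		/* freeze last, with both mutexes held */
}

static void example_unlock_queue(struct request_queue *q)
{
	blk_mq_unfreeze_queue(q);	/* unfreeze first... */
	mutex_unlock(&q->sysfs_lock);	/* ...then drop the mutexes in */
	mutex_unlock(&q->sysfs_dir_lock);	/* reverse acquisition order */
}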