/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return sbitmap_any_bit_set(&hctx->ctx_map);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

void blk_mq_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);

static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero. For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

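/*
 * Fully initialize a newly allocated request; the tag has already been
 * assigned by the caller.
 */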
static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			       struct request *rq, int op,
			       unsigned int op_flags)
{
	if (blk_queue_io_stat(q))
		op_flags |= REQ_IO_STAT;

	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
	rq->mq_ctx = ctx;
	req_set_op_attrs(rq, op, op_flags);
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;

	rq->cmd = rq->__cmd;

	rq->extra_len = 0;
	rq->sense_len = 0;
	rq->resid_len = 0;
	rq->sense = NULL;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
}

static struct request *
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
{
	struct request *rq;
	unsigned int tag;

	tag = blk_mq_get_tag(data);
	if (tag != BLK_MQ_TAG_FAIL) {
		rq = data->hctx->tags->rqs[tag];

		if (blk_mq_tag_busy(data->hctx)) {
			rq->cmd_flags = REQ_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}

		rq->tag = tag;
		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
		return rq;
	}

	return NULL;
}

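/*
 * Allocate a request on the hardware queue mapped to the calling CPU.
 * Unless BLK_MQ_REQ_NOWAIT is set, the tag allocator may sleep; failure
 * is reported as an ERR_PTR.
 */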
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags)
{
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	struct blk_mq_alloc_data alloc_data;
	int ret;

	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ERR_PTR(ret);

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, ctx->cpu);
	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);

	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
	blk_mq_put_ctx(ctx);

	if (!rq) {
		blk_queue_exit(q);
		return ERR_PTR(-EWOULDBLOCK);
	}

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
		unsigned int flags, unsigned int hctx_idx)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
	struct blk_mq_alloc_data alloc_data;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context. No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, true);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(hctx)) {
		ret = -EXDEV;
		goto out_queue_exit;
	}
	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));

	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
	if (!rq) {
		ret = -EWOULDBLOCK;
		goto out_queue_exit;
	}

	return rq;

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx, struct request *rq)
{
	const int tag = rq->tag;
	struct request_queue *q = rq->q;

	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);
	rq->cmd_flags = 0;

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	blk_mq_put_tag(hctx, ctx, tag);
	blk_queue_exit(q);
}

void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	ctx->rq_completed[rq_is_sync(rq)]++;
	__blk_mq_free_request(hctx, ctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);

void blk_mq_free_request(struct request *rq)
{
	blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, int error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

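/*
 * Complete a request on the CPU that submitted it. If QUEUE_FLAG_SAME_COMP
 * is not set, or the submitting CPU shares a cache with the current one
 * (and SAME_FORCE is not set), the completion runs locally; otherwise it
 * is punted to the submission CPU via an async IPI.
 */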
static void blk_mq_ipi_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (!q->softirq_done_fn)
		blk_mq_end_request(rq, rq->errors);
	else
		blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq)) {
		rq->errors = error;
		__blk_mq_complete_request(rq);
	}
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(q, rq);

	rq->resid_len = blk_rq_bytes(rq);
	if (unlikely(blk_bidi_rq(rq)))
		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

	blk_add_timer(rq);

	/*
	 * Ensure that ->deadline is visible before we set the started
	 * flag and clear the completed flag.
	 */
	smp_mb__before_atomic();

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

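/*
 * Undo blk_mq_start_request(): clear the STARTED flag and drop the drain
 * segment again so the request can be issued once more.
 */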
static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

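/*
 * Drain q->requeue_list: requests flagged REQ_SOFTBARRIER are reinserted
 * at the head of their queue, everything else at the tail, and stopped
 * queues are then restarted.
 */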
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
			continue;

		rq->cmd_flags &= ~REQ_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, false, false, false);
	}

	/*
	 * Use the start variant of queue running here, so that running
	 * the requeue work will kick stopped queues.
	 */
	blk_mq_start_hw_queues(q);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_cancel_requeue_work(struct request_queue *q)
{
	cancel_delayed_work_sync(&q->requeue_work);
}
EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_schedule_delayed_work(&q->requeue_work,
				      msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

void blk_mq_abort_requeue_list(struct request_queue *q)
{
	unsigned long flags;
	LIST_HEAD(rq_list);

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	while (!list_empty(&rq_list)) {
		struct request *rq;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
	}
}
EXPORT_SYMBOL(blk_mq_abort_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

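/*
 * Aggregate state for one timeout scan: 'next' is the soonest deadline
 * seen among started requests, 'next_set' records whether any was found.
 */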
struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		return;

	if (time_after_eq(jiffies, rq->deadline)) {
		if (!blk_mark_rq_complete(rq))
			blk_mq_rq_timed_out(rq, reserved);
	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
		data->next = rq->deadline;
		data->next_set = 1;
	}
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting for
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_mq_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		struct blk_mq_hw_ctx *hctx;

		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		int el_ret;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			if (bio_attempt_back_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			if (bio_attempt_front_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		}
	}

	return false;
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	sbitmap_clear_bit(sb, bitnr);
	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}

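/*
 * Map the number of requests dispatched in one queue run to a logarithmic
 * bucket index for the hctx->dispatched[] statistics.
 */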
static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct request *rq;
	LIST_HEAD(rq_list);
	LIST_HEAD(driver_list);
	struct list_head *dptr;
	int queued;

	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return;

	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu));

	hctx->run++;

	/*
	 * Touch any software queue that has pending entries.
	 */
	flush_busy_ctxs(hctx, &rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them
	 * and stuff them at the front for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Start off with dptr being NULL, so we start the first request
	 * immediately, even if we have more pending.
	 */
	dptr = NULL;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	queued = 0;
	while (!list_empty(&rq_list)) {
		struct blk_mq_queue_data bd;
		int ret;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);

		bd.rq = rq;
		bd.list = dptr;
		bd.last = list_empty(&rq_list);

		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			break;
		case BLK_MQ_RQ_QUEUE_BUSY:
			list_add(&rq->queuelist, &rq_list);
			__blk_mq_requeue_request(rq);
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
			break;
		}

		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
			break;

		/*
		 * We've done the first request. If we have more than 1
		 * left in the list, set dptr to defer issue.
		 */
		if (!dptr && rq_list.next != rq_list.prev)
			dptr = &driver_list;
	}

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(&rq_list)) {
		spin_lock(&hctx->lock);
		list_splice(&rq_list, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		/*
		 * The queue is expected to be stopped on BLK_MQ_RQ_QUEUE_BUSY,
		 * but it's possible the queue is stopped and restarted again
		 * before this. Queue restart will dispatch requests. And since
		 * requests in rq_list aren't added into hctx->dispatch yet,
		 * the requests in rq_list might get lost.
		 *
		 * blk_mq_run_hw_queue() already checks the STOPPED bit.
		 */
		blk_mq_run_hw_queue(hctx, true);
	}
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	return hctx->next_cpu;
}

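/*
 * Run a hardware queue. If the caller sits on a CPU mapped to the queue
 * and synchronous, non-blocking dispatch is allowed, run it inline;
 * otherwise punt the run to kblockd on one of the queue's mapped CPUs.
 */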
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
	    !blk_mq_hw_queue_mapped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();

		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
}

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if ((!blk_mq_hctx_has_pending(hctx) &&
		    list_empty_careful(&hctx->dispatch)) ||
		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_work(&hctx->run_work);
	cancel_delayed_work(&hctx->delay_work);
	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work);

	__blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
		__blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
		return;

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
			&hctx->delay_work, msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);

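/*
 * Add a request to a software queue's rq_list; the ctx->lock must be held
 * by the caller.
 */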
static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);
}

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
				    struct request *rq, bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
			   bool async)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, at_head);
	spin_unlock(&ctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

static void blk_mq_insert_requests(struct request_queue *q,
				   struct blk_mq_ctx *ctx,
				   struct list_head *list,
				   int depth,
				   bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	trace_block_unplug(q, depth, !from_schedule);

	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		BUG_ON(rq->mq_ctx != ctx);
		list_del_init(&rq->queuelist);
		__blk_mq_insert_req_list(hctx, rq, false);
	}
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, from_schedule);
}

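/*
 * Sort order for flushing a plug list: group requests by software queue,
 * then by starting sector, so each batch below can be inserted under a
 * single ctx lock.
 */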
static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				blk_mq_insert_requests(this_q, this_ctx,
							&ctx_list, depth,
							from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
				       from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	init_request_from_bio(rq, bio);

	blk_account_io_start(rq, 1);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
		!blk_queue_nomerges(hctx->queue);
}

static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
					 struct blk_mq_ctx *ctx,
					 struct request *rq, struct bio *bio)
{
	if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
		blk_mq_bio_to_request(rq, bio);
		spin_lock(&ctx->lock);
insert_rq:
		__blk_mq_insert_request(hctx, rq, false);
		spin_unlock(&ctx->lock);
		return false;
	} else {
		struct request_queue *q = hctx->queue;

		spin_lock(&ctx->lock);
		if (!blk_mq_attempt_merge(q, ctx, bio)) {
			blk_mq_bio_to_request(rq, bio);
			goto insert_rq;
		}

		spin_unlock(&ctx->lock);
		__blk_mq_free_request(hctx, ctx, rq);
		return true;
	}
}

struct blk_map_ctx {
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
};

static struct request *blk_mq_map_request(struct request_queue *q,
					  struct bio *bio,
					  struct blk_map_ctx *data)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
	int op = bio_data_dir(bio);
	int op_flags = 0;
	struct blk_mq_alloc_data alloc_data;

	blk_queue_enter_live(q);
	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rw_is_sync(bio_op(bio), bio->bi_opf))
		op_flags |= REQ_SYNC;

	trace_block_getrq(q, bio, op);
	blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
	rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);

	data->hctx = alloc_data.hctx;
	data->ctx = alloc_data.ctx;
	data->hctx->queued++;
	return rq;
}

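/*
 * Try to issue a single request directly to the driver. Returns 0 when the
 * driver either accepted the request or failed it with an error, and -1
 * when the driver was busy and the request must go through the normal
 * insert path instead.
 */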
static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
{
	int ret;
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.list = NULL,
		.last = 1
	};
	blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);

	/*
	 * For OK queue, we are done. For error, kill it. Any other
	 * error (busy), just add it to our list as we previously
	 * would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	if (ret == BLK_MQ_RQ_QUEUE_OK) {
		*cookie = new_cookie;
		return 0;
	}

	__blk_mq_requeue_request(rq);

	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
		*cookie = BLK_QC_T_NONE;
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
		return 0;
	}

	return -1;
}

/*
 * Multiple hardware queue variant. This will not use per-process plugs,
 * but will attempt to bypass the hctx queueing if we can go straight to
 * hardware for SYNC IO.
 */
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
	struct blk_map_ctx data;
	struct request *rq;
	unsigned int request_count = 0;
	struct blk_plug *plug;
	struct request *same_queue_rq = NULL;
	blk_qc_t cookie;

	blk_queue_bounce(q, &bio);

	blk_queue_split(q, &bio, q->bio_split);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
		return BLK_QC_T_NONE;

	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
		return BLK_QC_T_NONE;

	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	plug = current->plug;
	/*
	 * If the driver supports deferred issue based on 'last', then
	 * queue it up like normal since we can potentially save some
	 * CPU this way.
	 */
	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
		struct request *old_rq = NULL;

		blk_mq_bio_to_request(rq, bio);

		/*
		 * We do limited plugging. If the bio can be merged, do that.
		 * Otherwise the existing request in the plug list will be
		 * issued. So the plug list will have one request at most.
		 */
		if (plug) {
			/*
			 * The plug list might get flushed before this. If that
			 * happens, same_queue_rq is invalid and plug list is
			 * empty.
			 */
			if (same_queue_rq && !list_empty(&plug->mq_list)) {
				old_rq = same_queue_rq;
				list_del_init(&old_rq->queuelist);
			}
			list_add_tail(&rq->queuelist, &plug->mq_list);
		} else /* is_sync */
			old_rq = rq;
		blk_mq_put_ctx(data.ctx);
		if (!old_rq)
			goto done;
		if (test_bit(BLK_MQ_S_STOPPED, &data.hctx->state) ||
		    blk_mq_direct_issue_request(old_rq, &cookie) != 0)
			blk_mq_insert_request(old_rq, false, true, true);
		goto done;
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}

	blk_mq_put_ctx(data.ctx);
done:
	return cookie;
}

/*
 * Single hardware queue variant. This will attempt to use any per-process
 * plug for merging and IO deferral.
 */
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
	struct blk_plug *plug;
	unsigned int request_count = 0;
	struct blk_map_ctx data;
	struct request *rq;
	blk_qc_t cookie;

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	blk_queue_split(q, &bio, q->bio_split);

	if (!is_flush_fua && !blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
			return BLK_QC_T_NONE;
	} else
		request_count = blk_plug_queued_count(q);

	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
		return BLK_QC_T_NONE;

	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	/*
	 * A task plug currently exists. Since this is completely lockless,
	 * utilize that to temporarily store requests until the task is
	 * either done or scheduled away.
	 */
	plug = current->plug;
	if (plug) {
		blk_mq_bio_to_request(rq, bio);
		if (!request_count)
			trace_block_plug(q);

		blk_mq_put_ctx(data.ctx);

		if (request_count >= BLK_MAX_REQUEST_COUNT) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}

		list_add_tail(&rq->queuelist, &plug->mq_list);
		return cookie;
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}

	blk_mq_put_ctx(data.ctx);
	return cookie;
}

static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
		struct blk_mq_tags *tags, unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			if (!tags->rqs[i])
				continue;
			set->ops->exit_request(set->driver_data, tags->rqs[i],
						hctx_idx, i);
			tags->rqs[i] = NULL;
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_init_rq_map().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}

	kfree(tags->rqs);

	blk_mq_free_tags(tags);
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

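/*
 * Allocate the tag map plus the backing pages for every request of one
 * hardware queue. Requests are carved out of high-order pages (up to
 * order 4), falling back to smaller orders when allocation fails.
 */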
static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
{
	struct blk_mq_tags *tags;
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;

	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
				set->numa_node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	if (!tags)
		return NULL;

	INIT_LIST_HEAD(&tags->page_list);

	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 set->numa_node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * set->queue_depth;

	for (i = 0; i < set->queue_depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (this_order && left < order_to_size(this_order - 1))
			this_order--;

		do {
			page = alloc_pages_node(set->numa_node,
				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, set->queue_depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			tags->rqs[i] = p;
			if (set->ops->init_request) {
				if (set->ops->init_request(set->driver_data,
						tags->rqs[i], hctx_idx, i,
						set->numa_node)) {
					tags->rqs[i] = NULL;
					goto fail;
				}
			}

			p += rq_size;
			i++;
		}
	}
	return tags;

fail:
	blk_mq_free_rq_map(set, tags, hctx_idx);
	return NULL;
}

/*
 * 'cpu' is going away. Splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	ctx = __blk_mq_get_ctx(hctx->queue, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}

static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	unsigned flush_start_tag = set->queue_depth;

	if (blk_mq_hw_queue_mapped(hctx))
		blk_mq_tag_idle(hctx);

	if (set->ops->exit_request)
		set->ops->exit_request(set->driver_data,
				       hctx->fq->flush_rq, hctx_idx,
				       flush_start_tag + hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	blk_mq_remove_cpuhp(hctx);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

static void blk_mq_free_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		free_cpumask_var(hctx->cpumask);
}

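/*
 * Set up one hardware queue: its work items, software-queue map, driver
 * state and flush queue, unwinding each step again on failure.
 */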

static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;
	unsigned flush_start_tag = set->queue_depth;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->queue_num = hctx_idx;
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;

	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
			      node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto exit_hctx;

	if (set->ops->init_request &&
	    set->ops->init_request(set->driver_data,
				   hctx->fq->flush_rq, hctx_idx,
				   flush_start_tag + hctx_idx, node))
		goto free_fq;

	return 0;

 free_fq:
	kfree(hctx->fq);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 free_bitmap:
	sbitmap_free(&hctx->ctx_map);
 free_ctxs:
	kfree(hctx->ctxs);
 unregister_cpu_notifier:
	blk_mq_remove_cpuhp(hctx);
	return -1;
}
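
/*
 * Example (hypothetical driver, illustrative only): the ->init_hctx
 * callback invoked above might look roughly like this, with "my_dev"
 * standing in for a driver-private type:
 *
 *	static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 *				unsigned int hctx_idx)
 *	{
 *		struct my_dev *dev = data;	// set->driver_data
 *
 *		hctx->driver_data = &dev->hw_queues[hctx_idx];
 *		return 0;			// non-zero aborts init
 *	}
 *
 * A failure anywhere in blk_mq_init_hctx() unwinds in reverse order
 * through the labels above, so a driver callback only has to clean up
 * what it allocated itself.
 */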

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;

		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;

		hctx = blk_mq_map_queue(q, i);

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
			hctx->numa_node = local_memory_node(cpu_to_node(i));
	}
}

static void blk_mq_map_swqueue(struct request_queue *q,
			       const struct cpumask *online_mask)
{
	unsigned int i;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct blk_mq_tag_set *set = q->tag_set;

	/*
	 * Avoid others reading an incomplete hctx->cpumask through sysfs
	 */
	mutex_lock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_clear(hctx->cpumask);
		hctx->nr_ctx = 0;
	}

	/*
	 * Map software to hardware queues
	 */
	for_each_possible_cpu(i) {
		ctx = per_cpu_ptr(q->queue_ctx, i);
		hctx = blk_mq_map_queue(q, i);

		if (cpumask_test_cpu(i, online_mask))
			cpumask_set_cpu(i, hctx->cpumask);
		ctx->index_hw = hctx->nr_ctx;
		hctx->ctxs[hctx->nr_ctx++] = ctx;
	}

	mutex_unlock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
		if (!hctx->nr_ctx) {
			if (set->tags[i]) {
				blk_mq_free_rq_map(set, set->tags[i], i);
				set->tags[i] = NULL;
			}
			hctx->tags = NULL;
			continue;
		}

		/* unmapped hw queue can be remapped after CPU topo changed */
		if (!set->tags[i])
			set->tags[i] = blk_mq_init_rq_map(set, i);
		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);

		/*
		 * Initialize batch roundrobin counts.
		 * Set next_cpu only for hctxs that have an online CPU in
		 * their cpumask. For an hctx whose mask contains a mix of
		 * online and offline CPUs, this always picks an online one.
		 * For hctxs mapped only to offline CPUs, the cpumask is
		 * refreshed from the CPU hotplug callbacks.
		 */
		if (cpumask_first(hctx->cpumask) < nr_cpu_ids) {
			hctx->next_cpu = cpumask_first(hctx->cpumask);
			hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
		}
	}
}
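
/*
 * Example (illustrative): on a 4-CPU system with 2 hardware queues, the
 * default mapping might produce:
 *
 *	ctx0 (CPU0) -> hctx0, index_hw = 0
 *	ctx1 (CPU1) -> hctx0, index_hw = 1
 *	ctx2 (CPU2) -> hctx1, index_hw = 0
 *	ctx3 (CPU3) -> hctx1, index_hw = 1
 *
 * Each hctx then resizes its ctx_map to nr_ctx = 2 bits, one pending
 * bit per mapped software queue.
 */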

static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared)
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
		else
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
	}
}

static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
{
	struct request_queue *q;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
		queue_set_hctx_shared(q, shared);
		blk_mq_unfreeze_queue(q);
	}
}

static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del_init(&q->tag_set_list);
	if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, false);
	}
	mutex_unlock(&set->tag_list_lock);
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	q->tag_set = set;

	mutex_lock(&set->tag_list_lock);

	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
		set->flags |= BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, true);
	}
	if (set->flags & BLK_MQ_F_TAG_SHARED)
		queue_set_hctx_shared(q, true);
	list_add_tail(&q->tag_set_list, &set->tag_list);

	mutex_unlock(&set->tag_list_lock);
}
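
/*
 * Example (illustrative): for a tag set shared by several request
 * queues (e.g. multiple namespaces or LUNs on one controller), the
 * shared flag toggles as queues come and go:
 *
 *	add q1:  tag_list = {q1}        -> unshared
 *	add q2:  tag_list = {q1, q2}    -> both marked BLK_MQ_F_TAG_SHARED
 *	del q2:  tag_list = {q1} again  -> q1 reverts to unshared
 *
 * Each transition freezes the affected queues, so in-flight submission
 * paths always see a consistent view of the flag.
 */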

/*
 * This is the actual release handler for mq, but we run it from the
 * request queue's release handler to avoid a use-after-free: q->mq_kobj
 * arguably shouldn't exist, but without it we can't group the ctx/hctx
 * kobjects.
 */
void blk_mq_release(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/* hctx kobj stays in hctx */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx)
			continue;
		kfree(hctx->ctxs);
		kfree(hctx);
	}

	q->mq_map = NULL;

	kfree(q->queue_hw_ctx);

	/* ctx kobj stays in queue_ctx */
	free_percpu(q->queue_ctx);
}

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
	if (!uninit_q)
		return ERR_PTR(-ENOMEM);

	q = blk_mq_init_allocated_queue(set, uninit_q);
	if (IS_ERR(q))
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_mq_init_queue);
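
/*
 * Example (hypothetical driver, illustrative only): typical call order
 * for a driver bringing up a single-queue device, with "my_mq_ops",
 * "my_cmd" and "dev" standing in for driver-defined names:
 *
 *	dev->tag_set.ops = &my_mq_ops;
 *	dev->tag_set.nr_hw_queues = 1;
 *	dev->tag_set.queue_depth = 64;
 *	dev->tag_set.numa_node = NUMA_NO_NODE;
 *	dev->tag_set.cmd_size = sizeof(struct my_cmd);
 *	dev->tag_set.driver_data = dev;
 *
 *	ret = blk_mq_alloc_tag_set(&dev->tag_set);
 *	if (ret)
 *		return ret;
 *
 *	q = blk_mq_init_queue(&dev->tag_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&dev->tag_set);
 *		return PTR_ERR(q);
 *	}
 */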

static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
				   struct request_queue *q)
{
	int i, j;
	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

	blk_mq_sysfs_unregister(q);

	/* protect against switching io scheduler */
	mutex_lock(&q->sysfs_lock);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node;

		if (hctxs[i])
			continue;

		node = blk_mq_hw_queue_to_node(q->mq_map, i);
		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
					GFP_KERNEL, node);
		if (!hctxs[i])
			break;

		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
						node)) {
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}

		atomic_set(&hctxs[i]->nr_active, 0);
		hctxs[i]->numa_node = node;
		hctxs[i]->queue_num = i;

		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
			free_cpumask_var(hctxs[i]->cpumask);
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}
		blk_mq_hctx_kobj_init(hctxs[i]);
	}
	for (j = i; j < q->nr_hw_queues; j++) {
		struct blk_mq_hw_ctx *hctx = hctxs[j];

		if (hctx) {
			if (hctx->tags) {
				blk_mq_free_rq_map(set, hctx->tags, j);
				set->tags[j] = NULL;
			}
			blk_mq_exit_hctx(q, set, hctx, j);
			free_cpumask_var(hctx->cpumask);
			kobject_put(&hctx->kobj);
			kfree(hctx->ctxs);
			kfree(hctx);
			hctxs[j] = NULL;
		}
	}
	q->nr_hw_queues = i;
	mutex_unlock(&q->sysfs_lock);
	blk_mq_sysfs_register(q);
}

struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q)
{
	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!q->queue_ctx)
		goto err_exit;

	/* init q->mq_kobj and sw queues' kobjects */
	blk_mq_sysfs_init(q);

	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
						GFP_KERNEL, set->numa_node);
	if (!q->queue_hw_ctx)
		goto err_percpu;

	q->mq_map = set->mq_map;

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->nr_queues = nr_cpu_ids;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

	if (!(set->flags & BLK_MQ_F_SG_MERGE))
		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;

	q->sg_reserved_size = INT_MAX;

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	if (q->nr_hw_queues > 1)
		blk_queue_make_request(q, blk_mq_make_request);
	else
		blk_queue_make_request(q, blk_sq_make_request);

	/*
	 * Do this after blk_queue_make_request() overrides it...
	 */
	q->nr_requests = set->queue_depth;

	if (set->ops->complete)
		blk_queue_softirq_done(q, set->ops->complete);

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);

	get_online_cpus();
	mutex_lock(&all_q_mutex);

	list_add_tail(&q->all_q_node, &all_q_list);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q, cpu_online_mask);

	mutex_unlock(&all_q_mutex);
	put_online_cpus();

	return q;

err_hctxs:
	kfree(q->queue_hw_ctx);
err_percpu:
	free_percpu(q->queue_ctx);
err_exit:
	q->mq_ops = NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

void blk_mq_free_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blk_mq_del_queue_tag_set(q);

	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
	blk_mq_free_hw_queues(q, set);
}

/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q,
				const struct cpumask *online_mask)
{
	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));

	blk_mq_sysfs_unregister(q);

	/*
	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
	 * we should change hctx numa_node according to the new topology (this
	 * involves freeing and re-allocating memory; is it worth doing?)
	 */
	blk_mq_map_swqueue(q, online_mask);

	blk_mq_sysfs_register(q);
}

static int blk_mq_queue_reinit_dead(unsigned int cpu)
{
	struct request_queue *q;
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&all_q_mutex);
	list_for_each_entry(q, &all_q_list, all_q_node) {
		queue_for_each_hw_ctx(q, hctx, i) {
			cpumask_clear_cpu(cpu, hctx->cpumask);
		}
	}
	mutex_unlock(&all_q_mutex);
	return 0;
}

/*
 * Before a hotadded cpu starts handling requests, new mappings must be
 * established. Otherwise, requests in its software queue might never be
 * dispatched.
 *
 * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
 * for CPU0, and ctx1 for CPU1).
 *
 * Now CPU1 has just been onlined and a request is inserted into
 * ctx1->rq_list, setting bit0 in the pending bitmap since ctx1->index_hw
 * is still zero.
 *
 * Then, while running the hw queue, flush_busy_ctxs() finds bit0 set in
 * the pending bitmap and tries to retrieve requests from
 * hctx->ctxs[0]->rq_list. But hctx->ctxs[0] is a pointer to ctx0, so the
 * request in ctx1->rq_list is ignored.
 */
static int blk_mq_queue_reinit_prepare(unsigned int cpu)
{
	struct request_queue *q;
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&all_q_mutex);
	list_for_each_entry(q, &all_q_list, all_q_node) {
		queue_for_each_hw_ctx(q, hctx, i) {
			cpumask_set_cpu(cpu, hctx->cpumask);
		}
	}
	mutex_unlock(&all_q_mutex);
	return 0;
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++) {
		set->tags[i] = blk_mq_init_rq_map(set, i);
		if (!set->tags[i])
			goto out_unwind;
	}

	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_rq_map(set, set->tags[i], i);

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
						depth, set->queue_depth);

	return 0;
}
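
/*
 * Example (illustrative): with a requested queue_depth of 256, each
 * allocation failure retries at a halved depth:
 *
 *	256 -> 128 -> 64 -> ... down to reserved_tags + BLK_MQ_TAG_MIN
 *
 * If even the minimum depth cannot be allocated, the call fails with
 * -ENOMEM; otherwise set->queue_depth records what was achieved and a
 * "reduced tag depth" message is logged.
 */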

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it is too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}

	/*
	 * There is no use for more h/w queues than cpus.
	 */
	if (set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
				 GFP_KERNEL, set->numa_node);
	if (!set->tags)
		return -ENOMEM;

	ret = -ENOMEM;
	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
			GFP_KERNEL, set->numa_node);
	if (!set->mq_map)
		goto out_free_tags;

	if (set->ops->map_queues)
		ret = set->ops->map_queues(set);
	else
		ret = blk_mq_map_queues(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_rq_maps(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	kfree(set->mq_map);
	set->mq_map = NULL;
out_free_tags:
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
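
/*
 * Example (hypothetical, illustrative only): a driver that wants a
 * custom CPU-to-queue mapping can supply ->map_queues instead of the
 * blk_mq_map_queues() default used above. A naive round-robin sketch:
 *
 *	static int my_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		unsigned int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			set->mq_map[cpu] = cpu % set->nr_hw_queues;
 *		return 0;
 *	}
 *
 * The default mapper instead tries to keep CPU siblings on the same
 * hardware queue.
 */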

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++) {
		if (set->tags[i])
			blk_mq_free_rq_map(set, set->tags[i], i);
	}

	kfree(set->mq_map);
	set->mq_map = NULL;

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);
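
/*
 * Example (illustrative): teardown mirrors bring-up; a driver would
 * normally release its queues before the tag set they were built from:
 *
 *	blk_cleanup_queue(q);			// drain and drop the queue
 *	blk_mq_free_tag_set(&dev->tag_set);	// then free tags and mq_map
 */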

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set || nr > set->queue_depth)
		return -EINVAL;

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		ret = blk_mq_tag_update_depth(hctx->tags, nr);
		if (ret)
			break;
	}

	if (!ret)
		q->nr_requests = nr;

	return ret;
}
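
/*
 * Example (illustrative): this is the mq-side handler behind writes to
 * the queue's nr_requests sysfs attribute, e.g.
 *
 *	echo 128 > /sys/block/<dev>/queue/nr_requests
 *
 * Values above set->queue_depth are rejected with -EINVAL, since the
 * tag maps were sized for at most that many requests.
 */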

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	struct request_queue *q;

	if (nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);

	set->nr_hw_queues = nr_hw_queues;
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);

		if (q->nr_hw_queues > 1)
			blk_queue_make_request(q, blk_mq_make_request);
		else
			blk_queue_make_request(q, blk_sq_make_request);

		blk_mq_queue_reinit(q, cpu_online_mask);
	}

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
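
/*
 * Example (illustrative): a driver whose hardware came back from a reset
 * with a different interrupt vector count might call:
 *
 *	blk_mq_update_nr_hw_queues(&dev->tag_set, new_nr_vectors);
 *
 * All queues in the tag set stay frozen for the duration, so the switch
 * between blk_mq_make_request and blk_sq_make_request cannot race with
 * I/O submission.
 */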

void blk_mq_disable_hotplug(void)
{
	mutex_lock(&all_q_mutex);
}

void blk_mq_enable_hotplug(void)
{
	mutex_unlock(&all_q_mutex);
}

static int __init blk_mq_init(void)
{
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
				  blk_mq_queue_reinit_prepare,
				  blk_mq_queue_reinit_dead);
	return 0;
}
subsys_initcall(blk_mq_init);