blk-cgroup.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834
  1. /*
  2. * Common Block IO controller cgroup interface
  3. *
  4. * Based on ideas and code from CFQ, CFS and BFQ:
  5. * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  6. *
  7. * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  8. * Paolo Valente <paolo.valente@unimore.it>
  9. *
  10. * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
  11. * Nauman Rafique <nauman@google.com>
  12. *
  13. * For policy-specific per-blkcg data:
  14. * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
  15. * Arianna Avanzini <avanzini.arianna@gmail.com>
  16. */
  17. #include <linux/ioprio.h>
  18. #include <linux/kdev_t.h>
  19. #include <linux/module.h>
  20. #include <linux/sched/signal.h>
  21. #include <linux/err.h>
  22. #include <linux/blkdev.h>
  23. #include <linux/backing-dev.h>
  24. #include <linux/slab.h>
  25. #include <linux/genhd.h>
  26. #include <linux/delay.h>
  27. #include <linux/atomic.h>
  28. #include <linux/ctype.h>
  29. #include <linux/blk-cgroup.h>
  30. #include <linux/tracehook.h>
  31. #include "blk.h"
/* maximum length of a MAJ:MIN key parsed from cgroup config writes */
#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

/* the root blkcg; always online, parent of all other blkcgs */
struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

/* convenience pointer to the root blkcg's css for external users */
struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

/* registered policies, indexed by plid; protected by blkcg_pol_mutex */
static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

/* whether to emit extra debug info in stat files; off by default */
static bool blkcg_debug_stats = false;
  48. static bool blkcg_policy_enabled(struct request_queue *q,
  49. const struct blkcg_policy *pol)
  50. {
  51. return pol && test_bit(pol->plid, q->blkcg_pols);
  52. }
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free, may be %NULL
 *
 * Free @blkg which may be partially allocated.  Called both from the
 * normal release path and from allocation error paths, so every field
 * freed here must be valid-or-NULL at any point during construction.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	/* per-policy data first; entries are NULL if never allocated */
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	/* root blkg uses @q->root_rl and never initialized its own rl */
	if (blkg->blkcg != &blkcg_root)
		blk_exit_rl(blkg->q, &blkg->rl);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.  Returns the new blkg
 * with a single reference held, or %NULL on allocation failure.  A
 * partially constructed blkg is torn down via blkg_free().
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	/* initial ref, put by blkg_destroy() */
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}
/*
 * Slow-path blkg lookup: consult the radix tree after the per-blkcg
 * lookup hint missed.  Returns the blkg for the @blkcg - @q pair or
 * %NULL.  Caller must hold the RCU read lock; see the comment below
 * for when @q->queue_lock is additionally required.
 */
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return
 * (either linked in, released via blkg_put(), or freed on error).
 *
 * Caller must hold the RCU read lock and @q->queue_lock.  On success
 * returns the new blkg; on failure returns an ERR_PTR().
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* shared congestion state for the writeback of this blkcg on @q */
	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}

	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/*
	 * link parent - callers create blkgs top-down from blkcg_root, so
	 * a missing parent here indicates a racing removal
	 */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	/* new_blkg may be NULL here; blkg_free() tolerates that */
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}
/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.  Each iteration
	 * creates the top-most missing ancestor; the loop terminates once
	 * @blkcg itself has been created or creation fails.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		/* walk up to the highest ancestor without a blkg on @q */
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
/*
 * blkg_destroy - unlink @blkg and drop its creation reference
 *
 * Offlines per-policy data, transfers stats to the parent, and removes
 * @blkg from the radix tree and both list heads.  Caller must hold both
 * @blkg->q->queue_lock and @blkg->blkcg->lock.  The blkg itself is freed
 * later via RCU once the last reference is dropped.
 */
static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	/* fold this blkg's stats into the parent so they aren't lost */
	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.  Caller must hold
 * @q->queue_lock; the per-blkcg lock is taken per iteration.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	/* _safe iteration: blkg_destroy() unlinks the current entry */
	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}
/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);
/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl: the
 * iteration maps @q->root_rl to the list head itself and skips the root
 * blkg's node while walking.  Returns the next request_list or %NULL
 * when the walk wraps back to the head.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
/*
 * blkcg_reset_stats - cftype write handler that zeroes all stats of a blkcg
 *
 * Resets the common rwstats of every blkg in @css's blkcg and gives each
 * enabled policy a chance to reset its own stats.  The written value is
 * ignored; always returns 0.
 */
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	/* blkcg_pol_mutex keeps blkcg_policy[] stable while we walk it */
	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}
  395. const char *blkg_dev_name(struct blkcg_gq *blkg)
  396. {
  397. /* some drivers (floppy) instantiate a queue w/o disk registered */
  398. if (blkg->q->backing_dev_info->dev)
  399. return dev_name(blkg->q->backing_dev_info->dev);
  400. return NULL;
  401. }
  402. EXPORT_SYMBOL_GPL(blkg_dev_name);
/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		/* each blkg lives on a different queue; lock per entry */
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
  441. /**
  442. * __blkg_prfill_u64 - prfill helper for a single u64 value
  443. * @sf: seq_file to print to
  444. * @pd: policy private data of interest
  445. * @v: value to print
  446. *
  447. * Print @v to @sf for the device assocaited with @pd.
  448. */
  449. u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
  450. {
  451. const char *dname = blkg_dev_name(pd->blkg);
  452. if (!dname)
  453. return 0;
  454. seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
  455. return v;
  456. }
  457. EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.  One line is
 * emitted per BLKG_RWSTAT_* direction plus a "Total" line summing the
 * read, write and discard counters.  Returns the total, or 0 when @pd's
 * device has no name.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
		[BLKG_RWSTAT_DISCARD]	= "Discard",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

	/* total excludes SYNC/ASYNC since those overlap READ/WRITE */
	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
  491. /**
  492. * blkg_prfill_stat - prfill callback for blkg_stat
  493. * @sf: seq_file to print to
  494. * @pd: policy private data of interest
  495. * @off: offset to the blkg_stat in @pd
  496. *
  497. * prfill callback for printing a blkg_stat.
  498. */
  499. u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
  500. {
  501. return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
  502. }
  503. EXPORT_SYMBOL_GPL(blkg_prfill_stat);
/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.  Takes a snapshot of the
 * rwstat before printing since the counters may change concurrently.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
/*
 * Like blkg_prfill_rwstat() but @off is relative to the blkg itself
 * rather than to the policy data - used for the common stat_bytes /
 * stat_ios fields embedded in struct blkcg_gq.
 */
static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
/*
 * Recursive variant of blkg_prfill_rwstat_field(): prints the sum of the
 * blkg-embedded rwstat at @off over @pd->blkg and its online descendants.
 */
static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
							      NULL, off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	/* pre-order walk includes @blkg itself */
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_stat *stat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			stat = (void *)blkg + off;

		/* aux_cnt carries stats folded in from destroyed children */
		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	/* pre-order walk includes @blkg itself */
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		/* fold both the live percpu counters and the aux counts */
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
				&sum.aux_cnt[i]);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
/*
 * Performs queue bypass and policy enabled checks then looks up blkg.
 *
 * Returns the blkg if found, %NULL if absent, or an ERR_PTR() when the
 * policy is disabled on @q or @q is bypassing.  Caller must hold the RCU
 * read lock and @q->queue_lock.
 */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	return __blkg_lookup(blkcg, q, true /* update_hint */);
}
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	int key_len, part, ret;
	char *body;

	/* parse leading MAJ:MIN; the config body follows the whitespace */
	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	body = input + key_len;
	if (!isspace(*body))
		return -EINVAL;
	body = skip_spaces(body);

	/* blkg config is per whole device, not per partition */
	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -ENODEV;
	if (part) {
		ret = -ENODEV;
		goto fail;
	}

	q = disk->queue;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		/* walk up to the closest ancestor which already has a blkg */
		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail;
		}

		if (radix_tree_preload(GFP_KERNEL)) {
			blkg_free(new_blkg);
			ret = -ENOMEM;
			goto fail;
		}

		/* re-acquire and re-check; state may have changed meanwhile */
		rcu_read_lock();
		spin_lock_irq(q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			blkg_free(new_blkg);
			goto fail_preloaded;
		}

		if (blkg) {
			/* someone else created it while locks were dropped */
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (unlikely(IS_ERR(blkg))) {
				ret = PTR_ERR(blkg);
				goto fail_preloaded;
			}
		}

		radix_tree_preload_end();

		/* loop until @blkcg itself has a blkg on @q */
		if (pos == blkcg)
			goto success;
	}
success:
	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = body;
	return 0;

fail_preloaded:
	radix_tree_preload_end();
fail_unlock:
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
fail:
	put_disk_and_module(disk);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().  Releases the queue lock and RCU read lock taken
 * by blkg_conf_prep() and drops the gendisk reference it acquired.
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk_and_module(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
  816. static int blkcg_print_stat(struct seq_file *sf, void *v)
  817. {
  818. struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
  819. struct blkcg_gq *blkg;
  820. rcu_read_lock();
  821. hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
  822. const char *dname;
  823. char *buf;
  824. struct blkg_rwstat rwstat;
  825. u64 rbytes, wbytes, rios, wios, dbytes, dios;
  826. size_t size = seq_get_buf(sf, &buf), off = 0;
  827. int i;
  828. bool has_stats = false;
  829. spin_lock_irq(blkg->q->queue_lock);
  830. if (!blkg->online)
  831. goto skip;
  832. dname = blkg_dev_name(blkg);
  833. if (!dname)
  834. goto skip;
  835. /*
  836. * Hooray string manipulation, count is the size written NOT
  837. * INCLUDING THE \0, so size is now count+1 less than what we
  838. * had before, but we want to start writing the next bit from
  839. * the \0 so we only add count to buf.
  840. */
  841. off += scnprintf(buf+off, size-off, "%s ", dname);
  842. rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
  843. offsetof(struct blkcg_gq, stat_bytes));
  844. rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
  845. wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
  846. dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
  847. rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
  848. offsetof(struct blkcg_gq, stat_ios));
  849. rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
  850. wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
  851. dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
  852. if (rbytes || wbytes || rios || wios) {
  853. has_stats = true;
  854. off += scnprintf(buf+off, size-off,
  855. "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
  856. rbytes, wbytes, rios, wios,
  857. dbytes, dios);
  858. }
  859. if (!blkcg_debug_stats)
  860. goto next;
  861. if (atomic_read(&blkg->use_delay)) {
  862. has_stats = true;
  863. off += scnprintf(buf+off, size-off,
  864. " use_delay=%d delay_nsec=%llu",
  865. atomic_read(&blkg->use_delay),
  866. (unsigned long long)atomic64_read(&blkg->delay_nsec));
  867. }
  868. for (i = 0; i < BLKCG_MAX_POLS; i++) {
  869. struct blkcg_policy *pol = blkcg_policy[i];
  870. size_t written;
  871. if (!blkg->pd[i] || !pol->pd_stat_fn)
  872. continue;
  873. written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
  874. if (written)
  875. has_stats = true;
  876. off += written;
  877. }
  878. next:
  879. if (has_stats) {
  880. if (off < size - 1) {
  881. off += scnprintf(buf+off, size-off, "\n");
  882. seq_commit(sf, off);
  883. } else {
  884. seq_commit(sf, -1);
  885. }
  886. }
  887. skip:
  888. spin_unlock_irq(blkg->q->queue_lock);
  889. }
  890. rcu_read_unlock();
  891. return 0;
  892. }
/* cgroup v2 (default hierarchy) interface files */
static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};
/* cgroup v1 ("blkio") interface files */
static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};
  908. /*
  909. * blkcg destruction is a three-stage process.
  910. *
  911. * 1. Destruction starts. The blkcg_css_offline() callback is invoked
  912. * which offlines writeback. Here we tie the next stage of blkg destruction
  913. * to the completion of writeback associated with the blkcg. This lets us
  914. * avoid punting potentially large amounts of outstanding writeback to root
  915. * while maintaining any ongoing policies. The next stage is triggered when
  916. * the nr_cgwbs count goes to zero.
  917. *
  918. * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
  919. * and handles the destruction of blkgs. Here the css reference held by
  920. * the blkg is put back eventually allowing blkcg_css_free() to be called.
  921. * This work may occur in cgwb_release_workfn() on the cgwb_release
  922. * workqueue. Any submitted ios that fail to get the blkg ref will be
  923. * punted to the root_blkg.
  924. *
  925. * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
  926. * This finally frees the blkcg.
  927. */
/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away.  Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see the three-stage destruction comment
 * above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(blkcg);

	/* put the base cgwb reference allowing step 2 to be triggered */
	blkcg_cgwb_put(blkcg);
}
/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			/*
			 * Can't nest q lock inside blkcg lock; back off and
			 * retry to avoid an ABBA deadlock.
			 */
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}
  973. static void blkcg_css_free(struct cgroup_subsys_state *css)
  974. {
  975. struct blkcg *blkcg = css_to_blkcg(css);
  976. int i;
  977. mutex_lock(&blkcg_pol_mutex);
  978. list_del(&blkcg->all_blkcgs_node);
  979. for (i = 0; i < BLKCG_MAX_POLS; i++)
  980. if (blkcg->cpd[i])
  981. blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
  982. mutex_unlock(&blkcg_pol_mutex);
  983. kfree(blkcg);
  984. }
/*
 * blkcg_css_alloc - cgroup css_alloc callback
 * @parent_css: parent css, NULL when allocating the root cgroup's css
 *
 * Allocates and initializes a blkcg (the statically allocated blkcg_root
 * for the root cgroup) along with per-policy cgroup data for every
 * registered policy that wants it.  Returns the new css or ERR_PTR(-ENOMEM).
 */
static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	/* serialize against policy [un]registration changing blkcg_policy[] */
	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		/* root cgroup uses the static blkcg_root, never kfree'd */
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
	/* base cgwb ref, dropped by blkcg_css_offline() */
	refcount_set(&blkcg->cgwb_refcnt, 1);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	/* unwind only the cpd's allocated in this call */
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.  Creates the root blkg and initializes the
 * iolatency and throttle policies for the queue.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_iolatency_init(q);
	if (ret) {
		/* tear down the root blkg created above */
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
		return ret;
	}

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;

err_unlock:
	/* blkg_create() cleans up @new_blkg on failure */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 * Caller must hold @q->queue_lock.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg. If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 * Destroys all blkgs on @q and shuts down the throttle policy.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}
  1125. /*
  1126. * We cannot support shared io contexts, as we have no mean to support
  1127. * two tasks with the same ioc in two different groups without major rework
  1128. * of the main cic data structures. For now we allow a task to change
  1129. * its cgroup only if it's the only owner of its ioc.
  1130. */
  1131. static int blkcg_can_attach(struct cgroup_taskset *tset)
  1132. {
  1133. struct task_struct *task;
  1134. struct cgroup_subsys_state *dst_css;
  1135. struct io_context *ioc;
  1136. int ret = 0;
  1137. /* task_lock() is needed to avoid races with exit_io_context() */
  1138. cgroup_taskset_for_each(task, dst_css, tset) {
  1139. task_lock(task);
  1140. ioc = task->io_context;
  1141. if (ioc && atomic_read(&ioc->nr_tasks) > 1)
  1142. ret = -EINVAL;
  1143. task_unlock(task);
  1144. if (ret)
  1145. break;
  1146. }
  1147. return ret;
  1148. }
  1149. static void blkcg_bind(struct cgroup_subsys_state *root_css)
  1150. {
  1151. int i;
  1152. mutex_lock(&blkcg_pol_mutex);
  1153. for (i = 0; i < BLKCG_MAX_POLS; i++) {
  1154. struct blkcg_policy *pol = blkcg_policy[i];
  1155. struct blkcg *blkcg;
  1156. if (!pol || !pol->cpd_bind_fn)
  1157. continue;
  1158. list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
  1159. if (blkcg->cpd[pol->plid])
  1160. pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
  1161. }
  1162. mutex_unlock(&blkcg_pol_mutex);
  1163. }
  1164. static void blkcg_exit(struct task_struct *tsk)
  1165. {
  1166. if (tsk->throttle_queue)
  1167. blk_put_queue(tsk->throttle_queue);
  1168. tsk->throttle_queue = NULL;
  1169. }
/* cgroup subsystem descriptor for the io controller ("blkio" on v1) */
struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);
/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path. Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registerations. Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* quiesce the queue: freeze for mq, bypass for legacy */
	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);
pd_prealloc:
	/*
	 * GFP_NOWAIT allocation under queue_lock may fail; keep one pd
	 * preallocated with GFP_KERNEL (locks dropped) as a fallback.
	 */
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			/* out of pds; drop the lock, refill and restart */
			spin_unlock_irq(q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(q->queue_lock);
out_bypass_end:
	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().  Offlines and frees @pol's policy data on every
 * blkg of @q.
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	/* quiesce the queue: freeze for mq, bypass for legacy */
	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);

	spin_lock_irq(q->queue_lock);

	/* clear first so blkcg_policy_enabled() fails for new lookups */
	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
	}

	spin_unlock_irq(q->queue_lock);

	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
  1292. /**
  1293. * blkcg_policy_register - register a blkcg policy
  1294. * @pol: blkcg policy to register
  1295. *
  1296. * Register @pol with blkcg core. Might sleep and @pol may be modified on
  1297. * successful registration. Returns 0 on success and -errno on failure.
  1298. */
  1299. int blkcg_policy_register(struct blkcg_policy *pol)
  1300. {
  1301. struct blkcg *blkcg;
  1302. int i, ret;
  1303. mutex_lock(&blkcg_pol_register_mutex);
  1304. mutex_lock(&blkcg_pol_mutex);
  1305. /* find an empty slot */
  1306. ret = -ENOSPC;
  1307. for (i = 0; i < BLKCG_MAX_POLS; i++)
  1308. if (!blkcg_policy[i])
  1309. break;
  1310. if (i >= BLKCG_MAX_POLS) {
  1311. pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
  1312. goto err_unlock;
  1313. }
  1314. /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
  1315. if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
  1316. (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
  1317. goto err_unlock;
  1318. /* register @pol */
  1319. pol->plid = i;
  1320. blkcg_policy[pol->plid] = pol;
  1321. /* allocate and install cpd's */
  1322. if (pol->cpd_alloc_fn) {
  1323. list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
  1324. struct blkcg_policy_data *cpd;
  1325. cpd = pol->cpd_alloc_fn(GFP_KERNEL);
  1326. if (!cpd)
  1327. goto err_free_cpds;
  1328. blkcg->cpd[pol->plid] = cpd;
  1329. cpd->blkcg = blkcg;
  1330. cpd->plid = pol->plid;
  1331. pol->cpd_init_fn(cpd);
  1332. }
  1333. }
  1334. mutex_unlock(&blkcg_pol_mutex);
  1335. /* everything is in place, add intf files for the new policy */
  1336. if (pol->dfl_cftypes)
  1337. WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
  1338. pol->dfl_cftypes));
  1339. if (pol->legacy_cftypes)
  1340. WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
  1341. pol->legacy_cftypes));
  1342. mutex_unlock(&blkcg_pol_register_mutex);
  1343. return 0;
  1344. err_free_cpds:
  1345. if (pol->cpd_free_fn) {
  1346. list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
  1347. if (blkcg->cpd[pol->plid]) {
  1348. pol->cpd_free_fn(blkcg->cpd[pol->plid]);
  1349. blkcg->cpd[pol->plid] = NULL;
  1350. }
  1351. }
  1352. }
  1353. blkcg_policy[pol->plid] = NULL;
  1354. err_unlock:
  1355. mutex_unlock(&blkcg_pol_mutex);
  1356. mutex_unlock(&blkcg_pol_register_mutex);
  1357. return ret;
  1358. }
  1359. EXPORT_SYMBOL_GPL(blkcg_policy_register);
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol). Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay.  We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.  We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled.  We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward.  blkg->last_use keeps track of
	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
	 * are ok with whatever is happening now, and we can take away more of
	 * the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
		/* cmpxchg won: this CPU owns the scale-down for this window */
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}
/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	u64 now = ktime_to_ns(ktime_get());
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	/* take the largest pending delay anywhere on the ancestry path */
	while (blkg->parent) {
		if (atomic_read(&blkg->use_delay)) {
			blkcg_scale_delay(blkg, now);
			delay_nsec = max_t(u64, delay_nsec,
					   atomic64_read(&blkg->delay_nsec));
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap the
	 * delays at 0.25s.  If there's 10's of seconds worth of delay then
	 * the tasks will be delayed for 0.25s for every syscall.
	 */
	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	/*
	 * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
	 * that hasn't landed upstream yet. Once that stuff is in place we need
	 * to do a psi_memstall_enter/leave if memdelay is set.
	 */

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		/* TASK_KILLABLE so fatal signals can cut the sleep short */
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);
}
/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_queue is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is setup for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct request_queue *q = current->throttle_queue;
	struct cgroup_subsys_state *css;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!q)
		return;

	/* consume the one-shot marker set by blkcg_schedule_throttle() */
	current->throttle_queue = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	/* kthreads may have an explicitly associated blkcg */
	css = kthread_blkcg();
	if (css)
		blkcg = css_to_blkcg(css);
	else
		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));

	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, q);
	if (!blkg)
		goto out;
	/* pin the blkg so we can sleep outside the RCU read section */
	blkg = blkg_try_get(blkg);
	if (!blkg)
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	blk_put_queue(q);
	return;
out:
	rcu_read_unlock();
	blk_put_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);
/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @q - the request queue IO was submitted on
 * @use_memdelay - do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are places
 * we call this that may not have that information, the swapping code for
 * instance will only have a request_queue at that point.  This sets the
 * notify_resume for the task to check and see if it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once.  If the task needs to be throttled again it'll need to be
 * re-set at the next time we see the task.
 */
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
	/* kthreads never return to user space; nothing to throttle */
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (!blk_get_queue(q))
		return;

	/* replace any previously scheduled queue, dropping its ref */
	if (current->throttle_queue)
		blk_put_queue(current->throttle_queue);
	current->throttle_queue = q;
	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}
EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);
/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg - blkg of interest
 * @now - the current time in nanoseconds
 * @delta - how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	/* decay stale delay first so @delta charges the current window */
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}
EXPORT_SYMBOL_GPL(blkcg_add_delay);
/* gates the extra debug fields emitted by blkcg_print_stat() */
module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");