// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "elevator.h"
#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
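/*
 * A bfq_stat couples a per-CPU counter (cpu_cnt) with an auxiliary
 * atomic64 (aux_cnt) that accumulates the totals of groups that have
 * gone offline, so that recursive statistics survive group removal
 * (see bfqg_stats_xfer_dead() below).
 */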
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * do not re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				    struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}
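
/*
 * Instantiate the mark/clear/test helpers for each flag; e.g.
 * BFQG_FLAG_FNS(waiting) defines bfqg_stats_mark_waiting(),
 * bfqg_stats_clear_waiting() and bfqg_stats_waiting().
 */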
BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = blk_time_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			     now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = blk_time_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = blk_time_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			     now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq got a
	 * new request in the parent group and moved to this group while
	 * being added to the service tree. Just ignore the event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = blk_time_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = blk_time_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				     now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = blk_time_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		     blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (bfqq != bfqg->bfqd->in_service_queue)
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = blk_time_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, opf,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, opf,
				io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */
static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy,
 * by finding the parent of a bfq_group or the bfq_group associated
 * with a bfq_queue.
 */
static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */
static void bfqg_get(struct bfq_group *bfqg)
{
	refcount_inc(&bfqg->ref);
}

static void bfqg_put(struct bfq_group *bfqg)
{
	if (refcount_dec_and_test(&bfqg->ref))
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}
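
/*
 * Account a request's bytes and I/O count to the bfq_group associated
 * with its bio, for the legacy (cgroup v1) statistics files.
 */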
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

	if (!bfqg)
		return;

	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			 &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}
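
/*
 * Initialize @entity's weight and priority fields from their "new_"
 * counterparts and attach the entity to @bfqg in the scheduling
 * hierarchy.
 */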
void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
	blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->bytes, gfp) ||
	    blkg_rwstat_init(&stats->ios, gfp))
		goto error;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp))
		goto error;
#endif

	return 0;

error:
	bfqg_stats_exit(stats);
	return -ENOMEM;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;

	bgd->weight = CGROUP_WEIGHT_DFL;
	return &bgd->pd;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(struct gendisk *disk,
					     struct blkcg *blkcg, gfp_t gfp)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, disk->node_id);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	refcount_set(&bfqg->ref, 1);
	return &bfqg->pd;
}
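
/*
 * Initialize the bfq_group allocated above once it is linked to its
 * blkcg_gq: inherit the blkcg's default weight and hook the group's
 * entity into BFQ's private hierarchy.
 */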
static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	entity->last_bfqq_created = NULL;

	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->num_queues_with_pending_reqs = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
	struct bfq_group *parent;
	struct bfq_entity *entity;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		struct bfq_group *curr_bfqg = container_of(entity,
						struct bfq_group, entity);
		if (curr_bfqg != bfqd->root_group) {
			parent = bfqg_parent(curr_bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(curr_bfqg, parent);
		}
	}
}
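
/*
 * Return the bfq_group to charge @bio to, walking up the blkg hierarchy
 * until an online group is found, and (re)associate @bio with that
 * group's blkcg; fall back to the root group if no group is online.
 */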
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct bfq_group *bfqg;

	while (blkg) {
		if (!blkg->online) {
			blkg = blkg->parent;
			continue;
		}
		bfqg = blkg_to_bfqg(blkg);
		if (bfqg->pd.online) {
			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
			return bfqg;
		}
		blkg = blkg->parent;
	}
	bio_associate_blkg_from_css(bio,
				&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
	return bfqd->root_group;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct bfq_group *old_parent = bfqq_group(bfqq);
	bool has_pending_reqs = false;

	/*
	 * There is no point in moving bfqq to the same group, which can
	 * happen when the root group is offlined.
	 */
	if (old_parent == bfqg)
		return;

	/*
	 * oom_bfqq is not allowed to move: it holds a reference to the
	 * root_group until the elevator exits.
	 */
	if (bfqq == &bfqd->oom_bfqq)
		return;

	/*
	 * Get an extra reference to prevent bfqq from being freed in
	 * the next possible expire or deactivate.
	 */
	bfqq->ref++;

	if (entity->in_groups_with_pending_reqs) {
		has_pending_reqs = true;
		bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
	}

	/*
	 * If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to the current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st_or_in_serv)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(old_parent);

	bfq_reassign_last_bfqq(bfqq, NULL);
	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (has_pending_reqs)
		bfq_add_bfqq_in_groups_with_pending_reqs(bfqq);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->tot_rq_in_driver)
		bfq_schedule_dispatch(bfqd);
	/* release the extra ref taken above; bfqq may be freed now */
	bfq_put_queue(bfqq);
}
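
/*
 * Move a sync queue to @bfqg, taking the queue-merge chain into account:
 * a plain move is safe only when @sync_bfqq is not involved in a merge.
 */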
static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
			       struct bfq_queue *sync_bfqq,
			       struct bfq_io_cq *bic,
			       struct bfq_group *bfqg,
			       unsigned int act_idx)
{
	struct bfq_queue *bfqq;

	if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
		/* We are the only user of this bfqq, just move it */
		if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
		return;
	}

	/*
	 * The queue was merged to a different queue. Check
	 * that the merge chain still belongs to the same
	 * cgroup.
	 */
	for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
		if (bfqq->entity.sched_data != &bfqg->sched_data)
			break;
	if (bfqq) {
		/*
		 * Some queue changed cgroup so the merge is not valid
		 * anymore. We cannot easily just cancel the merge (by
		 * clearing new_bfqq) as there may be other processes
		 * using this queue and holding refs to all queues
		 * below sync_bfqq->new_bfqq. Similarly if the merge
		 * already happened, we need to detach from bfqq now
		 * so that we cannot merge bio to a request from the
		 * old cgroup.
		 */
		bfq_put_cooperator(sync_bfqq);
		bic_set_bfqq(bic, NULL, true, act_idx);
		bfq_release_process_ref(bfqd, sync_bfqq);
	}
}

/**
 * __bfq_bic_change_cgroup - move @bic to @bfqg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @bfqg: the group to move to.
 *
 * Move @bic to @bfqg, assuming that bfqd->lock is held; this makes
 * sure that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 */
static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
				    struct bfq_io_cq *bic,
				    struct bfq_group *bfqg)
{
	unsigned int act_idx;

	for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) {
		struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx);
		struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx);

		if (async_bfqq &&
		    async_bfqq->entity.sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, false, act_idx);
			bfq_release_process_ref(bfqd, async_bfqq);
		}

		if (sync_bfqq)
			bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx);
	}
}
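
/*
 * Called on I/O submission: if the blkcg of the submitting process has
 * changed since the last request of @bic, move @bic and its queues to
 * the new group.
 */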
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
	uint64_t serial_nr;

	serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created bic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		return;

	/*
	 * New cgroup for this process. Make sure it is linked to the bfq
	 * internal cgroup hierarchy.
	 */
	bfq_link_bfqg(bfqd, bfqg);
	__bfq_bic_change_cgroup(bfqd, bic, bfqg);

	bic->blkcg_serial_nr = serial_nr;
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move, if entity is a leaf; or the parent entity
 *	    of an active leaf entity to move, if entity is not a leaf.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity,
				     int ioprio_class)
{
	struct bfq_queue *bfqq;
	struct bfq_entity *child_entity = entity;

	while (child_entity->my_sched_data) { /* leaf not reached yet */
		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
		struct bfq_service_tree *child_st = child_sd->service_tree +
			ioprio_class;
		struct rb_root *child_active = &child_st->active;

		child_entity = bfq_entity_of(rb_first(child_active));

		if (!child_entity)
			child_entity = child_sd->in_service_entity;
	}

	bfqq = bfq_entity_to_bfqq(child_entity);
	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_queues - move to the root group all active queues.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree to start the search from.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity;

	while ((entity = bfq_entity_of(rb_first(active))))
		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity,
					 ioprio_class);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic.
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_queues(bfqd, bfqg, st, i);

		/*
		 * The idle tree may still contain bfq_queues
		 * belonging to exited tasks, because they never
		 * migrated to a different cgroup from the one being
		 * destroyed now. In addition, even
		 * bfq_reparent_active_queues() may happen to add some
		 * entities to the idle tree. It happens if, in some
		 * of the calls to bfq_bfqq_move() performed by
		 * bfq_reparent_active_queues(), the queue to move is
		 * empty and gets expired.
		 */
		bfq_flush_idle_tree(st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}
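
/*
 * End weight raising for the async queues of every group on the device,
 * including the root group.
 */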
void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	if (!bfqg->entity.dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

	seq_printf(sf, "default %u\n", bfqgd->weight);
	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
			  &blkcg_policy_bfq, 0, false);
	return 0;
}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
	weight = dev_weight ?: weight;

	bfqg->entity.dev_weight = dev_weight;
	/*
	 * Setting the prio_changed flag of the entity
	 * to 1 with new_weight == weight would re-set
	 * the value of the weight to its ioprio mapping.
	 * Set the flag only if necessary.
	 */
	if ((unsigned short)weight != bfqg->entity.new_weight) {
		bfqg->entity.new_weight = (unsigned short)weight;
		/*
		 * Make sure that the above new value has been
		 * stored in bfqg->entity.new_weight before
		 * setting the prio_changed flag. In fact,
		 * this flag may be read asynchronously (in
		 * critical sections protected by a different
		 * lock than that held here), and finding this
		 * flag set may cause the execution of the code
		 * for updating parameters whose value may
		 * depend also on bfqg->entity.new_weight (in
		 * __bfq_entity_update_weight_prio).
		 * This barrier makes sure that the new value
		 * of bfqg->entity.new_weight is correctly
		 * seen in that code.
		 */
		smp_wmb();
		bfqg->entity.prio_changed = 1;
	}
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val, 0);
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{
	int ret;
	struct blkg_conf_ctx ctx;
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct bfq_group *bfqg;
	u64 v;

	blkg_conf_init(&ctx, buf);

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, &ctx);
	if (ret)
		goto out;

	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* require "default" on dfl */
		ret = -ERANGE;
		if (!v)
			goto out;
	} else if (!strcmp(strim(ctx.body), "default")) {
		v = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	bfqg = blkg_to_bfqg(ctx.blkg);

	ret = -ERANGE;
	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
		ret = 0;
	}
out:
	blkg_conf_exit(&ctx);
	return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	char *endp;
	int ret;
	u64 v;

	buf = strim(buf);

	/* "WEIGHT" or "default WEIGHT" sets the default weight */
	v = simple_strtoull(buf, &endp, 0);
	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
		return ret ?: nbytes;
	}

	/* otherwise, "MAJ:MIN WEIGHT" sets the weight for a single device */
	return bfq_io_set_device_weight(of, buf, nbytes, off);
}
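
/*
 * Illustrative usage of the interface above (assuming a cgroup-v2
 * hierarchy with the io controller enabled; not part of this file):
 *
 *	echo 200 > io.bfq.weight		default weight = 200
 *	echo "8:16 300" > io.bfq.weight		weight of device 8:16 = 300
 *	echo "8:16 default" > io.bfq.weight	device 8:16 back to default
 */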

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(&blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct bfq_stat *stat;

		if (!pos_blkg->online)
			continue;

		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return __blkg_prfill_u64(sf, pd, sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9); /* bytes -> 512 B sectors */
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample tmp;

	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
			offsetof(struct bfq_group, stats.bytes), &tmp);

	return __blkg_prfill_u64(sf, pd,
		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
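
/*
 * Activate the bfq blkcg policy on the request queue and return the
 * root bfq_group; the root group's remaining fields are then completed
 * by the caller (see the comment in bfq_pd_init() about bfq_init_queue()).
 */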
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight_legacy,
		.write_u64 = bfq_io_set_weight_legacy,
	},
	{
		.name = "bfq.weight_device",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_serviced",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat_recursive,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
	{ } /* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else /* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

void bfqg_and_blkg_put(struct bfq_group *bfqg) {}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */