// SPDX-License-Identifier: GPL-2.0
/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);

	return true;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

/**
 * elevator_match - Check whether @e's name or alias matches @name
 * @e: Scheduler to test
 * @name: Elevator name to test
 *
 * Return true if the elevator @e's name or alias matches @name.
 */
static bool elevator_match(const struct elevator_type *e, const char *name)
{
	return !strcmp(e->elevator_name, name) ||
		(e->elevator_alias && !strcmp(e->elevator_alias, name));
}

static struct elevator_type *__elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list)
		if (elevator_match(e, name))
			return e;

	return NULL;
}
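
/*
 * Find a registered elevator type by name and take a reference on it.
 * Returns NULL if no matching scheduler is registered or a reference
 * cannot be taken (e.g. its module is being unloaded).
 */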
static struct elevator_type *elevator_find_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);
	e = __elevator_find(name);
	if (e && (!elevator_tryget(e)))
		e = NULL;
	spin_unlock(&elv_list_lock);
	return e;
}

static const struct kobj_type elv_ktype;
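
/*
 * Allocate and initialize an elevator_queue for @q, taking a reference on
 * the elevator type @e.
 */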
struct elevator_queue *elevator_alloc(struct request_queue *q,
				      struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	__elevator_get(e);
	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);
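
/*
 * kobject release callback: drop the elevator type reference and free the
 * elevator_queue.
 */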
static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}
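
/*
 * Tear down the scheduler attached to @q and drop the last reference on its
 * kobject.
 */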
void elevator_exit(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	ioc_clear_queue(q);
	blk_mq_sched_free_rqs(q);
	mutex_lock(&e->sysfs_lock);
	blk_mq_exit_sched(q, e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
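
/*
 * Merge-hash helpers: requests are hashed by their end sector
 * (rq_hash_key()), so a bio starting at that sector can quickly find a
 * back-merge candidate.
 */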
static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;

		if (blk_discard_mergable(__rq))
			return ELEVATOR_DISCARD_MERGE;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.request_merge)
		return e->type->ops.request_merge(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise. 'free' will contain all
 * requests that need to be freed.
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
			      struct list_head *free)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
		list_add(&rq->queuelist, free);
		return true;
	}

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		list_add(&rq->queuelist, free);
		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}
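
/*
 * Called after a bio has been merged into @rq: let the scheduler update its
 * bookkeeping, rehash the request if its end sector changed, and cache it as
 * the last merged request.
 */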
void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.request_merged)
		e->type->ops.request_merged(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.requests_merged)
		e->type->ops.requests_merged(q, rq, next);

	elv_rqhash_reposition(q, rq);
	q->last_merge = rq;
}
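
/*
 * Ask the io scheduler for the request that follows or precedes @rq in its
 * sort order, if it implements the corresponding hook.
 */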
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.next_request)
		return e->type->ops.next_request(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.former_request)
		return e->type->ops.former_request(q, rq);

	return NULL;
}
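
/*
 * sysfs show/store dispatchers for the per-scheduler attributes published
 * under the queue's "iosched" directory; both run under e->sysfs_lock.
 */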
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static const struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};
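
/*
 * Add the "iosched" kobject and the scheduler's sysfs attributes for @q,
 * optionally emitting a KOBJ_ADD uevent.
 */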
int elv_register_queue(struct request_queue *q, bool uevent)
{
	struct elevator_queue *e = q->elevator;
	int error;

	lockdep_assert_held(&q->sysfs_lock);

	error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;

		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		if (uevent)
			kobject_uevent(&e->kobj, KOBJ_ADD);

		set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
	}
	return error;
}

void elv_unregister_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	lockdep_assert_held(&q->sysfs_lock);

	if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
	}
}
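
/*
 * Register/unregister an io scheduler type with the block layer. As an
 * illustration only (the names below are hypothetical and not part of this
 * file), a scheduler module typically does something like:
 *
 *	static struct elevator_type my_sched = {
 *		.ops = {
 *			.insert_requests	= my_insert_requests,
 *			.dispatch_request	= my_dispatch_request,
 *			.finish_request		= my_finish_request,
 *		},
 *		.elevator_name	= "my-sched",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 * and calls elv_register(&my_sched) from its module_init() and
 * elv_unregister(&my_sched) from its module_exit().
 */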
int elv_register(struct elevator_type *e)
{
	/* finish request is mandatory */
	if (WARN_ON_ONCE(!e->ops.finish_request))
		return -EINVAL;
	/* insert_requests and dispatch_request are mandatory */
	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
		return -EINVAL;

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (__elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);
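
/*
 * Only blk-mq devices whose tag set does not opt out of scheduling can have
 * an io scheduler attached.
 */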
static inline bool elv_support_iosched(struct request_queue *q)
{
	if (!queue_is_mq(q) ||
	    (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
		return false;
	return true;
}

/*
 * For single queue devices, default to using mq-deadline. If we have multiple
 * queues or mq-deadline is not available, default to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
	if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
		return NULL;

	if (q->nr_hw_queues != 1 &&
	    !blk_mq_is_shared_tags(q->tag_set->flags))
		return NULL;

	return elevator_find_get("mq-deadline");
}

/*
 * Use the default elevator settings. If the chosen elevator initialization
 * fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err;

	if (!elv_support_iosched(q))
		return;

	WARN_ON_ONCE(blk_queue_registered(q));

	if (unlikely(q->elevator))
		return;

	e = elevator_get_default(q);
	if (!e)
		return;

	/*
	 * We are called before adding disk, when there isn't any FS I/O,
	 * so freezing queue plus canceling dispatch work is enough to
	 * drain any dispatch activities originated from passthrough
	 * requests, then no need to quiesce queue which may add long boot
	 * latency, especially when lots of disks are involved.
	 *
	 * The disk isn't added yet, so the queue freeze lock is only
	 * verified manually.
	 */
	blk_freeze_queue_start_non_owner(q);
	blk_freeze_acquire_lock(q, true, false);
	blk_mq_freeze_queue_wait(q);

	blk_mq_cancel_work_sync(q);

	err = blk_mq_init_sched(q, e);

	blk_unfreeze_release_lock(q, true, false);
	blk_mq_unfreeze_queue_non_owner(q);

	if (err) {
		pr_warn("\"%s\" elevator initialization failed, "
			"falling back to \"none\"\n", e->elevator_name);
	}

	elevator_put(e);
}

/*
 * Switch to new_e io scheduler.
 *
 * If switching fails, we are most likely running out of memory and not able
 * to restore the old io scheduler, so we leave the io scheduler set to none.
 */
int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	int ret;

	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	if (q->elevator) {
		elv_unregister_queue(q);
		elevator_exit(q);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out_unfreeze;

	ret = elv_register_queue(q, true);
	if (ret) {
		elevator_exit(q);
		goto out_unfreeze;
	}
	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

out_unfreeze:
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	if (ret) {
		pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
			new_e->elevator_name);
	}

	return ret;
}
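
/*
 * Tear down the current scheduler and leave the queue with no io scheduler
 * ("none"), restoring the tag set's default queue depth.
 */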
void elevator_disable(struct request_queue *q)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	elv_unregister_queue(q);
	elevator_exit(q);
	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
	q->elevator = NULL;
	q->nr_requests = q->tag_set->queue_depth;
	blk_add_trace_msg(q, "elv switch: none");

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int elevator_change(struct request_queue *q, const char *elevator_name)
{
	struct elevator_type *e;
	int ret;

	/* Make sure queue is not in the middle of being removed */
	if (!blk_queue_registered(q))
		return -ENOENT;

	if (!strncmp(elevator_name, "none", 4)) {
		if (q->elevator)
			elevator_disable(q);
		return 0;
	}

	if (q->elevator && elevator_match(q->elevator->type, elevator_name))
		return 0;

	e = elevator_find_get(elevator_name);
	if (!e)
		return -EINVAL;
	ret = elevator_switch(q, e);
	elevator_put(e);
	return ret;
}
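
/*
 * If the scheduler named in @buf is not yet registered, try to load its
 * module ("<name>-iosched") before the actual switch is attempted.
 */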
int elv_iosched_load_module(struct gendisk *disk, const char *buf,
			    size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *found;
	const char *name;

	if (!elv_support_iosched(disk->queue))
		return -EOPNOTSUPP;

	strscpy(elevator_name, buf, sizeof(elevator_name));
	name = strstrip(elevator_name);

	spin_lock(&elv_list_lock);
	found = __elevator_find(name);
	spin_unlock(&elv_list_lock);

	if (!found)
		request_module("%s-iosched", name);

	return 0;
}
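
/*
 * sysfs store handler for the queue's "scheduler" attribute. For
 * illustration, something like
 *
 *	echo mq-deadline > /sys/block/<dev>/queue/scheduler
 *
 * ends up switching the queue to mq-deadline via elevator_change().
 */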
ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	int ret;

	if (!elv_support_iosched(disk->queue))
		return count;

	strscpy(elevator_name, buf, sizeof(elevator_name));
	ret = elevator_change(disk->queue, strstrip(elevator_name));
	if (!ret)
		return count;
	return ret;
}
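
/*
 * sysfs show handler: list all registered schedulers, with the currently
 * active one (or "none") in square brackets.
 */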
ssize_t elv_iosched_show(struct gendisk *disk, char *name)
{
	struct request_queue *q = disk->queue;
	struct elevator_type *cur = NULL, *e;
	int len = 0;

	if (!elv_support_iosched(q))
		return sprintf(name, "none\n");

	if (!q->elevator) {
		len += sprintf(name+len, "[none] ");
	} else {
		len += sprintf(name+len, "none ");
		cur = q->elevator->type;
	}

	spin_lock(&elv_list_lock);
	list_for_each_entry(e, &elv_list, list) {
		if (e == cur)
			len += sprintf(name+len, "[%s] ", e->elevator_name);
		else
			len += sprintf(name+len, "%s ", e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}
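
/*
 * Generic RB-tree helpers returning the request sorted immediately before
 * or after @rq; schedulers that keep their requests in an elv_rb tree can
 * use them as their former_request/next_request hooks.
 */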
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);

static int __init elevator_setup(char *str)
{
	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
		"Please use sysfs to set IO scheduler for individual devices.\n");
	return 1;
}

__setup("elevator=", elevator_setup);