/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return nfs_pgio_has_mirroring(desc) ?
		&desc->pg_mirrors[desc->pg_mirror_idx] :
		&desc->pg_mirrors[0];
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->io_completion = desc->pg_io_completion;
	hdr->dreq = desc->pg_dreq;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
	    || pos < hdr->io_start + hdr->good_bytes) {
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_var_event_killable(&l_ctx->io_count,
				       !atomic_read(&l_ctx->io_count));
}
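
/*
 * Example (editor's sketch, not part of the original file): the counter
 * above pairs with the wake_up_var() call in nfs_clear_request() below.
 * A caller that needs to drain outstanding I/O on a lock context before
 * proceeding would typically do:
 *
 *	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
 *
 *	if (!IS_ERR(l_ctx)) {
 *		if (nfs_iocounter_wait(l_ctx) == -ERESTARTSYS)
 *			... bail out, a fatal signal is pending ...
 *		nfs_put_lock_context(l_ctx);
 *	}
 */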

/**
 * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O
 * to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_counter to check
 *
 * Returns true if there is outstanding I/O to wait on and the
 * task has been put to sleep.
 */
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
	struct inode *inode = d_inode(l_ctx->open_context->dentry);
	bool ret = false;

	if (atomic_read(&l_ctx->io_count) > 0) {
		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
		ret = true;
	}

	if (atomic_read(&l_ctx->io_count) == 0) {
		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
		ret = false;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);

/*
 * nfs_page_set_headlock - set the request PG_HEADLOCK
 * @req: request that is to be locked
 *
 * this lock must be held when modifying req->wb_head
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_set_headlock(struct nfs_page *req)
{
	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
		return 0;

	set_bit(PG_CONTENDED1, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_clear_headlock - clear the request PG_HEADLOCK
 * @req: request that is to be unlocked
 */
void
nfs_page_clear_headlock(struct nfs_page *req)
{
	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED1, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_HEADLOCK);
}

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req: request in group that is to be locked
 *
 * this lock must be held when traversing or modifying the page
 * group list
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_group_lock(struct nfs_page *req)
{
	int ret;

	ret = nfs_page_set_headlock(req);
	if (ret || req->wb_head == req)
		return ret;
	return nfs_page_set_headlock(req->wb_head);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req: request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	if (req != req->wb_head)
		nfs_page_clear_headlock(req->wb_head);
	nfs_page_clear_headlock(req);
}
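
/*
 * Example (editor's sketch, not part of the original file): any walk of
 * the circular wb_this_page list must be bracketed by the group lock,
 * as the "locked" helper below assumes:
 *
 *	struct nfs_page *tmp;
 *
 *	nfs_page_group_lock(req);
 *	tmp = req;
 *	do {
 *		... inspect tmp->wb_flags, tmp->wb_bytes, etc. ...
 *		tmp = tmp->wb_this_page;
 *	} while (tmp != req);
 *	nfs_page_group_unlock(req);
 */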

/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}
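
/*
 * Example (editor's sketch, not part of the original file): the write
 * path uses this helper to fire page-wide events exactly once per
 * group, e.g. ending page writeback only when the last subrequest
 * covering the page finishes (cf. nfs_end_page_writeback() in write.c):
 *
 *	if (nfs_page_group_sync_on_bit(req, PG_WB_END))
 *		end_page_writeback(req->wb_page);
 */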

/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = page_file_mapping(req->wb_page)->host;
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			atomic_long_inc(&NFS_I(inode)->nrequests);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		goto out;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
out:
	/* subrequests must release the ref on the head request */
	if (head != req)
		nfs_release_request(head);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @last: last nfs request created for this page group or NULL if head
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure that it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   struct nfs_page *last, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page *req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. */
	req->wb_page = page;
	if (page) {
		req->wb_index = page_index(page);
		get_page(page);
	}
	req->wb_offset = offset;
	req->wb_pgbase = offset;
	req->wb_bytes = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	nfs_page_group_init(req, last);
	return req;
}
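
/*
 * Example (editor's sketch, not part of the original file): the read
 * path creates a head request covering the first @len bytes of a
 * locked @page and feeds it to a pageio descriptor, roughly:
 *
 *	struct nfs_page *req;
 *
 *	req = nfs_create_request(ctx, page, NULL, 0, len);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	if (!nfs_pageio_add_request(&desc, req)) {
 *		nfs_release_request(req);
 *		return desc.pg_error;
 *	}
 */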

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clean up
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count)) {
			wake_up_var(&l_ctx->io_count);
			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
		}
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_free_request - Free an NFS read/write request
 * @req: request to free
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 */
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	if (!test_bit(PG_BUSY, &req->wb_flags))
		return 0;
	set_bit(PG_CONTENDED2, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(nfs_wait_on_request);
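
/*
 * Example (editor's sketch, not part of the original file): PG_BUSY is
 * the per-request lock, so a writer that finds a request busy typically
 * loops on the pattern below (cf. nfs_lock_and_join_requests() in
 * write.c):
 *
 *	while (!nfs_lock_request(req)) {
 *		int err = nfs_wait_on_request(req);
 *		if (err)
 *			return err;
 *	}
 */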

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
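
/*
 * Worked example (editor's note, not part of the original file): with
 * 4K pages and 8-byte pointers, the size check above caps a coalesced
 * I/O at the point where its page array would outgrow a single page:
 *
 *	PAGE_SIZE / sizeof(struct page *) = 4096 / 8 = 512 entries,
 *
 * i.e. at most 512 pages (about 2 MB of data) per RPC, no matter how
 * large mirror->pg_bsize is.
 */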

struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		spin_lock_init(&hdr->lock);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	nfs_pgio_data_destroy(hdr);
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req);
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase;
	hdr->args.pages = hdr->page_array.pagevec;
	hdr->args.count = count;
	hdr->args.context = get_nfs_open_context(req->wb_context);
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
		/* fall through */
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr = &hdr->fattr;
	hdr->res.count = 0;
	hdr->res.eof = 0;
	hdr->res.verf = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;

	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	hdr->completion_ops->completion(hdr);
}

static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
				   unsigned int bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_io_completion = NULL;
	desc->pg_dreq = NULL;
	desc->pg_bsize = bsize;
	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;
	desc->pg_mirrors_dynamic = NULL;
	desc->pg_mirrors = desc->pg_mirrors_static;
	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
}
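
/*
 * Example (editor's sketch, not part of the original file): the typical
 * descriptor lifecycle as driven by the read/write paths, assuming
 * @inode and a list of requests on @head.  Callers such as
 * nfs_pageio_init_read() normally wrap nfs_pageio_init() with
 * protocol-specific ops:
 *
 *	struct nfs_pageio_descriptor desc;
 *	struct nfs_page *req;
 *
 *	nfs_pageio_init(&desc, inode, &nfs_pgio_rw_ops,
 *			compl_ops, rw_ops, NFS_SERVER(inode)->rsize, 0);
 *	while (!list_empty(head)) {
 *		req = nfs_list_entry(head->next);
 *		if (!nfs_pageio_add_request(&desc, req))
 *			break;			// desc.pg_error is set
 *	}
 *	nfs_pageio_complete(&desc);		// flush whatever coalesced
 */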

/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *req;
	struct page **pages,
		     *last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	struct nfs_page_array *pg_array = &hdr->page_array;
	unsigned int pagecount, pageused;
	gfp_t gfp_flags = GFP_KERNEL;

	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
	pg_array->npages = pagecount;

	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
		pg_array->pagevec = pg_array->page_array;
	else {
		if (hdr->rw_mode == FMODE_WRITE)
			gfp_flags = GFP_NOIO;
		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
		if (!pg_array->pagevec) {
			pg_array->npages = 0;
			nfs_pgio_error(hdr);
			desc->pg_error = -ENOMEM;
			return desc->pg_error;
		}
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_move_request(req, &hdr->pages);

		if (!last_page || last_page != req->wb_page) {
			pageused++;
			if (pageused > pagecount)
				break;
			*pages++ = last_page = req->wb_page;
		}
	}
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags, 0);
	return ret;
}

static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
			 unsigned int mirror_count)
{
	struct nfs_pgio_mirror *ret;
	unsigned int i;

	kfree(desc->pg_mirrors_dynamic);
	desc->pg_mirrors_dynamic = NULL;
	if (mirror_count == 1)
		return desc->pg_mirrors_static;
	ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_NOFS);
	if (ret != NULL) {
		for (i = 0; i < mirror_count; i++)
			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
		desc->pg_mirrors_dynamic = ret;
	}
	return ret;
}

/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				       struct nfs_page *req)
{
	unsigned int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
		return;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
		pgio->pg_error = -EINVAL;
		return;
	}

	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
	if (pgio->pg_mirrors == NULL) {
		pgio->pg_error = -ENOMEM;
		pgio->pg_mirrors = pgio->pg_mirrors_static;
		mirror_count = 1;
	}
	pgio->pg_mirror_count = mirror_count;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
				   const struct nfs_lock_context *l2)
{
	return l1->lockowner == l2->lockowner;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	size_t size;
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
			return false;
		flctx = d_inode(req->wb_context->dentry)->i_flctx;
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return false;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return false;
		if (req->wb_page == prev->wb_page) {
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return false;
		} else {
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
				return false;
		}
	}
	size = pgio->pg_ops->pg_test(pgio, prev, req);
	WARN_ON_ONCE(size > req->wb_bytes);
	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}
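
/*
 * Worked example (editor's note, not part of the original file): with
 * 4K pages, a previous request covering all of page index 3
 * (req_offset(prev) = 12288, wb_pgbase = 0, wb_bytes = 4096) coalesces
 * with a request starting at file offset 16384 on page index 4 with
 * wb_pgbase = 0.  A request at the same offset whose wb_pgbase is
 * nonzero, or one at any other file offset, fails the contiguity
 * checks above and starts a new RPC instead.
 */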

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *prev = NULL;

	if (mirror->pg_count != 0) {
		prev = nfs_list_entry(mirror->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
	}
	if (!nfs_can_coalesce_requests(prev, req, desc))
		return 0;
	nfs_list_move_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			mirror->pg_bytes_written += mirror->pg_count;
	}
	if (list_empty(&mirror->pg_list)) {
		mirror->pg_count = 0;
		mirror->pg_base = 0;
	}
}

static void
nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	LIST_HEAD(head);

	nfs_list_move_request(req, &head);
	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	nfs_page_group_lock(req);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0 || mirror->pg_recoalesce)
				goto out_cleanup_subreq;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			subreq = nfs_create_request(req->wb_context,
					req->wb_page,
					subreq, pgbase, bytes_left);
			if (IS_ERR(subreq))
				goto err_ptr;
			nfs_lock_request(subreq);
			subreq->wb_offset = offset;
			subreq->wb_index = req->wb_index;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
out_cleanup_subreq:
	if (req != subreq)
		nfs_pageio_cleanup_request(desc, subreq);
	return 0;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_bytes_written -= mirror->pg_count;
		mirror->pg_count = 0;
		mirror->pg_base = 0;
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}

static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
					 struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}

static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
{
	u32 midx;
	struct nfs_pgio_mirror *mirror;

	if (!desc->pg_error)
		return;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = &desc->pg_mirrors[midx];
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
				desc->pg_error);
	}
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq, *lastreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		if (midx) {
			nfs_page_group_lock(req);

			/* find the last request */
			for (lastreq = req->wb_head;
			     lastreq->wb_this_page != req->wb_head;
			     lastreq = lastreq->wb_this_page)
				;

			dupreq = nfs_create_request(req->wb_context,
					req->wb_page, lastreq, pgbase, bytes);

			if (IS_ERR(dupreq)) {
				nfs_page_group_unlock(req);
				desc->pg_error = PTR_ERR(dupreq);
				goto out_failed;
			}

			nfs_lock_request(dupreq);
			nfs_page_group_unlock(req);
			dupreq->wb_offset = offset;
			dupreq->wb_index = req->wb_index;
		} else
			dupreq = req;

		if (nfs_pgio_has_mirroring(desc))
			desc->pg_mirror_idx = midx;
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_cleanup_subreq;
	}

	return 1;

out_cleanup_subreq:
	if (req != dupreq)
		nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
	/* remember fatal errors */
	if (nfs_error_is_fatal(desc->pg_error))
		nfs_context_set_write_error(req->wb_context,
				desc->pg_error);
	nfs_pageio_error_cleanup(desc);
	return 0;
}

/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *				nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: mirror index to complete
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
				       u32 mirror_idx)
{
	struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
	u32 restore_idx = desc->pg_mirror_idx;

	if (nfs_pgio_has_mirroring(desc))
		desc->pg_mirror_idx = mirror_idx;
	for (;;) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
	desc->pg_mirror_idx = restore_idx;
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move request from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(pages);

	desc->pg_io_completion = hdr->io_completion;
	desc->pg_dreq = hdr->dreq;
	list_splice_init(&hdr->pages, &pages);
	while (!list_empty(&pages)) {
		struct nfs_page *req = nfs_list_entry(pages.next);

		if (!nfs_pageio_add_request(desc, req))
			break;
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&pages)) {
		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
		hdr->completion_ops->error_cleanup(&pages, err);
		nfs_set_pgio_error(hdr, err, hdr->io_start);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);
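
/*
 * Example (editor's sketch, not part of the original file): pNFS uses
 * this entry point to redirect I/O that failed against a data server
 * back to the MDS, roughly (cf. pnfs_read_through_mds() in pnfs.c):
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
 *	hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
 */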

/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_error < 0)
		nfs_pageio_error_cleanup(desc);
	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = &desc->pg_mirrors[midx];
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			if (index != prev->wb_index + 1) {
				nfs_pageio_complete(desc);
				break;
			}
		}
	}
}

/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	nfs_pageio_complete(pgio);
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};