/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"
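
/**
 * gfs2_page_add_databufs - Add a page's buffers to the current transaction
 * @ip: The inode
 * @page: The (locked) page
 * @from: Offset of the first byte of the range within the page
 * @len: Length of the range in bytes
 *
 * (Descriptive comment added for readability; behaviour as per the code
 * below.) Walks the page's buffer heads and, for each buffer overlapping
 * the byte range [@from, @from + @len), marks it uptodate and adds it to
 * the current transaction as journaled data.
 */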
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the page should be written, or 0 if it has already been
 *          dealt with here (redirtied or invalidated) and unlocked.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						    tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */
static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
	    !page_has_buffers(page)) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or a negative errno
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */

void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: copied bytes or errno
 */
int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
			   loff_t pos, unsigned copied,
			   struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON(pos + copied > gfs2_max_stuffed_size(ip));

	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}
	return copied;
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Setting PageChecked here tells __gfs2_jdata_writepage() that the page's
 * buffers still need to be added to the journal before writeback.
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
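
/*
 * gfs2_discard - Discard a buffer ahead of page invalidation
 *
 * Descriptive note (added): clears the buffer's dirty state and, if the
 * buffer has journal bookkeeping attached (a struct gfs2_bufdata),
 * unlinks it from the relevant log list so that the block is not written
 * back after the containing page has been invalidated.
 */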
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
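
/*
 * gfs2_invalidatepage - Invalidate (part of) a page
 *
 * Descriptive note (added): discards any journaled buffers that lie
 * entirely within [@offset, @offset + @length); when the whole page is
 * being invalidated, also attempts to release its buffers.
 */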
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, 0 otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}
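
/*
 * Address space operations for the three GFS2 data journaling modes:
 * writeback, ordered, and jdata. gfs2_set_aops() below selects the
 * table matching the inode's mode.
 */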
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = __set_page_dirty_buffers,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
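
/**
 * gfs2_set_aops - Set the address space operations for an inode
 * @inode: The inode
 *
 * Selects the operations table that matches the inode's data journaling
 * mode (writeback, ordered, or jdata).
 */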
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}