buffer.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/fs/buffer.c
  4. *
  5. * Copyright (C) 1991, 1992, 2002 Linus Torvalds
  6. */
  7. /*
  8. * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
  9. *
  10. * Removed a lot of unnecessary code and simplified things now that
  11. * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
  12. *
  13. * Speed up hash, lru, and free list operations. Use gfp() for allocating
  14. * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
  15. *
  16. * Added 32k buffer block sizes - these are required by older ARM systems. - RMK
  17. *
  18. * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
  19. */
  20. #include <linux/kernel.h>
  21. #include <linux/sched/signal.h>
  22. #include <linux/syscalls.h>
  23. #include <linux/fs.h>
  24. #include <linux/iomap.h>
  25. #include <linux/mm.h>
  26. #include <linux/percpu.h>
  27. #include <linux/slab.h>
  28. #include <linux/capability.h>
  29. #include <linux/blkdev.h>
  30. #include <linux/file.h>
  31. #include <linux/quotaops.h>
  32. #include <linux/highmem.h>
  33. #include <linux/export.h>
  34. #include <linux/backing-dev.h>
  35. #include <linux/writeback.h>
  36. #include <linux/hash.h>
  37. #include <linux/suspend.h>
  38. #include <linux/buffer_head.h>
  39. #include <linux/task_io_accounting_ops.h>
  40. #include <linux/bio.h>
  41. #include <linux/cpu.h>
  42. #include <linux/bitops.h>
  43. #include <linux/mpage.h>
  44. #include <linux/bit_spinlock.h>
  45. #include <linux/pagevec.h>
  46. #include <linux/sched/mm.h>
  47. #include <trace/events/block.h>
  48. #include <linux/fscrypt.h>
  49. #include <linux/fsverity.h>
  50. #include <linux/sched/isolation.h>
  51. #include "internal.h"
  52. static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
  53. static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
  54. enum rw_hint hint, struct writeback_control *wbc);
  55. #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
  56. inline void touch_buffer(struct buffer_head *bh)
  57. {
  58. trace_block_touch_buffer(bh);
  59. folio_mark_accessed(bh->b_folio);
  60. }
  61. EXPORT_SYMBOL(touch_buffer);
  62. void __lock_buffer(struct buffer_head *bh)
  63. {
  64. wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
  65. }
  66. EXPORT_SYMBOL(__lock_buffer);
  67. void unlock_buffer(struct buffer_head *bh)
  68. {
  69. clear_bit_unlock(BH_Lock, &bh->b_state);
  70. smp_mb__after_atomic();
  71. wake_up_bit(&bh->b_state, BH_Lock);
  72. }
  73. EXPORT_SYMBOL(unlock_buffer);
  74. /*
  75. * Returns whether the folio has dirty or writeback buffers. If all the buffers
  76. * are unlocked and clean then the folio_test_dirty information is stale. If
  77. * any of the buffers are locked, it is assumed they are locked for IO.
  78. */
  79. void buffer_check_dirty_writeback(struct folio *folio,
  80. bool *dirty, bool *writeback)
  81. {
  82. struct buffer_head *head, *bh;
  83. *dirty = false;
  84. *writeback = false;
  85. BUG_ON(!folio_test_locked(folio));
  86. head = folio_buffers(folio);
  87. if (!head)
  88. return;
  89. if (folio_test_writeback(folio))
  90. *writeback = true;
  91. bh = head;
  92. do {
  93. if (buffer_locked(bh))
  94. *writeback = true;
  95. if (buffer_dirty(bh))
  96. *dirty = true;
  97. bh = bh->b_this_page;
  98. } while (bh != head);
  99. }
  100. /*
  101. * Block until a buffer comes unlocked. This doesn't stop it
  102. * from becoming locked again - you have to lock it yourself
  103. * if you want to preserve its state.
  104. */
  105. void __wait_on_buffer(struct buffer_head * bh)
  106. {
  107. wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
  108. }
  109. EXPORT_SYMBOL(__wait_on_buffer);
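/*
 * Report a buffer I/O error unless the buffer is marked BH_Quiet. The
 * message is rate-limited; @msg is appended (e.g. ", lost sync page write").
 */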
  110. static void buffer_io_error(struct buffer_head *bh, char *msg)
  111. {
  112. if (!test_bit(BH_Quiet, &bh->b_state))
  113. printk_ratelimited(KERN_ERR
  114. "Buffer I/O error on dev %pg, logical block %llu%s\n",
  115. bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
  116. }
  117. /*
  118. * End-of-IO handler helper function which does not touch the bh after
  119. * unlocking it.
  120. * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
  121. * a race there is benign: unlock_buffer() only uses the bh's address for
  122. * hashing after unlocking the buffer, so it doesn't actually touch the bh
  123. * itself.
  124. */
  125. static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
  126. {
  127. if (uptodate) {
  128. set_buffer_uptodate(bh);
  129. } else {
  130. /* This happens due to failed read-ahead attempts. */
  131. clear_buffer_uptodate(bh);
  132. }
  133. unlock_buffer(bh);
  134. }
  135. /*
  136. * Default synchronous end-of-IO handler. Just mark it up-to-date and
  137. * unlock the buffer.
  138. */
  139. void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
  140. {
  141. put_bh(bh);
  142. __end_buffer_read_notouch(bh, uptodate);
  143. }
  144. EXPORT_SYMBOL(end_buffer_read_sync);
  145. void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
  146. {
  147. if (uptodate) {
  148. set_buffer_uptodate(bh);
  149. } else {
  150. buffer_io_error(bh, ", lost sync page write");
  151. mark_buffer_write_io_error(bh);
  152. clear_buffer_uptodate(bh);
  153. }
  154. unlock_buffer(bh);
  155. put_bh(bh);
  156. }
  157. EXPORT_SYMBOL(end_buffer_write_sync);
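/*
 * Pagecache lookup slow path: find the folio covering @block in the block
 * device's mapping and walk its buffer ring for a matching b_blocknr,
 * taking a reference on the buffer if found. @atomic callers serialise
 * against try_to_free_buffers() with i_private_lock instead of the folio
 * lock. A ratelimited warning is printed if every buffer was mapped yet
 * none matched the requested block.
 */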
  158. static struct buffer_head *
  159. __find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
  160. {
  161. struct address_space *bd_mapping = bdev->bd_mapping;
  162. const int blkbits = bd_mapping->host->i_blkbits;
  163. struct buffer_head *ret = NULL;
  164. pgoff_t index;
  165. struct buffer_head *bh;
  166. struct buffer_head *head;
  167. struct folio *folio;
  168. int all_mapped = 1;
  169. static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
  170. index = ((loff_t)block << blkbits) / PAGE_SIZE;
  171. folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
  172. if (IS_ERR(folio))
  173. goto out;
  174. /*
  175. * Folio lock protects the buffers. Callers that cannot block
  176. * will fallback to serializing vs try_to_free_buffers() via
  177. * the i_private_lock.
  178. */
  179. if (atomic)
  180. spin_lock(&bd_mapping->i_private_lock);
  181. else
  182. folio_lock(folio);
  183. head = folio_buffers(folio);
  184. if (!head)
  185. goto out_unlock;
  186. bh = head;
  187. do {
  188. if (!buffer_mapped(bh))
  189. all_mapped = 0;
  190. else if (bh->b_blocknr == block) {
  191. ret = bh;
  192. get_bh(bh);
  193. goto out_unlock;
  194. }
  195. bh = bh->b_this_page;
  196. } while (bh != head);
  197. /* We might be here because some of the buffers on this page are
  198. * not mapped. This is due to various races between
  199. * file I/O on the block device and getblk(). It gets dealt with
  200. * elsewhere; don't report an error if we had some unmapped buffers.
  201. */
  202. ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
  203. if (all_mapped && __ratelimit(&last_warned)) {
  204. printk("__find_get_block_slow() failed. block=%llu, "
  205. "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
  206. "device %pg blocksize: %d\n",
  207. (unsigned long long)block,
  208. (unsigned long long)bh->b_blocknr,
  209. bh->b_state, bh->b_size, bdev,
  210. 1 << blkbits);
  211. }
  212. out_unlock:
  213. if (atomic)
  214. spin_unlock(&bd_mapping->i_private_lock);
  215. else
  216. folio_unlock(folio);
  217. folio_put(folio);
  218. out:
  219. return ret;
  220. }
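/*
 * Completion handler for one buffer of an asynchronous folio read. Mark
 * the buffer uptodate (or not), clear its async_read state and, if this
 * was the last async-read buffer outstanding on the folio, finish the
 * folio read - marking the folio uptodate only if all buffers are.
 */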
  221. static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
  222. {
  223. unsigned long flags;
  224. struct buffer_head *first;
  225. struct buffer_head *tmp;
  226. struct folio *folio;
  227. int folio_uptodate = 1;
  228. BUG_ON(!buffer_async_read(bh));
  229. folio = bh->b_folio;
  230. if (uptodate) {
  231. set_buffer_uptodate(bh);
  232. } else {
  233. clear_buffer_uptodate(bh);
  234. buffer_io_error(bh, ", async page read");
  235. }
  236. /*
  237. * Be _very_ careful from here on. Bad things can happen if
  238. * two buffer heads end IO at almost the same time and both
  239. * decide that the page is now completely done.
  240. */
  241. first = folio_buffers(folio);
  242. spin_lock_irqsave(&first->b_uptodate_lock, flags);
  243. clear_buffer_async_read(bh);
  244. unlock_buffer(bh);
  245. tmp = bh;
  246. do {
  247. if (!buffer_uptodate(tmp))
  248. folio_uptodate = 0;
  249. if (buffer_async_read(tmp)) {
  250. BUG_ON(!buffer_locked(tmp));
  251. goto still_busy;
  252. }
  253. tmp = tmp->b_this_page;
  254. } while (tmp != bh);
  255. spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
  256. folio_end_read(folio, folio_uptodate);
  257. return;
  258. still_busy:
  259. spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
  260. return;
  261. }
  262. struct postprocess_bh_ctx {
  263. struct work_struct work;
  264. struct buffer_head *bh;
  265. };
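/*
 * Deferred work: run fsverity verification over the buffer's data and
 * complete the async read with the result.
 */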
  266. static void verify_bh(struct work_struct *work)
  267. {
  268. struct postprocess_bh_ctx *ctx =
  269. container_of(work, struct postprocess_bh_ctx, work);
  270. struct buffer_head *bh = ctx->bh;
  271. bool valid;
  272. valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
  273. end_buffer_async_read(bh, valid);
  274. kfree(ctx);
  275. }
  276. static bool need_fsverity(struct buffer_head *bh)
  277. {
  278. struct folio *folio = bh->b_folio;
  279. struct inode *inode = folio->mapping->host;
  280. return fsverity_active(inode) &&
  281. /* needed by ext4 */
  282. folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
  283. }
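/*
 * Deferred work: decrypt the buffer's pagecache data with fscrypt, then
 * either hand off to fsverity verification or complete the async read.
 */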
  284. static void decrypt_bh(struct work_struct *work)
  285. {
  286. struct postprocess_bh_ctx *ctx =
  287. container_of(work, struct postprocess_bh_ctx, work);
  288. struct buffer_head *bh = ctx->bh;
  289. int err;
  290. err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
  291. bh_offset(bh));
  292. if (err == 0 && need_fsverity(bh)) {
  293. /*
  294. * We use different work queues for decryption and for verity
  295. * because verity may require reading metadata pages that need
  296. * decryption, and we shouldn't recurse to the same workqueue.
  297. */
  298. INIT_WORK(&ctx->work, verify_bh);
  299. fsverity_enqueue_verify_work(&ctx->work);
  300. return;
  301. }
  302. end_buffer_async_read(bh, err == 0);
  303. kfree(ctx);
  304. }
  305. /*
  306. * I/O completion handler for block_read_full_folio() - pages
  307. * which come unlocked at the end of I/O.
  308. */
  309. static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
  310. {
  311. struct inode *inode = bh->b_folio->mapping->host;
  312. bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
  313. bool verify = need_fsverity(bh);
  314. /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
  315. if (uptodate && (decrypt || verify)) {
  316. struct postprocess_bh_ctx *ctx =
  317. kmalloc(sizeof(*ctx), GFP_ATOMIC);
  318. if (ctx) {
  319. ctx->bh = bh;
  320. if (decrypt) {
  321. INIT_WORK(&ctx->work, decrypt_bh);
  322. fscrypt_enqueue_decrypt_work(&ctx->work);
  323. } else {
  324. INIT_WORK(&ctx->work, verify_bh);
  325. fsverity_enqueue_verify_work(&ctx->work);
  326. }
  327. return;
  328. }
  329. uptodate = 0;
  330. }
  331. end_buffer_async_read(bh, uptodate);
  332. }
  333. /*
  334. * Completion handler for block_write_full_folio() - folios which are unlocked
  335. * during I/O, and which have the writeback flag cleared upon I/O completion.
  336. */
  337. static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
  338. {
  339. unsigned long flags;
  340. struct buffer_head *first;
  341. struct buffer_head *tmp;
  342. struct folio *folio;
  343. BUG_ON(!buffer_async_write(bh));
  344. folio = bh->b_folio;
  345. if (uptodate) {
  346. set_buffer_uptodate(bh);
  347. } else {
  348. buffer_io_error(bh, ", lost async page write");
  349. mark_buffer_write_io_error(bh);
  350. clear_buffer_uptodate(bh);
  351. }
  352. first = folio_buffers(folio);
  353. spin_lock_irqsave(&first->b_uptodate_lock, flags);
  354. clear_buffer_async_write(bh);
  355. unlock_buffer(bh);
  356. tmp = bh->b_this_page;
  357. while (tmp != bh) {
  358. if (buffer_async_write(tmp)) {
  359. BUG_ON(!buffer_locked(tmp));
  360. goto still_busy;
  361. }
  362. tmp = tmp->b_this_page;
  363. }
  364. spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
  365. folio_end_writeback(folio);
  366. return;
  367. still_busy:
  368. spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
  369. return;
  370. }
  371. /*
  372. * If a page's buffers are under async read-in (end_buffer_async_read
  373. * completion) then there is a possibility that another thread of
  374. * control could lock one of the buffers after it has completed
  375. * but while some of the other buffers have not completed. This
  376. * locked buffer would confuse end_buffer_async_read() into not unlocking
  377. * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
  378. * that this buffer is not under async I/O.
  379. *
  380. * The page comes unlocked when it has no locked buffer_async buffers
  381. * left.
  382. *
  383. * PageLocked prevents anyone starting new async I/O reads any of
  384. * the buffers.
  385. *
  386. * PageWriteback is used to prevent simultaneous writeout of the same
  387. * page.
  388. *
  389. * PageLocked prevents anyone from starting writeback of a page which is
  390. * under read I/O (PageWriteback is only ever set against a locked page).
  391. */
  392. static void mark_buffer_async_read(struct buffer_head *bh)
  393. {
  394. bh->b_end_io = end_buffer_async_read_io;
  395. set_buffer_async_read(bh);
  396. }
  397. static void mark_buffer_async_write_endio(struct buffer_head *bh,
  398. bh_end_io_t *handler)
  399. {
  400. bh->b_end_io = handler;
  401. set_buffer_async_write(bh);
  402. }
  403. void mark_buffer_async_write(struct buffer_head *bh)
  404. {
  405. mark_buffer_async_write_endio(bh, end_buffer_async_write);
  406. }
  407. EXPORT_SYMBOL(mark_buffer_async_write);
  408. /*
  409. * fs/buffer.c contains helper functions for buffer-backed address space's
  410. * fsync functions. A common requirement for buffer-based filesystems is
  411. * that certain data from the backing blockdev needs to be written out for
  412. * a successful fsync(). For example, ext2 indirect blocks need to be
  413. * written back and waited upon before fsync() returns.
  414. *
  415. * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
  416. * inode_has_buffers() and invalidate_inode_buffers() are provided for the
  417. * management of a list of dependent buffers at ->i_mapping->i_private_list.
  418. *
  419. * Locking is a little subtle: try_to_free_buffers() will remove buffers
  420. * from their controlling inode's queue when they are being freed. But
  421. * try_to_free_buffers() will be operating against the *blockdev* mapping
  422. * at the time, not against the S_ISREG file which depends on those buffers.
  423. * So the locking for i_private_list is via the i_private_lock in the address_space
  424. * which backs the buffers. Which is different from the address_space
  425. * against which the buffers are listed. So for a particular address_space,
  426. * mapping->i_private_lock does *not* protect mapping->i_private_list! In fact,
  427. * mapping->i_private_list will always be protected by the backing blockdev's
  428. * ->i_private_lock.
  429. *
  430. * Which introduces a requirement: all buffers on an address_space's
  431. * ->i_private_list must be from the same address_space: the blockdev's.
  432. *
  433. * address_spaces which do not place buffers at ->i_private_list via these
  434. * utility functions are free to use i_private_lock and i_private_list for
  435. * whatever they want. The only requirement is that list_empty(i_private_list)
  436. * be true at clear_inode() time.
  437. *
  438. * FIXME: clear_inode should not call invalidate_inode_buffers(). The
  439. * filesystems should do that. invalidate_inode_buffers() should just go
  440. * BUG_ON(!list_empty).
  441. *
  442. * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
  443. * take an address_space, not an inode. And it should be called
  444. * mark_buffer_dirty_fsync() to clearly define why those buffers are being
  445. * queued up.
  446. *
  447. * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
  448. * list if it is already on a list. Because if the buffer is on a list,
  449. * it *must* already be on the right one. If not, the filesystem is being
  450. * silly. This will save a ton of locking. But first we have to ensure
  451. * that buffers are taken *off* the old inode's list when they are freed
  452. * (presumably in truncate). That requires careful auditing of all
  453. * filesystems (do it inside bforget()). It could also be done by bringing
  454. * b_inode back.
  455. */
  456. /*
  457. * The buffer's backing address_space's i_private_lock must be held
  458. */
  459. static void __remove_assoc_queue(struct buffer_head *bh)
  460. {
  461. list_del_init(&bh->b_assoc_buffers);
  462. WARN_ON(!bh->b_assoc_map);
  463. bh->b_assoc_map = NULL;
  464. }
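/* Does the inode have any buffers queued on its mapping's i_private_list? */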
  465. int inode_has_buffers(struct inode *inode)
  466. {
  467. return !list_empty(&inode->i_data.i_private_list);
  468. }
  469. /*
  470. * osync is designed to support O_SYNC io. It waits synchronously for
  471. * all already-submitted IO to complete, but does not queue any new
  472. * writes to the disk.
  473. *
  474. * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
  475. * as you dirty the buffers, and then use osync_buffers_list() to wait for
  476. * completion. Any other dirty buffers which are not yet queued for
  477. * write will not be flushed to disk by the osync.
  478. */
  479. static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
  480. {
  481. struct buffer_head *bh;
  482. struct list_head *p;
  483. int err = 0;
  484. spin_lock(lock);
  485. repeat:
  486. list_for_each_prev(p, list) {
  487. bh = BH_ENTRY(p);
  488. if (buffer_locked(bh)) {
  489. get_bh(bh);
  490. spin_unlock(lock);
  491. wait_on_buffer(bh);
  492. if (!buffer_uptodate(bh))
  493. err = -EIO;
  494. brelse(bh);
  495. spin_lock(lock);
  496. goto repeat;
  497. }
  498. }
  499. spin_unlock(lock);
  500. return err;
  501. }
  502. /**
  503. * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
  504. * @mapping: the mapping which wants those buffers written
  505. *
  506. * Starts I/O against the buffers at mapping->i_private_list, and waits upon
  507. * that I/O.
  508. *
  509. * Basically, this is a convenience function for fsync().
  510. * @mapping is a file or directory which needs those buffers to be written for
  511. * a successful fsync().
  512. */
  513. int sync_mapping_buffers(struct address_space *mapping)
  514. {
  515. struct address_space *buffer_mapping = mapping->i_private_data;
  516. if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
  517. return 0;
  518. return fsync_buffers_list(&buffer_mapping->i_private_lock,
  519. &mapping->i_private_list);
  520. }
  521. EXPORT_SYMBOL(sync_mapping_buffers);
  522. /**
  523. * generic_buffers_fsync_noflush - generic buffer fsync implementation
  524. * for simple filesystems with no inode lock
  525. *
  526. * @file: file to synchronize
  527. * @start: start offset in bytes
  528. * @end: end offset in bytes (inclusive)
  529. * @datasync: only synchronize essential metadata if true
  530. *
  531. * This is a generic implementation of the fsync method for simple
  532. * filesystems which track all non-inode metadata in the buffers list
  533. * hanging off the address_space structure.
  534. */
  535. int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
  536. bool datasync)
  537. {
  538. struct inode *inode = file->f_mapping->host;
  539. int err;
  540. int ret;
  541. err = file_write_and_wait_range(file, start, end);
  542. if (err)
  543. return err;
  544. ret = sync_mapping_buffers(inode->i_mapping);
  545. if (!(inode->i_state & I_DIRTY_ALL))
  546. goto out;
  547. if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
  548. goto out;
  549. err = sync_inode_metadata(inode, 1);
  550. if (ret == 0)
  551. ret = err;
  552. out:
  553. /* check and advance again to catch errors after syncing out buffers */
  554. err = file_check_and_advance_wb_err(file);
  555. if (ret == 0)
  556. ret = err;
  557. return ret;
  558. }
  559. EXPORT_SYMBOL(generic_buffers_fsync_noflush);
  560. /**
  561. * generic_buffers_fsync - generic buffer fsync implementation
  562. * for simple filesystems with no inode lock
  563. *
  564. * @file: file to synchronize
  565. * @start: start offset in bytes
  566. * @end: end offset in bytes (inclusive)
  567. * @datasync: only synchronize essential metadata if true
  568. *
  569. * This is a generic implementation of the fsync method for simple
  570. * filesystems which track all non-inode metadata in the buffers list
  571. * hanging off the address_space structure. This also makes sure that
  572. * a device cache flush operation is called at the end.
  573. */
  574. int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
  575. bool datasync)
  576. {
  577. struct inode *inode = file->f_mapping->host;
  578. int ret;
  579. ret = generic_buffers_fsync_noflush(file, start, end, datasync);
  580. if (!ret)
  581. ret = blkdev_issue_flush(inode->i_sb->s_bdev);
  582. return ret;
  583. }
  584. EXPORT_SYMBOL(generic_buffers_fsync);
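/*
 * Illustrative usage (a sketch, not taken from this file): a simple
 * buffer-backed filesystem can point its ->fsync straight at this helper.
 * "examplefs" is a hypothetical name.
 *
 *	const struct file_operations examplefs_file_operations = {
 *		.fsync		= generic_buffers_fsync,
 *		...
 *	};
 */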
  585. /*
  586. * Called when we've recently written block `bblock', and it is known that
  587. * `bblock' was for a buffer_boundary() buffer. This means that the block at
  588. * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
  589. * dirty, schedule it for IO. So that indirects merge nicely with their data.
  590. */
  591. void write_boundary_block(struct block_device *bdev,
  592. sector_t bblock, unsigned blocksize)
  593. {
  594. struct buffer_head *bh;
  595. bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
  596. if (bh) {
  597. if (buffer_dirty(bh))
  598. write_dirty_buffer(bh, 0);
  599. put_bh(bh);
  600. }
  601. }
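/*
 * Mark @bh dirty and, if it is not already on an i_private_list, queue it
 * on @inode's mapping's i_private_list so sync_mapping_buffers() will
 * write it out at fsync time. The first call also records the buffer's
 * backing address_space in mapping->i_private_data.
 */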
  602. void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
  603. {
  604. struct address_space *mapping = inode->i_mapping;
  605. struct address_space *buffer_mapping = bh->b_folio->mapping;
  606. mark_buffer_dirty(bh);
  607. if (!mapping->i_private_data) {
  608. mapping->i_private_data = buffer_mapping;
  609. } else {
  610. BUG_ON(mapping->i_private_data != buffer_mapping);
  611. }
  612. if (!bh->b_assoc_map) {
  613. spin_lock(&buffer_mapping->i_private_lock);
  614. list_move_tail(&bh->b_assoc_buffers,
  615. &mapping->i_private_list);
  616. bh->b_assoc_map = mapping;
  617. spin_unlock(&buffer_mapping->i_private_lock);
  618. }
  619. }
  620. EXPORT_SYMBOL(mark_buffer_dirty_inode);
  621. /**
  622. * block_dirty_folio - Mark a folio as dirty.
  623. * @mapping: The address space containing this folio.
  624. * @folio: The folio to mark dirty.
  625. *
  626. * Filesystems which use buffer_heads can use this function as their
  627. * ->dirty_folio implementation. Some filesystems need to do a little
  628. * work before calling this function. Filesystems which do not use
  629. * buffer_heads should call filemap_dirty_folio() instead.
  630. *
  631. * If the folio has buffers, the uptodate buffers are set dirty, to
  632. * preserve dirty-state coherency between the folio and the buffers.
  633. * Buffers added to a dirty folio are created dirty.
  634. *
  635. * The buffers are dirtied before the folio is dirtied. There's a small
  636. * race window in which writeback may see the folio cleanness but not the
  637. * buffer dirtiness. That's fine. If this code were to set the folio
  638. * dirty before the buffers, writeback could clear the folio dirty flag,
  639. * see a bunch of clean buffers and we'd end up with dirty buffers/clean
  640. * folio on the dirty folio list.
  641. *
  642. * We use i_private_lock to lock against try_to_free_buffers() while
  643. * using the folio's buffer list. This also prevents clean buffers
  644. * being added to the folio after it was set dirty.
  645. *
  646. * Context: May only be called from process context. Does not sleep.
  647. * Caller must ensure that @folio cannot be truncated during this call,
  648. * typically by holding the folio lock or having a page in the folio
  649. * mapped and holding the page table lock.
  650. *
  651. * Return: True if the folio was dirtied; false if it was already dirtied.
  652. */
  653. bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
  654. {
  655. struct buffer_head *head;
  656. bool newly_dirty;
  657. spin_lock(&mapping->i_private_lock);
  658. head = folio_buffers(folio);
  659. if (head) {
  660. struct buffer_head *bh = head;
  661. do {
  662. set_buffer_dirty(bh);
  663. bh = bh->b_this_page;
  664. } while (bh != head);
  665. }
  666. /*
  667. * Lock out page's memcg migration to keep PageDirty
  668. * synchronized with per-memcg dirty page counters.
  669. */
  670. folio_memcg_lock(folio);
  671. newly_dirty = !folio_test_set_dirty(folio);
  672. spin_unlock(&mapping->i_private_lock);
  673. if (newly_dirty)
  674. __folio_mark_dirty(folio, mapping, 1);
  675. folio_memcg_unlock(folio);
  676. if (newly_dirty)
  677. __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
  678. return newly_dirty;
  679. }
  680. EXPORT_SYMBOL(block_dirty_folio);
  681. /*
  682. * Write out and wait upon a list of buffers.
  683. *
  684. * We have conflicting pressures: we want to make sure that all
  685. * initially dirty buffers get waited on, but that any subsequently
  686. * dirtied buffers don't. After all, we don't want fsync to last
  687. * forever if somebody is actively writing to the file.
  688. *
  689. * Do this in two main stages: first we copy dirty buffers to a
  690. * temporary inode list, queueing the writes as we go. Then we clean
  691. * up, waiting for those writes to complete.
  692. *
  693. * During this second stage, any subsequent updates to the file may end
  694. * up refiling the buffer on the original inode's dirty list again, so
  695. * there is a chance we will end up with a buffer queued for write but
  696. * not yet completed on that list. So, as a final cleanup we go through
  697. * the osync code to catch these locked, dirty buffers without requeuing
  698. * any newly dirty buffers for write.
  699. */
  700. static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
  701. {
  702. struct buffer_head *bh;
  703. struct address_space *mapping;
  704. int err = 0, err2;
  705. struct blk_plug plug;
  706. LIST_HEAD(tmp);
  707. blk_start_plug(&plug);
  708. spin_lock(lock);
  709. while (!list_empty(list)) {
  710. bh = BH_ENTRY(list->next);
  711. mapping = bh->b_assoc_map;
  712. __remove_assoc_queue(bh);
  713. /* Avoid race with mark_buffer_dirty_inode() which does
  714. * a lockless check and we rely on seeing the dirty bit */
  715. smp_mb();
  716. if (buffer_dirty(bh) || buffer_locked(bh)) {
  717. list_add(&bh->b_assoc_buffers, &tmp);
  718. bh->b_assoc_map = mapping;
  719. if (buffer_dirty(bh)) {
  720. get_bh(bh);
  721. spin_unlock(lock);
  722. /*
  723. * Ensure any pending I/O completes so that
  724. * write_dirty_buffer() actually writes the
  725. * current contents - it is a noop if I/O is
  726. * still in flight on potentially older
  727. * contents.
  728. */
  729. write_dirty_buffer(bh, REQ_SYNC);
  730. /*
  731. * Kick off IO for the previous mapping. Note
  732. * that we will not run the very last mapping,
  733. * wait_on_buffer() will do that for us
  734. * through sync_buffer().
  735. */
  736. brelse(bh);
  737. spin_lock(lock);
  738. }
  739. }
  740. }
  741. spin_unlock(lock);
  742. blk_finish_plug(&plug);
  743. spin_lock(lock);
  744. while (!list_empty(&tmp)) {
  745. bh = BH_ENTRY(tmp.prev);
  746. get_bh(bh);
  747. mapping = bh->b_assoc_map;
  748. __remove_assoc_queue(bh);
  749. /* Avoid race with mark_buffer_dirty_inode() which does
  750. * a lockless check and we rely on seeing the dirty bit */
  751. smp_mb();
  752. if (buffer_dirty(bh)) {
  753. list_add(&bh->b_assoc_buffers,
  754. &mapping->i_private_list);
  755. bh->b_assoc_map = mapping;
  756. }
  757. spin_unlock(lock);
  758. wait_on_buffer(bh);
  759. if (!buffer_uptodate(bh))
  760. err = -EIO;
  761. brelse(bh);
  762. spin_lock(lock);
  763. }
  764. spin_unlock(lock);
  765. err2 = osync_buffers_list(lock, list);
  766. if (err)
  767. return err;
  768. else
  769. return err2;
  770. }
  771. /*
  772. * Invalidate any and all dirty buffers on a given inode. We are
  773. * probably unmounting the fs, but that doesn't mean we have already
  774. * done a sync(). Just drop the buffers from the inode list.
  775. *
  776. * NOTE: we take the inode's blockdev's mapping's i_private_lock. Which
  777. * assumes that all the buffers are against the blockdev. Not true
  778. * for reiserfs.
  779. */
  780. void invalidate_inode_buffers(struct inode *inode)
  781. {
  782. if (inode_has_buffers(inode)) {
  783. struct address_space *mapping = &inode->i_data;
  784. struct list_head *list = &mapping->i_private_list;
  785. struct address_space *buffer_mapping = mapping->i_private_data;
  786. spin_lock(&buffer_mapping->i_private_lock);
  787. while (!list_empty(list))
  788. __remove_assoc_queue(BH_ENTRY(list->next));
  789. spin_unlock(&buffer_mapping->i_private_lock);
  790. }
  791. }
  792. EXPORT_SYMBOL(invalidate_inode_buffers);
  793. /*
  794. * Remove any clean buffers from the inode's buffer list. This is called
  795. * when we're trying to free the inode itself. Those buffers can pin it.
  796. *
  797. * Returns true if all buffers were removed.
  798. */
  799. int remove_inode_buffers(struct inode *inode)
  800. {
  801. int ret = 1;
  802. if (inode_has_buffers(inode)) {
  803. struct address_space *mapping = &inode->i_data;
  804. struct list_head *list = &mapping->i_private_list;
  805. struct address_space *buffer_mapping = mapping->i_private_data;
  806. spin_lock(&buffer_mapping->i_private_lock);
  807. while (!list_empty(list)) {
  808. struct buffer_head *bh = BH_ENTRY(list->next);
  809. if (buffer_dirty(bh)) {
  810. ret = 0;
  811. break;
  812. }
  813. __remove_assoc_queue(bh);
  814. }
  815. spin_unlock(&buffer_mapping->i_private_lock);
  816. }
  817. return ret;
  818. }
  819. /*
  820. * Create the appropriate buffers when given a folio for the data area and
  821. * the size of each buffer. Use the bh->b_this_page linked list to
  822. * follow the buffers created. Return NULL if unable to create more
  823. * buffers.
  824. *
  825. * The retry flag is used to differentiate async IO (paging, swapping)
  826. * which may not fail from ordinary buffer allocations.
  827. */
  828. struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
  829. gfp_t gfp)
  830. {
  831. struct buffer_head *bh, *head;
  832. long offset;
  833. struct mem_cgroup *memcg, *old_memcg;
  834. /* The folio lock pins the memcg */
  835. memcg = folio_memcg(folio);
  836. old_memcg = set_active_memcg(memcg);
  837. head = NULL;
  838. offset = folio_size(folio);
  839. while ((offset -= size) >= 0) {
  840. bh = alloc_buffer_head(gfp);
  841. if (!bh)
  842. goto no_grow;
  843. bh->b_this_page = head;
  844. bh->b_blocknr = -1;
  845. head = bh;
  846. bh->b_size = size;
  847. /* Link the buffer to its folio */
  848. folio_set_bh(bh, folio, offset);
  849. }
  850. out:
  851. set_active_memcg(old_memcg);
  852. return head;
  853. /*
  854. * In case anything failed, we just free everything we got.
  855. */
  856. no_grow:
  857. if (head) {
  858. do {
  859. bh = head;
  860. head = head->b_this_page;
  861. free_buffer_head(bh);
  862. } while (head);
  863. }
  864. goto out;
  865. }
  866. EXPORT_SYMBOL_GPL(folio_alloc_buffers);
  867. struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
  868. {
  869. gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
  870. return folio_alloc_buffers(page_folio(page), size, gfp);
  871. }
  872. EXPORT_SYMBOL_GPL(alloc_page_buffers);
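/*
 * Close the singly-linked b_this_page chain into a ring and attach it to
 * @folio as its private data.
 */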
  873. static inline void link_dev_buffers(struct folio *folio,
  874. struct buffer_head *head)
  875. {
  876. struct buffer_head *bh, *tail;
  877. bh = head;
  878. do {
  879. tail = bh;
  880. bh = bh->b_this_page;
  881. } while (bh);
  882. tail->b_this_page = head;
  883. folio_attach_private(folio, head);
  884. }
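/*
 * Return the first block number beyond the end of @bdev for block size
 * @size (blocks at or beyond this value do not exist). Returns ~0 if the
 * device size is not known.
 */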
  885. static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
  886. {
  887. sector_t retval = ~((sector_t)0);
  888. loff_t sz = bdev_nr_bytes(bdev);
  889. if (sz) {
  890. unsigned int sizebits = blksize_bits(size);
  891. retval = (sz >> sizebits);
  892. }
  893. return retval;
  894. }
  895. /*
  896. * Initialise the state of a blockdev folio's buffers.
  897. */
  898. static sector_t folio_init_buffers(struct folio *folio,
  899. struct block_device *bdev, unsigned size)
  900. {
  901. struct buffer_head *head = folio_buffers(folio);
  902. struct buffer_head *bh = head;
  903. bool uptodate = folio_test_uptodate(folio);
  904. sector_t block = div_u64(folio_pos(folio), size);
  905. sector_t end_block = blkdev_max_block(bdev, size);
  906. do {
  907. if (!buffer_mapped(bh)) {
  908. bh->b_end_io = NULL;
  909. bh->b_private = NULL;
  910. bh->b_bdev = bdev;
  911. bh->b_blocknr = block;
  912. if (uptodate)
  913. set_buffer_uptodate(bh);
  914. if (block < end_block)
  915. set_buffer_mapped(bh);
  916. }
  917. block++;
  918. bh = bh->b_this_page;
  919. } while (bh != head);
  920. /*
  921. * Caller needs to validate requested block against end of device.
  922. */
  923. return end_block;
  924. }
  925. /*
  926. * Create the page-cache folio that contains the requested block.
  927. *
  928. * This is used purely for blockdev mappings.
  929. *
  930. * Returns false if we have a failure which cannot be cured by retrying
  931. * without sleeping. Returns true if we succeeded, or the caller should retry.
  932. */
  933. static bool grow_dev_folio(struct block_device *bdev, sector_t block,
  934. pgoff_t index, unsigned size, gfp_t gfp)
  935. {
  936. struct address_space *mapping = bdev->bd_mapping;
  937. struct folio *folio;
  938. struct buffer_head *bh;
  939. sector_t end_block = 0;
  940. folio = __filemap_get_folio(mapping, index,
  941. FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
  942. if (IS_ERR(folio))
  943. return false;
  944. bh = folio_buffers(folio);
  945. if (bh) {
  946. if (bh->b_size == size) {
  947. end_block = folio_init_buffers(folio, bdev, size);
  948. goto unlock;
  949. }
  950. /*
  951. * Retrying may succeed; for example the folio may finish
  952. * writeback, or buffers may be cleaned. This should not
  953. * happen very often; maybe we have old buffers attached to
  954. * this blockdev's page cache and we're trying to change
  955. * the block size?
  956. */
  957. if (!try_to_free_buffers(folio)) {
  958. end_block = ~0ULL;
  959. goto unlock;
  960. }
  961. }
  962. bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
  963. if (!bh)
  964. goto unlock;
  965. /*
  966. * Link the folio to the buffers and initialise them. Take the
  967. * lock to be atomic wrt __find_get_block(), which does not
  968. * run under the folio lock.
  969. */
  970. spin_lock(&mapping->i_private_lock);
  971. link_dev_buffers(folio, bh);
  972. end_block = folio_init_buffers(folio, bdev, size);
  973. spin_unlock(&mapping->i_private_lock);
  974. unlock:
  975. folio_unlock(folio);
  976. folio_put(folio);
  977. return block < end_block;
  978. }
  979. /*
  980. * Create buffers for the specified block device block's folio. If
  981. * that folio was dirty, the buffers are set dirty also. Returns false
  982. * if we've hit a permanent error.
  983. */
  984. static bool grow_buffers(struct block_device *bdev, sector_t block,
  985. unsigned size, gfp_t gfp)
  986. {
  987. loff_t pos;
  988. /*
  989. * Check for a block which lies outside our maximum possible
  990. * pagecache index.
  991. */
  992. if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
  993. printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
  994. __func__, (unsigned long long)block,
  995. bdev);
  996. return false;
  997. }
  998. /* Create a folio with the proper size buffers */
  999. return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
  1000. }
  1001. static struct buffer_head *
  1002. __getblk_slow(struct block_device *bdev, sector_t block,
  1003. unsigned size, gfp_t gfp)
  1004. {
  1005. /* Size must be multiple of hard sectorsize */
  1006. if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
  1007. (size < 512 || size > PAGE_SIZE))) {
  1008. printk(KERN_ERR "getblk(): invalid block size %d requested\n",
  1009. size);
  1010. printk(KERN_ERR "logical block size: %d\n",
  1011. bdev_logical_block_size(bdev));
  1012. dump_stack();
  1013. return NULL;
  1014. }
  1015. for (;;) {
  1016. struct buffer_head *bh;
  1017. bh = __find_get_block(bdev, block, size);
  1018. if (bh)
  1019. return bh;
  1020. if (!grow_buffers(bdev, block, size, gfp))
  1021. return NULL;
  1022. }
  1023. }
  1024. /*
  1025. * The relationship between dirty buffers and dirty pages:
  1026. *
  1027. * Whenever a page has any dirty buffers, the page's dirty bit is set, and
  1028. * the page is tagged dirty in the page cache.
  1029. *
  1030. * At all times, the dirtiness of the buffers represents the dirtiness of
  1031. * subsections of the page. If the page has buffers, the page dirty bit is
  1032. * merely a hint about the true dirty state.
  1033. *
  1034. * When a page is set dirty in its entirety, all its buffers are marked dirty
  1035. * (if the page has buffers).
  1036. *
  1037. * When a buffer is marked dirty, its page is dirtied, but the page's other
  1038. * buffers are not.
  1039. *
  1040. * Also. When blockdev buffers are explicitly read with bread(), they
  1041. * individually become uptodate. But their backing page remains not
  1042. * uptodate - even if all of its buffers are uptodate. A subsequent
  1043. * block_read_full_folio() against that folio will discover all the uptodate
  1044. * buffers, will set the folio uptodate and will perform no I/O.
  1045. */
  1046. /**
  1047. * mark_buffer_dirty - mark a buffer_head as needing writeout
  1048. * @bh: the buffer_head to mark dirty
  1049. *
  1050. * mark_buffer_dirty() will set the dirty bit against the buffer, then set
  1051. * its backing page dirty, then tag the page as dirty in the page cache
  1052. * and then attach the address_space's inode to its superblock's dirty
  1053. * inode list.
  1054. *
  1055. * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->i_private_lock,
  1056. * i_pages lock and mapping->host->i_lock.
  1057. */
  1058. void mark_buffer_dirty(struct buffer_head *bh)
  1059. {
  1060. WARN_ON_ONCE(!buffer_uptodate(bh));
  1061. trace_block_dirty_buffer(bh);
  1062. /*
  1063. * Very *carefully* optimize the it-is-already-dirty case.
  1064. *
  1065. * Don't let the final "is it dirty" escape to before we
  1066. * perhaps modified the buffer.
  1067. */
  1068. if (buffer_dirty(bh)) {
  1069. smp_mb();
  1070. if (buffer_dirty(bh))
  1071. return;
  1072. }
  1073. if (!test_set_buffer_dirty(bh)) {
  1074. struct folio *folio = bh->b_folio;
  1075. struct address_space *mapping = NULL;
  1076. folio_memcg_lock(folio);
  1077. if (!folio_test_set_dirty(folio)) {
  1078. mapping = folio->mapping;
  1079. if (mapping)
  1080. __folio_mark_dirty(folio, mapping, 0);
  1081. }
  1082. folio_memcg_unlock(folio);
  1083. if (mapping)
  1084. __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
  1085. }
  1086. }
  1087. EXPORT_SYMBOL(mark_buffer_dirty);
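/*
 * Record a failed write on @bh: set its write_io_error flag and propagate
 * -EIO to the folio's mapping and to any associated fsync mapping (and its
 * superblock's wb_err), so a later fsync/syncfs sees the error.
 */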
  1088. void mark_buffer_write_io_error(struct buffer_head *bh)
  1089. {
  1090. set_buffer_write_io_error(bh);
  1091. /* FIXME: do we need to set this in both places? */
  1092. if (bh->b_folio && bh->b_folio->mapping)
  1093. mapping_set_error(bh->b_folio->mapping, -EIO);
  1094. if (bh->b_assoc_map) {
  1095. mapping_set_error(bh->b_assoc_map, -EIO);
  1096. errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
  1097. }
  1098. }
  1099. EXPORT_SYMBOL(mark_buffer_write_io_error);
  1100. /**
  1101. * __brelse - Release a buffer.
  1102. * @bh: The buffer to release.
  1103. *
  1104. * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
  1105. */
  1106. void __brelse(struct buffer_head *bh)
  1107. {
  1108. if (atomic_read(&bh->b_count)) {
  1109. put_bh(bh);
  1110. return;
  1111. }
  1112. WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
  1113. }
  1114. EXPORT_SYMBOL(__brelse);
  1115. /**
  1116. * __bforget - Discard any dirty data in a buffer.
  1117. * @bh: The buffer to forget.
  1118. *
  1119. * This variant of bforget() can be called if @bh is guaranteed to not
  1120. * be NULL.
  1121. */
  1122. void __bforget(struct buffer_head *bh)
  1123. {
  1124. clear_buffer_dirty(bh);
  1125. if (bh->b_assoc_map) {
  1126. struct address_space *buffer_mapping = bh->b_folio->mapping;
  1127. spin_lock(&buffer_mapping->i_private_lock);
  1128. list_del_init(&bh->b_assoc_buffers);
  1129. bh->b_assoc_map = NULL;
  1130. spin_unlock(&buffer_mapping->i_private_lock);
  1131. }
  1132. __brelse(bh);
  1133. }
  1134. EXPORT_SYMBOL(__bforget);
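/*
 * Slow path for __bread_gfp(): if the buffer is not already uptodate,
 * submit a read and wait for it. Returns the buffer on success, or NULL
 * (with the reference dropped) if the read failed.
 */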
  1135. static struct buffer_head *__bread_slow(struct buffer_head *bh)
  1136. {
  1137. lock_buffer(bh);
  1138. if (buffer_uptodate(bh)) {
  1139. unlock_buffer(bh);
  1140. return bh;
  1141. } else {
  1142. get_bh(bh);
  1143. bh->b_end_io = end_buffer_read_sync;
  1144. submit_bh(REQ_OP_READ, bh);
  1145. wait_on_buffer(bh);
  1146. if (buffer_uptodate(bh))
  1147. return bh;
  1148. }
  1149. brelse(bh);
  1150. return NULL;
  1151. }
  1152. /*
  1153. * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
  1154. * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
  1155. * refcount elevated by one when they're in an LRU. A buffer can only appear
  1156. * once in a particular CPU's LRU. A single buffer can be present in multiple
  1157. * CPU's LRUs at the same time.
  1158. *
  1159. * This is a transparent caching front-end to sb_bread(), sb_getblk() and
  1160. * sb_find_get_block().
  1161. *
  1162. * The LRUs themselves only need locking against invalidate_bh_lrus. We use
  1163. * a local interrupt disable for that.
  1164. */
  1165. #define BH_LRU_SIZE 16
  1166. struct bh_lru {
  1167. struct buffer_head *bhs[BH_LRU_SIZE];
  1168. };
  1169. static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
  1170. #ifdef CONFIG_SMP
  1171. #define bh_lru_lock() local_irq_disable()
  1172. #define bh_lru_unlock() local_irq_enable()
  1173. #else
  1174. #define bh_lru_lock() preempt_disable()
  1175. #define bh_lru_unlock() preempt_enable()
  1176. #endif
  1177. static inline void check_irqs_on(void)
  1178. {
  1179. #ifdef irqs_disabled
  1180. BUG_ON(irqs_disabled());
  1181. #endif
  1182. }
  1183. /*
  1184. * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is
  1185. * inserted at the front, and the buffer_head at the back if any is evicted.
  1186. * Or, if already in the LRU it is moved to the front.
  1187. */
  1188. static void bh_lru_install(struct buffer_head *bh)
  1189. {
  1190. struct buffer_head *evictee = bh;
  1191. struct bh_lru *b;
  1192. int i;
  1193. check_irqs_on();
  1194. bh_lru_lock();
  1195. /*
  1196. * The refcount of a buffer_head in bh_lru prevents dropping the
  1197. * attached page (i.e. try_to_free_buffers() fails), which could cause
  1198. * page migration to fail.
  1199. * Skip putting the upcoming bh into bh_lru until migration is done.
  1200. */
  1201. if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
  1202. bh_lru_unlock();
  1203. return;
  1204. }
  1205. b = this_cpu_ptr(&bh_lrus);
  1206. for (i = 0; i < BH_LRU_SIZE; i++) {
  1207. swap(evictee, b->bhs[i]);
  1208. if (evictee == bh) {
  1209. bh_lru_unlock();
  1210. return;
  1211. }
  1212. }
  1213. get_bh(bh);
  1214. bh_lru_unlock();
  1215. brelse(evictee);
  1216. }
  1217. /*
  1218. * Look up the bh in this cpu's LRU. If it's there, move it to the head.
  1219. */
  1220. static struct buffer_head *
  1221. lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
  1222. {
  1223. struct buffer_head *ret = NULL;
  1224. unsigned int i;
  1225. check_irqs_on();
  1226. bh_lru_lock();
  1227. if (cpu_is_isolated(smp_processor_id())) {
  1228. bh_lru_unlock();
  1229. return NULL;
  1230. }
  1231. for (i = 0; i < BH_LRU_SIZE; i++) {
  1232. struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
  1233. if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
  1234. bh->b_size == size) {
  1235. if (i) {
  1236. while (i) {
  1237. __this_cpu_write(bh_lrus.bhs[i],
  1238. __this_cpu_read(bh_lrus.bhs[i - 1]));
  1239. i--;
  1240. }
  1241. __this_cpu_write(bh_lrus.bhs[0], bh);
  1242. }
  1243. get_bh(bh);
  1244. ret = bh;
  1245. break;
  1246. }
  1247. }
  1248. bh_lru_unlock();
  1249. return ret;
  1250. }
  1251. /*
  1252. * Perform a pagecache lookup for the matching buffer. If it's there, refresh
  1253. * it in the LRU and mark it as accessed. If it is not present then return
1254. * NULL.
  1255. */
  1256. static struct buffer_head *
  1257. find_get_block_common(struct block_device *bdev, sector_t block,
  1258. unsigned size, bool atomic)
  1259. {
  1260. struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
  1261. if (bh == NULL) {
  1262. /* __find_get_block_slow will mark the page accessed */
  1263. bh = __find_get_block_slow(bdev, block, atomic);
  1264. if (bh)
  1265. bh_lru_install(bh);
  1266. } else
  1267. touch_buffer(bh);
  1268. return bh;
  1269. }
  1270. struct buffer_head *
  1271. __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
  1272. {
  1273. return find_get_block_common(bdev, block, size, true);
  1274. }
  1275. EXPORT_SYMBOL(__find_get_block);
  1276. /* same as __find_get_block() but allows sleeping contexts */
  1277. struct buffer_head *
  1278. __find_get_block_nonatomic(struct block_device *bdev, sector_t block,
  1279. unsigned size)
  1280. {
  1281. return find_get_block_common(bdev, block, size, false);
  1282. }
  1283. EXPORT_SYMBOL(__find_get_block_nonatomic);
  1284. /**
  1285. * bdev_getblk - Get a buffer_head in a block device's buffer cache.
  1286. * @bdev: The block device.
  1287. * @block: The block number.
  1288. * @size: The size of buffer_heads for this @bdev.
  1289. * @gfp: The memory allocation flags to use.
  1290. *
  1291. * The returned buffer head has its reference count incremented, but is
  1292. * not locked. The caller should call brelse() when it has finished
  1293. * with the buffer. The buffer may not be uptodate. If needed, the
  1294. * caller can bring it uptodate either by reading it or overwriting it.
  1295. *
  1296. * Return: The buffer head, or NULL if memory could not be allocated.
  1297. */
  1298. struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
  1299. unsigned size, gfp_t gfp)
  1300. {
  1301. struct buffer_head *bh;
  1302. if (gfpflags_allow_blocking(gfp))
  1303. bh = __find_get_block_nonatomic(bdev, block, size);
  1304. else
  1305. bh = __find_get_block(bdev, block, size);
  1306. might_alloc(gfp);
  1307. if (bh)
  1308. return bh;
  1309. return __getblk_slow(bdev, block, size, gfp);
  1310. }
  1311. EXPORT_SYMBOL(bdev_getblk);
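/*
 * Editor's note: a minimal usage sketch (not part of the original file). It
 * assumes a hypothetical caller that wants block "blocknr" of "bdev", with
 * buffer size "blocksize", brought uptodate by reading before use:
 *
 *	struct buffer_head *bh;
 *
 *	bh = bdev_getblk(bdev, blocknr, blocksize, GFP_NOFS | __GFP_MOVABLE);
 *	if (!bh)
 *		return -ENOMEM;
 *	if (!buffer_uptodate(bh) && bh_read(bh, 0) < 0) {
 *		brelse(bh);
 *		return -EIO;
 *	}
 *	... use bh->b_data ...
 *	brelse(bh);
 */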
  1312. /*
1313. * Do async read-ahead on a buffer.
  1314. */
  1315. void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
  1316. {
  1317. struct buffer_head *bh = bdev_getblk(bdev, block, size,
  1318. GFP_NOWAIT | __GFP_MOVABLE);
  1319. if (likely(bh)) {
  1320. bh_readahead(bh, REQ_RAHEAD);
  1321. brelse(bh);
  1322. }
  1323. }
  1324. EXPORT_SYMBOL(__breadahead);
  1325. /**
  1326. * __bread_gfp() - Read a block.
  1327. * @bdev: The block device to read from.
  1328. * @block: Block number in units of block size.
  1329. * @size: The block size of this device in bytes.
  1330. * @gfp: Not page allocation flags; see below.
  1331. *
  1332. * You are not expected to call this function. You should use one of
  1333. * sb_bread(), sb_bread_unmovable() or __bread().
  1334. *
  1335. * Read a specified block, and return the buffer head that refers to it.
  1336. * If @gfp is 0, the memory will be allocated using the block device's
  1337. * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be
  1338. * allocated from a movable area. Do not pass in a complete set of
  1339. * GFP flags.
  1340. *
  1341. * The returned buffer head has its refcount increased. The caller should
  1342. * call brelse() when it has finished with the buffer.
  1343. *
  1344. * Context: May sleep waiting for I/O.
  1345. * Return: NULL if the block was unreadable.
  1346. */
  1347. struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
  1348. unsigned size, gfp_t gfp)
  1349. {
  1350. struct buffer_head *bh;
  1351. gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
  1352. /*
  1353. * Prefer looping in the allocator rather than here, at least that
  1354. * code knows what it's doing.
  1355. */
  1356. gfp |= __GFP_NOFAIL;
  1357. bh = bdev_getblk(bdev, block, size, gfp);
  1358. if (likely(bh) && !buffer_uptodate(bh))
  1359. bh = __bread_slow(bh);
  1360. return bh;
  1361. }
  1362. EXPORT_SYMBOL(__bread_gfp);
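/*
 * Editor's note: a minimal sketch of the intended calling convention (not
 * part of the original file), assuming a hypothetical filesystem that reads
 * one metadata block through the sb_bread() wrapper mentioned above:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;		(the block was unreadable)
 *	... parse bh->b_data ...
 *	brelse(bh);
 */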
  1363. static void __invalidate_bh_lrus(struct bh_lru *b)
  1364. {
  1365. int i;
  1366. for (i = 0; i < BH_LRU_SIZE; i++) {
  1367. brelse(b->bhs[i]);
  1368. b->bhs[i] = NULL;
  1369. }
  1370. }
  1371. /*
  1372. * invalidate_bh_lrus() is called rarely - but not only at unmount.
1373. * This doesn't race because it runs on each CPU either in IRQ context
1374. * or with preemption disabled.
  1375. */
  1376. static void invalidate_bh_lru(void *arg)
  1377. {
  1378. struct bh_lru *b = &get_cpu_var(bh_lrus);
  1379. __invalidate_bh_lrus(b);
  1380. put_cpu_var(bh_lrus);
  1381. }
  1382. bool has_bh_in_lru(int cpu, void *dummy)
  1383. {
  1384. struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
  1385. int i;
  1386. for (i = 0; i < BH_LRU_SIZE; i++) {
  1387. if (b->bhs[i])
  1388. return true;
  1389. }
  1390. return false;
  1391. }
  1392. void invalidate_bh_lrus(void)
  1393. {
  1394. on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
  1395. }
  1396. EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
  1397. /*
  1398. * It's called from workqueue context so we need a bh_lru_lock to close
  1399. * the race with preemption/irq.
  1400. */
  1401. void invalidate_bh_lrus_cpu(void)
  1402. {
  1403. struct bh_lru *b;
  1404. bh_lru_lock();
  1405. b = this_cpu_ptr(&bh_lrus);
  1406. __invalidate_bh_lrus(b);
  1407. bh_lru_unlock();
  1408. }
  1409. void folio_set_bh(struct buffer_head *bh, struct folio *folio,
  1410. unsigned long offset)
  1411. {
  1412. bh->b_folio = folio;
  1413. BUG_ON(offset >= folio_size(folio));
  1414. if (folio_test_highmem(folio))
  1415. /*
  1416. * This catches illegal uses and preserves the offset:
  1417. */
  1418. bh->b_data = (char *)(0 + offset);
  1419. else
  1420. bh->b_data = folio_address(folio) + offset;
  1421. }
  1422. EXPORT_SYMBOL(folio_set_bh);
  1423. /*
  1424. * Called when truncating a buffer on a page completely.
  1425. */
  1426. /* Bits that are cleared during an invalidate */
  1427. #define BUFFER_FLAGS_DISCARD \
  1428. (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
  1429. 1 << BH_Delay | 1 << BH_Unwritten)
  1430. static void discard_buffer(struct buffer_head * bh)
  1431. {
  1432. unsigned long b_state;
  1433. lock_buffer(bh);
  1434. clear_buffer_dirty(bh);
  1435. bh->b_bdev = NULL;
  1436. b_state = READ_ONCE(bh->b_state);
  1437. do {
  1438. } while (!try_cmpxchg(&bh->b_state, &b_state,
  1439. b_state & ~BUFFER_FLAGS_DISCARD));
  1440. unlock_buffer(bh);
  1441. }
  1442. /**
  1443. * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
  1444. * @folio: The folio which is affected.
  1445. * @offset: start of the range to invalidate
  1446. * @length: length of the range to invalidate
  1447. *
  1448. * block_invalidate_folio() is called when all or part of the folio has been
  1449. * invalidated by a truncate operation.
  1450. *
  1451. * block_invalidate_folio() does not have to release all buffers, but it must
  1452. * ensure that no dirty buffer is left outside @offset and that no I/O
  1453. * is underway against any of the blocks which are outside the truncation
1454. * point, because the caller is about to free (and possibly reuse) those
  1455. * blocks on-disk.
  1456. */
  1457. void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
  1458. {
  1459. struct buffer_head *head, *bh, *next;
  1460. size_t curr_off = 0;
  1461. size_t stop = length + offset;
  1462. BUG_ON(!folio_test_locked(folio));
  1463. /*
  1464. * Check for overflow
  1465. */
  1466. BUG_ON(stop > folio_size(folio) || stop < length);
  1467. head = folio_buffers(folio);
  1468. if (!head)
  1469. return;
  1470. bh = head;
  1471. do {
  1472. size_t next_off = curr_off + bh->b_size;
  1473. next = bh->b_this_page;
  1474. /*
  1475. * Are we still fully in range ?
  1476. */
  1477. if (next_off > stop)
  1478. goto out;
  1479. /*
  1480. * is this block fully invalidated?
  1481. */
  1482. if (offset <= curr_off)
  1483. discard_buffer(bh);
  1484. curr_off = next_off;
  1485. bh = next;
  1486. } while (bh != head);
  1487. /*
  1488. * We release buffers only if the entire folio is being invalidated.
  1489. * The get_block cached value has been unconditionally invalidated,
  1490. * so real IO is not possible anymore.
  1491. */
  1492. if (length == folio_size(folio))
  1493. filemap_release_folio(folio, 0);
  1494. out:
  1495. return;
  1496. }
  1497. EXPORT_SYMBOL(block_invalidate_folio);
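/*
 * Editor's note: typical wiring, shown as a sketch (not part of the original
 * file). A buffer-backed filesystem can point its address_space_operations
 * at this helper directly; "example_aops" is a hypothetical name:
 *
 *	const struct address_space_operations example_aops = {
 *		.invalidate_folio	= block_invalidate_folio,
 *		...
 *	};
 */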
  1498. /*
  1499. * We attach and possibly dirty the buffers atomically wrt
  1500. * block_dirty_folio() via i_private_lock. try_to_free_buffers
  1501. * is already excluded via the folio lock.
  1502. */
  1503. struct buffer_head *create_empty_buffers(struct folio *folio,
  1504. unsigned long blocksize, unsigned long b_state)
  1505. {
  1506. struct buffer_head *bh, *head, *tail;
  1507. gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
  1508. head = folio_alloc_buffers(folio, blocksize, gfp);
  1509. bh = head;
  1510. do {
  1511. bh->b_state |= b_state;
  1512. tail = bh;
  1513. bh = bh->b_this_page;
  1514. } while (bh);
  1515. tail->b_this_page = head;
  1516. spin_lock(&folio->mapping->i_private_lock);
  1517. if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
  1518. bh = head;
  1519. do {
  1520. if (folio_test_dirty(folio))
  1521. set_buffer_dirty(bh);
  1522. if (folio_test_uptodate(folio))
  1523. set_buffer_uptodate(bh);
  1524. bh = bh->b_this_page;
  1525. } while (bh != head);
  1526. }
  1527. folio_attach_private(folio, head);
  1528. spin_unlock(&folio->mapping->i_private_lock);
  1529. return head;
  1530. }
  1531. EXPORT_SYMBOL(create_empty_buffers);
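/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * usual "attach buffers if the folio has none yet" pattern, with the block
 * size taken from a hypothetical inode:
 *
 *	struct buffer_head *head = folio_buffers(folio);
 *
 *	if (!head)
 *		head = create_empty_buffers(folio, i_blocksize(inode), 0);
 */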
  1532. /**
  1533. * clean_bdev_aliases: clean a range of buffers in block device
  1534. * @bdev: Block device to clean buffers in
  1535. * @block: Start of a range of blocks to clean
  1536. * @len: Number of blocks to clean
  1537. *
1538. * We are taking a range of blocks for data and we don't want writeback of any
1539. * buffer-cache aliases starting from the return of this function until the
1540. * moment when something explicitly marks the buffer dirty (hopefully that
1541. * will not happen until we free that block ;-) We don't even need to mark
1542. * it not-uptodate - nobody can expect anything from a newly allocated buffer
1543. * anyway. We used to use unmap_buffer() for such invalidation, but that was
1544. * wrong. We definitely don't want to mark the alias unmapped, for example - it
1545. * would confuse anyone who might pick it up with bread() afterwards...
1546. *
1547. * Also note that bforget() doesn't lock the buffer. So there can be
1548. * writeout I/O going on against recently-freed buffers. We don't wait on that
1549. * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1550. * need to. That happens here.
  1551. */
  1552. void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
  1553. {
  1554. struct address_space *bd_mapping = bdev->bd_mapping;
  1555. const int blkbits = bd_mapping->host->i_blkbits;
  1556. struct folio_batch fbatch;
  1557. pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
  1558. pgoff_t end;
  1559. int i, count;
  1560. struct buffer_head *bh;
  1561. struct buffer_head *head;
  1562. end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
  1563. folio_batch_init(&fbatch);
  1564. while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
  1565. count = folio_batch_count(&fbatch);
  1566. for (i = 0; i < count; i++) {
  1567. struct folio *folio = fbatch.folios[i];
  1568. if (!folio_buffers(folio))
  1569. continue;
  1570. /*
  1571. * We use folio lock instead of bd_mapping->i_private_lock
  1572. * to pin buffers here since we can afford to sleep and
1573. * it scales better than a global spinlock.
  1574. */
  1575. folio_lock(folio);
  1576. /* Recheck when the folio is locked which pins bhs */
  1577. head = folio_buffers(folio);
  1578. if (!head)
  1579. goto unlock_page;
  1580. bh = head;
  1581. do {
  1582. if (!buffer_mapped(bh) || (bh->b_blocknr < block))
  1583. goto next;
  1584. if (bh->b_blocknr >= block + len)
  1585. break;
  1586. clear_buffer_dirty(bh);
  1587. wait_on_buffer(bh);
  1588. clear_buffer_req(bh);
  1589. next:
  1590. bh = bh->b_this_page;
  1591. } while (bh != head);
  1592. unlock_page:
  1593. folio_unlock(folio);
  1594. }
  1595. folio_batch_release(&fbatch);
  1596. cond_resched();
  1597. /* End of range already reached? */
  1598. if (index > end || !index)
  1599. break;
  1600. }
  1601. }
  1602. EXPORT_SYMBOL(clean_bdev_aliases);
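/*
 * Editor's note: a usage sketch (not part of the original file). A
 * filesystem that has just allocated a contiguous range of "len" blocks
 * starting at "pblk" for file data might call (names are hypothetical):
 *
 *	clean_bdev_aliases(sb->s_bdev, pblk, len);
 *
 * so that stale buffer-cache aliases of those blocks cannot be written back
 * over the new data. For a single buffer_head, the clean_bdev_bh_alias()
 * wrapper used elsewhere in this file does the same thing.
 */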
  1603. static struct buffer_head *folio_create_buffers(struct folio *folio,
  1604. struct inode *inode,
  1605. unsigned int b_state)
  1606. {
  1607. struct buffer_head *bh;
  1608. BUG_ON(!folio_test_locked(folio));
  1609. bh = folio_buffers(folio);
  1610. if (!bh)
  1611. bh = create_empty_buffers(folio,
  1612. 1 << READ_ONCE(inode->i_blkbits), b_state);
  1613. return bh;
  1614. }
  1615. /*
  1616. * NOTE! All mapped/uptodate combinations are valid:
  1617. *
  1618. * Mapped Uptodate Meaning
  1619. *
  1620. * No No "unknown" - must do get_block()
  1621. * No Yes "hole" - zero-filled
  1622. * Yes No "allocated" - allocated on disk, not read in
  1623. * Yes Yes "valid" - allocated and up-to-date in memory.
  1624. *
  1625. * "Dirty" is valid only with the last case (mapped+uptodate).
  1626. */
  1627. /*
  1628. * While block_write_full_folio is writing back the dirty buffers under
  1629. * the page lock, whoever dirtied the buffers may decide to clean them
  1630. * again at any time. We handle that by only looking at the buffer
  1631. * state inside lock_buffer().
  1632. *
  1633. * If block_write_full_folio() is called for regular writeback
  1634. * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
  1635. * locked buffer. This only can happen if someone has written the buffer
  1636. * directly, with submit_bh(). At the address_space level PageWriteback
  1637. * prevents this contention from occurring.
  1638. *
  1639. * If block_write_full_folio() is called with wbc->sync_mode ==
  1640. * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
  1641. * causes the writes to be flagged as synchronous writes.
  1642. */
  1643. int __block_write_full_folio(struct inode *inode, struct folio *folio,
  1644. get_block_t *get_block, struct writeback_control *wbc)
  1645. {
  1646. int err;
  1647. sector_t block;
  1648. sector_t last_block;
  1649. struct buffer_head *bh, *head;
  1650. size_t blocksize;
  1651. int nr_underway = 0;
  1652. blk_opf_t write_flags = wbc_to_write_flags(wbc);
  1653. head = folio_create_buffers(folio, inode,
  1654. (1 << BH_Dirty) | (1 << BH_Uptodate));
  1655. /*
  1656. * Be very careful. We have no exclusion from block_dirty_folio
  1657. * here, and the (potentially unmapped) buffers may become dirty at
  1658. * any time. If a buffer becomes dirty here after we've inspected it
  1659. * then we just miss that fact, and the folio stays dirty.
  1660. *
  1661. * Buffers outside i_size may be dirtied by block_dirty_folio;
  1662. * handle that here by just cleaning them.
  1663. */
  1664. bh = head;
  1665. blocksize = bh->b_size;
  1666. block = div_u64(folio_pos(folio), blocksize);
  1667. last_block = div_u64(i_size_read(inode) - 1, blocksize);
  1668. /*
  1669. * Get all the dirty buffers mapped to disk addresses and
  1670. * handle any aliases from the underlying blockdev's mapping.
  1671. */
  1672. do {
  1673. if (block > last_block) {
  1674. /*
  1675. * mapped buffers outside i_size will occur, because
  1676. * this folio can be outside i_size when there is a
  1677. * truncate in progress.
  1678. */
  1679. /*
  1680. * The buffer was zeroed by block_write_full_folio()
  1681. */
  1682. clear_buffer_dirty(bh);
  1683. set_buffer_uptodate(bh);
  1684. } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
  1685. buffer_dirty(bh)) {
  1686. WARN_ON(bh->b_size != blocksize);
  1687. err = get_block(inode, block, bh, 1);
  1688. if (err)
  1689. goto recover;
  1690. clear_buffer_delay(bh);
  1691. if (buffer_new(bh)) {
  1692. /* blockdev mappings never come here */
  1693. clear_buffer_new(bh);
  1694. clean_bdev_bh_alias(bh);
  1695. }
  1696. }
  1697. bh = bh->b_this_page;
  1698. block++;
  1699. } while (bh != head);
  1700. do {
  1701. if (!buffer_mapped(bh))
  1702. continue;
  1703. /*
  1704. * If it's a fully non-blocking write attempt and we cannot
  1705. * lock the buffer then redirty the folio. Note that this can
  1706. * potentially cause a busy-wait loop from writeback threads
  1707. * and kswapd activity, but those code paths have their own
  1708. * higher-level throttling.
  1709. */
  1710. if (wbc->sync_mode != WB_SYNC_NONE) {
  1711. lock_buffer(bh);
  1712. } else if (!trylock_buffer(bh)) {
  1713. folio_redirty_for_writepage(wbc, folio);
  1714. continue;
  1715. }
  1716. if (test_clear_buffer_dirty(bh)) {
  1717. mark_buffer_async_write_endio(bh,
  1718. end_buffer_async_write);
  1719. } else {
  1720. unlock_buffer(bh);
  1721. }
  1722. } while ((bh = bh->b_this_page) != head);
  1723. /*
  1724. * The folio and its buffers are protected by the writeback flag,
  1725. * so we can drop the bh refcounts early.
  1726. */
  1727. BUG_ON(folio_test_writeback(folio));
  1728. folio_start_writeback(folio);
  1729. do {
  1730. struct buffer_head *next = bh->b_this_page;
  1731. if (buffer_async_write(bh)) {
  1732. submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
  1733. inode->i_write_hint, wbc);
  1734. nr_underway++;
  1735. }
  1736. bh = next;
  1737. } while (bh != head);
  1738. folio_unlock(folio);
  1739. err = 0;
  1740. done:
  1741. if (nr_underway == 0) {
  1742. /*
  1743. * The folio was marked dirty, but the buffers were
  1744. * clean. Someone wrote them back by hand with
  1745. * write_dirty_buffer/submit_bh. A rare case.
  1746. */
  1747. folio_end_writeback(folio);
  1748. /*
  1749. * The folio and buffer_heads can be released at any time from
  1750. * here on.
  1751. */
  1752. }
  1753. return err;
  1754. recover:
  1755. /*
  1756. * ENOSPC, or some other error. We may already have added some
  1757. * blocks to the file, so we need to write these out to avoid
  1758. * exposing stale data.
  1759. * The folio is currently locked and not marked for writeback
  1760. */
  1761. bh = head;
  1762. /* Recovery: lock and submit the mapped buffers */
  1763. do {
  1764. if (buffer_mapped(bh) && buffer_dirty(bh) &&
  1765. !buffer_delay(bh)) {
  1766. lock_buffer(bh);
  1767. mark_buffer_async_write_endio(bh,
  1768. end_buffer_async_write);
  1769. } else {
  1770. /*
  1771. * The buffer may have been set dirty during
  1772. * attachment to a dirty folio.
  1773. */
  1774. clear_buffer_dirty(bh);
  1775. }
  1776. } while ((bh = bh->b_this_page) != head);
  1777. BUG_ON(folio_test_writeback(folio));
  1778. mapping_set_error(folio->mapping, err);
  1779. folio_start_writeback(folio);
  1780. do {
  1781. struct buffer_head *next = bh->b_this_page;
  1782. if (buffer_async_write(bh)) {
  1783. clear_buffer_dirty(bh);
  1784. submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
  1785. inode->i_write_hint, wbc);
  1786. nr_underway++;
  1787. }
  1788. bh = next;
  1789. } while (bh != head);
  1790. folio_unlock(folio);
  1791. goto done;
  1792. }
  1793. EXPORT_SYMBOL(__block_write_full_folio);
  1794. /*
  1795. * If a folio has any new buffers, zero them out here, and mark them uptodate
  1796. * and dirty so they'll be written out (in order to prevent uninitialised
  1797. * block data from leaking). And clear the new bit.
  1798. */
  1799. void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
  1800. {
  1801. size_t block_start, block_end;
  1802. struct buffer_head *head, *bh;
  1803. BUG_ON(!folio_test_locked(folio));
  1804. head = folio_buffers(folio);
  1805. if (!head)
  1806. return;
  1807. bh = head;
  1808. block_start = 0;
  1809. do {
  1810. block_end = block_start + bh->b_size;
  1811. if (buffer_new(bh)) {
  1812. if (block_end > from && block_start < to) {
  1813. if (!folio_test_uptodate(folio)) {
  1814. size_t start, xend;
  1815. start = max(from, block_start);
  1816. xend = min(to, block_end);
  1817. folio_zero_segment(folio, start, xend);
  1818. set_buffer_uptodate(bh);
  1819. }
  1820. clear_buffer_new(bh);
  1821. mark_buffer_dirty(bh);
  1822. }
  1823. }
  1824. block_start = block_end;
  1825. bh = bh->b_this_page;
  1826. } while (bh != head);
  1827. }
  1828. EXPORT_SYMBOL(folio_zero_new_buffers);
  1829. static int
  1830. iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
  1831. const struct iomap *iomap)
  1832. {
  1833. loff_t offset = (loff_t)block << inode->i_blkbits;
  1834. bh->b_bdev = iomap->bdev;
  1835. /*
  1836. * Block points to offset in file we need to map, iomap contains
  1837. * the offset at which the map starts. If the map ends before the
  1838. * current block, then do not map the buffer and let the caller
  1839. * handle it.
  1840. */
  1841. if (offset >= iomap->offset + iomap->length)
  1842. return -EIO;
  1843. switch (iomap->type) {
  1844. case IOMAP_HOLE:
  1845. /*
  1846. * If the buffer is not up to date or beyond the current EOF,
  1847. * we need to mark it as new to ensure sub-block zeroing is
  1848. * executed if necessary.
  1849. */
  1850. if (!buffer_uptodate(bh) ||
  1851. (offset >= i_size_read(inode)))
  1852. set_buffer_new(bh);
  1853. return 0;
  1854. case IOMAP_DELALLOC:
  1855. if (!buffer_uptodate(bh) ||
  1856. (offset >= i_size_read(inode)))
  1857. set_buffer_new(bh);
  1858. set_buffer_uptodate(bh);
  1859. set_buffer_mapped(bh);
  1860. set_buffer_delay(bh);
  1861. return 0;
  1862. case IOMAP_UNWRITTEN:
  1863. /*
  1864. * For unwritten regions, we always need to ensure that regions
  1865. * in the block we are not writing to are zeroed. Mark the
  1866. * buffer as new to ensure this.
  1867. */
  1868. set_buffer_new(bh);
  1869. set_buffer_unwritten(bh);
  1870. fallthrough;
  1871. case IOMAP_MAPPED:
  1872. if ((iomap->flags & IOMAP_F_NEW) ||
  1873. offset >= i_size_read(inode)) {
  1874. /*
  1875. * This can happen if truncating the block device races
  1876. * with the check in the caller as i_size updates on
  1877. * block devices aren't synchronized by i_rwsem for
  1878. * block devices.
  1879. */
  1880. if (S_ISBLK(inode->i_mode))
  1881. return -EIO;
  1882. set_buffer_new(bh);
  1883. }
  1884. bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
  1885. inode->i_blkbits;
  1886. set_buffer_mapped(bh);
  1887. return 0;
  1888. default:
  1889. WARN_ON_ONCE(1);
  1890. return -EIO;
  1891. }
  1892. }
  1893. int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
  1894. get_block_t *get_block, const struct iomap *iomap)
  1895. {
  1896. size_t from = offset_in_folio(folio, pos);
  1897. size_t to = from + len;
  1898. struct inode *inode = folio->mapping->host;
  1899. size_t block_start, block_end;
  1900. sector_t block;
  1901. int err = 0;
  1902. size_t blocksize;
  1903. struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
  1904. BUG_ON(!folio_test_locked(folio));
  1905. BUG_ON(to > folio_size(folio));
  1906. BUG_ON(from > to);
  1907. head = folio_create_buffers(folio, inode, 0);
  1908. blocksize = head->b_size;
  1909. block = div_u64(folio_pos(folio), blocksize);
  1910. for (bh = head, block_start = 0; bh != head || !block_start;
  1911. block++, block_start=block_end, bh = bh->b_this_page) {
  1912. block_end = block_start + blocksize;
  1913. if (block_end <= from || block_start >= to) {
  1914. if (folio_test_uptodate(folio)) {
  1915. if (!buffer_uptodate(bh))
  1916. set_buffer_uptodate(bh);
  1917. }
  1918. continue;
  1919. }
  1920. if (buffer_new(bh))
  1921. clear_buffer_new(bh);
  1922. if (!buffer_mapped(bh)) {
  1923. WARN_ON(bh->b_size != blocksize);
  1924. if (get_block)
  1925. err = get_block(inode, block, bh, 1);
  1926. else
  1927. err = iomap_to_bh(inode, block, bh, iomap);
  1928. if (err)
  1929. break;
  1930. if (buffer_new(bh)) {
  1931. clean_bdev_bh_alias(bh);
  1932. if (folio_test_uptodate(folio)) {
  1933. clear_buffer_new(bh);
  1934. set_buffer_uptodate(bh);
  1935. mark_buffer_dirty(bh);
  1936. continue;
  1937. }
  1938. if (block_end > to || block_start < from)
  1939. folio_zero_segments(folio,
  1940. to, block_end,
  1941. block_start, from);
  1942. continue;
  1943. }
  1944. }
  1945. if (folio_test_uptodate(folio)) {
  1946. if (!buffer_uptodate(bh))
  1947. set_buffer_uptodate(bh);
  1948. continue;
  1949. }
  1950. if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
  1951. !buffer_unwritten(bh) &&
  1952. (block_start < from || block_end > to)) {
  1953. bh_read_nowait(bh, 0);
  1954. *wait_bh++=bh;
  1955. }
  1956. }
  1957. /*
  1958. * If we issued read requests - let them complete.
  1959. */
  1960. while(wait_bh > wait) {
  1961. wait_on_buffer(*--wait_bh);
  1962. if (!buffer_uptodate(*wait_bh))
  1963. err = -EIO;
  1964. }
  1965. if (unlikely(err))
  1966. folio_zero_new_buffers(folio, from, to);
  1967. return err;
  1968. }
  1969. int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
  1970. get_block_t *get_block)
  1971. {
  1972. return __block_write_begin_int(folio, pos, len, get_block, NULL);
  1973. }
  1974. EXPORT_SYMBOL(__block_write_begin);
  1975. static void __block_commit_write(struct folio *folio, size_t from, size_t to)
  1976. {
  1977. size_t block_start, block_end;
  1978. bool partial = false;
  1979. unsigned blocksize;
  1980. struct buffer_head *bh, *head;
  1981. bh = head = folio_buffers(folio);
  1982. if (!bh)
  1983. return;
  1984. blocksize = bh->b_size;
  1985. block_start = 0;
  1986. do {
  1987. block_end = block_start + blocksize;
  1988. if (block_end <= from || block_start >= to) {
  1989. if (!buffer_uptodate(bh))
  1990. partial = true;
  1991. } else {
  1992. set_buffer_uptodate(bh);
  1993. mark_buffer_dirty(bh);
  1994. }
  1995. if (buffer_new(bh))
  1996. clear_buffer_new(bh);
  1997. block_start = block_end;
  1998. bh = bh->b_this_page;
  1999. } while (bh != head);
  2000. /*
  2001. * If this is a partial write which happened to make all buffers
  2002. * uptodate then we can optimize away a bogus read_folio() for
  2003. * the next read(). Here we 'discover' whether the folio went
  2004. * uptodate as a result of this (potentially partial) write.
  2005. */
  2006. if (!partial)
  2007. folio_mark_uptodate(folio);
  2008. }
  2009. /*
  2010. * block_write_begin takes care of the basic task of block allocation and
  2011. * bringing partial write blocks uptodate first.
  2012. *
  2013. * The filesystem needs to handle block truncation upon failure.
  2014. */
  2015. int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
  2016. struct folio **foliop, get_block_t *get_block)
  2017. {
  2018. pgoff_t index = pos >> PAGE_SHIFT;
  2019. struct folio *folio;
  2020. int status;
  2021. folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
  2022. mapping_gfp_mask(mapping));
  2023. if (IS_ERR(folio))
  2024. return PTR_ERR(folio);
  2025. status = __block_write_begin_int(folio, pos, len, get_block, NULL);
  2026. if (unlikely(status)) {
  2027. folio_unlock(folio);
  2028. folio_put(folio);
  2029. folio = NULL;
  2030. }
  2031. *foliop = folio;
  2032. return status;
  2033. }
  2034. EXPORT_SYMBOL(block_write_begin);
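/*
 * Editor's note: a minimal sketch (not part of the original file) of a
 * filesystem ->write_begin() built on this helper; "example_get_block" is a
 * hypothetical get_block_t implementation:
 *
 *	static int example_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, foliop,
 *					 example_get_block);
 *	}
 */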
  2035. int block_write_end(struct file *file, struct address_space *mapping,
  2036. loff_t pos, unsigned len, unsigned copied,
  2037. struct folio *folio, void *fsdata)
  2038. {
  2039. size_t start = pos - folio_pos(folio);
  2040. if (unlikely(copied < len)) {
  2041. /*
  2042. * The buffers that were written will now be uptodate, so
  2043. * we don't have to worry about a read_folio reading them
  2044. * and overwriting a partial write. However if we have
  2045. * encountered a short write and only partially written
  2046. * into a buffer, it will not be marked uptodate, so a
  2047. * read_folio might come in and destroy our partial write.
  2048. *
  2049. * Do the simplest thing, and just treat any short write to a
  2050. * non uptodate folio as a zero-length write, and force the
  2051. * caller to redo the whole thing.
  2052. */
  2053. if (!folio_test_uptodate(folio))
  2054. copied = 0;
  2055. folio_zero_new_buffers(folio, start+copied, start+len);
  2056. }
  2057. flush_dcache_folio(folio);
  2058. /* This could be a short (even 0-length) commit */
  2059. __block_commit_write(folio, start, start + copied);
  2060. return copied;
  2061. }
  2062. EXPORT_SYMBOL(block_write_end);
  2063. int generic_write_end(struct file *file, struct address_space *mapping,
  2064. loff_t pos, unsigned len, unsigned copied,
  2065. struct folio *folio, void *fsdata)
  2066. {
  2067. struct inode *inode = mapping->host;
  2068. loff_t old_size = inode->i_size;
  2069. bool i_size_changed = false;
  2070. copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
  2071. /*
  2072. * No need to use i_size_read() here, the i_size cannot change under us
  2073. * because we hold i_rwsem.
  2074. *
  2075. * But it's important to update i_size while still holding folio lock:
  2076. * page writeout could otherwise come in and zero beyond i_size.
  2077. */
  2078. if (pos + copied > inode->i_size) {
  2079. i_size_write(inode, pos + copied);
  2080. i_size_changed = true;
  2081. }
  2082. folio_unlock(folio);
  2083. folio_put(folio);
  2084. if (old_size < pos)
  2085. pagecache_isize_extended(inode, old_size, pos);
  2086. /*
  2087. * Don't mark the inode dirty under page lock. First, it unnecessarily
  2088. * makes the holding time of page lock longer. Second, it forces lock
  2089. * ordering of page lock and transaction start for journaling
  2090. * filesystems.
  2091. */
  2092. if (i_size_changed)
  2093. mark_inode_dirty(inode);
  2094. return copied;
  2095. }
  2096. EXPORT_SYMBOL(generic_write_end);
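/*
 * Editor's note: how the two halves are normally paired, shown as a sketch
 * (not part of the original file) for a filesystem that needs no private
 * ->write_end() work; example_write_begin refers to the sketch above:
 *
 *	const struct address_space_operations example_aops = {
 *		...
 *		.write_begin	= example_write_begin,
 *		.write_end	= generic_write_end,
 *		...
 *	};
 */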
  2097. /*
  2098. * block_is_partially_uptodate checks whether buffers within a folio are
  2099. * uptodate or not.
  2100. *
  2101. * Returns true if all buffers which correspond to the specified part
  2102. * of the folio are uptodate.
  2103. */
  2104. bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
  2105. {
  2106. unsigned block_start, block_end, blocksize;
  2107. unsigned to;
  2108. struct buffer_head *bh, *head;
  2109. bool ret = true;
  2110. head = folio_buffers(folio);
  2111. if (!head)
  2112. return false;
  2113. blocksize = head->b_size;
  2114. to = min_t(unsigned, folio_size(folio) - from, count);
  2115. to = from + to;
  2116. if (from < blocksize && to > folio_size(folio) - blocksize)
  2117. return false;
  2118. bh = head;
  2119. block_start = 0;
  2120. do {
  2121. block_end = block_start + blocksize;
  2122. if (block_end > from && block_start < to) {
  2123. if (!buffer_uptodate(bh)) {
  2124. ret = false;
  2125. break;
  2126. }
  2127. if (block_end >= to)
  2128. break;
  2129. }
  2130. block_start = block_end;
  2131. bh = bh->b_this_page;
  2132. } while (bh != head);
  2133. return ret;
  2134. }
  2135. EXPORT_SYMBOL(block_is_partially_uptodate);
  2136. /*
  2137. * Generic "read_folio" function for block devices that have the normal
2138. * get_block functionality. This covers most of the block device filesystems.
  2139. * Reads the folio asynchronously --- the unlock_buffer() and
  2140. * set/clear_buffer_uptodate() functions propagate buffer state into the
  2141. * folio once IO has completed.
  2142. */
  2143. int block_read_full_folio(struct folio *folio, get_block_t *get_block)
  2144. {
  2145. struct inode *inode = folio->mapping->host;
  2146. sector_t iblock, lblock;
  2147. struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
  2148. size_t blocksize;
  2149. int nr, i;
  2150. int fully_mapped = 1;
  2151. bool page_error = false;
  2152. loff_t limit = i_size_read(inode);
  2153. /* This is needed for ext4. */
  2154. if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
  2155. limit = inode->i_sb->s_maxbytes;
  2156. VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
  2157. head = folio_create_buffers(folio, inode, 0);
  2158. blocksize = head->b_size;
  2159. iblock = div_u64(folio_pos(folio), blocksize);
  2160. lblock = div_u64(limit + blocksize - 1, blocksize);
  2161. bh = head;
  2162. nr = 0;
  2163. i = 0;
  2164. do {
  2165. if (buffer_uptodate(bh))
  2166. continue;
  2167. if (!buffer_mapped(bh)) {
  2168. int err = 0;
  2169. fully_mapped = 0;
  2170. if (iblock < lblock) {
  2171. WARN_ON(bh->b_size != blocksize);
  2172. err = get_block(inode, iblock, bh, 0);
  2173. if (err)
  2174. page_error = true;
  2175. }
  2176. if (!buffer_mapped(bh)) {
  2177. folio_zero_range(folio, i * blocksize,
  2178. blocksize);
  2179. if (!err)
  2180. set_buffer_uptodate(bh);
  2181. continue;
  2182. }
  2183. /*
  2184. * get_block() might have updated the buffer
  2185. * synchronously
  2186. */
  2187. if (buffer_uptodate(bh))
  2188. continue;
  2189. }
  2190. arr[nr++] = bh;
  2191. } while (i++, iblock++, (bh = bh->b_this_page) != head);
  2192. if (fully_mapped)
  2193. folio_set_mappedtodisk(folio);
  2194. if (!nr) {
  2195. /*
  2196. * All buffers are uptodate or get_block() returned an
  2197. * error when trying to map them - we can finish the read.
  2198. */
  2199. folio_end_read(folio, !page_error);
  2200. return 0;
  2201. }
  2202. /* Stage two: lock the buffers */
  2203. for (i = 0; i < nr; i++) {
  2204. bh = arr[i];
  2205. lock_buffer(bh);
  2206. mark_buffer_async_read(bh);
  2207. }
  2208. /*
  2209. * Stage 3: start the IO. Check for uptodateness
  2210. * inside the buffer lock in case another process reading
  2211. * the underlying blockdev brought it uptodate (the sct fix).
  2212. */
  2213. for (i = 0; i < nr; i++) {
  2214. bh = arr[i];
  2215. if (buffer_uptodate(bh))
  2216. end_buffer_async_read(bh, 1);
  2217. else
  2218. submit_bh(REQ_OP_READ, bh);
  2219. }
  2220. return 0;
  2221. }
  2222. EXPORT_SYMBOL(block_read_full_folio);
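/*
 * Editor's note: a minimal sketch (not part of the original file) of a
 * ->read_folio() built on this helper, again using a hypothetical
 * example_get_block:
 *
 *	static int example_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, example_get_block);
 *	}
 */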
  2223. /* utility function for filesystems that need to do work on expanding
  2224. * truncates. Uses filesystem pagecache writes to allow the filesystem to
  2225. * deal with the hole.
  2226. */
  2227. int generic_cont_expand_simple(struct inode *inode, loff_t size)
  2228. {
  2229. struct address_space *mapping = inode->i_mapping;
  2230. const struct address_space_operations *aops = mapping->a_ops;
  2231. struct folio *folio;
  2232. void *fsdata = NULL;
  2233. int err;
  2234. err = inode_newsize_ok(inode, size);
  2235. if (err)
  2236. goto out;
  2237. err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
  2238. if (err)
  2239. goto out;
  2240. err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
  2241. BUG_ON(err > 0);
  2242. out:
  2243. return err;
  2244. }
  2245. EXPORT_SYMBOL(generic_cont_expand_simple);
  2246. static int cont_expand_zero(struct file *file, struct address_space *mapping,
  2247. loff_t pos, loff_t *bytes)
  2248. {
  2249. struct inode *inode = mapping->host;
  2250. const struct address_space_operations *aops = mapping->a_ops;
  2251. unsigned int blocksize = i_blocksize(inode);
  2252. struct folio *folio;
  2253. void *fsdata = NULL;
  2254. pgoff_t index, curidx;
  2255. loff_t curpos;
  2256. unsigned zerofrom, offset, len;
  2257. int err = 0;
  2258. index = pos >> PAGE_SHIFT;
  2259. offset = pos & ~PAGE_MASK;
  2260. while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
  2261. zerofrom = curpos & ~PAGE_MASK;
  2262. if (zerofrom & (blocksize-1)) {
  2263. *bytes |= (blocksize-1);
  2264. (*bytes)++;
  2265. }
  2266. len = PAGE_SIZE - zerofrom;
  2267. err = aops->write_begin(file, mapping, curpos, len,
  2268. &folio, &fsdata);
  2269. if (err)
  2270. goto out;
  2271. folio_zero_range(folio, offset_in_folio(folio, curpos), len);
  2272. err = aops->write_end(file, mapping, curpos, len, len,
  2273. folio, fsdata);
  2274. if (err < 0)
  2275. goto out;
  2276. BUG_ON(err != len);
  2277. err = 0;
  2278. balance_dirty_pages_ratelimited(mapping);
  2279. if (fatal_signal_pending(current)) {
  2280. err = -EINTR;
  2281. goto out;
  2282. }
  2283. }
  2284. /* page covers the boundary, find the boundary offset */
  2285. if (index == curidx) {
  2286. zerofrom = curpos & ~PAGE_MASK;
2287. /* if we are about to expand the file, the last block will be filled */
  2288. if (offset <= zerofrom) {
  2289. goto out;
  2290. }
  2291. if (zerofrom & (blocksize-1)) {
  2292. *bytes |= (blocksize-1);
  2293. (*bytes)++;
  2294. }
  2295. len = offset - zerofrom;
  2296. err = aops->write_begin(file, mapping, curpos, len,
  2297. &folio, &fsdata);
  2298. if (err)
  2299. goto out;
  2300. folio_zero_range(folio, offset_in_folio(folio, curpos), len);
  2301. err = aops->write_end(file, mapping, curpos, len, len,
  2302. folio, fsdata);
  2303. if (err < 0)
  2304. goto out;
  2305. BUG_ON(err != len);
  2306. err = 0;
  2307. }
  2308. out:
  2309. return err;
  2310. }
  2311. /*
2312. * For moronic filesystems that do not allow holes in files.
  2313. * We may have to extend the file.
  2314. */
  2315. int cont_write_begin(struct file *file, struct address_space *mapping,
  2316. loff_t pos, unsigned len,
  2317. struct folio **foliop, void **fsdata,
  2318. get_block_t *get_block, loff_t *bytes)
  2319. {
  2320. struct inode *inode = mapping->host;
  2321. unsigned int blocksize = i_blocksize(inode);
  2322. unsigned int zerofrom;
  2323. int err;
  2324. err = cont_expand_zero(file, mapping, pos, bytes);
  2325. if (err)
  2326. return err;
  2327. zerofrom = *bytes & ~PAGE_MASK;
  2328. if (pos+len > *bytes && zerofrom & (blocksize-1)) {
  2329. *bytes |= (blocksize-1);
  2330. (*bytes)++;
  2331. }
  2332. return block_write_begin(mapping, pos, len, foliop, get_block);
  2333. }
  2334. EXPORT_SYMBOL(cont_write_begin);
  2335. void block_commit_write(struct page *page, unsigned from, unsigned to)
  2336. {
  2337. struct folio *folio = page_folio(page);
  2338. __block_commit_write(folio, from, to);
  2339. }
  2340. EXPORT_SYMBOL(block_commit_write);
  2341. /*
  2342. * block_page_mkwrite() is not allowed to change the file size as it gets
  2343. * called from a page fault handler when a page is first dirtied. Hence we must
  2344. * be careful to check for EOF conditions here. We set the page up correctly
  2345. * for a written page which means we get ENOSPC checking when writing into
  2346. * holes and correct delalloc and unwritten extent mapping on filesystems that
  2347. * support these features.
  2348. *
2349. * We are not allowed to take i_rwsem here so we have to play games to
  2350. * protect against truncate races as the page could now be beyond EOF. Because
  2351. * truncate writes the inode size before removing pages, once we have the
  2352. * page lock we can determine safely if the page is beyond EOF. If it is not
  2353. * beyond EOF, then the page is guaranteed safe against truncation until we
  2354. * unlock the page.
  2355. *
  2356. * Direct callers of this function should protect against filesystem freezing
  2357. * using sb_start_pagefault() - sb_end_pagefault() functions.
  2358. */
  2359. int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
  2360. get_block_t get_block)
  2361. {
  2362. struct folio *folio = page_folio(vmf->page);
  2363. struct inode *inode = file_inode(vma->vm_file);
  2364. unsigned long end;
  2365. loff_t size;
  2366. int ret;
  2367. folio_lock(folio);
  2368. size = i_size_read(inode);
  2369. if ((folio->mapping != inode->i_mapping) ||
  2370. (folio_pos(folio) >= size)) {
  2371. /* We overload EFAULT to mean page got truncated */
  2372. ret = -EFAULT;
  2373. goto out_unlock;
  2374. }
  2375. end = folio_size(folio);
  2376. /* folio is wholly or partially inside EOF */
  2377. if (folio_pos(folio) + end > size)
  2378. end = size - folio_pos(folio);
  2379. ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
  2380. if (unlikely(ret))
  2381. goto out_unlock;
  2382. __block_commit_write(folio, 0, end);
  2383. folio_mark_dirty(folio);
  2384. folio_wait_stable(folio);
  2385. return 0;
  2386. out_unlock:
  2387. folio_unlock(folio);
  2388. return ret;
  2389. }
  2390. EXPORT_SYMBOL(block_page_mkwrite);
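/*
 * Editor's note: a sketch (not part of the original file) of a
 * ->page_mkwrite() handler wrapping this helper with the freeze protection
 * the comment above asks for; example_get_block is a hypothetical
 * get_block_t:
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int err;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		err = block_page_mkwrite(vmf->vma, vmf, example_get_block);
 *		sb_end_pagefault(inode->i_sb);
 *		if (!err)
 *			return VM_FAULT_LOCKED;
 *		return err == -EFAULT ? VM_FAULT_NOPAGE : VM_FAULT_SIGBUS;
 *	}
 */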
  2391. int block_truncate_page(struct address_space *mapping,
  2392. loff_t from, get_block_t *get_block)
  2393. {
  2394. pgoff_t index = from >> PAGE_SHIFT;
  2395. unsigned blocksize;
  2396. sector_t iblock;
  2397. size_t offset, length, pos;
  2398. struct inode *inode = mapping->host;
  2399. struct folio *folio;
  2400. struct buffer_head *bh;
  2401. int err = 0;
  2402. blocksize = i_blocksize(inode);
  2403. length = from & (blocksize - 1);
  2404. /* Block boundary? Nothing to do */
  2405. if (!length)
  2406. return 0;
  2407. length = blocksize - length;
  2408. iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
  2409. folio = filemap_grab_folio(mapping, index);
  2410. if (IS_ERR(folio))
  2411. return PTR_ERR(folio);
  2412. bh = folio_buffers(folio);
  2413. if (!bh)
  2414. bh = create_empty_buffers(folio, blocksize, 0);
  2415. /* Find the buffer that contains "offset" */
  2416. offset = offset_in_folio(folio, from);
  2417. pos = blocksize;
  2418. while (offset >= pos) {
  2419. bh = bh->b_this_page;
  2420. iblock++;
  2421. pos += blocksize;
  2422. }
  2423. if (!buffer_mapped(bh)) {
  2424. WARN_ON(bh->b_size != blocksize);
  2425. err = get_block(inode, iblock, bh, 0);
  2426. if (err)
  2427. goto unlock;
  2428. /* unmapped? It's a hole - nothing to do */
  2429. if (!buffer_mapped(bh))
  2430. goto unlock;
  2431. }
  2432. /* Ok, it's mapped. Make sure it's up-to-date */
  2433. if (folio_test_uptodate(folio))
  2434. set_buffer_uptodate(bh);
  2435. if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
  2436. err = bh_read(bh, 0);
  2437. /* Uhhuh. Read error. Complain and punt. */
  2438. if (err < 0)
  2439. goto unlock;
  2440. }
  2441. folio_zero_range(folio, offset, length);
  2442. mark_buffer_dirty(bh);
  2443. unlock:
  2444. folio_unlock(folio);
  2445. folio_put(folio);
  2446. return err;
  2447. }
  2448. EXPORT_SYMBOL(block_truncate_page);
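/*
 * Editor's note: a usage sketch (not part of the original file). A
 * filesystem truncating an inode down to "newsize" typically zeroes the tail
 * of the last remaining block first (names are hypothetical):
 *
 *	err = block_truncate_page(inode->i_mapping, newsize, example_get_block);
 *	if (err)
 *		return err;
 *	truncate_setsize(inode, newsize);
 *	... free the blocks beyond the new EOF ...
 */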
  2449. /*
  2450. * The generic ->writepage function for buffer-backed address_spaces
  2451. */
  2452. int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
  2453. void *get_block)
  2454. {
  2455. struct inode * const inode = folio->mapping->host;
  2456. loff_t i_size = i_size_read(inode);
  2457. /* Is the folio fully inside i_size? */
  2458. if (folio_pos(folio) + folio_size(folio) <= i_size)
  2459. return __block_write_full_folio(inode, folio, get_block, wbc);
  2460. /* Is the folio fully outside i_size? (truncate in progress) */
  2461. if (folio_pos(folio) >= i_size) {
  2462. folio_unlock(folio);
  2463. return 0; /* don't care */
  2464. }
  2465. /*
  2466. * The folio straddles i_size. It must be zeroed out on each and every
  2467. * writepage invocation because it may be mmapped. "A file is mapped
  2468. * in multiples of the page size. For a file that is not a multiple of
  2469. * the page size, the remaining memory is zeroed when mapped, and
  2470. * writes to that region are not written out to the file."
  2471. */
  2472. folio_zero_segment(folio, offset_in_folio(folio, i_size),
  2473. folio_size(folio));
  2474. return __block_write_full_folio(inode, folio, get_block, wbc);
  2475. }
  2476. sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
  2477. get_block_t *get_block)
  2478. {
  2479. struct inode *inode = mapping->host;
  2480. struct buffer_head tmp = {
  2481. .b_size = i_blocksize(inode),
  2482. };
  2483. get_block(inode, block, &tmp, 0);
  2484. return tmp.b_blocknr;
  2485. }
  2486. EXPORT_SYMBOL(generic_block_bmap);
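/*
 * Editor's note: a minimal sketch (not part of the original file) of a
 * ->bmap() implementation using this helper, with a hypothetical
 * example_get_block:
 *
 *	static sector_t example_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, example_get_block);
 *	}
 */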
  2487. static void end_bio_bh_io_sync(struct bio *bio)
  2488. {
  2489. struct buffer_head *bh = bio->bi_private;
  2490. if (unlikely(bio_flagged(bio, BIO_QUIET)))
  2491. set_bit(BH_Quiet, &bh->b_state);
  2492. bh->b_end_io(bh, !bio->bi_status);
  2493. bio_put(bio);
  2494. }
  2495. static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
  2496. enum rw_hint write_hint,
  2497. struct writeback_control *wbc)
  2498. {
  2499. const enum req_op op = opf & REQ_OP_MASK;
  2500. struct bio *bio;
  2501. BUG_ON(!buffer_locked(bh));
  2502. BUG_ON(!buffer_mapped(bh));
  2503. BUG_ON(!bh->b_end_io);
  2504. BUG_ON(buffer_delay(bh));
  2505. BUG_ON(buffer_unwritten(bh));
  2506. /*
  2507. * Only clear out a write error when rewriting
  2508. */
  2509. if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
  2510. clear_buffer_write_io_error(bh);
  2511. if (buffer_meta(bh))
  2512. opf |= REQ_META;
  2513. if (buffer_prio(bh))
  2514. opf |= REQ_PRIO;
  2515. bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
  2516. fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
  2517. bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
  2518. bio->bi_write_hint = write_hint;
  2519. bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
  2520. bio->bi_end_io = end_bio_bh_io_sync;
  2521. bio->bi_private = bh;
  2522. /* Take care of bh's that straddle the end of the device */
  2523. guard_bio_eod(bio);
  2524. if (wbc) {
  2525. wbc_init_bio(wbc, bio);
  2526. wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
  2527. }
  2528. submit_bio(bio);
  2529. }
  2530. void submit_bh(blk_opf_t opf, struct buffer_head *bh)
  2531. {
  2532. submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
  2533. }
  2534. EXPORT_SYMBOL(submit_bh);
  2535. void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
  2536. {
  2537. lock_buffer(bh);
  2538. if (!test_clear_buffer_dirty(bh)) {
  2539. unlock_buffer(bh);
  2540. return;
  2541. }
  2542. bh->b_end_io = end_buffer_write_sync;
  2543. get_bh(bh);
  2544. submit_bh(REQ_OP_WRITE | op_flags, bh);
  2545. }
  2546. EXPORT_SYMBOL(write_dirty_buffer);
  2547. /*
  2548. * For a data-integrity writeout, we need to wait upon any in-progress I/O
  2549. * and then start new I/O and then wait upon it. The caller must have a ref on
  2550. * the buffer_head.
  2551. */
  2552. int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
  2553. {
  2554. WARN_ON(atomic_read(&bh->b_count) < 1);
  2555. lock_buffer(bh);
  2556. if (test_clear_buffer_dirty(bh)) {
  2557. /*
  2558. * The bh should be mapped, but it might not be if the
  2559. * device was hot-removed. Not much we can do but fail the I/O.
  2560. */
  2561. if (!buffer_mapped(bh)) {
  2562. unlock_buffer(bh);
  2563. return -EIO;
  2564. }
  2565. get_bh(bh);
  2566. bh->b_end_io = end_buffer_write_sync;
  2567. submit_bh(REQ_OP_WRITE | op_flags, bh);
  2568. wait_on_buffer(bh);
  2569. if (!buffer_uptodate(bh))
  2570. return -EIO;
  2571. } else {
  2572. unlock_buffer(bh);
  2573. }
  2574. return 0;
  2575. }
  2576. EXPORT_SYMBOL(__sync_dirty_buffer);
  2577. int sync_dirty_buffer(struct buffer_head *bh)
  2578. {
  2579. return __sync_dirty_buffer(bh, REQ_SYNC);
  2580. }
  2581. EXPORT_SYMBOL(sync_dirty_buffer);
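/*
 * Editor's note: a usage sketch (not part of the original file) of the
 * common "modify a metadata block and make it durable" sequence, assuming
 * "bh" was obtained with sb_bread()/sb_getblk() and is uptodate:
 *
 *	lock_buffer(bh);
 *	memcpy(bh->b_data + offset, data, len);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);	(waits for the write to complete)
 *	brelse(bh);
 */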
  2582. static inline int buffer_busy(struct buffer_head *bh)
  2583. {
  2584. return atomic_read(&bh->b_count) |
  2585. (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
  2586. }
  2587. static bool
  2588. drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
  2589. {
  2590. struct buffer_head *head = folio_buffers(folio);
  2591. struct buffer_head *bh;
  2592. bh = head;
  2593. do {
  2594. if (buffer_busy(bh))
  2595. goto failed;
  2596. bh = bh->b_this_page;
  2597. } while (bh != head);
  2598. do {
  2599. struct buffer_head *next = bh->b_this_page;
  2600. if (bh->b_assoc_map)
  2601. __remove_assoc_queue(bh);
  2602. bh = next;
  2603. } while (bh != head);
  2604. *buffers_to_free = head;
  2605. folio_detach_private(folio);
  2606. return true;
  2607. failed:
  2608. return false;
  2609. }
  2610. /**
  2611. * try_to_free_buffers - Release buffers attached to this folio.
  2612. * @folio: The folio.
  2613. *
  2614. * If any buffers are in use (dirty, under writeback, elevated refcount),
  2615. * no buffers will be freed.
  2616. *
  2617. * If the folio is dirty but all the buffers are clean then we need to
  2618. * be sure to mark the folio clean as well. This is because the folio
  2619. * may be against a block device, and a later reattachment of buffers
  2620. * to a dirty folio will set *all* buffers dirty. Which would corrupt
  2621. * filesystem data on the same device.
  2622. *
  2623. * The same applies to regular filesystem folios: if all the buffers are
  2624. * clean then we set the folio clean and proceed. To do that, we require
  2625. * total exclusion from block_dirty_folio(). That is obtained with
  2626. * i_private_lock.
  2627. *
  2628. * Exclusion against try_to_free_buffers may be obtained by either
  2629. * locking the folio or by holding its mapping's i_private_lock.
  2630. *
  2631. * Context: Process context. @folio must be locked. Will not sleep.
  2632. * Return: true if all buffers attached to this folio were freed.
  2633. */
  2634. bool try_to_free_buffers(struct folio *folio)
  2635. {
  2636. struct address_space * const mapping = folio->mapping;
  2637. struct buffer_head *buffers_to_free = NULL;
  2638. bool ret = 0;
  2639. BUG_ON(!folio_test_locked(folio));
  2640. if (folio_test_writeback(folio))
  2641. return false;
  2642. if (mapping == NULL) { /* can this still happen? */
  2643. ret = drop_buffers(folio, &buffers_to_free);
  2644. goto out;
  2645. }
  2646. spin_lock(&mapping->i_private_lock);
  2647. ret = drop_buffers(folio, &buffers_to_free);
  2648. /*
  2649. * If the filesystem writes its buffers by hand (eg ext3)
  2650. * then we can have clean buffers against a dirty folio. We
  2651. * clean the folio here; otherwise the VM will never notice
  2652. * that the filesystem did any IO at all.
  2653. *
  2654. * Also, during truncate, discard_buffer will have marked all
  2655. * the folio's buffers clean. We discover that here and clean
  2656. * the folio also.
  2657. *
  2658. * i_private_lock must be held over this entire operation in order
  2659. * to synchronise against block_dirty_folio and prevent the
  2660. * dirty bit from being lost.
  2661. */
  2662. if (ret)
  2663. folio_cancel_dirty(folio);
  2664. spin_unlock(&mapping->i_private_lock);
  2665. out:
  2666. if (buffers_to_free) {
  2667. struct buffer_head *bh = buffers_to_free;
  2668. do {
  2669. struct buffer_head *next = bh->b_this_page;
  2670. free_buffer_head(bh);
  2671. bh = next;
  2672. } while (bh != buffers_to_free);
  2673. }
  2674. return ret;
  2675. }
  2676. EXPORT_SYMBOL(try_to_free_buffers);
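/*
 * Editor's note: a sketch (not part of the original file) of a typical
 * ->release_folio() for buffer-backed filesystems; "example_release_folio"
 * is a hypothetical name:
 *
 *	static bool example_release_folio(struct folio *folio, gfp_t gfp)
 *	{
 *		return try_to_free_buffers(folio);
 *	}
 *
 * Generic code falls back to this behaviour when no ->release_folio is
 * provided, via filemap_release_folio() as used earlier in this file.
 */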
  2677. /*
  2678. * Buffer-head allocation
  2679. */
  2680. static struct kmem_cache *bh_cachep __ro_after_init;
  2681. /*
  2682. * Once the number of bh's in the machine exceeds this level, we start
  2683. * stripping them in writeback.
  2684. */
  2685. static unsigned long max_buffer_heads __ro_after_init;
  2686. int buffer_heads_over_limit;
  2687. struct bh_accounting {
  2688. int nr; /* Number of live bh's */
  2689. int ratelimit; /* Limit cacheline bouncing */
  2690. };
  2691. static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
  2692. static void recalc_bh_state(void)
  2693. {
  2694. int i;
  2695. int tot = 0;
  2696. if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
  2697. return;
  2698. __this_cpu_write(bh_accounting.ratelimit, 0);
  2699. for_each_online_cpu(i)
  2700. tot += per_cpu(bh_accounting, i).nr;
  2701. buffer_heads_over_limit = (tot > max_buffer_heads);
  2702. }
  2703. struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
  2704. {
  2705. struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
  2706. if (ret) {
  2707. INIT_LIST_HEAD(&ret->b_assoc_buffers);
  2708. spin_lock_init(&ret->b_uptodate_lock);
  2709. preempt_disable();
  2710. __this_cpu_inc(bh_accounting.nr);
  2711. recalc_bh_state();
  2712. preempt_enable();
  2713. }
  2714. return ret;
  2715. }
  2716. EXPORT_SYMBOL(alloc_buffer_head);
  2717. void free_buffer_head(struct buffer_head *bh)
  2718. {
  2719. BUG_ON(!list_empty(&bh->b_assoc_buffers));
  2720. kmem_cache_free(bh_cachep, bh);
  2721. preempt_disable();
  2722. __this_cpu_dec(bh_accounting.nr);
  2723. recalc_bh_state();
  2724. preempt_enable();
  2725. }
  2726. EXPORT_SYMBOL(free_buffer_head);
  2727. static int buffer_exit_cpu_dead(unsigned int cpu)
  2728. {
  2729. int i;
  2730. struct bh_lru *b = &per_cpu(bh_lrus, cpu);
  2731. for (i = 0; i < BH_LRU_SIZE; i++) {
  2732. brelse(b->bhs[i]);
  2733. b->bhs[i] = NULL;
  2734. }
  2735. this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
  2736. per_cpu(bh_accounting, cpu).nr = 0;
  2737. return 0;
  2738. }
  2739. /**
  2740. * bh_uptodate_or_lock - Test whether the buffer is uptodate
  2741. * @bh: struct buffer_head
  2742. *
  2743. * Return true if the buffer is up-to-date and false,
  2744. * with the buffer locked, if not.
  2745. */
  2746. int bh_uptodate_or_lock(struct buffer_head *bh)
  2747. {
  2748. if (!buffer_uptodate(bh)) {
  2749. lock_buffer(bh);
  2750. if (!buffer_uptodate(bh))
  2751. return 0;
  2752. unlock_buffer(bh);
  2753. }
  2754. return 1;
  2755. }
  2756. EXPORT_SYMBOL(bh_uptodate_or_lock);
  2757. /**
  2758. * __bh_read - Submit read for a locked buffer
  2759. * @bh: struct buffer_head
2760. * @op_flags: extra REQ_OP_* flags to append besides REQ_OP_READ
2761. * @wait: wait until reading finishes
2762. *
2763. * Returns zero on success or when not waiting, and -EIO on read error.
  2764. */
  2765. int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
  2766. {
  2767. int ret = 0;
  2768. BUG_ON(!buffer_locked(bh));
  2769. get_bh(bh);
  2770. bh->b_end_io = end_buffer_read_sync;
  2771. submit_bh(REQ_OP_READ | op_flags, bh);
  2772. if (wait) {
  2773. wait_on_buffer(bh);
  2774. if (!buffer_uptodate(bh))
  2775. ret = -EIO;
  2776. }
  2777. return ret;
  2778. }
  2779. EXPORT_SYMBOL(__bh_read);
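/*
 * Editor's note: a sketch (not part of the original file) of the
 * "read only if needed" pattern these helpers are built for; the bh_read()
 * wrapper used elsewhere in this file does essentially this:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		(the buffer was not uptodate and is now locked)
 *		if (__bh_read(bh, 0, true))
 *			return -EIO;
 *	}
 */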
  2780. /**
  2781. * __bh_read_batch - Submit read for a batch of unlocked buffers
  2782. * @nr: entry number of the buffer batch
  2783. * @bhs: a batch of struct buffer_head
2784. * @op_flags: extra REQ_OP_* flags to append besides REQ_OP_READ
2785. * @force_lock: force to get a lock on the buffer if set, otherwise skip any
2786. * buffer that cannot be locked.
2787. *
2788. * The reads are submitted asynchronously; callers must wait on each buffer.
  2789. */
  2790. void __bh_read_batch(int nr, struct buffer_head *bhs[],
  2791. blk_opf_t op_flags, bool force_lock)
  2792. {
  2793. int i;
  2794. for (i = 0; i < nr; i++) {
  2795. struct buffer_head *bh = bhs[i];
  2796. if (buffer_uptodate(bh))
  2797. continue;
  2798. if (force_lock)
  2799. lock_buffer(bh);
  2800. else
  2801. if (!trylock_buffer(bh))
  2802. continue;
  2803. if (buffer_uptodate(bh)) {
  2804. unlock_buffer(bh);
  2805. continue;
  2806. }
  2807. bh->b_end_io = end_buffer_read_sync;
  2808. get_bh(bh);
  2809. submit_bh(REQ_OP_READ | op_flags, bh);
  2810. }
  2811. }
  2812. EXPORT_SYMBOL(__bh_read_batch);
  2813. void __init buffer_init(void)
  2814. {
  2815. unsigned long nrpages;
  2816. int ret;
  2817. bh_cachep = KMEM_CACHE(buffer_head,
  2818. SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
  2819. /*
  2820. * Limit the bh occupancy to 10% of ZONE_NORMAL
  2821. */
  2822. nrpages = (nr_free_buffer_pages() * 10) / 100;
  2823. max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
  2824. ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
  2825. NULL, buffer_exit_cpu_dead);
  2826. WARN_ON(ret < 0);
  2827. }