- // SPDX-License-Identifier: GPL-2.0-only
- /*
- * linux/fs/buffer.c
- *
- * Copyright (C) 1991, 1992, 2002 Linus Torvalds
- */
- /*
- * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
- *
- * Removed a lot of unnecessary code and simplified things now that
- * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
- *
- * Speed up hash, lru, and free list operations. Use gfp() for allocating
- * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
- *
- * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
- *
- * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
- */
- #include <linux/kernel.h>
- #include <linux/sched/signal.h>
- #include <linux/syscalls.h>
- #include <linux/fs.h>
- #include <linux/iomap.h>
- #include <linux/mm.h>
- #include <linux/percpu.h>
- #include <linux/slab.h>
- #include <linux/capability.h>
- #include <linux/blkdev.h>
- #include <linux/file.h>
- #include <linux/quotaops.h>
- #include <linux/highmem.h>
- #include <linux/export.h>
- #include <linux/backing-dev.h>
- #include <linux/writeback.h>
- #include <linux/hash.h>
- #include <linux/suspend.h>
- #include <linux/buffer_head.h>
- #include <linux/task_io_accounting_ops.h>
- #include <linux/bio.h>
- #include <linux/cpu.h>
- #include <linux/bitops.h>
- #include <linux/mpage.h>
- #include <linux/bit_spinlock.h>
- #include <linux/pagevec.h>
- #include <linux/sched/mm.h>
- #include <trace/events/block.h>
- #include <linux/fscrypt.h>
- #include <linux/fsverity.h>
- #include <linux/sched/isolation.h>
- #include "internal.h"
- static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
- static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
- enum rw_hint hint, struct writeback_control *wbc);
- #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
- inline void touch_buffer(struct buffer_head *bh)
- {
- trace_block_touch_buffer(bh);
- folio_mark_accessed(bh->b_folio);
- }
- EXPORT_SYMBOL(touch_buffer);
- void __lock_buffer(struct buffer_head *bh)
- {
- wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
- }
- EXPORT_SYMBOL(__lock_buffer);
- void unlock_buffer(struct buffer_head *bh)
- {
- clear_bit_unlock(BH_Lock, &bh->b_state);
- smp_mb__after_atomic();
- wake_up_bit(&bh->b_state, BH_Lock);
- }
- EXPORT_SYMBOL(unlock_buffer);
- /*
- * Returns whether the folio has dirty or writeback buffers. If all the buffers
- * are unlocked and clean then the folio_test_dirty information is stale. If
- * any of the buffers are locked, it is assumed they are locked for IO.
- */
- void buffer_check_dirty_writeback(struct folio *folio,
- bool *dirty, bool *writeback)
- {
- struct buffer_head *head, *bh;
- *dirty = false;
- *writeback = false;
- BUG_ON(!folio_test_locked(folio));
- head = folio_buffers(folio);
- if (!head)
- return;
- if (folio_test_writeback(folio))
- *writeback = true;
- bh = head;
- do {
- if (buffer_locked(bh))
- *writeback = true;
- if (buffer_dirty(bh))
- *dirty = true;
- bh = bh->b_this_page;
- } while (bh != head);
- }
- /*
- * Block until a buffer comes unlocked. This doesn't stop it
- * from becoming locked again - you have to lock it yourself
- * if you want to preserve its state.
- */
- void __wait_on_buffer(struct buffer_head * bh)
- {
- wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
- }
- EXPORT_SYMBOL(__wait_on_buffer);
- static void buffer_io_error(struct buffer_head *bh, char *msg)
- {
- if (!test_bit(BH_Quiet, &bh->b_state))
- printk_ratelimited(KERN_ERR
- "Buffer I/O error on dev %pg, logical block %llu%s\n",
- bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
- }
- /*
- * End-of-IO handler helper function which does not touch the bh after
- * unlocking it.
- * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
- * a race there is benign: unlock_buffer() only uses the bh's address for
- * hashing after unlocking the buffer, so it doesn't actually touch the bh
- * itself.
- */
- static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
- {
- if (uptodate) {
- set_buffer_uptodate(bh);
- } else {
- /* This happens, due to failed read-ahead attempts. */
- clear_buffer_uptodate(bh);
- }
- unlock_buffer(bh);
- }
- /*
- * Default synchronous end-of-IO handler. Just mark it up-to-date and
- * unlock the buffer.
- */
- void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
- {
- __end_buffer_read_notouch(bh, uptodate);
- put_bh(bh);
- }
- EXPORT_SYMBOL(end_buffer_read_sync);
- void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
- {
- if (uptodate) {
- set_buffer_uptodate(bh);
- } else {
- buffer_io_error(bh, ", lost sync page write");
- mark_buffer_write_io_error(bh);
- clear_buffer_uptodate(bh);
- }
- unlock_buffer(bh);
- put_bh(bh);
- }
- EXPORT_SYMBOL(end_buffer_write_sync);
- static struct buffer_head *
- __find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
- {
- struct address_space *bd_mapping = bdev->bd_mapping;
- const int blkbits = bd_mapping->host->i_blkbits;
- struct buffer_head *ret = NULL;
- pgoff_t index;
- struct buffer_head *bh;
- struct buffer_head *head;
- struct folio *folio;
- int all_mapped = 1;
- static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
- index = ((loff_t)block << blkbits) / PAGE_SIZE;
- folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
- if (IS_ERR(folio))
- goto out;
- /*
- * Folio lock protects the buffers. Callers that cannot block
- * will fallback to serializing vs try_to_free_buffers() via
- * the i_private_lock.
- */
- if (atomic)
- spin_lock(&bd_mapping->i_private_lock);
- else
- folio_lock(folio);
- head = folio_buffers(folio);
- if (!head)
- goto out_unlock;
- bh = head;
- do {
- if (!buffer_mapped(bh))
- all_mapped = 0;
- else if (bh->b_blocknr == block) {
- ret = bh;
- get_bh(bh);
- goto out_unlock;
- }
- bh = bh->b_this_page;
- } while (bh != head);
- /* We might be here because some of the buffers on this folio are
- * not mapped. This is due to various races between
- * file I/O on the block device and getblk. It gets dealt with
- * elsewhere; don't report an error if we had some unmapped buffers.
- */
- ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
- if (all_mapped && __ratelimit(&last_warned)) {
- printk("__find_get_block_slow() failed. block=%llu, "
- "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
- "device %pg blocksize: %d\n",
- (unsigned long long)block,
- (unsigned long long)bh->b_blocknr,
- bh->b_state, bh->b_size, bdev,
- 1 << blkbits);
- }
- out_unlock:
- if (atomic)
- spin_unlock(&bd_mapping->i_private_lock);
- else
- folio_unlock(folio);
- folio_put(folio);
- out:
- return ret;
- }
- static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
- {
- unsigned long flags;
- struct buffer_head *first;
- struct buffer_head *tmp;
- struct folio *folio;
- int folio_uptodate = 1;
- BUG_ON(!buffer_async_read(bh));
- folio = bh->b_folio;
- if (uptodate) {
- set_buffer_uptodate(bh);
- } else {
- clear_buffer_uptodate(bh);
- buffer_io_error(bh, ", async page read");
- }
- /*
- * Be _very_ careful from here on. Bad things can happen if
- * two buffer heads end IO at almost the same time and both
- * decide that the page is now completely done.
- */
- first = folio_buffers(folio);
- spin_lock_irqsave(&first->b_uptodate_lock, flags);
- clear_buffer_async_read(bh);
- unlock_buffer(bh);
- tmp = bh;
- do {
- if (!buffer_uptodate(tmp))
- folio_uptodate = 0;
- if (buffer_async_read(tmp)) {
- BUG_ON(!buffer_locked(tmp));
- goto still_busy;
- }
- tmp = tmp->b_this_page;
- } while (tmp != bh);
- spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
- folio_end_read(folio, folio_uptodate);
- return;
- still_busy:
- spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
- return;
- }
- struct postprocess_bh_ctx {
- struct work_struct work;
- struct buffer_head *bh;
- };
- static void verify_bh(struct work_struct *work)
- {
- struct postprocess_bh_ctx *ctx =
- container_of(work, struct postprocess_bh_ctx, work);
- struct buffer_head *bh = ctx->bh;
- bool valid;
- valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
- end_buffer_async_read(bh, valid);
- kfree(ctx);
- }
- static bool need_fsverity(struct buffer_head *bh)
- {
- struct folio *folio = bh->b_folio;
- struct inode *inode = folio->mapping->host;
- return fsverity_active(inode) &&
- /* needed by ext4 */
- folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
- }
- static void decrypt_bh(struct work_struct *work)
- {
- struct postprocess_bh_ctx *ctx =
- container_of(work, struct postprocess_bh_ctx, work);
- struct buffer_head *bh = ctx->bh;
- int err;
- err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
- bh_offset(bh));
- if (err == 0 && need_fsverity(bh)) {
- /*
- * We use different work queues for decryption and for verity
- * because verity may require reading metadata pages that need
- * decryption, and we shouldn't recurse to the same workqueue.
- */
- INIT_WORK(&ctx->work, verify_bh);
- fsverity_enqueue_verify_work(&ctx->work);
- return;
- }
- end_buffer_async_read(bh, err == 0);
- kfree(ctx);
- }
- /*
- * I/O completion handler for block_read_full_folio() - pages
- * which come unlocked at the end of I/O.
- */
- static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
- {
- struct inode *inode = bh->b_folio->mapping->host;
- bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
- bool verify = need_fsverity(bh);
- /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
- if (uptodate && (decrypt || verify)) {
- struct postprocess_bh_ctx *ctx =
- kmalloc(sizeof(*ctx), GFP_ATOMIC);
- if (ctx) {
- ctx->bh = bh;
- if (decrypt) {
- INIT_WORK(&ctx->work, decrypt_bh);
- fscrypt_enqueue_decrypt_work(&ctx->work);
- } else {
- INIT_WORK(&ctx->work, verify_bh);
- fsverity_enqueue_verify_work(&ctx->work);
- }
- return;
- }
- uptodate = 0;
- }
- end_buffer_async_read(bh, uptodate);
- }
- /*
- * Completion handler for block_write_full_folio() - folios which are unlocked
- * during I/O, and which have the writeback flag cleared upon I/O completion.
- */
- static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
- {
- unsigned long flags;
- struct buffer_head *first;
- struct buffer_head *tmp;
- struct folio *folio;
- BUG_ON(!buffer_async_write(bh));
- folio = bh->b_folio;
- if (uptodate) {
- set_buffer_uptodate(bh);
- } else {
- buffer_io_error(bh, ", lost async page write");
- mark_buffer_write_io_error(bh);
- clear_buffer_uptodate(bh);
- }
- first = folio_buffers(folio);
- spin_lock_irqsave(&first->b_uptodate_lock, flags);
- clear_buffer_async_write(bh);
- unlock_buffer(bh);
- tmp = bh->b_this_page;
- while (tmp != bh) {
- if (buffer_async_write(tmp)) {
- BUG_ON(!buffer_locked(tmp));
- goto still_busy;
- }
- tmp = tmp->b_this_page;
- }
- spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
- folio_end_writeback(folio);
- return;
- still_busy:
- spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
- return;
- }
- /*
- * If a page's buffers are under async read (end_buffer_async_read
- * completion) then there is a possibility that another thread of
- * control could lock one of the buffers after it has completed
- * but while some of the other buffers have not completed. This
- * locked buffer would confuse end_buffer_async_read() into not unlocking
- * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
- * that this buffer is not under async I/O.
- *
- * The page comes unlocked when it has no locked buffer_async buffers
- * left.
- *
- * PageLocked prevents anyone starting new async I/O reads any of
- * the buffers.
- *
- * PageWriteback is used to prevent simultaneous writeout of the same
- * page.
- *
- * PageLocked prevents anyone from starting writeback of a page which is
- * under read I/O (PageWriteback is only ever set against a locked page).
- */
- static void mark_buffer_async_read(struct buffer_head *bh)
- {
- bh->b_end_io = end_buffer_async_read_io;
- set_buffer_async_read(bh);
- }
- static void mark_buffer_async_write_endio(struct buffer_head *bh,
- bh_end_io_t *handler)
- {
- bh->b_end_io = handler;
- set_buffer_async_write(bh);
- }
- void mark_buffer_async_write(struct buffer_head *bh)
- {
- mark_buffer_async_write_endio(bh, end_buffer_async_write);
- }
- EXPORT_SYMBOL(mark_buffer_async_write);
- /*
- * fs/buffer.c contains helper functions for buffer-backed address space's
- * fsync functions. A common requirement for buffer-based filesystems is
- * that certain data from the backing blockdev needs to be written out for
- * a successful fsync(). For example, ext2 indirect blocks need to be
- * written back and waited upon before fsync() returns.
- *
- * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
- * inode_has_buffers() and invalidate_inode_buffers() are provided for the
- * management of a list of dependent buffers at ->i_mapping->i_private_list.
- *
- * Locking is a little subtle: try_to_free_buffers() will remove buffers
- * from their controlling inode's queue when they are being freed. But
- * try_to_free_buffers() will be operating against the *blockdev* mapping
- * at the time, not against the S_ISREG file which depends on those buffers.
- * So the locking for i_private_list is via the i_private_lock in the address_space
- * which backs the buffers. Which is different from the address_space
- * against which the buffers are listed. So for a particular address_space,
- * mapping->i_private_lock does *not* protect mapping->i_private_list! In fact,
- * mapping->i_private_list will always be protected by the backing blockdev's
- * ->i_private_lock.
- *
- * Which introduces a requirement: all buffers on an address_space's
- * ->i_private_list must be from the same address_space: the blockdev's.
- *
- * address_spaces which do not place buffers at ->i_private_list via these
- * utility functions are free to use i_private_lock and i_private_list for
- * whatever they want. The only requirement is that list_empty(i_private_list)
- * be true at clear_inode() time.
- *
- * FIXME: clear_inode should not call invalidate_inode_buffers(). The
- * filesystems should do that. invalidate_inode_buffers() should just go
- * BUG_ON(!list_empty).
- *
- * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
- * take an address_space, not an inode. And it should be called
- * mark_buffer_dirty_fsync() to clearly define why those buffers are being
- * queued up.
- *
- * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
- * list if it is already on a list. Because if the buffer is on a list,
- * it *must* already be on the right one. If not, the filesystem is being
- * silly. This will save a ton of locking. But first we have to ensure
- * that buffers are taken *off* the old inode's list when they are freed
- * (presumably in truncate). That requires careful auditing of all
- * filesystems (do it inside bforget()). It could also be done by bringing
- * b_inode back.
- */
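- /*
-  * Illustrative sketch (not part of the original file): a filesystem that
-  * keeps file data consistent with blockdev-backed metadata might use the
-  * helpers described above roughly like this ("ind_bh" is a hypothetical
-  * indirect-block buffer):
-  *
-  *	mark_buffer_dirty_inode(ind_bh, inode);		when dirtying the metadata
-  *	...
-  *	err = sync_mapping_buffers(inode->i_mapping);	from ->fsync()
-  *
-  * sync_mapping_buffers() then writes out and waits upon everything queued
-  * on inode->i_mapping->i_private_list.
-  */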
- /*
- * The buffer's backing address_space's i_private_lock must be held
- */
- static void __remove_assoc_queue(struct buffer_head *bh)
- {
- list_del_init(&bh->b_assoc_buffers);
- WARN_ON(!bh->b_assoc_map);
- bh->b_assoc_map = NULL;
- }
- int inode_has_buffers(struct inode *inode)
- {
- return !list_empty(&inode->i_data.i_private_list);
- }
- /*
- * osync is designed to support O_SYNC io. It waits synchronously for
- * all already-submitted IO to complete, but does not queue any new
- * writes to the disk.
- *
- * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
- * as you dirty the buffers, and then use osync_buffers_list() to wait for
- * completion. Any other dirty buffers which are not yet queued for
- * write will not be flushed to disk by the osync.
- */
- static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
- {
- struct buffer_head *bh;
- struct list_head *p;
- int err = 0;
- spin_lock(lock);
- repeat:
- list_for_each_prev(p, list) {
- bh = BH_ENTRY(p);
- if (buffer_locked(bh)) {
- get_bh(bh);
- spin_unlock(lock);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- err = -EIO;
- brelse(bh);
- spin_lock(lock);
- goto repeat;
- }
- }
- spin_unlock(lock);
- return err;
- }
- /**
- * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
- * @mapping: the mapping which wants those buffers written
- *
- * Starts I/O against the buffers at mapping->i_private_list, and waits upon
- * that I/O.
- *
- * Basically, this is a convenience function for fsync().
- * @mapping is a file or directory which needs those buffers to be written for
- * a successful fsync().
- */
- int sync_mapping_buffers(struct address_space *mapping)
- {
- struct address_space *buffer_mapping = mapping->i_private_data;
- if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
- return 0;
- return fsync_buffers_list(&buffer_mapping->i_private_lock,
- &mapping->i_private_list);
- }
- EXPORT_SYMBOL(sync_mapping_buffers);
- /**
- * generic_buffers_fsync_noflush - generic buffer fsync implementation
- * for simple filesystems with no inode lock
- *
- * @file: file to synchronize
- * @start: start offset in bytes
- * @end: end offset in bytes (inclusive)
- * @datasync: only synchronize essential metadata if true
- *
- * This is a generic implementation of the fsync method for simple
- * filesystems which track all non-inode metadata in the buffers list
- * hanging off the address_space structure.
- */
- int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
- bool datasync)
- {
- struct inode *inode = file->f_mapping->host;
- int err;
- int ret;
- err = file_write_and_wait_range(file, start, end);
- if (err)
- return err;
- ret = sync_mapping_buffers(inode->i_mapping);
- if (!(inode->i_state & I_DIRTY_ALL))
- goto out;
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
- goto out;
- err = sync_inode_metadata(inode, 1);
- if (ret == 0)
- ret = err;
- out:
- /* check and advance again to catch errors after syncing out buffers */
- err = file_check_and_advance_wb_err(file);
- if (ret == 0)
- ret = err;
- return ret;
- }
- EXPORT_SYMBOL(generic_buffers_fsync_noflush);
- /**
- * generic_buffers_fsync - generic buffer fsync implementation
- * for simple filesystems with no inode lock
- *
- * @file: file to synchronize
- * @start: start offset in bytes
- * @end: end offset in bytes (inclusive)
- * @datasync: only synchronize essential metadata if true
- *
- * This is a generic implementation of the fsync method for simple
- * filesystems which track all non-inode metadata in the buffers list
- * hanging off the address_space structure. This also makes sure that
- * a device cache flush operation is called at the end.
- */
- int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
- bool datasync)
- {
- struct inode *inode = file->f_mapping->host;
- int ret;
- ret = generic_buffers_fsync_noflush(file, start, end, datasync);
- if (!ret)
- ret = blkdev_issue_flush(inode->i_sb->s_bdev);
- return ret;
- }
- EXPORT_SYMBOL(generic_buffers_fsync);
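- /*
-  * Illustrative sketch (not part of the original file): a simple filesystem
-  * with no locking requirements of its own can wire generic_buffers_fsync()
-  * straight into its file_operations. "myfs" is a hypothetical name.
-  *
-  *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
-  *			      int datasync)
-  *	{
-  *		return generic_buffers_fsync(file, start, end, datasync);
-  *	}
-  *
-  * and then set .fsync = myfs_fsync in the file_operations.
-  */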
- /*
- * Called when we've recently written block `bblock', and it is known that
- * `bblock' was for a buffer_boundary() buffer. This means that the block at
- * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
- * dirty, schedule it for IO. So that indirects merge nicely with their data.
- */
- void write_boundary_block(struct block_device *bdev,
- sector_t bblock, unsigned blocksize)
- {
- struct buffer_head *bh;
- bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
- if (bh) {
- if (buffer_dirty(bh))
- write_dirty_buffer(bh, 0);
- put_bh(bh);
- }
- }
- void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
- {
- struct address_space *mapping = inode->i_mapping;
- struct address_space *buffer_mapping = bh->b_folio->mapping;
- mark_buffer_dirty(bh);
- if (!mapping->i_private_data) {
- mapping->i_private_data = buffer_mapping;
- } else {
- BUG_ON(mapping->i_private_data != buffer_mapping);
- }
- if (!bh->b_assoc_map) {
- spin_lock(&buffer_mapping->i_private_lock);
- list_move_tail(&bh->b_assoc_buffers,
- &mapping->i_private_list);
- bh->b_assoc_map = mapping;
- spin_unlock(&buffer_mapping->i_private_lock);
- }
- }
- EXPORT_SYMBOL(mark_buffer_dirty_inode);
- /**
- * block_dirty_folio - Mark a folio as dirty.
- * @mapping: The address space containing this folio.
- * @folio: The folio to mark dirty.
- *
- * Filesystems which use buffer_heads can use this function as their
- * ->dirty_folio implementation. Some filesystems need to do a little
- * work before calling this function. Filesystems which do not use
- * buffer_heads should call filemap_dirty_folio() instead.
- *
- * If the folio has buffers, the uptodate buffers are set dirty, to
- * preserve dirty-state coherency between the folio and the buffers.
- * Buffers added to a dirty folio are created dirty.
- *
- * The buffers are dirtied before the folio is dirtied. There's a small
- * race window in which writeback may see the folio cleanness but not the
- * buffer dirtiness. That's fine. If this code were to set the folio
- * dirty before the buffers, writeback could clear the folio dirty flag,
- * see a bunch of clean buffers and we'd end up with dirty buffers/clean
- * folio on the dirty folio list.
- *
- * We use i_private_lock to lock against try_to_free_buffers() while
- * using the folio's buffer list. This also prevents clean buffers
- * being added to the folio after it was set dirty.
- *
- * Context: May only be called from process context. Does not sleep.
- * Caller must ensure that @folio cannot be truncated during this call,
- * typically by holding the folio lock or having a page in the folio
- * mapped and holding the page table lock.
- *
- * Return: True if the folio was dirtied; false if it was already dirtied.
- */
- bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
- {
- struct buffer_head *head;
- bool newly_dirty;
- spin_lock(&mapping->i_private_lock);
- head = folio_buffers(folio);
- if (head) {
- struct buffer_head *bh = head;
- do {
- set_buffer_dirty(bh);
- bh = bh->b_this_page;
- } while (bh != head);
- }
- /*
- * Lock out page's memcg migration to keep PageDirty
- * synchronized with per-memcg dirty page counters.
- */
- folio_memcg_lock(folio);
- newly_dirty = !folio_test_set_dirty(folio);
- spin_unlock(&mapping->i_private_lock);
- if (newly_dirty)
- __folio_mark_dirty(folio, mapping, 1);
- folio_memcg_unlock(folio);
- if (newly_dirty)
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- return newly_dirty;
- }
- EXPORT_SYMBOL(block_dirty_folio);
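- /*
-  * Illustrative sketch (not part of the original file): buffer_head-based
-  * filesystems typically point ->dirty_folio at this helper from their
-  * address_space_operations. "myfs_aops" is a hypothetical name; the other
-  * entries shown are the usual buffer-backed counterparts.
-  *
-  *	static const struct address_space_operations myfs_aops = {
-  *		.dirty_folio		= block_dirty_folio,
-  *		.invalidate_folio	= block_invalidate_folio,
-  *		...
-  *	};
-  */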
- /*
- * Write out and wait upon a list of buffers.
- *
- * We have conflicting pressures: we want to make sure that all
- * initially dirty buffers get waited on, but that any subsequently
- * dirtied buffers don't. After all, we don't want fsync to last
- * forever if somebody is actively writing to the file.
- *
- * Do this in two main stages: first we copy dirty buffers to a
- * temporary inode list, queueing the writes as we go. Then we clean
- * up, waiting for those writes to complete.
- *
- * During this second stage, any subsequent updates to the file may end
- * up refiling the buffer on the original inode's dirty list again, so
- * there is a chance we will end up with a buffer queued for write but
- * not yet completed on that list. So, as a final cleanup we go through
- * the osync code to catch these locked, dirty buffers without requeuing
- * any newly dirty buffers for write.
- */
- static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
- {
- struct buffer_head *bh;
- struct address_space *mapping;
- int err = 0, err2;
- struct blk_plug plug;
- LIST_HEAD(tmp);
- blk_start_plug(&plug);
- spin_lock(lock);
- while (!list_empty(list)) {
- bh = BH_ENTRY(list->next);
- mapping = bh->b_assoc_map;
- __remove_assoc_queue(bh);
- /* Avoid race with mark_buffer_dirty_inode() which does
- * a lockless check and we rely on seeing the dirty bit */
- smp_mb();
- if (buffer_dirty(bh) || buffer_locked(bh)) {
- list_add(&bh->b_assoc_buffers, &tmp);
- bh->b_assoc_map = mapping;
- if (buffer_dirty(bh)) {
- get_bh(bh);
- spin_unlock(lock);
- /*
- * Ensure any pending I/O completes so that
- * write_dirty_buffer() actually writes the
- * current contents - it is a noop if I/O is
- * still in flight on potentially older
- * contents.
- */
- write_dirty_buffer(bh, REQ_SYNC);
- /*
- * Kick off IO for the previous mapping. Note
- * that we will not run the very last mapping,
- * wait_on_buffer() will do that for us
- * through sync_buffer().
- */
- brelse(bh);
- spin_lock(lock);
- }
- }
- }
- spin_unlock(lock);
- blk_finish_plug(&plug);
- spin_lock(lock);
- while (!list_empty(&tmp)) {
- bh = BH_ENTRY(tmp.prev);
- get_bh(bh);
- mapping = bh->b_assoc_map;
- __remove_assoc_queue(bh);
- /* Avoid race with mark_buffer_dirty_inode() which does
- * a lockless check and we rely on seeing the dirty bit */
- smp_mb();
- if (buffer_dirty(bh)) {
- list_add(&bh->b_assoc_buffers,
- &mapping->i_private_list);
- bh->b_assoc_map = mapping;
- }
- spin_unlock(lock);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- err = -EIO;
- brelse(bh);
- spin_lock(lock);
- }
-
- spin_unlock(lock);
- err2 = osync_buffers_list(lock, list);
- if (err)
- return err;
- else
- return err2;
- }
- /*
- * Invalidate any and all dirty buffers on a given inode. We are
- * probably unmounting the fs, but that doesn't mean we have already
- * done a sync(). Just drop the buffers from the inode list.
- *
- * NOTE: we take the inode's blockdev's mapping's i_private_lock. Which
- * assumes that all the buffers are against the blockdev. Not true
- * for reiserfs.
- */
- void invalidate_inode_buffers(struct inode *inode)
- {
- if (inode_has_buffers(inode)) {
- struct address_space *mapping = &inode->i_data;
- struct list_head *list = &mapping->i_private_list;
- struct address_space *buffer_mapping = mapping->i_private_data;
- spin_lock(&buffer_mapping->i_private_lock);
- while (!list_empty(list))
- __remove_assoc_queue(BH_ENTRY(list->next));
- spin_unlock(&buffer_mapping->i_private_lock);
- }
- }
- EXPORT_SYMBOL(invalidate_inode_buffers);
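- /*
-  * Illustrative sketch (not part of the original file): this is typically
-  * called from a filesystem's ->evict_inode() after the pagecache has been
-  * truncated. "myfs_evict_inode" is a hypothetical name.
-  *
-  *	static void myfs_evict_inode(struct inode *inode)
-  *	{
-  *		truncate_inode_pages_final(&inode->i_data);
-  *		invalidate_inode_buffers(inode);
-  *		clear_inode(inode);
-  *	}
-  */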
- /*
- * Remove any clean buffers from the inode's buffer list. This is called
- * when we're trying to free the inode itself. Those buffers can pin it.
- *
- * Returns true if all buffers were removed.
- */
- int remove_inode_buffers(struct inode *inode)
- {
- int ret = 1;
- if (inode_has_buffers(inode)) {
- struct address_space *mapping = &inode->i_data;
- struct list_head *list = &mapping->i_private_list;
- struct address_space *buffer_mapping = mapping->i_private_data;
- spin_lock(&buffer_mapping->i_private_lock);
- while (!list_empty(list)) {
- struct buffer_head *bh = BH_ENTRY(list->next);
- if (buffer_dirty(bh)) {
- ret = 0;
- break;
- }
- __remove_assoc_queue(bh);
- }
- spin_unlock(&buffer_mapping->i_private_lock);
- }
- return ret;
- }
- /*
- * Create the appropriate buffers when given a folio for the data area and
- * the size of each buffer. Use the bh->b_this_page linked list to
- * follow the buffers created. Return NULL if unable to create more
- * buffers.
- *
- * The retry flag is used to differentiate async IO (paging, swapping)
- * which may not fail from ordinary buffer allocations.
- */
- struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
- gfp_t gfp)
- {
- struct buffer_head *bh, *head;
- long offset;
- struct mem_cgroup *memcg, *old_memcg;
- /* The folio lock pins the memcg */
- memcg = folio_memcg(folio);
- old_memcg = set_active_memcg(memcg);
- head = NULL;
- offset = folio_size(folio);
- while ((offset -= size) >= 0) {
- bh = alloc_buffer_head(gfp);
- if (!bh)
- goto no_grow;
- bh->b_this_page = head;
- bh->b_blocknr = -1;
- head = bh;
- bh->b_size = size;
- /* Link the buffer to its folio */
- folio_set_bh(bh, folio, offset);
- }
- out:
- set_active_memcg(old_memcg);
- return head;
- /*
- * In case anything failed, we just free everything we got.
- */
- no_grow:
- if (head) {
- do {
- bh = head;
- head = head->b_this_page;
- free_buffer_head(bh);
- } while (head);
- }
- goto out;
- }
- EXPORT_SYMBOL_GPL(folio_alloc_buffers);
- struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
- {
- gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
- return folio_alloc_buffers(page_folio(page), size, gfp);
- }
- EXPORT_SYMBOL_GPL(alloc_page_buffers);
- static inline void link_dev_buffers(struct folio *folio,
- struct buffer_head *head)
- {
- struct buffer_head *bh, *tail;
- bh = head;
- do {
- tail = bh;
- bh = bh->b_this_page;
- } while (bh);
- tail->b_this_page = head;
- folio_attach_private(folio, head);
- }
- static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
- {
- sector_t retval = ~((sector_t)0);
- loff_t sz = bdev_nr_bytes(bdev);
- if (sz) {
- unsigned int sizebits = blksize_bits(size);
- retval = (sz >> sizebits);
- }
- return retval;
- }
- /*
- * Initialise the state of a blockdev folio's buffers.
- */
- static sector_t folio_init_buffers(struct folio *folio,
- struct block_device *bdev, unsigned size)
- {
- struct buffer_head *head = folio_buffers(folio);
- struct buffer_head *bh = head;
- bool uptodate = folio_test_uptodate(folio);
- sector_t block = div_u64(folio_pos(folio), size);
- sector_t end_block = blkdev_max_block(bdev, size);
- do {
- if (!buffer_mapped(bh)) {
- bh->b_end_io = NULL;
- bh->b_private = NULL;
- bh->b_bdev = bdev;
- bh->b_blocknr = block;
- if (uptodate)
- set_buffer_uptodate(bh);
- if (block < end_block)
- set_buffer_mapped(bh);
- }
- block++;
- bh = bh->b_this_page;
- } while (bh != head);
- /*
- * Caller needs to validate requested block against end of device.
- */
- return end_block;
- }
- /*
- * Create the page-cache folio that contains the requested block.
- *
- * This is used purely for blockdev mappings.
- *
- * Returns false if we have a failure which cannot be cured by retrying
- * without sleeping. Returns true if we succeeded, or the caller should retry.
- */
- static bool grow_dev_folio(struct block_device *bdev, sector_t block,
- pgoff_t index, unsigned size, gfp_t gfp)
- {
- struct address_space *mapping = bdev->bd_mapping;
- struct folio *folio;
- struct buffer_head *bh;
- sector_t end_block = 0;
- folio = __filemap_get_folio(mapping, index,
- FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
- if (IS_ERR(folio))
- return false;
- bh = folio_buffers(folio);
- if (bh) {
- if (bh->b_size == size) {
- end_block = folio_init_buffers(folio, bdev, size);
- goto unlock;
- }
- /*
- * Retrying may succeed; for example the folio may finish
- * writeback, or buffers may be cleaned. This should not
- * happen very often; maybe we have old buffers attached to
- * this blockdev's page cache and we're trying to change
- * the block size?
- */
- if (!try_to_free_buffers(folio)) {
- end_block = ~0ULL;
- goto unlock;
- }
- }
- bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
- if (!bh)
- goto unlock;
- /*
- * Link the folio to the buffers and initialise them. Take the
- * lock to be atomic wrt __find_get_block(), which does not
- * run under the folio lock.
- */
- spin_lock(&mapping->i_private_lock);
- link_dev_buffers(folio, bh);
- end_block = folio_init_buffers(folio, bdev, size);
- spin_unlock(&mapping->i_private_lock);
- unlock:
- folio_unlock(folio);
- folio_put(folio);
- return block < end_block;
- }
- /*
- * Create buffers for the specified block device block's folio. If
- * that folio was dirty, the buffers are set dirty also. Returns false
- * if we've hit a permanent error.
- */
- static bool grow_buffers(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp)
- {
- loff_t pos;
- /*
- * Check for a block which lies outside our maximum possible
- * pagecache index.
- */
- if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
- printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
- __func__, (unsigned long long)block,
- bdev);
- return false;
- }
- /* Create a folio with the proper size buffers */
- return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
- }
- static struct buffer_head *
- __getblk_slow(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp)
- {
- /* Size must be multiple of hard sectorsize */
- if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
- (size < 512 || size > PAGE_SIZE))) {
- printk(KERN_ERR "getblk(): invalid block size %d requested\n",
- size);
- printk(KERN_ERR "logical block size: %d\n",
- bdev_logical_block_size(bdev));
- dump_stack();
- return NULL;
- }
- for (;;) {
- struct buffer_head *bh;
- bh = __find_get_block(bdev, block, size);
- if (bh)
- return bh;
- if (!grow_buffers(bdev, block, size, gfp))
- return NULL;
- }
- }
- /*
- * The relationship between dirty buffers and dirty pages:
- *
- * Whenever a page has any dirty buffers, the page's dirty bit is set, and
- * the page is tagged dirty in the page cache.
- *
- * At all times, the dirtiness of the buffers represents the dirtiness of
- * subsections of the page. If the page has buffers, the page dirty bit is
- * merely a hint about the true dirty state.
- *
- * When a page is set dirty in its entirety, all its buffers are marked dirty
- * (if the page has buffers).
- *
- * When a buffer is marked dirty, its page is dirtied, but the page's other
- * buffers are not.
- *
- * Also, when blockdev buffers are explicitly read with bread(), they
- * individually become uptodate. But their backing page remains not
- * uptodate - even if all of its buffers are uptodate. A subsequent
- * block_read_full_folio() against that folio will discover all the uptodate
- * buffers, will set the folio uptodate and will perform no I/O.
- */
- /**
- * mark_buffer_dirty - mark a buffer_head as needing writeout
- * @bh: the buffer_head to mark dirty
- *
- * mark_buffer_dirty() will set the dirty bit against the buffer, then set
- * its backing page dirty, then tag the page as dirty in the page cache
- * and then attach the address_space's inode to its superblock's dirty
- * inode list.
- *
- * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->i_private_lock,
- * i_pages lock and mapping->host->i_lock.
- */
- void mark_buffer_dirty(struct buffer_head *bh)
- {
- WARN_ON_ONCE(!buffer_uptodate(bh));
- trace_block_dirty_buffer(bh);
- /*
- * Very *carefully* optimize the it-is-already-dirty case.
- *
- * Don't let the final "is it dirty" escape to before we
- * perhaps modified the buffer.
- */
- if (buffer_dirty(bh)) {
- smp_mb();
- if (buffer_dirty(bh))
- return;
- }
- if (!test_set_buffer_dirty(bh)) {
- struct folio *folio = bh->b_folio;
- struct address_space *mapping = NULL;
- folio_memcg_lock(folio);
- if (!folio_test_set_dirty(folio)) {
- mapping = folio->mapping;
- if (mapping)
- __folio_mark_dirty(folio, mapping, 0);
- }
- folio_memcg_unlock(folio);
- if (mapping)
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- }
- }
- EXPORT_SYMBOL(mark_buffer_dirty);
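- /*
-  * Illustrative sketch (not part of the original file): one common pattern
-  * for updating on-disk metadata through the buffer cache. Error handling
-  * is elided and the names are hypothetical.
-  *
-  *	bh = sb_bread(sb, block);		read; returns an uptodate buffer
-  *	lock_buffer(bh);
-  *	memcpy(bh->b_data + offset, src, len);
-  *	mark_buffer_dirty(bh);			queue it for writeback
-  *	unlock_buffer(bh);
-  *	brelse(bh);
-  */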
- void mark_buffer_write_io_error(struct buffer_head *bh)
- {
- set_buffer_write_io_error(bh);
- /* FIXME: do we need to set this in both places? */
- if (bh->b_folio && bh->b_folio->mapping)
- mapping_set_error(bh->b_folio->mapping, -EIO);
- if (bh->b_assoc_map) {
- mapping_set_error(bh->b_assoc_map, -EIO);
- errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
- }
- }
- EXPORT_SYMBOL(mark_buffer_write_io_error);
- /**
- * __brelse - Release a buffer.
- * @bh: The buffer to release.
- *
- * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
- */
- void __brelse(struct buffer_head *bh)
- {
- if (atomic_read(&bh->b_count)) {
- put_bh(bh);
- return;
- }
- WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
- }
- EXPORT_SYMBOL(__brelse);
- /**
- * __bforget - Discard any dirty data in a buffer.
- * @bh: The buffer to forget.
- *
- * This variant of bforget() can be called if @bh is guaranteed to not
- * be NULL.
- */
- void __bforget(struct buffer_head *bh)
- {
- clear_buffer_dirty(bh);
- if (bh->b_assoc_map) {
- struct address_space *buffer_mapping = bh->b_folio->mapping;
- spin_lock(&buffer_mapping->i_private_lock);
- list_del_init(&bh->b_assoc_buffers);
- bh->b_assoc_map = NULL;
- spin_unlock(&buffer_mapping->i_private_lock);
- }
- __brelse(bh);
- }
- EXPORT_SYMBOL(__bforget);
- static struct buffer_head *__bread_slow(struct buffer_head *bh)
- {
- lock_buffer(bh);
- if (buffer_uptodate(bh)) {
- unlock_buffer(bh);
- return bh;
- } else {
- get_bh(bh);
- bh->b_end_io = end_buffer_read_sync;
- submit_bh(REQ_OP_READ, bh);
- wait_on_buffer(bh);
- if (buffer_uptodate(bh))
- return bh;
- }
- brelse(bh);
- return NULL;
- }
- /*
- * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
- * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
- * refcount elevated by one when they're in an LRU. A buffer can only appear
- * once in a particular CPU's LRU. A single buffer can be present in multiple
- * CPU's LRUs at the same time.
- *
- * This is a transparent caching front-end to sb_bread(), sb_getblk() and
- * sb_find_get_block().
- *
- * The LRUs themselves only need locking against invalidate_bh_lrus. We use
- * a local interrupt disable for that.
- */
- #define BH_LRU_SIZE 16
- struct bh_lru {
- struct buffer_head *bhs[BH_LRU_SIZE];
- };
- static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
- #ifdef CONFIG_SMP
- #define bh_lru_lock() local_irq_disable()
- #define bh_lru_unlock() local_irq_enable()
- #else
- #define bh_lru_lock() preempt_disable()
- #define bh_lru_unlock() preempt_enable()
- #endif
- static inline void check_irqs_on(void)
- {
- #ifdef irqs_disabled
- BUG_ON(irqs_disabled());
- #endif
- }
- /*
- * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is
- * inserted at the front, and the buffer_head at the back if any is evicted.
- * Or, if already in the LRU it is moved to the front.
- */
- static void bh_lru_install(struct buffer_head *bh)
- {
- struct buffer_head *evictee = bh;
- struct bh_lru *b;
- int i;
- check_irqs_on();
- bh_lru_lock();
- /*
- * The refcount of a buffer_head in the bh_lru prevents the attached
- * folio from being dropped (i.e., by try_to_free_buffers()), which
- * could cause page migration to fail.
- * Skip putting the incoming bh into the bh_lru until migration is done.
- */
- if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
- bh_lru_unlock();
- return;
- }
- b = this_cpu_ptr(&bh_lrus);
- for (i = 0; i < BH_LRU_SIZE; i++) {
- swap(evictee, b->bhs[i]);
- if (evictee == bh) {
- bh_lru_unlock();
- return;
- }
- }
- get_bh(bh);
- bh_lru_unlock();
- brelse(evictee);
- }
- /*
- * Look up the bh in this cpu's LRU. If it's there, move it to the head.
- */
- static struct buffer_head *
- lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
- {
- struct buffer_head *ret = NULL;
- unsigned int i;
- check_irqs_on();
- bh_lru_lock();
- if (cpu_is_isolated(smp_processor_id())) {
- bh_lru_unlock();
- return NULL;
- }
- for (i = 0; i < BH_LRU_SIZE; i++) {
- struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
- if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
- bh->b_size == size) {
- if (i) {
- while (i) {
- __this_cpu_write(bh_lrus.bhs[i],
- __this_cpu_read(bh_lrus.bhs[i - 1]));
- i--;
- }
- __this_cpu_write(bh_lrus.bhs[0], bh);
- }
- get_bh(bh);
- ret = bh;
- break;
- }
- }
- bh_lru_unlock();
- return ret;
- }
- /*
- * Perform a pagecache lookup for the matching buffer. If it's there, refresh
- * it in the LRU and mark it as accessed. If it is not present then return
- * NULL
- */
- static struct buffer_head *
- find_get_block_common(struct block_device *bdev, sector_t block,
- unsigned size, bool atomic)
- {
- struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
- if (bh == NULL) {
- /* __find_get_block_slow will mark the page accessed */
- bh = __find_get_block_slow(bdev, block, atomic);
- if (bh)
- bh_lru_install(bh);
- } else
- touch_buffer(bh);
- return bh;
- }
- struct buffer_head *
- __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
- {
- return find_get_block_common(bdev, block, size, true);
- }
- EXPORT_SYMBOL(__find_get_block);
- /* same as __find_get_block() but allows sleeping contexts */
- struct buffer_head *
- __find_get_block_nonatomic(struct block_device *bdev, sector_t block,
- unsigned size)
- {
- return find_get_block_common(bdev, block, size, false);
- }
- EXPORT_SYMBOL(__find_get_block_nonatomic);
- /**
- * bdev_getblk - Get a buffer_head in a block device's buffer cache.
- * @bdev: The block device.
- * @block: The block number.
- * @size: The size of buffer_heads for this @bdev.
- * @gfp: The memory allocation flags to use.
- *
- * The returned buffer head has its reference count incremented, but is
- * not locked. The caller should call brelse() when it has finished
- * with the buffer. The buffer may not be uptodate. If needed, the
- * caller can bring it uptodate either by reading it or overwriting it.
- *
- * Return: The buffer head, or NULL if memory could not be allocated.
- */
- struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp)
- {
- struct buffer_head *bh;
- if (gfpflags_allow_blocking(gfp))
- bh = __find_get_block_nonatomic(bdev, block, size);
- else
- bh = __find_get_block(bdev, block, size);
- might_alloc(gfp);
- if (bh)
- return bh;
- return __getblk_slow(bdev, block, size, gfp);
- }
- EXPORT_SYMBOL(bdev_getblk);
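- /*
-  * Illustrative sketch (not part of the original file): when a block is
-  * about to be completely overwritten there is no need to read it first;
-  * the caller brings the buffer uptodate itself, as described above. The
-  * gfp choice and the missing error handling are purely illustrative.
-  *
-  *	bh = bdev_getblk(bdev, block, blocksize, GFP_NOFS | __GFP_MOVABLE);
-  *	if (!bh)
-  *		return -ENOMEM;
-  *	lock_buffer(bh);
-  *	memset(bh->b_data, 0, bh->b_size);
-  *	set_buffer_uptodate(bh);
-  *	mark_buffer_dirty(bh);
-  *	unlock_buffer(bh);
-  *	brelse(bh);
-  */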
- /*
- * Do async read-ahead on a buffer.
- */
- void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
- {
- struct buffer_head *bh = bdev_getblk(bdev, block, size,
- GFP_NOWAIT | __GFP_MOVABLE);
- if (likely(bh)) {
- bh_readahead(bh, REQ_RAHEAD);
- brelse(bh);
- }
- }
- EXPORT_SYMBOL(__breadahead);
- /**
- * __bread_gfp() - Read a block.
- * @bdev: The block device to read from.
- * @block: Block number in units of block size.
- * @size: The block size of this device in bytes.
- * @gfp: Not page allocation flags; see below.
- *
- * You are not expected to call this function. You should use one of
- * sb_bread(), sb_bread_unmovable() or __bread().
- *
- * Read a specified block, and return the buffer head that refers to it.
- * If @gfp is 0, the memory will be allocated using the block device's
- * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be
- * allocated from a movable area. Do not pass in a complete set of
- * GFP flags.
- *
- * The returned buffer head has its refcount increased. The caller should
- * call brelse() when it has finished with the buffer.
- *
- * Context: May sleep waiting for I/O.
- * Return: NULL if the block was unreadable.
- */
- struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp)
- {
- struct buffer_head *bh;
- gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
- /*
- * Prefer looping in the allocator rather than here, at least that
- * code knows what it's doing.
- */
- gfp |= __GFP_NOFAIL;
- bh = bdev_getblk(bdev, block, size, gfp);
- if (likely(bh) && !buffer_uptodate(bh))
- bh = __bread_slow(bh);
- return bh;
- }
- EXPORT_SYMBOL(__bread_gfp);
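- /*
-  * Illustrative sketch (not part of the original file): most callers go
-  * through the sb_bread() wrapper rather than calling __bread_gfp()
-  * directly. "MYFS_SB_BLOCK" is a hypothetical block number.
-  *
-  *	struct buffer_head *bh = sb_bread(sb, MYFS_SB_BLOCK);
-  *
-  *	if (!bh)
-  *		return -EIO;
-  *	bh->b_data now holds sb->s_blocksize bytes of uptodate data
-  *	brelse(bh);
-  */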
- static void __invalidate_bh_lrus(struct bh_lru *b)
- {
- int i;
- for (i = 0; i < BH_LRU_SIZE; i++) {
- brelse(b->bhs[i]);
- b->bhs[i] = NULL;
- }
- }
- /*
- * invalidate_bh_lrus() is called rarely - but not only at unmount.
- * This doesn't race because it runs in each cpu either in irq
- * or with preempt disabled.
- */
- static void invalidate_bh_lru(void *arg)
- {
- struct bh_lru *b = &get_cpu_var(bh_lrus);
- __invalidate_bh_lrus(b);
- put_cpu_var(bh_lrus);
- }
- bool has_bh_in_lru(int cpu, void *dummy)
- {
- struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
- int i;
-
- for (i = 0; i < BH_LRU_SIZE; i++) {
- if (b->bhs[i])
- return true;
- }
- return false;
- }
- void invalidate_bh_lrus(void)
- {
- on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
- }
- EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
- /*
- * It's called from workqueue context so we need a bh_lru_lock to close
- * the race with preemption/irq.
- */
- void invalidate_bh_lrus_cpu(void)
- {
- struct bh_lru *b;
- bh_lru_lock();
- b = this_cpu_ptr(&bh_lrus);
- __invalidate_bh_lrus(b);
- bh_lru_unlock();
- }
- void folio_set_bh(struct buffer_head *bh, struct folio *folio,
- unsigned long offset)
- {
- bh->b_folio = folio;
- BUG_ON(offset >= folio_size(folio));
- if (folio_test_highmem(folio))
- /*
- * This catches illegal uses and preserves the offset:
- */
- bh->b_data = (char *)(0 + offset);
- else
- bh->b_data = folio_address(folio) + offset;
- }
- EXPORT_SYMBOL(folio_set_bh);
- /*
- * Called when truncating a buffer on a page completely.
- */
- /* Bits that are cleared during an invalidate */
- #define BUFFER_FLAGS_DISCARD \
- (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
- 1 << BH_Delay | 1 << BH_Unwritten)
- static void discard_buffer(struct buffer_head * bh)
- {
- unsigned long b_state;
- lock_buffer(bh);
- clear_buffer_dirty(bh);
- bh->b_bdev = NULL;
- b_state = READ_ONCE(bh->b_state);
- do {
- } while (!try_cmpxchg(&bh->b_state, &b_state,
- b_state & ~BUFFER_FLAGS_DISCARD));
- unlock_buffer(bh);
- }
- /**
- * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
- * @folio: The folio which is affected.
- * @offset: start of the range to invalidate
- * @length: length of the range to invalidate
- *
- * block_invalidate_folio() is called when all or part of the folio has been
- * invalidated by a truncate operation.
- *
- * block_invalidate_folio() does not have to release all buffers, but it must
- * ensure that no dirty buffer is left outside @offset and that no I/O
- * is underway against any of the blocks which are outside the truncation
- * point, because the caller is about to free (and possibly reuse) those
- * blocks on-disk.
- */
- void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
- {
- struct buffer_head *head, *bh, *next;
- size_t curr_off = 0;
- size_t stop = length + offset;
- BUG_ON(!folio_test_locked(folio));
- /*
- * Check for overflow
- */
- BUG_ON(stop > folio_size(folio) || stop < length);
- head = folio_buffers(folio);
- if (!head)
- return;
- bh = head;
- do {
- size_t next_off = curr_off + bh->b_size;
- next = bh->b_this_page;
- /*
- * Are we still fully in range ?
- */
- if (next_off > stop)
- goto out;
- /*
- * is this block fully invalidated?
- */
- if (offset <= curr_off)
- discard_buffer(bh);
- curr_off = next_off;
- bh = next;
- } while (bh != head);
- /*
- * We release buffers only if the entire folio is being invalidated.
- * The get_block cached value has been unconditionally invalidated,
- * so real IO is not possible anymore.
- */
- if (length == folio_size(folio))
- filemap_release_folio(folio, 0);
- out:
- return;
- }
- EXPORT_SYMBOL(block_invalidate_folio);
- /*
- * We attach and possibly dirty the buffers atomically wrt
- * block_dirty_folio() via i_private_lock. try_to_free_buffers
- * is already excluded via the folio lock.
- */
- struct buffer_head *create_empty_buffers(struct folio *folio,
- unsigned long blocksize, unsigned long b_state)
- {
- struct buffer_head *bh, *head, *tail;
- gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
- head = folio_alloc_buffers(folio, blocksize, gfp);
- bh = head;
- do {
- bh->b_state |= b_state;
- tail = bh;
- bh = bh->b_this_page;
- } while (bh);
- tail->b_this_page = head;
- spin_lock(&folio->mapping->i_private_lock);
- if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
- bh = head;
- do {
- if (folio_test_dirty(folio))
- set_buffer_dirty(bh);
- if (folio_test_uptodate(folio))
- set_buffer_uptodate(bh);
- bh = bh->b_this_page;
- } while (bh != head);
- }
- folio_attach_private(folio, head);
- spin_unlock(&folio->mapping->i_private_lock);
- return head;
- }
- EXPORT_SYMBOL(create_empty_buffers);
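- /*
-  * Illustrative sketch: callers that need per-block state on a folio
-  * which has none yet typically do the check-then-create dance (this is
-  * what folio_create_buffers() below does for this file):
-  *
-  *	head = folio_buffers(folio);
-  *	if (!head)
-  *		head = create_empty_buffers(folio, i_blocksize(inode), 0);
-  */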
- /**
- * clean_bdev_aliases: clean a range of buffers in block device
- * @bdev: Block device to clean buffers in
- * @block: Start of a range of blocks to clean
- * @len: Number of blocks to clean
- *
- * We are taking a range of blocks for data and we don't want writeback of any
- * buffer-cache aliases from the moment this function returns until the moment
- * something explicitly marks the buffer dirty again (hopefully that will not
- * happen until we free that block ;-) We don't even need to mark it
- * not-uptodate - nobody can expect anything from a newly allocated buffer
- * anyway. We used to use unmap_buffer() for such invalidation, but that was
- * wrong. We definitely don't want to mark the alias unmapped, for example - it
- * would confuse anyone who might pick it up with bread() afterwards...
- *
- * Also note that bforget() doesn't lock the buffer, so there can be writeout
- * I/O going on against recently-freed buffers. We don't wait on that I/O in
- * bforget() - it's more efficient to wait on the I/O only if we really need
- * to. That happens here.
- */
- void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
- {
- struct address_space *bd_mapping = bdev->bd_mapping;
- const int blkbits = bd_mapping->host->i_blkbits;
- struct folio_batch fbatch;
- pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
- pgoff_t end;
- int i, count;
- struct buffer_head *bh;
- struct buffer_head *head;
- end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
- folio_batch_init(&fbatch);
- while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
- count = folio_batch_count(&fbatch);
- for (i = 0; i < count; i++) {
- struct folio *folio = fbatch.folios[i];
- if (!folio_buffers(folio))
- continue;
- /*
- * We use folio lock instead of bd_mapping->i_private_lock
- * to pin buffers here since we can afford to sleep and
- * it scales better than a global spinlock lock.
- */
- folio_lock(folio);
- /* Recheck when the folio is locked which pins bhs */
- head = folio_buffers(folio);
- if (!head)
- goto unlock_page;
- bh = head;
- do {
- if (!buffer_mapped(bh) || (bh->b_blocknr < block))
- goto next;
- if (bh->b_blocknr >= block + len)
- break;
- clear_buffer_dirty(bh);
- wait_on_buffer(bh);
- clear_buffer_req(bh);
- next:
- bh = bh->b_this_page;
- } while (bh != head);
- unlock_page:
- folio_unlock(folio);
- }
- folio_batch_release(&fbatch);
- cond_resched();
- /* End of range already reached? */
- if (index > end || !index)
- break;
- }
- }
- EXPORT_SYMBOL(clean_bdev_aliases);
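- /*
-  * Illustrative sketch: most callers only need to invalidate the alias of
-  * a single, newly allocated block and use the clean_bdev_bh_alias()
-  * wrapper for that, typically right after get_block() reports a new
-  * mapping:
-  *
-  *	if (buffer_new(bh))
-  *		clean_bdev_bh_alias(bh);
-  */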
- static struct buffer_head *folio_create_buffers(struct folio *folio,
- struct inode *inode,
- unsigned int b_state)
- {
- struct buffer_head *bh;
- BUG_ON(!folio_test_locked(folio));
- bh = folio_buffers(folio);
- if (!bh)
- bh = create_empty_buffers(folio,
- 1 << READ_ONCE(inode->i_blkbits), b_state);
- return bh;
- }
- /*
- * NOTE! All mapped/uptodate combinations are valid:
- *
- * Mapped Uptodate Meaning
- *
- * No No "unknown" - must do get_block()
- * No Yes "hole" - zero-filled
- * Yes No "allocated" - allocated on disk, not read in
- * Yes Yes "valid" - allocated and up-to-date in memory.
- *
- * "Dirty" is valid only with the last case (mapped+uptodate).
- */
- /*
- * While block_write_full_folio is writing back the dirty buffers under
- * the page lock, whoever dirtied the buffers may decide to clean them
- * again at any time. We handle that by only looking at the buffer
- * state inside lock_buffer().
- *
- * If block_write_full_folio() is called for regular writeback
- * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
- * locked buffer. This only can happen if someone has written the buffer
- * directly, with submit_bh(). At the address_space level PageWriteback
- * prevents this contention from occurring.
- *
- * If block_write_full_folio() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
- * causes the writes to be flagged as synchronous writes.
- */
- int __block_write_full_folio(struct inode *inode, struct folio *folio,
- get_block_t *get_block, struct writeback_control *wbc)
- {
- int err;
- sector_t block;
- sector_t last_block;
- struct buffer_head *bh, *head;
- size_t blocksize;
- int nr_underway = 0;
- blk_opf_t write_flags = wbc_to_write_flags(wbc);
- head = folio_create_buffers(folio, inode,
- (1 << BH_Dirty) | (1 << BH_Uptodate));
- /*
- * Be very careful. We have no exclusion from block_dirty_folio
- * here, and the (potentially unmapped) buffers may become dirty at
- * any time. If a buffer becomes dirty here after we've inspected it
- * then we just miss that fact, and the folio stays dirty.
- *
- * Buffers outside i_size may be dirtied by block_dirty_folio;
- * handle that here by just cleaning them.
- */
- bh = head;
- blocksize = bh->b_size;
- block = div_u64(folio_pos(folio), blocksize);
- last_block = div_u64(i_size_read(inode) - 1, blocksize);
- /*
- * Get all the dirty buffers mapped to disk addresses and
- * handle any aliases from the underlying blockdev's mapping.
- */
- do {
- if (block > last_block) {
- /*
- * mapped buffers outside i_size will occur, because
- * this folio can be outside i_size when there is a
- * truncate in progress.
- */
- /*
- * The buffer was zeroed by block_write_full_folio()
- */
- clear_buffer_dirty(bh);
- set_buffer_uptodate(bh);
- } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
- buffer_dirty(bh)) {
- WARN_ON(bh->b_size != blocksize);
- err = get_block(inode, block, bh, 1);
- if (err)
- goto recover;
- clear_buffer_delay(bh);
- if (buffer_new(bh)) {
- /* blockdev mappings never come here */
- clear_buffer_new(bh);
- clean_bdev_bh_alias(bh);
- }
- }
- bh = bh->b_this_page;
- block++;
- } while (bh != head);
- do {
- if (!buffer_mapped(bh))
- continue;
- /*
- * If it's a fully non-blocking write attempt and we cannot
- * lock the buffer then redirty the folio. Note that this can
- * potentially cause a busy-wait loop from writeback threads
- * and kswapd activity, but those code paths have their own
- * higher-level throttling.
- */
- if (wbc->sync_mode != WB_SYNC_NONE) {
- lock_buffer(bh);
- } else if (!trylock_buffer(bh)) {
- folio_redirty_for_writepage(wbc, folio);
- continue;
- }
- if (test_clear_buffer_dirty(bh)) {
- mark_buffer_async_write_endio(bh,
- end_buffer_async_write);
- } else {
- unlock_buffer(bh);
- }
- } while ((bh = bh->b_this_page) != head);
- /*
- * The folio and its buffers are protected by the writeback flag,
- * so we can drop the bh refcounts early.
- */
- BUG_ON(folio_test_writeback(folio));
- folio_start_writeback(folio);
- do {
- struct buffer_head *next = bh->b_this_page;
- if (buffer_async_write(bh)) {
- submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
- inode->i_write_hint, wbc);
- nr_underway++;
- }
- bh = next;
- } while (bh != head);
- folio_unlock(folio);
- err = 0;
- done:
- if (nr_underway == 0) {
- /*
- * The folio was marked dirty, but the buffers were
- * clean. Someone wrote them back by hand with
- * write_dirty_buffer/submit_bh. A rare case.
- */
- folio_end_writeback(folio);
- /*
- * The folio and buffer_heads can be released at any time from
- * here on.
- */
- }
- return err;
- recover:
- /*
- * ENOSPC, or some other error. We may already have added some
- * blocks to the file, so we need to write these out to avoid
- * exposing stale data.
- * The folio is currently locked and not marked for writeback
- */
- bh = head;
- /* Recovery: lock and submit the mapped buffers */
- do {
- if (buffer_mapped(bh) && buffer_dirty(bh) &&
- !buffer_delay(bh)) {
- lock_buffer(bh);
- mark_buffer_async_write_endio(bh,
- end_buffer_async_write);
- } else {
- /*
- * The buffer may have been set dirty during
- * attachment to a dirty folio.
- */
- clear_buffer_dirty(bh);
- }
- } while ((bh = bh->b_this_page) != head);
- BUG_ON(folio_test_writeback(folio));
- mapping_set_error(folio->mapping, err);
- folio_start_writeback(folio);
- do {
- struct buffer_head *next = bh->b_this_page;
- if (buffer_async_write(bh)) {
- clear_buffer_dirty(bh);
- submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
- inode->i_write_hint, wbc);
- nr_underway++;
- }
- bh = next;
- } while (bh != head);
- folio_unlock(folio);
- goto done;
- }
- EXPORT_SYMBOL(__block_write_full_folio);
- /*
- * If a folio has any new buffers, zero them out here, and mark them uptodate
- * and dirty so they'll be written out (in order to prevent uninitialised
- * block data from leaking), and clear the new bit.
- */
- void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
- {
- size_t block_start, block_end;
- struct buffer_head *head, *bh;
- BUG_ON(!folio_test_locked(folio));
- head = folio_buffers(folio);
- if (!head)
- return;
- bh = head;
- block_start = 0;
- do {
- block_end = block_start + bh->b_size;
- if (buffer_new(bh)) {
- if (block_end > from && block_start < to) {
- if (!folio_test_uptodate(folio)) {
- size_t start, xend;
- start = max(from, block_start);
- xend = min(to, block_end);
- folio_zero_segment(folio, start, xend);
- set_buffer_uptodate(bh);
- }
- clear_buffer_new(bh);
- mark_buffer_dirty(bh);
- }
- }
- block_start = block_end;
- bh = bh->b_this_page;
- } while (bh != head);
- }
- EXPORT_SYMBOL(folio_zero_new_buffers);
- static int
- iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
- const struct iomap *iomap)
- {
- loff_t offset = (loff_t)block << inode->i_blkbits;
- bh->b_bdev = iomap->bdev;
- /*
- * Block points to offset in file we need to map, iomap contains
- * the offset at which the map starts. If the map ends before the
- * current block, then do not map the buffer and let the caller
- * handle it.
- */
- if (offset >= iomap->offset + iomap->length)
- return -EIO;
- switch (iomap->type) {
- case IOMAP_HOLE:
- /*
- * If the buffer is not up to date or beyond the current EOF,
- * we need to mark it as new to ensure sub-block zeroing is
- * executed if necessary.
- */
- if (!buffer_uptodate(bh) ||
- (offset >= i_size_read(inode)))
- set_buffer_new(bh);
- return 0;
- case IOMAP_DELALLOC:
- if (!buffer_uptodate(bh) ||
- (offset >= i_size_read(inode)))
- set_buffer_new(bh);
- set_buffer_uptodate(bh);
- set_buffer_mapped(bh);
- set_buffer_delay(bh);
- return 0;
- case IOMAP_UNWRITTEN:
- /*
- * For unwritten regions, we always need to ensure that regions
- * in the block we are not writing to are zeroed. Mark the
- * buffer as new to ensure this.
- */
- set_buffer_new(bh);
- set_buffer_unwritten(bh);
- fallthrough;
- case IOMAP_MAPPED:
- if ((iomap->flags & IOMAP_F_NEW) ||
- offset >= i_size_read(inode)) {
- /*
- * This can happen if truncating the block device races
- * with the check in the caller as i_size updates on
- * block devices aren't synchronized by i_rwsem for
- * block devices.
- */
- if (S_ISBLK(inode->i_mode))
- return -EIO;
- set_buffer_new(bh);
- }
- bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
- inode->i_blkbits;
- set_buffer_mapped(bh);
- return 0;
- default:
- WARN_ON_ONCE(1);
- return -EIO;
- }
- }
- int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
- get_block_t *get_block, const struct iomap *iomap)
- {
- size_t from = offset_in_folio(folio, pos);
- size_t to = from + len;
- struct inode *inode = folio->mapping->host;
- size_t block_start, block_end;
- sector_t block;
- int err = 0;
- size_t blocksize;
- struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
- BUG_ON(!folio_test_locked(folio));
- BUG_ON(to > folio_size(folio));
- BUG_ON(from > to);
- head = folio_create_buffers(folio, inode, 0);
- blocksize = head->b_size;
- block = div_u64(folio_pos(folio), blocksize);
- for (bh = head, block_start = 0; bh != head || !block_start;
- block++, block_start=block_end, bh = bh->b_this_page) {
- block_end = block_start + blocksize;
- if (block_end <= from || block_start >= to) {
- if (folio_test_uptodate(folio)) {
- if (!buffer_uptodate(bh))
- set_buffer_uptodate(bh);
- }
- continue;
- }
- if (buffer_new(bh))
- clear_buffer_new(bh);
- if (!buffer_mapped(bh)) {
- WARN_ON(bh->b_size != blocksize);
- if (get_block)
- err = get_block(inode, block, bh, 1);
- else
- err = iomap_to_bh(inode, block, bh, iomap);
- if (err)
- break;
- if (buffer_new(bh)) {
- clean_bdev_bh_alias(bh);
- if (folio_test_uptodate(folio)) {
- clear_buffer_new(bh);
- set_buffer_uptodate(bh);
- mark_buffer_dirty(bh);
- continue;
- }
- if (block_end > to || block_start < from)
- folio_zero_segments(folio,
- to, block_end,
- block_start, from);
- continue;
- }
- }
- if (folio_test_uptodate(folio)) {
- if (!buffer_uptodate(bh))
- set_buffer_uptodate(bh);
- continue;
- }
- if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
- !buffer_unwritten(bh) &&
- (block_start < from || block_end > to)) {
- bh_read_nowait(bh, 0);
- *wait_bh++=bh;
- }
- }
- /*
- * If we issued read requests - let them complete.
- */
- while(wait_bh > wait) {
- wait_on_buffer(*--wait_bh);
- if (!buffer_uptodate(*wait_bh))
- err = -EIO;
- }
- if (unlikely(err))
- folio_zero_new_buffers(folio, from, to);
- return err;
- }
- int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
- get_block_t *get_block)
- {
- return __block_write_begin_int(folio, pos, len, get_block, NULL);
- }
- EXPORT_SYMBOL(__block_write_begin);
- static void __block_commit_write(struct folio *folio, size_t from, size_t to)
- {
- size_t block_start, block_end;
- bool partial = false;
- unsigned blocksize;
- struct buffer_head *bh, *head;
- bh = head = folio_buffers(folio);
- if (!bh)
- return;
- blocksize = bh->b_size;
- block_start = 0;
- do {
- block_end = block_start + blocksize;
- if (block_end <= from || block_start >= to) {
- if (!buffer_uptodate(bh))
- partial = true;
- } else {
- set_buffer_uptodate(bh);
- mark_buffer_dirty(bh);
- }
- if (buffer_new(bh))
- clear_buffer_new(bh);
- block_start = block_end;
- bh = bh->b_this_page;
- } while (bh != head);
- /*
- * If this is a partial write which happened to make all buffers
- * uptodate then we can optimize away a bogus read_folio() for
- * the next read(). Here we 'discover' whether the folio went
- * uptodate as a result of this (potentially partial) write.
- */
- if (!partial)
- folio_mark_uptodate(folio);
- }
- /*
- * block_write_begin takes care of the basic task of block allocation and
- * bringing partial write blocks uptodate first.
- *
- * The filesystem needs to handle block truncation upon failure.
- */
- int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- struct folio **foliop, get_block_t *get_block)
- {
- pgoff_t index = pos >> PAGE_SHIFT;
- struct folio *folio;
- int status;
- folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
- mapping_gfp_mask(mapping));
- if (IS_ERR(folio))
- return PTR_ERR(folio);
- status = __block_write_begin_int(folio, pos, len, get_block, NULL);
- if (unlikely(status)) {
- folio_unlock(folio);
- folio_put(folio);
- folio = NULL;
- }
- *foliop = folio;
- return status;
- }
- EXPORT_SYMBOL(block_write_begin);
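- /*
-  * Illustrative sketch: a simple filesystem can implement ->write_begin()
-  * directly on top of this helper; "myfs_get_block" is a hypothetical
-  * get_block_t callback, and the post-failure block truncation mentioned
-  * above is omitted.
-  *
-  *	static int myfs_write_begin(struct file *file,
-  *			struct address_space *mapping, loff_t pos,
-  *			unsigned len, struct folio **foliop, void **fsdata)
-  *	{
-  *		return block_write_begin(mapping, pos, len, foliop,
-  *					 myfs_get_block);
-  *	}
-  */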
- int block_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
- {
- size_t start = pos - folio_pos(folio);
- if (unlikely(copied < len)) {
- /*
- * The buffers that were written will now be uptodate, so
- * we don't have to worry about a read_folio reading them
- * and overwriting a partial write. However if we have
- * encountered a short write and only partially written
- * into a buffer, it will not be marked uptodate, so a
- * read_folio might come in and destroy our partial write.
- *
- * Do the simplest thing, and just treat any short write to a
- * non uptodate folio as a zero-length write, and force the
- * caller to redo the whole thing.
- */
- if (!folio_test_uptodate(folio))
- copied = 0;
- folio_zero_new_buffers(folio, start+copied, start+len);
- }
- flush_dcache_folio(folio);
- /* This could be a short (even 0-length) commit */
- __block_commit_write(folio, start, start + copied);
- return copied;
- }
- EXPORT_SYMBOL(block_write_end);
- int generic_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct folio *folio, void *fsdata)
- {
- struct inode *inode = mapping->host;
- loff_t old_size = inode->i_size;
- bool i_size_changed = false;
- copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
- /*
- * No need to use i_size_read() here, the i_size cannot change under us
- * because we hold i_rwsem.
- *
- * But it's important to update i_size while still holding folio lock:
- * page writeout could otherwise come in and zero beyond i_size.
- */
- if (pos + copied > inode->i_size) {
- i_size_write(inode, pos + copied);
- i_size_changed = true;
- }
- folio_unlock(folio);
- folio_put(folio);
- if (old_size < pos)
- pagecache_isize_extended(inode, old_size, pos);
- /*
- * Don't mark the inode dirty under page lock. First, it unnecessarily
- * makes the holding time of page lock longer. Second, it forces lock
- * ordering of page lock and transaction start for journaling
- * filesystems.
- */
- if (i_size_changed)
- mark_inode_dirty(inode);
- return copied;
- }
- EXPORT_SYMBOL(generic_write_end);
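- /*
-  * Illustrative sketch: filesystems with no special commit requirements
-  * usually pair their write_begin with generic_write_end() directly in
-  * their address_space_operations; the "myfs_*" names are hypothetical.
-  *
-  *	static const struct address_space_operations myfs_aops = {
-  *		.read_folio		= myfs_read_folio,
-  *		.write_begin		= myfs_write_begin,
-  *		.write_end		= generic_write_end,
-  *		.is_partially_uptodate	= block_is_partially_uptodate,
-  *	};
-  */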
- /*
- * block_is_partially_uptodate checks whether buffers within a folio are
- * uptodate or not.
- *
- * Returns true if all buffers which correspond to the specified part
- * of the folio are uptodate.
- */
- bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
- {
- unsigned block_start, block_end, blocksize;
- unsigned to;
- struct buffer_head *bh, *head;
- bool ret = true;
- head = folio_buffers(folio);
- if (!head)
- return false;
- blocksize = head->b_size;
- to = min_t(unsigned, folio_size(folio) - from, count);
- to = from + to;
- if (from < blocksize && to > folio_size(folio) - blocksize)
- return false;
- bh = head;
- block_start = 0;
- do {
- block_end = block_start + blocksize;
- if (block_end > from && block_start < to) {
- if (!buffer_uptodate(bh)) {
- ret = false;
- break;
- }
- if (block_end >= to)
- break;
- }
- block_start = block_end;
- bh = bh->b_this_page;
- } while (bh != head);
- return ret;
- }
- EXPORT_SYMBOL(block_is_partially_uptodate);
- /*
- * Generic "read_folio" function for block devices that have the normal
- * get_block functionality, which covers most block device filesystems.
- * Reads the folio asynchronously --- the unlock_buffer() and
- * set/clear_buffer_uptodate() functions propagate buffer state into the
- * folio once IO has completed.
- */
- int block_read_full_folio(struct folio *folio, get_block_t *get_block)
- {
- struct inode *inode = folio->mapping->host;
- sector_t iblock, lblock;
- struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
- size_t blocksize;
- int nr, i;
- int fully_mapped = 1;
- bool page_error = false;
- loff_t limit = i_size_read(inode);
- /* This is needed for ext4. */
- if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
- limit = inode->i_sb->s_maxbytes;
- VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
- head = folio_create_buffers(folio, inode, 0);
- blocksize = head->b_size;
- iblock = div_u64(folio_pos(folio), blocksize);
- lblock = div_u64(limit + blocksize - 1, blocksize);
- bh = head;
- nr = 0;
- i = 0;
- do {
- if (buffer_uptodate(bh))
- continue;
- if (!buffer_mapped(bh)) {
- int err = 0;
- fully_mapped = 0;
- if (iblock < lblock) {
- WARN_ON(bh->b_size != blocksize);
- err = get_block(inode, iblock, bh, 0);
- if (err)
- page_error = true;
- }
- if (!buffer_mapped(bh)) {
- folio_zero_range(folio, i * blocksize,
- blocksize);
- if (!err)
- set_buffer_uptodate(bh);
- continue;
- }
- /*
- * get_block() might have updated the buffer
- * synchronously
- */
- if (buffer_uptodate(bh))
- continue;
- }
- arr[nr++] = bh;
- } while (i++, iblock++, (bh = bh->b_this_page) != head);
- if (fully_mapped)
- folio_set_mappedtodisk(folio);
- if (!nr) {
- /*
- * All buffers are uptodate or get_block() returned an
- * error when trying to map them - we can finish the read.
- */
- folio_end_read(folio, !page_error);
- return 0;
- }
- /* Stage two: lock the buffers */
- for (i = 0; i < nr; i++) {
- bh = arr[i];
- lock_buffer(bh);
- mark_buffer_async_read(bh);
- }
- /*
- * Stage 3: start the IO. Check for uptodateness
- * inside the buffer lock in case another process reading
- * the underlying blockdev brought it uptodate (the sct fix).
- */
- for (i = 0; i < nr; i++) {
- bh = arr[i];
- if (buffer_uptodate(bh))
- end_buffer_async_read(bh, 1);
- else
- submit_bh(REQ_OP_READ, bh);
- }
- return 0;
- }
- EXPORT_SYMBOL(block_read_full_folio);
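- /*
-  * Illustrative sketch: the usual way to use this is as the body of a
-  * filesystem's ->read_folio(); "myfs_get_block" is a hypothetical
-  * get_block_t callback.
-  *
-  *	static int myfs_read_folio(struct file *file, struct folio *folio)
-  *	{
-  *		return block_read_full_folio(folio, myfs_get_block);
-  *	}
-  */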
- /* utility function for filesystems that need to do work on expanding
- * truncates. Uses filesystem pagecache writes to allow the filesystem to
- * deal with the hole.
- */
- int generic_cont_expand_simple(struct inode *inode, loff_t size)
- {
- struct address_space *mapping = inode->i_mapping;
- const struct address_space_operations *aops = mapping->a_ops;
- struct folio *folio;
- void *fsdata = NULL;
- int err;
- err = inode_newsize_ok(inode, size);
- if (err)
- goto out;
- err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
- if (err)
- goto out;
- err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
- BUG_ON(err > 0);
- out:
- return err;
- }
- EXPORT_SYMBOL(generic_cont_expand_simple);
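- /*
-  * Illustrative sketch: a filesystem that cannot represent holes might
-  * call this from its ->setattr() when the requested size grows (error
-  * handling beyond the call itself is omitted):
-  *
-  *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
-  *		err = generic_cont_expand_simple(inode, attr->ia_size);
-  *		if (err)
-  *			return err;
-  *	}
-  */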
- static int cont_expand_zero(struct file *file, struct address_space *mapping,
- loff_t pos, loff_t *bytes)
- {
- struct inode *inode = mapping->host;
- const struct address_space_operations *aops = mapping->a_ops;
- unsigned int blocksize = i_blocksize(inode);
- struct folio *folio;
- void *fsdata = NULL;
- pgoff_t index, curidx;
- loff_t curpos;
- unsigned zerofrom, offset, len;
- int err = 0;
- index = pos >> PAGE_SHIFT;
- offset = pos & ~PAGE_MASK;
- while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
- zerofrom = curpos & ~PAGE_MASK;
- if (zerofrom & (blocksize-1)) {
- *bytes |= (blocksize-1);
- (*bytes)++;
- }
- len = PAGE_SIZE - zerofrom;
- err = aops->write_begin(file, mapping, curpos, len,
- &folio, &fsdata);
- if (err)
- goto out;
- folio_zero_range(folio, offset_in_folio(folio, curpos), len);
- err = aops->write_end(file, mapping, curpos, len, len,
- folio, fsdata);
- if (err < 0)
- goto out;
- BUG_ON(err != len);
- err = 0;
- balance_dirty_pages_ratelimited(mapping);
- if (fatal_signal_pending(current)) {
- err = -EINTR;
- goto out;
- }
- }
- /* page covers the boundary, find the boundary offset */
- if (index == curidx) {
- zerofrom = curpos & ~PAGE_MASK;
- /* if we are going to expand the file, the last block will be filled */
- if (offset <= zerofrom) {
- goto out;
- }
- if (zerofrom & (blocksize-1)) {
- *bytes |= (blocksize-1);
- (*bytes)++;
- }
- len = offset - zerofrom;
- err = aops->write_begin(file, mapping, curpos, len,
- &folio, &fsdata);
- if (err)
- goto out;
- folio_zero_range(folio, offset_in_folio(folio, curpos), len);
- err = aops->write_end(file, mapping, curpos, len, len,
- folio, fsdata);
- if (err < 0)
- goto out;
- BUG_ON(err != len);
- err = 0;
- }
- out:
- return err;
- }
- /*
- * For moronic filesystems that do not allow holes in files.
- * We may have to extend the file.
- */
- int cont_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata,
- get_block_t *get_block, loff_t *bytes)
- {
- struct inode *inode = mapping->host;
- unsigned int blocksize = i_blocksize(inode);
- unsigned int zerofrom;
- int err;
- err = cont_expand_zero(file, mapping, pos, bytes);
- if (err)
- return err;
- zerofrom = *bytes & ~PAGE_MASK;
- if (pos+len > *bytes && zerofrom & (blocksize-1)) {
- *bytes |= (blocksize-1);
- (*bytes)++;
- }
- return block_write_begin(mapping, pos, len, foliop, get_block);
- }
- EXPORT_SYMBOL(cont_write_begin);
- void block_commit_write(struct page *page, unsigned from, unsigned to)
- {
- struct folio *folio = page_folio(page);
- __block_commit_write(folio, from, to);
- }
- EXPORT_SYMBOL(block_commit_write);
- /*
- * block_page_mkwrite() is not allowed to change the file size as it gets
- * called from a page fault handler when a page is first dirtied. Hence we must
- * be careful to check for EOF conditions here. We set the page up correctly
- * for a written page which means we get ENOSPC checking when writing into
- * holes and correct delalloc and unwritten extent mapping on filesystems that
- * support these features.
- *
- * We are not allowed to take the i_mutex here so we have to play games to
- * protect against truncate races as the page could now be beyond EOF. Because
- * truncate writes the inode size before removing pages, once we have the
- * page lock we can determine safely if the page is beyond EOF. If it is not
- * beyond EOF, then the page is guaranteed safe against truncation until we
- * unlock the page.
- *
- * Direct callers of this function should protect against filesystem freezing
- * using sb_start_pagefault() - sb_end_pagefault() functions.
- */
- int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
- get_block_t get_block)
- {
- struct folio *folio = page_folio(vmf->page);
- struct inode *inode = file_inode(vma->vm_file);
- unsigned long end;
- loff_t size;
- int ret;
- folio_lock(folio);
- size = i_size_read(inode);
- if ((folio->mapping != inode->i_mapping) ||
- (folio_pos(folio) >= size)) {
- /* We overload EFAULT to mean page got truncated */
- ret = -EFAULT;
- goto out_unlock;
- }
- end = folio_size(folio);
- /* folio is wholly or partially inside EOF */
- if (folio_pos(folio) + end > size)
- end = size - folio_pos(folio);
- ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
- if (unlikely(ret))
- goto out_unlock;
- __block_commit_write(folio, 0, end);
- folio_mark_dirty(folio);
- folio_wait_stable(folio);
- return 0;
- out_unlock:
- folio_unlock(folio);
- return ret;
- }
- EXPORT_SYMBOL(block_page_mkwrite);
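- /*
-  * Illustrative sketch: a direct caller wraps this in the pagefault
-  * freeze protection mentioned above; "myfs_get_block" is hypothetical
-  * and the error handling is simplified.
-  *
-  *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
-  *	{
-  *		struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
-  *		int err;
-  *
-  *		sb_start_pagefault(sb);
-  *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
-  *		sb_end_pagefault(sb);
-  *		return err ? VM_FAULT_SIGBUS : VM_FAULT_LOCKED;
-  *	}
-  */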
- int block_truncate_page(struct address_space *mapping,
- loff_t from, get_block_t *get_block)
- {
- pgoff_t index = from >> PAGE_SHIFT;
- unsigned blocksize;
- sector_t iblock;
- size_t offset, length, pos;
- struct inode *inode = mapping->host;
- struct folio *folio;
- struct buffer_head *bh;
- int err = 0;
- blocksize = i_blocksize(inode);
- length = from & (blocksize - 1);
- /* Block boundary? Nothing to do */
- if (!length)
- return 0;
- length = blocksize - length;
- iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
- folio = filemap_grab_folio(mapping, index);
- if (IS_ERR(folio))
- return PTR_ERR(folio);
- bh = folio_buffers(folio);
- if (!bh)
- bh = create_empty_buffers(folio, blocksize, 0);
- /* Find the buffer that contains "offset" */
- offset = offset_in_folio(folio, from);
- pos = blocksize;
- while (offset >= pos) {
- bh = bh->b_this_page;
- iblock++;
- pos += blocksize;
- }
- if (!buffer_mapped(bh)) {
- WARN_ON(bh->b_size != blocksize);
- err = get_block(inode, iblock, bh, 0);
- if (err)
- goto unlock;
- /* unmapped? It's a hole - nothing to do */
- if (!buffer_mapped(bh))
- goto unlock;
- }
- /* Ok, it's mapped. Make sure it's up-to-date */
- if (folio_test_uptodate(folio))
- set_buffer_uptodate(bh);
- if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
- err = bh_read(bh, 0);
- /* Uhhuh. Read error. Complain and punt. */
- if (err < 0)
- goto unlock;
- }
- folio_zero_range(folio, offset, length);
- mark_buffer_dirty(bh);
- unlock:
- folio_unlock(folio);
- folio_put(folio);
- return err;
- }
- EXPORT_SYMBOL(block_truncate_page);
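- /*
-  * Illustrative sketch: a filesystem shrinking a file typically zeroes
-  * the partial tail block with this helper before updating i_size;
-  * "myfs_get_block" is hypothetical.
-  *
-  *	err = block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
-  *	if (err)
-  *		return err;
-  *	truncate_setsize(inode, newsize);
-  */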
- /*
- * The generic ->writepage function for buffer-backed address_spaces
- */
- int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
- void *get_block)
- {
- struct inode * const inode = folio->mapping->host;
- loff_t i_size = i_size_read(inode);
- /* Is the folio fully inside i_size? */
- if (folio_pos(folio) + folio_size(folio) <= i_size)
- return __block_write_full_folio(inode, folio, get_block, wbc);
- /* Is the folio fully outside i_size? (truncate in progress) */
- if (folio_pos(folio) >= i_size) {
- folio_unlock(folio);
- return 0; /* don't care */
- }
- /*
- * The folio straddles i_size. It must be zeroed out on each and every
- * writepage invocation because it may be mmapped. "A file is mapped
- * in multiples of the page size. For a file that is not a multiple of
- * the page size, the remaining memory is zeroed when mapped, and
- * writes to that region are not written out to the file."
- */
- folio_zero_segment(folio, offset_in_folio(folio, i_size),
- folio_size(folio));
- return __block_write_full_folio(inode, folio, get_block, wbc);
- }
- sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
- get_block_t *get_block)
- {
- struct inode *inode = mapping->host;
- struct buffer_head tmp = {
- .b_size = i_blocksize(inode),
- };
- get_block(inode, block, &tmp, 0);
- return tmp.b_blocknr;
- }
- EXPORT_SYMBOL(generic_block_bmap);
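- /*
-  * Illustrative sketch: the typical ->bmap() implementation is a thin
-  * wrapper around this helper; "myfs_get_block" is hypothetical.
-  *
-  *	static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
-  *	{
-  *		return generic_block_bmap(mapping, block, myfs_get_block);
-  *	}
-  */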
- static void end_bio_bh_io_sync(struct bio *bio)
- {
- struct buffer_head *bh = bio->bi_private;
- if (unlikely(bio_flagged(bio, BIO_QUIET)))
- set_bit(BH_Quiet, &bh->b_state);
- bh->b_end_io(bh, !bio->bi_status);
- bio_put(bio);
- }
- static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
- enum rw_hint write_hint,
- struct writeback_control *wbc)
- {
- const enum req_op op = opf & REQ_OP_MASK;
- struct bio *bio;
- BUG_ON(!buffer_locked(bh));
- BUG_ON(!buffer_mapped(bh));
- BUG_ON(!bh->b_end_io);
- BUG_ON(buffer_delay(bh));
- BUG_ON(buffer_unwritten(bh));
- /*
- * Only clear out a write error when rewriting
- */
- if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
- clear_buffer_write_io_error(bh);
- if (buffer_meta(bh))
- opf |= REQ_META;
- if (buffer_prio(bh))
- opf |= REQ_PRIO;
- bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
- fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
- bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio->bi_write_hint = write_hint;
- bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
- bio->bi_end_io = end_bio_bh_io_sync;
- bio->bi_private = bh;
- /* Take care of bh's that straddle the end of the device */
- guard_bio_eod(bio);
- if (wbc) {
- wbc_init_bio(wbc, bio);
- wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
- }
- submit_bio(bio);
- }
- void submit_bh(blk_opf_t opf, struct buffer_head *bh)
- {
- submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
- }
- EXPORT_SYMBOL(submit_bh);
- void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
- {
- lock_buffer(bh);
- if (!test_clear_buffer_dirty(bh)) {
- unlock_buffer(bh);
- return;
- }
- bh->b_end_io = end_buffer_write_sync;
- get_bh(bh);
- submit_bh(REQ_OP_WRITE | op_flags, bh);
- }
- EXPORT_SYMBOL(write_dirty_buffer);
- /*
- * For a data-integrity writeout, we need to wait upon any in-progress I/O
- * and then start new I/O and then wait upon it. The caller must have a ref on
- * the buffer_head.
- */
- int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
- {
- WARN_ON(atomic_read(&bh->b_count) < 1);
- lock_buffer(bh);
- if (test_clear_buffer_dirty(bh)) {
- /*
- * The bh should be mapped, but it might not be if the
- * device was hot-removed. Not much we can do but fail the I/O.
- */
- if (!buffer_mapped(bh)) {
- unlock_buffer(bh);
- return -EIO;
- }
- get_bh(bh);
- bh->b_end_io = end_buffer_write_sync;
- submit_bh(REQ_OP_WRITE | op_flags, bh);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- return -EIO;
- } else {
- unlock_buffer(bh);
- }
- return 0;
- }
- EXPORT_SYMBOL(__sync_dirty_buffer);
- int sync_dirty_buffer(struct buffer_head *bh)
- {
- return __sync_dirty_buffer(bh, REQ_SYNC);
- }
- EXPORT_SYMBOL(sync_dirty_buffer);
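- /*
-  * Illustrative sketch: updating an on-disk superblock synchronously;
-  * "myfs_sb" is a hypothetical in-memory copy of the on-disk structure.
-  *
-  *	memcpy(bh->b_data, &myfs_sb, sizeof(myfs_sb));
-  *	mark_buffer_dirty(bh);
-  *	err = sync_dirty_buffer(bh);
-  *	if (err)
-  *		return err;
-  */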
- static inline int buffer_busy(struct buffer_head *bh)
- {
- return atomic_read(&bh->b_count) |
- (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
- }
- static bool
- drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
- {
- struct buffer_head *head = folio_buffers(folio);
- struct buffer_head *bh;
- bh = head;
- do {
- if (buffer_busy(bh))
- goto failed;
- bh = bh->b_this_page;
- } while (bh != head);
- do {
- struct buffer_head *next = bh->b_this_page;
- if (bh->b_assoc_map)
- __remove_assoc_queue(bh);
- bh = next;
- } while (bh != head);
- *buffers_to_free = head;
- folio_detach_private(folio);
- return true;
- failed:
- return false;
- }
- /**
- * try_to_free_buffers - Release buffers attached to this folio.
- * @folio: The folio.
- *
- * If any buffers are in use (dirty, under writeback, elevated refcount),
- * no buffers will be freed.
- *
- * If the folio is dirty but all the buffers are clean then we need to
- * be sure to mark the folio clean as well. This is because the folio
- * may be against a block device, and a later reattachment of buffers
- * to a dirty folio will set *all* buffers dirty. Which would corrupt
- * filesystem data on the same device.
- *
- * The same applies to regular filesystem folios: if all the buffers are
- * clean then we set the folio clean and proceed. To do that, we require
- * total exclusion from block_dirty_folio(). That is obtained with
- * i_private_lock.
- *
- * Exclusion against try_to_free_buffers may be obtained by either
- * locking the folio or by holding its mapping's i_private_lock.
- *
- * Context: Process context. @folio must be locked. Will not sleep.
- * Return: true if all buffers attached to this folio were freed.
- */
- bool try_to_free_buffers(struct folio *folio)
- {
- struct address_space * const mapping = folio->mapping;
- struct buffer_head *buffers_to_free = NULL;
- bool ret = false;
- BUG_ON(!folio_test_locked(folio));
- if (folio_test_writeback(folio))
- return false;
- if (mapping == NULL) { /* can this still happen? */
- ret = drop_buffers(folio, &buffers_to_free);
- goto out;
- }
- spin_lock(&mapping->i_private_lock);
- ret = drop_buffers(folio, &buffers_to_free);
- /*
- * If the filesystem writes its buffers by hand (eg ext3)
- * then we can have clean buffers against a dirty folio. We
- * clean the folio here; otherwise the VM will never notice
- * that the filesystem did any IO at all.
- *
- * Also, during truncate, discard_buffer will have marked all
- * the folio's buffers clean. We discover that here and clean
- * the folio also.
- *
- * i_private_lock must be held over this entire operation in order
- * to synchronise against block_dirty_folio and prevent the
- * dirty bit from being lost.
- */
- if (ret)
- folio_cancel_dirty(folio);
- spin_unlock(&mapping->i_private_lock);
- out:
- if (buffers_to_free) {
- struct buffer_head *bh = buffers_to_free;
- do {
- struct buffer_head *next = bh->b_this_page;
- free_buffer_head(bh);
- bh = next;
- } while (bh != buffers_to_free);
- }
- return ret;
- }
- EXPORT_SYMBOL(try_to_free_buffers);
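- /*
-  * Illustrative sketch: a filesystem that keeps additional per-folio
-  * state checks its own conditions first and then falls back to this
-  * helper from its ->release_folio(); "myfs_folio_busy" is hypothetical.
-  *
-  *	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
-  *	{
-  *		if (myfs_folio_busy(folio))
-  *			return false;
-  *		return try_to_free_buffers(folio);
-  *	}
-  */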
- /*
- * Buffer-head allocation
- */
- static struct kmem_cache *bh_cachep __ro_after_init;
- /*
- * Once the number of bh's in the machine exceeds this level, we start
- * stripping them in writeback.
- */
- static unsigned long max_buffer_heads __ro_after_init;
- int buffer_heads_over_limit;
- struct bh_accounting {
- int nr; /* Number of live bh's */
- int ratelimit; /* Limit cacheline bouncing */
- };
- static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
- static void recalc_bh_state(void)
- {
- int i;
- int tot = 0;
- if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
- return;
- __this_cpu_write(bh_accounting.ratelimit, 0);
- for_each_online_cpu(i)
- tot += per_cpu(bh_accounting, i).nr;
- buffer_heads_over_limit = (tot > max_buffer_heads);
- }
- struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
- {
- struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
- if (ret) {
- INIT_LIST_HEAD(&ret->b_assoc_buffers);
- spin_lock_init(&ret->b_uptodate_lock);
- preempt_disable();
- __this_cpu_inc(bh_accounting.nr);
- recalc_bh_state();
- preempt_enable();
- }
- return ret;
- }
- EXPORT_SYMBOL(alloc_buffer_head);
- void free_buffer_head(struct buffer_head *bh)
- {
- BUG_ON(!list_empty(&bh->b_assoc_buffers));
- kmem_cache_free(bh_cachep, bh);
- preempt_disable();
- __this_cpu_dec(bh_accounting.nr);
- recalc_bh_state();
- preempt_enable();
- }
- EXPORT_SYMBOL(free_buffer_head);
- static int buffer_exit_cpu_dead(unsigned int cpu)
- {
- int i;
- struct bh_lru *b = &per_cpu(bh_lrus, cpu);
- for (i = 0; i < BH_LRU_SIZE; i++) {
- brelse(b->bhs[i]);
- b->bhs[i] = NULL;
- }
- this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
- per_cpu(bh_accounting, cpu).nr = 0;
- return 0;
- }
- /**
- * bh_uptodate_or_lock - Test whether the buffer is uptodate
- * @bh: struct buffer_head
- *
- * Return true if the buffer is up-to-date; otherwise return false with
- * the buffer locked.
- */
- int bh_uptodate_or_lock(struct buffer_head *bh)
- {
- if (!buffer_uptodate(bh)) {
- lock_buffer(bh);
- if (!buffer_uptodate(bh))
- return 0;
- unlock_buffer(bh);
- }
- return 1;
- }
- EXPORT_SYMBOL(bh_uptodate_or_lock);
- /**
- * __bh_read - Submit read for a locked buffer
- * @bh: struct buffer_head
- * @op_flags: extra REQ_* flags to be ORed with REQ_OP_READ
- * @wait: wait until the read finishes
- *
- * Returns zero on success, or when @wait is false; returns -EIO on I/O error.
- */
- int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
- {
- int ret = 0;
- BUG_ON(!buffer_locked(bh));
- get_bh(bh);
- bh->b_end_io = end_buffer_read_sync;
- submit_bh(REQ_OP_READ | op_flags, bh);
- if (wait) {
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- ret = -EIO;
- }
- return ret;
- }
- EXPORT_SYMBOL(__bh_read);
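- /*
-  * Illustrative sketch: the common "read this buffer if needed" pattern
-  * combines bh_uptodate_or_lock() with __bh_read(); this is essentially
-  * what the bh_read() wrapper in buffer_head.h does.
-  *
-  *	if (bh_uptodate_or_lock(bh))
-  *		return 1;
-  *	return __bh_read(bh, 0, true);
-  */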
- /**
- * __bh_read_batch - Submit read for a batch of unlocked buffers
- * @nr: number of entries in the buffer batch
- * @bhs: a batch of struct buffer_head
- * @op_flags: extra REQ_* flags to be ORed with REQ_OP_READ
- * @force_lock: if set, wait for the lock on each buffer; otherwise skip any
- *              buffer that cannot be locked immediately.
- *
- * Reads are submitted asynchronously; buffers that are already uptodate are
- * skipped.
- */
- void __bh_read_batch(int nr, struct buffer_head *bhs[],
- blk_opf_t op_flags, bool force_lock)
- {
- int i;
- for (i = 0; i < nr; i++) {
- struct buffer_head *bh = bhs[i];
- if (buffer_uptodate(bh))
- continue;
- if (force_lock)
- lock_buffer(bh);
- else
- if (!trylock_buffer(bh))
- continue;
- if (buffer_uptodate(bh)) {
- unlock_buffer(bh);
- continue;
- }
- bh->b_end_io = end_buffer_read_sync;
- get_bh(bh);
- submit_bh(REQ_OP_READ | op_flags, bh);
- }
- }
- EXPORT_SYMBOL(__bh_read_batch);
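- /*
-  * Illustrative sketch: opportunistic readahead of a group of metadata
-  * buffers via the bh_readahead_batch() wrapper (force_lock == false), so
-  * buffers that are locked elsewhere are simply skipped; "nr_bhs" and
-  * "bhs" stand for a hypothetical caller-built batch.
-  *
-  *	bh_readahead_batch(nr_bhs, bhs, REQ_RAHEAD);
-  */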
- void __init buffer_init(void)
- {
- unsigned long nrpages;
- int ret;
- bh_cachep = KMEM_CACHE(buffer_head,
- SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
- /*
- * Limit the bh occupancy to 10% of ZONE_NORMAL
- */
- nrpages = (nr_free_buffer_pages() * 10) / 100;
- max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
- ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
- NULL, buffer_exit_cpu_dead);
- WARN_ON(ret < 0);
- }
|