xfs_ialloc.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
  4. * All Rights Reserved.
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_log_format.h"
  11. #include "xfs_trans_resv.h"
  12. #include "xfs_bit.h"
  13. #include "xfs_mount.h"
  14. #include "xfs_inode.h"
  15. #include "xfs_btree.h"
  16. #include "xfs_ialloc.h"
  17. #include "xfs_ialloc_btree.h"
  18. #include "xfs_alloc.h"
  19. #include "xfs_errortag.h"
  20. #include "xfs_error.h"
  21. #include "xfs_bmap.h"
  22. #include "xfs_trans.h"
  23. #include "xfs_buf_item.h"
  24. #include "xfs_icreate_item.h"
  25. #include "xfs_icache.h"
  26. #include "xfs_trace.h"
  27. #include "xfs_log.h"
  28. #include "xfs_rmap.h"
  29. #include "xfs_ag.h"
  30. #include "xfs_health.h"
  31. /*
  32. * Lookup a record by ino in the btree given by cur.
  33. */
  34. int /* error */
  35. xfs_inobt_lookup(
  36. struct xfs_btree_cur *cur, /* btree cursor */
  37. xfs_agino_t ino, /* starting inode of chunk */
  38. xfs_lookup_t dir, /* <=, >=, == */
  39. int *stat) /* success/failure */
  40. {
  41. cur->bc_rec.i.ir_startino = ino;
  42. cur->bc_rec.i.ir_holemask = 0;
  43. cur->bc_rec.i.ir_count = 0;
  44. cur->bc_rec.i.ir_freecount = 0;
  45. cur->bc_rec.i.ir_free = 0;
  46. return xfs_btree_lookup(cur, dir, stat);
  47. }
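/*
 * For illustration only: xfs_inobt_lookup() positions the cursor but does not
 * read the record, so callers pair it with xfs_inobt_get_rec() and
 * xfs_btree_increment() to walk records, as xfs_check_agi_freecount() does
 * below. A minimal sketch of such a walker (the function name is hypothetical
 * and not part of the kernel):
 */
#if 0	/* example only */
static int
xfs_inobt_walk_example(
	struct xfs_btree_cur		*cur,
	xfs_agino_t			agino,
	int				*nrecs)
{
	struct xfs_inobt_rec_incore	rec;
	int				has_rec;
	int				error;

	*nrecs = 0;
	/* position at the first record whose startino is >= agino */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE, &has_rec);
	while (!error && has_rec) {
		error = xfs_inobt_get_rec(cur, &rec, &has_rec);
		if (error || !has_rec)
			break;
		(*nrecs)++;
		/* move right to the next record */
		error = xfs_btree_increment(cur, 0, &has_rec);
	}
	return error;
}
#endif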
  48. /*
  49. * Update the record referred to by cur to the value given.
  50. * This either works (return 0) or gets an EFSCORRUPTED error.
  51. */
  52. STATIC int /* error */
  53. xfs_inobt_update(
  54. struct xfs_btree_cur *cur, /* btree cursor */
  55. xfs_inobt_rec_incore_t *irec) /* btree record */
  56. {
  57. union xfs_btree_rec rec;
  58. rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
  59. if (xfs_has_sparseinodes(cur->bc_mp)) {
  60. rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
  61. rec.inobt.ir_u.sp.ir_count = irec->ir_count;
  62. rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
  63. } else {
  64. /* ir_holemask/ir_count not supported on-disk */
  65. rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
  66. }
  67. rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
  68. return xfs_btree_update(cur, &rec);
  69. }
  70. /* Convert on-disk btree record to incore inobt record. */
  71. void
  72. xfs_inobt_btrec_to_irec(
  73. struct xfs_mount *mp,
  74. const union xfs_btree_rec *rec,
  75. struct xfs_inobt_rec_incore *irec)
  76. {
  77. irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
  78. if (xfs_has_sparseinodes(mp)) {
  79. irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
  80. irec->ir_count = rec->inobt.ir_u.sp.ir_count;
  81. irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
  82. } else {
  83. /*
  84. * ir_holemask/ir_count not supported on-disk. Fill in hardcoded
  85. * values for full inode chunks.
  86. */
  87. irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
  88. irec->ir_count = XFS_INODES_PER_CHUNK;
  89. irec->ir_freecount =
  90. be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
  91. }
  92. irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
  93. }
  94. /* Compute the freecount of an incore inode record. */
  95. uint8_t
  96. xfs_inobt_rec_freecount(
  97. const struct xfs_inobt_rec_incore *irec)
  98. {
  99. uint64_t realfree = irec->ir_free;
  100. if (xfs_inobt_issparse(irec->ir_holemask))
  101. realfree &= xfs_inobt_irec_to_allocmask(irec);
  102. return hweight64(realfree);
  103. }
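/*
 * For illustration only: each bit of ir_holemask covers
 * XFS_INODES_PER_HOLEMASK_BIT (64 / 16 = 4) inodes, and a set bit marks a
 * hole, i.e. inodes with no disk space behind them. A standalone sketch of
 * the expansion that xfs_inobt_irec_to_allocmask() performs (the helper name
 * here is made up):
 */
#if 0	/* example only */
static uint64_t
example_holemask_to_allocmask(
	uint16_t	holemask)
{
	uint64_t	allocmask = 0;
	int		bit;

	for (bit = 0; bit < 16; bit++) {
		if (holemask & (1U << bit))
			continue;			/* hole: no inodes here */
		allocmask |= 0xfULL << (bit * 4);	/* 4 inodes per bit */
	}
	return allocmask;
}
#endif
/*
 * e.g. a record with ir_holemask 0xff00 (upper half sparse) and ir_free all
 * ones expands to allocmask 0x00000000ffffffff, so the freecount above is
 * hweight64(ir_free & allocmask) = 32, matching ir_count for that record.
 */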
  104. /* Simple checks for inode records. */
  105. xfs_failaddr_t
  106. xfs_inobt_check_irec(
  107. struct xfs_perag *pag,
  108. const struct xfs_inobt_rec_incore *irec)
  109. {
  110. /* Record has to be properly aligned within the AG. */
  111. if (!xfs_verify_agino(pag, irec->ir_startino))
  112. return __this_address;
  113. if (!xfs_verify_agino(pag,
  114. irec->ir_startino + XFS_INODES_PER_CHUNK - 1))
  115. return __this_address;
  116. if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT ||
  117. irec->ir_count > XFS_INODES_PER_CHUNK)
  118. return __this_address;
  119. if (irec->ir_freecount > XFS_INODES_PER_CHUNK)
  120. return __this_address;
  121. if (xfs_inobt_rec_freecount(irec) != irec->ir_freecount)
  122. return __this_address;
  123. return NULL;
  124. }
  125. static inline int
  126. xfs_inobt_complain_bad_rec(
  127. struct xfs_btree_cur *cur,
  128. xfs_failaddr_t fa,
  129. const struct xfs_inobt_rec_incore *irec)
  130. {
  131. struct xfs_mount *mp = cur->bc_mp;
  132. xfs_warn(mp,
  133. "%sbt record corruption in AG %d detected at %pS!",
  134. cur->bc_ops->name, cur->bc_ag.pag->pag_agno, fa);
  135. xfs_warn(mp,
  136. "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
  137. irec->ir_startino, irec->ir_count, irec->ir_freecount,
  138. irec->ir_free, irec->ir_holemask);
  139. xfs_btree_mark_sick(cur);
  140. return -EFSCORRUPTED;
  141. }
  142. /*
  143. * Get the data from the pointed-to record.
  144. */
  145. int
  146. xfs_inobt_get_rec(
  147. struct xfs_btree_cur *cur,
  148. struct xfs_inobt_rec_incore *irec,
  149. int *stat)
  150. {
  151. struct xfs_mount *mp = cur->bc_mp;
  152. union xfs_btree_rec *rec;
  153. xfs_failaddr_t fa;
  154. int error;
  155. error = xfs_btree_get_rec(cur, &rec, stat);
  156. if (error || *stat == 0)
  157. return error;
  158. xfs_inobt_btrec_to_irec(mp, rec, irec);
  159. fa = xfs_inobt_check_irec(cur->bc_ag.pag, irec);
  160. if (fa)
  161. return xfs_inobt_complain_bad_rec(cur, fa, irec);
  162. return 0;
  163. }
  164. /*
  165. * Insert a single inobt record. Cursor must already point to desired location.
  166. */
  167. int
  168. xfs_inobt_insert_rec(
  169. struct xfs_btree_cur *cur,
  170. uint16_t holemask,
  171. uint8_t count,
  172. int32_t freecount,
  173. xfs_inofree_t free,
  174. int *stat)
  175. {
  176. cur->bc_rec.i.ir_holemask = holemask;
  177. cur->bc_rec.i.ir_count = count;
  178. cur->bc_rec.i.ir_freecount = freecount;
  179. cur->bc_rec.i.ir_free = free;
  180. return xfs_btree_insert(cur, stat);
  181. }
  182. /*
  183. * Insert records describing a newly allocated inode chunk into the inobt.
  184. */
  185. STATIC int
  186. xfs_inobt_insert(
  187. struct xfs_perag *pag,
  188. struct xfs_trans *tp,
  189. struct xfs_buf *agbp,
  190. xfs_agino_t newino,
  191. xfs_agino_t newlen,
  192. bool is_finobt)
  193. {
  194. struct xfs_btree_cur *cur;
  195. xfs_agino_t thisino;
  196. int i;
  197. int error;
  198. if (is_finobt)
  199. cur = xfs_finobt_init_cursor(pag, tp, agbp);
  200. else
  201. cur = xfs_inobt_init_cursor(pag, tp, agbp);
  202. for (thisino = newino;
  203. thisino < newino + newlen;
  204. thisino += XFS_INODES_PER_CHUNK) {
  205. error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
  206. if (error) {
  207. xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
  208. return error;
  209. }
  210. ASSERT(i == 0);
  211. error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
  212. XFS_INODES_PER_CHUNK,
  213. XFS_INODES_PER_CHUNK,
  214. XFS_INOBT_ALL_FREE, &i);
  215. if (error) {
  216. xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
  217. return error;
  218. }
  219. ASSERT(i == 1);
  220. }
  221. xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
  222. return 0;
  223. }
  224. /*
  225. * Verify that the number of free inodes in the AGI is correct.
  226. */
  227. #ifdef DEBUG
  228. static int
  229. xfs_check_agi_freecount(
  230. struct xfs_btree_cur *cur)
  231. {
  232. if (cur->bc_nlevels == 1) {
  233. xfs_inobt_rec_incore_t rec;
  234. int freecount = 0;
  235. int error;
  236. int i;
  237. error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
  238. if (error)
  239. return error;
  240. do {
  241. error = xfs_inobt_get_rec(cur, &rec, &i);
  242. if (error)
  243. return error;
  244. if (i) {
  245. freecount += rec.ir_freecount;
  246. error = xfs_btree_increment(cur, 0, &i);
  247. if (error)
  248. return error;
  249. }
  250. } while (i == 1);
  251. if (!xfs_is_shutdown(cur->bc_mp))
  252. ASSERT(freecount == cur->bc_ag.pag->pagi_freecount);
  253. }
  254. return 0;
  255. }
  256. #else
  257. #define xfs_check_agi_freecount(cur) 0
  258. #endif
  259. /*
  260. * Initialise a new set of inodes. When called without a transaction context
  261. * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
  262. * than logging them (which in a transaction context puts them into the AIL
  263. * for writeback rather than the xfsbufd queue).
  264. */
  265. int
  266. xfs_ialloc_inode_init(
  267. struct xfs_mount *mp,
  268. struct xfs_trans *tp,
  269. struct list_head *buffer_list,
  270. int icount,
  271. xfs_agnumber_t agno,
  272. xfs_agblock_t agbno,
  273. xfs_agblock_t length,
  274. unsigned int gen)
  275. {
  276. struct xfs_buf *fbuf;
  277. struct xfs_dinode *free;
  278. int nbufs;
  279. int version;
  280. int i, j;
  281. xfs_daddr_t d;
  282. xfs_ino_t ino = 0;
  283. int error;
  284. /*
  285. * Loop over the new block(s), filling in the inodes. For small block
  286. * sizes, manipulate the inodes in buffers which are multiples of the
  287. * block size.
  288. */
  289. nbufs = length / M_IGEO(mp)->blocks_per_cluster;
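/*
 * For illustration only, with an assumed geometry: 4096-byte blocks,
 * 512-byte inodes (8 per block) and an 8192-byte inode cluster give
 * blocks_per_cluster = 2 and inodes_per_cluster = 16. A full 64-inode chunk
 * then spans length = 8 blocks, so nbufs = 8 / 2 = 4 cluster buffers, each
 * initialising 16 inodes at offsets i << sb_inodelog (i << 9) within the
 * buffer.
 */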
  290. /*
  291. * Figure out what version number to use in the inodes we create. If
  292. * the superblock version has caught up to the one that supports the new
  293. * inode format, then use the new inode version. Otherwise use the old
  294. * version so that old kernels will continue to be able to use the file
  295. * system.
  296. *
  297. * For v3 inodes, we also need to write the inode number into the inode,
  298. * so calculate the first inode number of the chunk here as
  299. * XFS_AGB_TO_AGINO() only works within a filesystem block, not
  300. * across multiple filesystem blocks (such as a cluster) and so cannot
  301. * be used in the cluster buffer loop below.
  302. *
  303. * Further, because we are writing the inode directly into the buffer
  304. * and calculating a CRC on the entire inode, we have to log the entire
  305. * inode so that the entire range the CRC covers is present in the log.
  306. * That means for v3 inodes we log the entire buffer rather than just the
  307. * inode cores.
  308. */
  309. if (xfs_has_v3inodes(mp)) {
  310. version = 3;
  311. ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));
  312. /*
  313. * log the initialisation that is about to take place as a
  314. * logical operation. This means the transaction does not
  315. * need to log the physical changes to the inode buffers as log
  316. * recovery will know what initialisation is actually needed.
  317. * Hence we only need to log the buffers as "ordered" buffers so
  318. * they track in the AIL as if they were physically logged.
  319. */
  320. if (tp)
  321. xfs_icreate_log(tp, agno, agbno, icount,
  322. mp->m_sb.sb_inodesize, length, gen);
  323. } else
  324. version = 2;
  325. for (j = 0; j < nbufs; j++) {
  326. /*
  327. * Get the block.
  328. */
  329. d = XFS_AGB_TO_DADDR(mp, agno, agbno +
  330. (j * M_IGEO(mp)->blocks_per_cluster));
  331. error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
  332. mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
  333. XBF_UNMAPPED, &fbuf);
  334. if (error)
  335. return error;
  336. /* Initialize the inode buffers and log them appropriately. */
  337. fbuf->b_ops = &xfs_inode_buf_ops;
  338. xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
  339. for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
  340. int ioffset = i << mp->m_sb.sb_inodelog;
  341. free = xfs_make_iptr(mp, fbuf, i);
  342. free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
  343. free->di_version = version;
  344. free->di_gen = cpu_to_be32(gen);
  345. free->di_next_unlinked = cpu_to_be32(NULLAGINO);
  346. if (version == 3) {
  347. free->di_ino = cpu_to_be64(ino);
  348. ino++;
  349. uuid_copy(&free->di_uuid,
  350. &mp->m_sb.sb_meta_uuid);
  351. xfs_dinode_calc_crc(mp, free);
  352. } else if (tp) {
  353. /* just log the inode core */
  354. xfs_trans_log_buf(tp, fbuf, ioffset,
  355. ioffset + XFS_DINODE_SIZE(mp) - 1);
  356. }
  357. }
  358. if (tp) {
  359. /*
  360. * Mark the buffer as an inode allocation buffer so it
  361. * sticks in the AIL at the point of this allocation
  362. * transaction. This ensures they are on disk before
  363. * the tail of the log can be moved past this
  364. * transaction (i.e. by preventing relogging from moving
  365. * it forward in the log).
  366. */
  367. xfs_trans_inode_alloc_buf(tp, fbuf);
  368. if (version == 3) {
  369. /*
  370. * Mark the buffer as ordered so that it is
  371. * not physically logged in the transaction but
  372. * still tracked in the AIL as part of the
  373. * transaction, pinning the log appropriately.
  374. */
  375. xfs_trans_ordered_buf(tp, fbuf);
  376. }
  377. } else {
  378. fbuf->b_flags |= XBF_DONE;
  379. xfs_buf_delwri_queue(fbuf, buffer_list);
  380. xfs_buf_relse(fbuf);
  381. }
  382. }
  383. return 0;
  384. }
  385. /*
  386. * Align startino and allocmask for a recently allocated sparse chunk such that
  387. * they are fit for insertion (or merge) into the on-disk inode btrees.
  388. *
  389. * Background:
  390. *
  391. * When enabled, sparse inode support increases the inode alignment from cluster
  392. * size to inode chunk size. This means that the minimum range between two
  393. * non-adjacent inode records in the inobt is large enough for a full inode
  394. * record. This allows for cluster sized, cluster aligned block allocation
  395. * without need to worry about whether the resulting inode record overlaps with
  396. * another record in the tree. Without this basic rule, we would have to deal
  397. * with the consequences of overlap by potentially undoing recent allocations in
  398. * the inode allocation codepath.
  399. *
  400. * Because of this alignment rule (which is enforced on mount), there are two
  401. * inobt possibilities for newly allocated sparse chunks. One is that the
  402. * aligned inode record for the chunk covers a range of inodes not already
  403. * covered in the inobt (i.e., it is safe to insert a new sparse record). The
  404. * other is that a record already exists at the aligned startino that considers
  405. * the newly allocated range as sparse. In the latter case, record content is
  406. * merged in hope that sparse inode chunks fill to full chunks over time.
  407. */
  408. STATIC void
  409. xfs_align_sparse_ino(
  410. struct xfs_mount *mp,
  411. xfs_agino_t *startino,
  412. uint16_t *allocmask)
  413. {
  414. xfs_agblock_t agbno;
  415. xfs_agblock_t mod;
  416. int offset;
  417. agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
  418. mod = agbno % mp->m_sb.sb_inoalignmt;
  419. if (!mod)
  420. return;
  421. /* calculate the inode offset and align startino */
  422. offset = XFS_AGB_TO_AGINO(mp, mod);
  423. *startino -= offset;
  424. /*
  425. * Since startino has been aligned down, left shift allocmask such that
  426. * it continues to represent the same physical inodes relative to the
  427. * new startino.
  428. */
  429. *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
  430. }
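/*
 * For illustration only, with an assumed geometry: with 4 inodes per block
 * and sb_inoalignmt = 16 blocks (one full 64-inode chunk), a sparse
 * allocation starting 8 blocks into an alignment unit gives mod = 8 and
 * offset = XFS_AGB_TO_AGINO(mp, 8) = 32 inodes. startino is pulled back by
 * 32 and allocmask is shifted left by 32 / XFS_INODES_PER_HOLEMASK_BIT = 8
 * bits, so the mask still describes the same physical inodes relative to the
 * aligned startino.
 */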
  431. /*
  432. * Determine whether the source inode record can merge into the target. Both
  433. * records must be sparse, the inode ranges must match and there must be no
  434. * allocation overlap between the records.
  435. */
  436. STATIC bool
  437. __xfs_inobt_can_merge(
  438. struct xfs_inobt_rec_incore *trec, /* tgt record */
  439. struct xfs_inobt_rec_incore *srec) /* src record */
  440. {
  441. uint64_t talloc;
  442. uint64_t salloc;
  443. /* records must cover the same inode range */
  444. if (trec->ir_startino != srec->ir_startino)
  445. return false;
  446. /* both records must be sparse */
  447. if (!xfs_inobt_issparse(trec->ir_holemask) ||
  448. !xfs_inobt_issparse(srec->ir_holemask))
  449. return false;
  450. /* both records must track some inodes */
  451. if (!trec->ir_count || !srec->ir_count)
  452. return false;
  453. /* can't exceed capacity of a full record */
  454. if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
  455. return false;
  456. /* verify there is no allocation overlap */
  457. talloc = xfs_inobt_irec_to_allocmask(trec);
  458. salloc = xfs_inobt_irec_to_allocmask(srec);
  459. if (talloc & salloc)
  460. return false;
  461. return true;
  462. }
  463. /*
  464. * Merge the source inode record into the target. The caller must call
  465. * __xfs_inobt_can_merge() to ensure the merge is valid.
  466. */
  467. STATIC void
  468. __xfs_inobt_rec_merge(
  469. struct xfs_inobt_rec_incore *trec, /* target */
  470. struct xfs_inobt_rec_incore *srec) /* src */
  471. {
  472. ASSERT(trec->ir_startino == srec->ir_startino);
  473. /* combine the counts */
  474. trec->ir_count += srec->ir_count;
  475. trec->ir_freecount += srec->ir_freecount;
  476. /*
  477. * Merge the holemask and free mask. For both fields, 0 bits refer to
  478. * allocated inodes. We combine the allocated ranges with bitwise AND.
  479. */
  480. trec->ir_holemask &= srec->ir_holemask;
  481. trec->ir_free &= srec->ir_free;
  482. }
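/*
 * For illustration only: merging a target record with ir_holemask 0xff00
 * (lower 32 inodes allocated, 4 of them in use, so ir_free = ~0xfULL and
 * ir_freecount = 28) with a freshly allocated source record holding the
 * other half (ir_holemask 0x00ff, ir_free all ones, ir_freecount 32) yields
 * ir_holemask 0x0000 (a full chunk), ir_count 64, ir_freecount 60 and
 * ir_free = ~0xfULL, i.e. only the four in-use inodes remain cleared.
 */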
  483. /*
  484. * Insert a new sparse inode chunk into the associated inode allocation btree.
  485. * The inode record for the sparse chunk is pre-aligned to a startino that
  486. * should match any pre-existing sparse inode record in the tree. This allows
  487. * sparse chunks to fill over time.
  488. *
  489. * If no preexisting record exists, the provided record is inserted.
  490. * If there is a preexisting record, the provided record is merged with the
  491. * existing record and updated in place. The merged record is returned in nrec.
  492. *
  493. * It is considered corruption if a merge is requested and not possible. Given
  494. * the sparse inode alignment constraints, this should never happen.
  495. */
  496. STATIC int
  497. xfs_inobt_insert_sprec(
  498. struct xfs_perag *pag,
  499. struct xfs_trans *tp,
  500. struct xfs_buf *agbp,
  501. struct xfs_inobt_rec_incore *nrec) /* in/out: new/merged rec. */
  502. {
  503. struct xfs_mount *mp = pag->pag_mount;
  504. struct xfs_btree_cur *cur;
  505. int error;
  506. int i;
  507. struct xfs_inobt_rec_incore rec;
  508. cur = xfs_inobt_init_cursor(pag, tp, agbp);
  509. /* the new record is pre-aligned so we know where to look */
  510. error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
  511. if (error)
  512. goto error;
  513. /* if nothing there, insert a new record and return */
  514. if (i == 0) {
  515. error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
  516. nrec->ir_count, nrec->ir_freecount,
  517. nrec->ir_free, &i);
  518. if (error)
  519. goto error;
  520. if (XFS_IS_CORRUPT(mp, i != 1)) {
  521. xfs_btree_mark_sick(cur);
  522. error = -EFSCORRUPTED;
  523. goto error;
  524. }
  525. goto out;
  526. }
  527. /*
  528. * A record exists at this startino. Merge the records.
  529. */
  530. error = xfs_inobt_get_rec(cur, &rec, &i);
  531. if (error)
  532. goto error;
  533. if (XFS_IS_CORRUPT(mp, i != 1)) {
  534. xfs_btree_mark_sick(cur);
  535. error = -EFSCORRUPTED;
  536. goto error;
  537. }
  538. if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) {
  539. xfs_btree_mark_sick(cur);
  540. error = -EFSCORRUPTED;
  541. goto error;
  542. }
  543. /*
  544. * This should never fail. If we have coexisting records that
  545. * cannot merge, something is seriously wrong.
  546. */
  547. if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) {
  548. xfs_btree_mark_sick(cur);
  549. error = -EFSCORRUPTED;
  550. goto error;
  551. }
  552. trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino,
  553. rec.ir_holemask, nrec->ir_startino,
  554. nrec->ir_holemask);
  555. /* merge to nrec to output the updated record */
  556. __xfs_inobt_rec_merge(nrec, &rec);
  557. trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino,
  558. nrec->ir_holemask);
  559. error = xfs_inobt_rec_check_count(mp, nrec);
  560. if (error)
  561. goto error;
  562. error = xfs_inobt_update(cur, nrec);
  563. if (error)
  564. goto error;
  565. out:
  566. xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
  567. return 0;
  568. error:
  569. xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
  570. return error;
  571. }
  572. /*
  573. * Insert a new sparse inode chunk into the free inode btree. The inode
  574. * record for the sparse chunk is pre-aligned to a startino that should match
  575. * any pre-existing sparse inode record in the tree. This allows sparse chunks
  576. * to fill over time.
  577. *
  578. * The new record is always inserted, overwriting a pre-existing record if
  579. * there is one.
  580. */
  581. STATIC int
  582. xfs_finobt_insert_sprec(
  583. struct xfs_perag *pag,
  584. struct xfs_trans *tp,
  585. struct xfs_buf *agbp,
  586. struct xfs_inobt_rec_incore *nrec) /* in/out: new rec. */
  587. {
  588. struct xfs_mount *mp = pag->pag_mount;
  589. struct xfs_btree_cur *cur;
  590. int error;
  591. int i;
  592. cur = xfs_finobt_init_cursor(pag, tp, agbp);
  593. /* the new record is pre-aligned so we know where to look */
  594. error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
  595. if (error)
  596. goto error;
  597. /* if nothing there, insert a new record and return */
  598. if (i == 0) {
  599. error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
  600. nrec->ir_count, nrec->ir_freecount,
  601. nrec->ir_free, &i);
  602. if (error)
  603. goto error;
  604. if (XFS_IS_CORRUPT(mp, i != 1)) {
  605. xfs_btree_mark_sick(cur);
  606. error = -EFSCORRUPTED;
  607. goto error;
  608. }
  609. } else {
  610. error = xfs_inobt_update(cur, nrec);
  611. if (error)
  612. goto error;
  613. }
  614. xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
  615. return 0;
  616. error:
  617. xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
  618. return error;
  619. }
  620. /*
  621. * Allocate new inodes in the allocation group specified by agbp. Returns 0 if
  622. * inodes were allocated in this AG; -EAGAIN if there was no space in this AG so
  623. * the caller knows it can try another AG, a hard -ENOSPC when over the maximum
  624. * inode count threshold, or the usual negative error code for other errors.
  625. */
  626. STATIC int
  627. xfs_ialloc_ag_alloc(
  628. struct xfs_perag *pag,
  629. struct xfs_trans *tp,
  630. struct xfs_buf *agbp)
  631. {
  632. struct xfs_agi *agi;
  633. struct xfs_alloc_arg args;
  634. int error;
  635. xfs_agino_t newino; /* new first inode's number */
  636. xfs_agino_t newlen; /* new number of inodes */
  637. int isaligned = 0; /* inode allocation at stripe */
  638. /* unit boundary */
  639. /* init. to full chunk */
  640. struct xfs_inobt_rec_incore rec;
  641. struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp);
  642. uint16_t allocmask = (uint16_t) -1;
  643. int do_sparse = 0;
  644. memset(&args, 0, sizeof(args));
  645. args.tp = tp;
  646. args.mp = tp->t_mountp;
  647. args.fsbno = NULLFSBLOCK;
  648. args.oinfo = XFS_RMAP_OINFO_INODES;
  649. args.pag = pag;
  650. #ifdef DEBUG
  651. /* randomly do sparse inode allocations */
  652. if (xfs_has_sparseinodes(tp->t_mountp) &&
  653. igeo->ialloc_min_blks < igeo->ialloc_blks)
  654. do_sparse = get_random_u32_below(2);
  655. #endif
  656. /*
  657. * Locking will ensure that we don't have two callers in here
  658. * at one time.
  659. */
  660. newlen = igeo->ialloc_inos;
  661. if (igeo->maxicount &&
  662. percpu_counter_read_positive(&args.mp->m_icount) + newlen >
  663. igeo->maxicount)
  664. return -ENOSPC;
  665. args.minlen = args.maxlen = igeo->ialloc_blks;
  666. /*
  667. * First try to allocate inodes contiguous with the last-allocated
  668. * chunk of inodes. If the filesystem is striped, this will fill
  669. * an entire stripe unit with inodes.
  670. */
  671. agi = agbp->b_addr;
  672. newino = be32_to_cpu(agi->agi_newino);
  673. args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
  674. igeo->ialloc_blks;
  675. if (do_sparse)
  676. goto sparse_alloc;
  677. if (likely(newino != NULLAGINO &&
  678. (args.agbno < be32_to_cpu(agi->agi_length)))) {
  679. args.prod = 1;
  680. /*
  681. * We need to take into account alignment here to ensure that
  682. * we don't modify the free list if we fail to have an exact
  683. * block. If we don't have an exact match, and every other
  684. * allocation attempt fails, we'll end up cancelling
  685. * a dirty transaction and shutting down.
  686. *
  687. * For an exact allocation, alignment must be 1,
  688. * however we need to take cluster alignment into account when
  689. * fixing up the freelist. Use the minalignslop field to
  690. * indicate that extra blocks might be required for alignment,
  691. * but not to use them in the actual exact allocation.
  692. */
  693. args.alignment = 1;
  694. args.minalignslop = igeo->cluster_align - 1;
  695. /* Allow space for the inode btree to split. */
  696. args.minleft = igeo->inobt_maxlevels;
  697. error = xfs_alloc_vextent_exact_bno(&args,
  698. XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
  699. args.agbno));
  700. if (error)
  701. return error;
  702. /*
  703. * This request might have dirtied the transaction if the AG can
  704. * satisfy the request, but the exact block was not available.
  705. * If the allocation did fail, subsequent requests will relax
  706. * the exact agbno requirement and increase the alignment
  707. * instead. It is critical that the total size of the request
  708. * (len + alignment + slop) does not increase from this point
  709. * on, so reset minalignslop to ensure it is not included in
  710. * subsequent requests.
  711. */
  712. args.minalignslop = 0;
  713. }
  714. if (unlikely(args.fsbno == NULLFSBLOCK)) {
  715. /*
  716. * Set the alignment for the allocation.
  717. * If stripe alignment is turned on then align at stripe unit
  718. * boundary.
  719. * If the cluster size is smaller than a filesystem block
  720. * then we're doing I/O for inodes in filesystem block size
  721. * pieces, so don't need alignment anyway.
  722. */
  723. isaligned = 0;
  724. if (igeo->ialloc_align) {
  725. ASSERT(!xfs_has_noalign(args.mp));
  726. args.alignment = args.mp->m_dalign;
  727. isaligned = 1;
  728. } else
  729. args.alignment = igeo->cluster_align;
  730. /*
  731. * Allocate a fixed-size extent of inodes.
  732. */
  733. args.prod = 1;
  734. /*
  735. * Allow space for the inode btree to split.
  736. */
  737. args.minleft = igeo->inobt_maxlevels;
  738. error = xfs_alloc_vextent_near_bno(&args,
  739. XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
  740. be32_to_cpu(agi->agi_root)));
  741. if (error)
  742. return error;
  743. }
  744. /*
  745. * If stripe alignment is turned on, then try again with cluster
  746. * alignment.
  747. */
  748. if (isaligned && args.fsbno == NULLFSBLOCK) {
  749. args.alignment = igeo->cluster_align;
  750. error = xfs_alloc_vextent_near_bno(&args,
  751. XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
  752. be32_to_cpu(agi->agi_root)));
  753. if (error)
  754. return error;
  755. }
  756. /*
  757. * Finally, try a sparse allocation if the filesystem supports it and
  758. * the sparse allocation length is smaller than a full chunk.
  759. */
  760. if (xfs_has_sparseinodes(args.mp) &&
  761. igeo->ialloc_min_blks < igeo->ialloc_blks &&
  762. args.fsbno == NULLFSBLOCK) {
  763. sparse_alloc:
  764. args.alignment = args.mp->m_sb.sb_spino_align;
  765. args.prod = 1;
  766. args.minlen = igeo->ialloc_min_blks;
  767. args.maxlen = args.minlen;
  768. /*
  769. * The inode record will be aligned to full chunk size. We must
  770. * prevent sparse allocation from AG boundaries that result in
  771. * invalid inode records, such as records that start at agbno 0
  772. * or extend beyond the AG.
  773. *
  774. * Set min agbno to the first aligned, non-zero agbno and max to
  775. * the last aligned agbno that is at least one full chunk from
  776. * the end of the AG.
  777. */
  778. args.min_agbno = args.mp->m_sb.sb_inoalignmt;
  779. args.max_agbno = round_down(xfs_ag_block_count(args.mp,
  780. pag->pag_agno),
  781. args.mp->m_sb.sb_inoalignmt) -
  782. igeo->ialloc_blks;
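/*
 * For illustration only, with assumed numbers: sb_inoalignmt = 16 blocks,
 * ialloc_blks = 16 and an AG of 1000 blocks give min_agbno = 16 and
 * max_agbno = round_down(1000, 16) - 16 = 976, so a sparse extent can
 * neither start at agbno 0 nor run past the end of the AG.
 */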
  783. error = xfs_alloc_vextent_near_bno(&args,
  784. XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
  785. be32_to_cpu(agi->agi_root)));
  786. if (error)
  787. return error;
  788. newlen = XFS_AGB_TO_AGINO(args.mp, args.len);
  789. ASSERT(newlen <= XFS_INODES_PER_CHUNK);
  790. allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
  791. }
  792. if (args.fsbno == NULLFSBLOCK)
  793. return -EAGAIN;
  794. ASSERT(args.len == args.minlen);
  795. /*
  796. * Stamp and write the inode buffers.
  797. *
  798. * Seed the new inode cluster with a random generation number. This
  799. * prevents short-term reuse of generation numbers if a chunk is
  800. * freed and then immediately reallocated. We use random numbers
  801. * rather than a linear progression to prevent the next generation
  802. * number from being easily guessable.
  803. */
  804. error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno,
  805. args.agbno, args.len, get_random_u32());
  806. if (error)
  807. return error;
  808. /*
  809. * Convert the results.
  810. */
  811. newino = XFS_AGB_TO_AGINO(args.mp, args.agbno);
  812. if (xfs_inobt_issparse(~allocmask)) {
  813. /*
  814. * We've allocated a sparse chunk. Align the startino and mask.
  815. */
  816. xfs_align_sparse_ino(args.mp, &newino, &allocmask);
  817. rec.ir_startino = newino;
  818. rec.ir_holemask = ~allocmask;
  819. rec.ir_count = newlen;
  820. rec.ir_freecount = newlen;
  821. rec.ir_free = XFS_INOBT_ALL_FREE;
  822. /*
  823. * Insert the sparse record into the inobt and allow for a merge
  824. * if necessary. If a merge does occur, rec is updated to the
  825. * merged record.
  826. */
  827. error = xfs_inobt_insert_sprec(pag, tp, agbp, &rec);
  828. if (error == -EFSCORRUPTED) {
  829. xfs_alert(args.mp,
  830. "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
  831. XFS_AGINO_TO_INO(args.mp, pag->pag_agno,
  832. rec.ir_startino),
  833. rec.ir_holemask, rec.ir_count);
  834. xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
  835. }
  836. if (error)
  837. return error;
  838. /*
  839. * We can't merge the part we've just allocated into the finobt the
  840. * way we did for the inobt, due to finobt semantics. The original
  841. * record may or may not exist independent of whether physical inodes
  842. * exist in this sparse chunk.
  843. *
  844. * We must update the finobt record based on the inobt record. rec
  845. * contains the fully merged and up to date inobt record from the
  846. * previous call. Simply insert it, replacing any existing finobt
  847. * record for this startino with the merged record.
  848. */
  849. if (xfs_has_finobt(args.mp)) {
  850. error = xfs_finobt_insert_sprec(pag, tp, agbp, &rec);
  851. if (error)
  852. return error;
  853. }
  854. } else {
  855. /* full chunk - insert new records to both btrees */
  856. error = xfs_inobt_insert(pag, tp, agbp, newino, newlen, false);
  857. if (error)
  858. return error;
  859. if (xfs_has_finobt(args.mp)) {
  860. error = xfs_inobt_insert(pag, tp, agbp, newino,
  861. newlen, true);
  862. if (error)
  863. return error;
  864. }
  865. }
  866. /*
  867. * Update AGI counts and newino.
  868. */
  869. be32_add_cpu(&agi->agi_count, newlen);
  870. be32_add_cpu(&agi->agi_freecount, newlen);
  871. pag->pagi_freecount += newlen;
  872. pag->pagi_count += newlen;
  873. agi->agi_newino = cpu_to_be32(newino);
  874. /*
  875. * Log allocation group header fields
  876. */
  877. xfs_ialloc_log_agi(tp, agbp,
  878. XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
  879. /*
  880. * Modify/log superblock values for inode count and inode free count.
  881. */
  882. xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
  883. xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
  884. return 0;
  885. }
  886. /*
  887. * Try to retrieve the next record to the left/right from the current one.
  888. */
  889. STATIC int
  890. xfs_ialloc_next_rec(
  891. struct xfs_btree_cur *cur,
  892. xfs_inobt_rec_incore_t *rec,
  893. int *done,
  894. int left)
  895. {
  896. int error;
  897. int i;
  898. if (left)
  899. error = xfs_btree_decrement(cur, 0, &i);
  900. else
  901. error = xfs_btree_increment(cur, 0, &i);
  902. if (error)
  903. return error;
  904. *done = !i;
  905. if (i) {
  906. error = xfs_inobt_get_rec(cur, rec, &i);
  907. if (error)
  908. return error;
  909. if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
  910. xfs_btree_mark_sick(cur);
  911. return -EFSCORRUPTED;
  912. }
  913. }
  914. return 0;
  915. }
  916. STATIC int
  917. xfs_ialloc_get_rec(
  918. struct xfs_btree_cur *cur,
  919. xfs_agino_t agino,
  920. xfs_inobt_rec_incore_t *rec,
  921. int *done)
  922. {
  923. int error;
  924. int i;
  925. error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
  926. if (error)
  927. return error;
  928. *done = !i;
  929. if (i) {
  930. error = xfs_inobt_get_rec(cur, rec, &i);
  931. if (error)
  932. return error;
  933. if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
  934. xfs_btree_mark_sick(cur);
  935. return -EFSCORRUPTED;
  936. }
  937. }
  938. return 0;
  939. }
  940. /*
  941. * Return the offset of the first free inode in the record. If the inode chunk
  942. * is sparsely allocated, we convert the record holemask to inode granularity
  943. * and mask off the unallocated regions from the inode free mask.
  944. */
  945. STATIC int
  946. xfs_inobt_first_free_inode(
  947. struct xfs_inobt_rec_incore *rec)
  948. {
  949. xfs_inofree_t realfree;
  950. /* if there are no holes, return the first available offset */
  951. if (!xfs_inobt_issparse(rec->ir_holemask))
  952. return xfs_lowbit64(rec->ir_free);
  953. realfree = xfs_inobt_irec_to_allocmask(rec);
  954. realfree &= rec->ir_free;
  955. return xfs_lowbit64(realfree);
  956. }
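/*
 * For illustration only: a record with ir_holemask 0xfff0 has inodes 0-15
 * allocated and the rest sparse; with ir_free = 0xffffffffffffff00 (inodes
 * 0-7 in use) the masked free mask is 0xff00 and xfs_lowbit64() returns 8,
 * the chunk-relative offset of the first usable free inode.
 */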
  957. /*
  958. * If this AG has corrupt inodes, check if allocating this inode would fail
  959. * with corruption errors. Returns 0 if we're clear, or EAGAIN to try again
  960. * somewhere else.
  961. */
  962. static int
  963. xfs_dialloc_check_ino(
  964. struct xfs_perag *pag,
  965. struct xfs_trans *tp,
  966. xfs_ino_t ino)
  967. {
  968. struct xfs_imap imap;
  969. struct xfs_buf *bp;
  970. int error;
  971. error = xfs_imap(pag, tp, ino, &imap, 0);
  972. if (error)
  973. return -EAGAIN;
  974. error = xfs_imap_to_bp(pag->pag_mount, tp, &imap, &bp);
  975. if (error)
  976. return -EAGAIN;
  977. xfs_trans_brelse(tp, bp);
  978. return 0;
  979. }
  980. /*
  981. * Allocate an inode using the inobt-only algorithm.
  982. */
  983. STATIC int
  984. xfs_dialloc_ag_inobt(
  985. struct xfs_perag *pag,
  986. struct xfs_trans *tp,
  987. struct xfs_buf *agbp,
  988. xfs_ino_t parent,
  989. xfs_ino_t *inop)
  990. {
  991. struct xfs_mount *mp = tp->t_mountp;
  992. struct xfs_agi *agi = agbp->b_addr;
  993. xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
  994. xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
  995. struct xfs_btree_cur *cur, *tcur;
  996. struct xfs_inobt_rec_incore rec, trec;
  997. xfs_ino_t ino;
  998. int error;
  999. int offset;
  1000. int i, j;
  1001. int searchdistance = 10;
  1002. ASSERT(xfs_perag_initialised_agi(pag));
  1003. ASSERT(xfs_perag_allows_inodes(pag));
  1004. ASSERT(pag->pagi_freecount > 0);
  1005. restart_pagno:
  1006. cur = xfs_inobt_init_cursor(pag, tp, agbp);
  1007. /*
  1008. * If pagino is 0 (this is the root inode allocation) use newino.
  1009. * This must work because we've just allocated some.
  1010. */
  1011. if (!pagino)
  1012. pagino = be32_to_cpu(agi->agi_newino);
  1013. error = xfs_check_agi_freecount(cur);
  1014. if (error)
  1015. goto error0;
  1016. /*
  1017. * If in the same AG as the parent, try to get near the parent.
  1018. */
  1019. if (pagno == pag->pag_agno) {
  1020. int doneleft; /* done, to the left */
  1021. int doneright; /* done, to the right */
  1022. error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
  1023. if (error)
  1024. goto error0;
  1025. if (XFS_IS_CORRUPT(mp, i != 1)) {
  1026. xfs_btree_mark_sick(cur);
  1027. error = -EFSCORRUPTED;
  1028. goto error0;
  1029. }
  1030. error = xfs_inobt_get_rec(cur, &rec, &j);
  1031. if (error)
  1032. goto error0;
  1033. if (XFS_IS_CORRUPT(mp, j != 1)) {
  1034. xfs_btree_mark_sick(cur);
  1035. error = -EFSCORRUPTED;
  1036. goto error0;
  1037. }
  1038. if (rec.ir_freecount > 0) {
  1039. /*
  1040. * Found a free inode in the same chunk
  1041. * as the parent, done.
  1042. */
  1043. goto alloc_inode;
  1044. }
  1045. /*
  1046. * In the same AG as parent, but parent's chunk is full.
  1047. */
  1048. /* duplicate the cursor, search left & right simultaneously */
  1049. error = xfs_btree_dup_cursor(cur, &tcur);
  1050. if (error)
  1051. goto error0;
  1052. /*
  1053. * Skip to last blocks looked up if same parent inode.
  1054. */
  1055. if (pagino != NULLAGINO &&
  1056. pag->pagl_pagino == pagino &&
  1057. pag->pagl_leftrec != NULLAGINO &&
  1058. pag->pagl_rightrec != NULLAGINO) {
  1059. error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
  1060. &trec, &doneleft);
  1061. if (error)
  1062. goto error1;
  1063. error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
  1064. &rec, &doneright);
  1065. if (error)
  1066. goto error1;
  1067. } else {
  1068. /* search left with tcur, back up 1 record */
  1069. error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
  1070. if (error)
  1071. goto error1;
  1072. /* search right with cur, go forward 1 record. */
  1073. error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
  1074. if (error)
  1075. goto error1;
  1076. }
  1077. /*
  1078. * Loop until we find an inode chunk with a free inode.
  1079. */
  1080. while (--searchdistance > 0 && (!doneleft || !doneright)) {
  1081. int useleft; /* using left inode chunk this time */
  1082. /* figure out the closer block if both are valid. */
  1083. if (!doneleft && !doneright) {
  1084. useleft = pagino -
  1085. (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
  1086. rec.ir_startino - pagino;
  1087. } else {
  1088. useleft = !doneleft;
  1089. }
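/*
 * For illustration only: with pagino 200, a left record starting at 64
 * (covering inodes 64-127) and a right record starting at 256, the left
 * distance is 200 - 127 = 73 and the right distance is 256 - 200 = 56,
 * so useleft is false and the right chunk is tried first.
 */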
  1090. /* free inodes to the left? */
  1091. if (useleft && trec.ir_freecount) {
  1092. xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
  1093. cur = tcur;
  1094. pag->pagl_leftrec = trec.ir_startino;
  1095. pag->pagl_rightrec = rec.ir_startino;
  1096. pag->pagl_pagino = pagino;
  1097. rec = trec;
  1098. goto alloc_inode;
  1099. }
  1100. /* free inodes to the right? */
  1101. if (!useleft && rec.ir_freecount) {
  1102. xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
  1103. pag->pagl_leftrec = trec.ir_startino;
  1104. pag->pagl_rightrec = rec.ir_startino;
  1105. pag->pagl_pagino = pagino;
  1106. goto alloc_inode;
  1107. }
  1108. /* get next record to check */
  1109. if (useleft) {
  1110. error = xfs_ialloc_next_rec(tcur, &trec,
  1111. &doneleft, 1);
  1112. } else {
  1113. error = xfs_ialloc_next_rec(cur, &rec,
  1114. &doneright, 0);
  1115. }
  1116. if (error)
  1117. goto error1;
  1118. }
  1119. if (searchdistance <= 0) {
  1120. /*
  1121. * Not in range - save last search
  1122. * location and allocate a new inode
  1123. */
  1124. xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
  1125. pag->pagl_leftrec = trec.ir_startino;
  1126. pag->pagl_rightrec = rec.ir_startino;
  1127. pag->pagl_pagino = pagino;
  1128. } else {
  1129. /*
  1130. * We've reached the end of the btree. Because
  1131. * we only search a small chunk of the btree on
  1132. * each search, there are obviously free inodes
  1133. * closer to the parent inode than where we are
  1134. * now. Restart the search.
  1135. */
  1136. pag->pagl_pagino = NULLAGINO;
  1137. pag->pagl_leftrec = NULLAGINO;
  1138. pag->pagl_rightrec = NULLAGINO;
  1139. xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
  1140. xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
  1141. goto restart_pagno;
  1142. }
  1143. }
  1144. /*
  1145. * In a different AG from the parent.
  1146. * See if the most recently allocated block has any free.
  1147. */
  1148. if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
  1149. error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
  1150. XFS_LOOKUP_EQ, &i);
  1151. if (error)
  1152. goto error0;
  1153. if (i == 1) {
  1154. error = xfs_inobt_get_rec(cur, &rec, &j);
  1155. if (error)
  1156. goto error0;
  1157. if (j == 1 && rec.ir_freecount > 0) {
  1158. /*
  1159. * The last chunk allocated in the group
  1160. * still has a free inode.
  1161. */
  1162. goto alloc_inode;
  1163. }
  1164. }
  1165. }
  1166. /*
  1167. * None left in the last group, search the whole AG
  1168. */
  1169. error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
  1170. if (error)
  1171. goto error0;
  1172. if (XFS_IS_CORRUPT(mp, i != 1)) {
  1173. xfs_btree_mark_sick(cur);
  1174. error = -EFSCORRUPTED;
  1175. goto error0;
  1176. }
  1177. for (;;) {
  1178. error = xfs_inobt_get_rec(cur, &rec, &i);
  1179. if (error)
  1180. goto error0;
  1181. if (XFS_IS_CORRUPT(mp, i != 1)) {
  1182. xfs_btree_mark_sick(cur);
  1183. error = -EFSCORRUPTED;
  1184. goto error0;
  1185. }
  1186. if (rec.ir_freecount > 0)
  1187. break;
  1188. error = xfs_btree_increment(cur, 0, &i);
  1189. if (error)
  1190. goto error0;
  1191. if (XFS_IS_CORRUPT(mp, i != 1)) {
  1192. xfs_btree_mark_sick(cur);
  1193. error = -EFSCORRUPTED;
  1194. goto error0;
  1195. }
  1196. }
  1197. alloc_inode:
  1198. offset = xfs_inobt_first_free_inode(&rec);
  1199. ASSERT(offset >= 0);
  1200. ASSERT(offset < XFS_INODES_PER_CHUNK);
  1201. ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
  1202. XFS_INODES_PER_CHUNK) == 0);
  1203. ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
  1204. if (xfs_ag_has_sickness(pag, XFS_SICK_AG_INODES)) {
  1205. error = xfs_dialloc_check_ino(pag, tp, ino);
  1206. if (error)
  1207. goto error0;
  1208. }
  1209. rec.ir_free &= ~XFS_INOBT_MASK(offset);
  1210. rec.ir_freecount--;
  1211. error = xfs_inobt_update(cur, &rec);
  1212. if (error)
  1213. goto error0;
  1214. be32_add_cpu(&agi->agi_freecount, -1);
  1215. xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
  1216. pag->pagi_freecount--;
  1217. error = xfs_check_agi_freecount(cur);
  1218. if (error)
  1219. goto error0;
  1220. xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
  1221. xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
  1222. *inop = ino;
  1223. return 0;
  1224. error1:
  1225. xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
  1226. error0:
  1227. xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
  1228. return error;
  1229. }
  1230. /*
  1231. * Use the free inode btree to allocate an inode based on distance from the
  1232. * parent. Note that the provided cursor may be deleted and replaced.
  1233. */
  1234. STATIC int
  1235. xfs_dialloc_ag_finobt_near(
  1236. xfs_agino_t pagino,
  1237. struct xfs_btree_cur **ocur,
  1238. struct xfs_inobt_rec_incore *rec)
  1239. {
  1240. struct xfs_btree_cur *lcur = *ocur; /* left search cursor */
  1241. struct xfs_btree_cur *rcur; /* right search cursor */
  1242. struct xfs_inobt_rec_incore rrec;
  1243. int error;
  1244. int i, j;
  1245. error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
  1246. if (error)
  1247. return error;
  1248. if (i == 1) {
  1249. error = xfs_inobt_get_rec(lcur, rec, &i);
  1250. if (error)
  1251. return error;
  1252. if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1)) {
  1253. xfs_btree_mark_sick(lcur);
  1254. return -EFSCORRUPTED;
  1255. }
  1256. /*
  1257. * See if we've landed in the parent inode record. The finobt
  1258. * only tracks chunks with at least one free inode, so record
  1259. * existence is enough.
  1260. */
  1261. if (pagino >= rec->ir_startino &&
  1262. pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
  1263. return 0;
  1264. }
  1265. error = xfs_btree_dup_cursor(lcur, &rcur);
  1266. if (error)
  1267. return error;
  1268. error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
  1269. if (error)
  1270. goto error_rcur;
  1271. if (j == 1) {
  1272. error = xfs_inobt_get_rec(rcur, &rrec, &j);
  1273. if (error)
  1274. goto error_rcur;
  1275. if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) {
  1276. xfs_btree_mark_sick(lcur);
  1277. error = -EFSCORRUPTED;
  1278. goto error_rcur;
  1279. }
  1280. }
  1281. if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) {
  1282. xfs_btree_mark_sick(lcur);
  1283. error = -EFSCORRUPTED;
  1284. goto error_rcur;
  1285. }
  1286. if (i == 1 && j == 1) {
  1287. /*
  1288. * Both the left and right records are valid. Choose the closer
  1289. * inode chunk to the target.
  1290. */
  1291. if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
  1292. (rrec.ir_startino - pagino)) {
  1293. *rec = rrec;
  1294. xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
  1295. *ocur = rcur;
  1296. } else {
  1297. xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
  1298. }
  1299. } else if (j == 1) {
  1300. /* only the right record is valid */
  1301. *rec = rrec;
  1302. xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
  1303. *ocur = rcur;
  1304. } else if (i == 1) {
  1305. /* only the left record is valid */
  1306. xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
  1307. }
  1308. return 0;
  1309. error_rcur:
  1310. xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
  1311. return error;
  1312. }
  1313. /*
  1314. * Use the free inode btree to find a free inode based on a newino hint. If
  1315. * the hint is NULL, find the first free inode in the AG.
  1316. */
  1317. STATIC int
  1318. xfs_dialloc_ag_finobt_newino(
  1319. struct xfs_agi *agi,
  1320. struct xfs_btree_cur *cur,
  1321. struct xfs_inobt_rec_incore *rec)
  1322. {
  1323. int error;
  1324. int i;
  1325. if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
  1326. error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
  1327. XFS_LOOKUP_EQ, &i);
  1328. if (error)
  1329. return error;
  1330. if (i == 1) {
  1331. error = xfs_inobt_get_rec(cur, rec, &i);
  1332. if (error)
  1333. return error;
  1334. if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
  1335. xfs_btree_mark_sick(cur);
  1336. return -EFSCORRUPTED;
  1337. }
  1338. return 0;
  1339. }
  1340. }
  1341. /*
  1342. * Find the first inode available in the AG.
  1343. */
  1344. error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
  1345. if (error)
  1346. return error;
  1347. if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
  1348. xfs_btree_mark_sick(cur);
  1349. return -EFSCORRUPTED;
  1350. }
  1351. error = xfs_inobt_get_rec(cur, rec, &i);
  1352. if (error)
  1353. return error;
  1354. if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
  1355. xfs_btree_mark_sick(cur);
  1356. return -EFSCORRUPTED;
  1357. }
  1358. return 0;
  1359. }
  1360. /*
  1361. * Update the inobt based on a modification made to the finobt. Also ensure that
  1362. * the records from both trees are equivalent post-modification.
  1363. */
  1364. STATIC int
  1365. xfs_dialloc_ag_update_inobt(
  1366. struct xfs_btree_cur *cur, /* inobt cursor */
  1367. struct xfs_inobt_rec_incore *frec, /* finobt record */
  1368. int offset) /* inode offset */
  1369. {
  1370. struct xfs_inobt_rec_incore rec;
  1371. int error;
  1372. int i;
  1373. error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
  1374. if (error)
  1375. return error;
  1376. if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
  1377. xfs_btree_mark_sick(cur);
  1378. return -EFSCORRUPTED;
  1379. }
  1380. error = xfs_inobt_get_rec(cur, &rec, &i);
  1381. if (error)
  1382. return error;
  1383. if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
  1384. xfs_btree_mark_sick(cur);
  1385. return -EFSCORRUPTED;
  1386. }
  1387. ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
  1388. XFS_INODES_PER_CHUNK) == 0);
  1389. rec.ir_free &= ~XFS_INOBT_MASK(offset);
  1390. rec.ir_freecount--;
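/*
 * Applying the same allocation here should leave the inobt record
 * identical to the already-updated finobt record; any mismatch means the
 * two trees have fallen out of sync.
 */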
  1391. if (XFS_IS_CORRUPT(cur->bc_mp,
  1392. rec.ir_free != frec->ir_free ||
  1393. rec.ir_freecount != frec->ir_freecount)) {
  1394. xfs_btree_mark_sick(cur);
  1395. return -EFSCORRUPTED;
  1396. }
  1397. return xfs_inobt_update(cur, &rec);
  1398. }
  1399. /*
  1400. * Allocate an inode using the free inode btree, if available. Otherwise, fall
  1401. * back to the inobt search algorithm.
  1402. *
  1403. * The caller selected an AG for us, and made sure that free inodes are
  1404. * available.
  1405. */
  1406. static int
  1407. xfs_dialloc_ag(
  1408. struct xfs_perag *pag,
  1409. struct xfs_trans *tp,
  1410. struct xfs_buf *agbp,
  1411. xfs_ino_t parent,
  1412. xfs_ino_t *inop)
  1413. {
  1414. struct xfs_mount *mp = tp->t_mountp;
  1415. struct xfs_agi *agi = agbp->b_addr;
  1416. xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
  1417. xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
  1418. struct xfs_btree_cur *cur; /* finobt cursor */
  1419. struct xfs_btree_cur *icur; /* inobt cursor */
  1420. struct xfs_inobt_rec_incore rec;
  1421. xfs_ino_t ino;
  1422. int error;
  1423. int offset;
  1424. int i;
  1425. if (!xfs_has_finobt(mp))
  1426. return xfs_dialloc_ag_inobt(pag, tp, agbp, parent, inop);
  1427. /*
  1428. * If pagino is 0 (this is the root inode allocation) use newino.
  1429. * This must work because we've just allocated some.
  1430. */
  1431. if (!pagino)
  1432. pagino = be32_to_cpu(agi->agi_newino);
  1433. cur = xfs_finobt_init_cursor(pag, tp, agbp);
  1434. error = xfs_check_agi_freecount(cur);
  1435. if (error)
  1436. goto error_cur;
  1437. /*
  1438. * The search algorithm depends on whether we're in the same AG as the
  1439. * parent. If so, find the closest available inode to the parent. If
  1440. * not, consider the agi hint or find the first free inode in the AG.
  1441. */
  1442. if (pag->pag_agno == pagno)
  1443. error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
  1444. else
  1445. error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
  1446. if (error)
  1447. goto error_cur;
  1448. offset = xfs_inobt_first_free_inode(&rec);
  1449. ASSERT(offset >= 0);
  1450. ASSERT(offset < XFS_INODES_PER_CHUNK);
  1451. ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
  1452. XFS_INODES_PER_CHUNK) == 0);
  1453. ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
  1454. if (xfs_ag_has_sickness(pag, XFS_SICK_AG_INODES)) {
  1455. error = xfs_dialloc_check_ino(pag, tp, ino);
  1456. if (error)
  1457. goto error_cur;
  1458. }
  1459. /*
  1460. * Modify or remove the finobt record.
  1461. */
  1462. rec.ir_free &= ~XFS_INOBT_MASK(offset);
  1463. rec.ir_freecount--;
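/*
 * The finobt only tracks chunks with at least one free inode, so delete
 * the record once the last free inode in the chunk is claimed; otherwise
 * write back the updated record.
 */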
  1464. if (rec.ir_freecount)
  1465. error = xfs_inobt_update(cur, &rec);
  1466. else
  1467. error = xfs_btree_delete(cur, &i);
  1468. if (error)
  1469. goto error_cur;
  1470. /*
  1471. * The finobt has now been updated appropriately. We haven't updated the
  1472. * agi and superblock yet, so we can create an inobt cursor and validate
  1473. * the original freecount. If all is well, make the equivalent update to
  1474. * the inobt using the finobt record and offset information.
  1475. */
  1476. icur = xfs_inobt_init_cursor(pag, tp, agbp);
  1477. error = xfs_check_agi_freecount(icur);
  1478. if (error)
  1479. goto error_icur;
  1480. error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
  1481. if (error)
  1482. goto error_icur;
  1483. /*
  1484. * Both trees have now been updated. We must update the perag and
  1485. * superblock before we can check the freecount for each btree.
  1486. */
  1487. be32_add_cpu(&agi->agi_freecount, -1);
  1488. xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
  1489. pag->pagi_freecount--;
  1490. xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
  1491. error = xfs_check_agi_freecount(icur);
  1492. if (error)
  1493. goto error_icur;
  1494. error = xfs_check_agi_freecount(cur);
  1495. if (error)
  1496. goto error_icur;
  1497. xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
  1498. xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
  1499. *inop = ino;
  1500. return 0;
  1501. error_icur:
  1502. xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
  1503. error_cur:
  1504. xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
  1505. return error;
  1506. }
  1507. static int
  1508. xfs_dialloc_roll(
  1509. struct xfs_trans **tpp,
  1510. struct xfs_buf *agibp)
  1511. {
  1512. struct xfs_trans *tp = *tpp;
  1513. struct xfs_dquot_acct *dqinfo;
  1514. int error;
  1515. /*
1516. * Hold on to the agibp across the commit so no other allocation can
  1517. * come in and take the free inodes we just allocated for our caller.
  1518. */
  1519. xfs_trans_bhold(tp, agibp);
  1520. /*
  1521. * We want the quota changes to be associated with the next transaction,
  1522. * NOT this one. So, detach the dqinfo from this and attach it to the
  1523. * next transaction.
  1524. */
  1525. dqinfo = tp->t_dqinfo;
  1526. tp->t_dqinfo = NULL;
  1527. error = xfs_trans_roll(&tp);
1528. /* Re-attach the quota info that we detached from the previous transaction. */
  1529. tp->t_dqinfo = dqinfo;
  1530. /*
  1531. * Join the buffer even on commit error so that the buffer is released
  1532. * when the caller cancels the transaction and doesn't have to handle
  1533. * this error case specially.
  1534. */
  1535. xfs_trans_bjoin(tp, agibp);
  1536. *tpp = tp;
  1537. return error;
  1538. }
  1539. static bool
  1540. xfs_dialloc_good_ag(
  1541. struct xfs_perag *pag,
  1542. struct xfs_trans *tp,
  1543. umode_t mode,
  1544. int flags,
  1545. bool ok_alloc)
  1546. {
  1547. struct xfs_mount *mp = tp->t_mountp;
  1548. xfs_extlen_t ineed;
  1549. xfs_extlen_t longest = 0;
  1550. int needspace;
  1551. int error;
  1552. if (!pag)
  1553. return false;
  1554. if (!xfs_perag_allows_inodes(pag))
  1555. return false;
  1556. if (!xfs_perag_initialised_agi(pag)) {
  1557. error = xfs_ialloc_read_agi(pag, tp, 0, NULL);
  1558. if (error)
  1559. return false;
  1560. }
  1561. if (pag->pagi_freecount)
  1562. return true;
  1563. if (!ok_alloc)
  1564. return false;
  1565. if (!xfs_perag_initialised_agf(pag)) {
  1566. error = xfs_alloc_read_agf(pag, tp, flags, NULL);
  1567. if (error)
  1568. return false;
  1569. }
  1570. /*
  1571. * Check that there is enough free space for the file plus a chunk of
  1572. * inodes if we need to allocate some. If this is the first pass across
  1573. * the AGs, take into account the potential space needed for alignment
  1574. * of inode chunks when checking the longest contiguous free space in
  1575. * the AG - this prevents us from getting ENOSPC because we have free
  1576. * space larger than ialloc_blks but alignment constraints prevent us
  1577. * from using it.
  1578. *
  1579. * If we can't find an AG with space for full alignment slack to be
  1580. * taken into account, we must be near ENOSPC in all AGs. Hence we
  1581. * don't include alignment for the second pass and so if we fail
  1582. * allocation due to alignment issues then it is most likely a real
  1583. * ENOSPC condition.
  1584. *
  1585. * XXX(dgc): this calculation is now bogus thanks to the per-ag
  1586. * reservations that xfs_alloc_fix_freelist() now does via
  1587. * xfs_alloc_space_available(). When the AG fills up, pagf_freeblks will
  1588. * be more than large enough for the check below to succeed, but
  1589. * xfs_alloc_space_available() will fail because of the non-zero
  1590. * metadata reservation and hence we won't actually be able to allocate
  1591. * more inodes in this AG. We do soooo much unnecessary work near ENOSPC
  1592. * because of this.
  1593. */
  1594. ineed = M_IGEO(mp)->ialloc_min_blks;
  1595. if (flags && ineed > 1)
  1596. ineed += M_IGEO(mp)->cluster_align;
  1597. longest = pag->pagf_longest;
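/*
 * pagf_longest may be zero even though the AGFL still holds blocks;
 * treat a non-empty free list as a single free block for the check below.
 */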
  1598. if (!longest)
  1599. longest = pag->pagf_flcount > 0;
  1600. needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
  1601. if (pag->pagf_freeblks < needspace + ineed || longest < ineed)
  1602. return false;
  1603. return true;
  1604. }
  1605. static int
  1606. xfs_dialloc_try_ag(
  1607. struct xfs_perag *pag,
  1608. struct xfs_trans **tpp,
  1609. xfs_ino_t parent,
  1610. xfs_ino_t *new_ino,
  1611. bool ok_alloc)
  1612. {
  1613. struct xfs_buf *agbp;
  1614. xfs_ino_t ino;
  1615. int error;
  1616. /*
  1617. * Then read in the AGI buffer and recheck with the AGI buffer
  1618. * lock held.
  1619. */
  1620. error = xfs_ialloc_read_agi(pag, *tpp, 0, &agbp);
  1621. if (error)
  1622. return error;
  1623. if (!pag->pagi_freecount) {
  1624. if (!ok_alloc) {
  1625. error = -EAGAIN;
  1626. goto out_release;
  1627. }
  1628. error = xfs_ialloc_ag_alloc(pag, *tpp, agbp);
  1629. if (error < 0)
  1630. goto out_release;
  1631. /*
  1632. * We successfully allocated space for an inode cluster in this
  1633. * AG. Roll the transaction so that we can allocate one of the
  1634. * new inodes.
  1635. */
  1636. ASSERT(pag->pagi_freecount > 0);
  1637. error = xfs_dialloc_roll(tpp, agbp);
  1638. if (error)
  1639. goto out_release;
  1640. }
  1641. /* Allocate an inode in the found AG */
  1642. error = xfs_dialloc_ag(pag, *tpp, agbp, parent, &ino);
  1643. if (!error)
  1644. *new_ino = ino;
  1645. return error;
  1646. out_release:
  1647. xfs_trans_brelse(*tpp, agbp);
  1648. return error;
  1649. }
  1650. /*
  1651. * Allocate an on-disk inode.
  1652. *
  1653. * Mode is used to tell whether the new inode is a directory and hence where to
  1654. * locate it. The on-disk inode that is allocated will be returned in @new_ino
  1655. * on success, otherwise an error will be set to indicate the failure (e.g.
  1656. * -ENOSPC).
  1657. */
  1658. int
  1659. xfs_dialloc(
  1660. struct xfs_trans **tpp,
  1661. const struct xfs_icreate_args *args,
  1662. xfs_ino_t *new_ino)
  1663. {
  1664. struct xfs_mount *mp = (*tpp)->t_mountp;
  1665. xfs_ino_t parent = args->pip ? args->pip->i_ino : 0;
  1666. umode_t mode = args->mode & S_IFMT;
  1667. xfs_agnumber_t agno;
  1668. int error = 0;
  1669. xfs_agnumber_t start_agno;
  1670. struct xfs_perag *pag;
  1671. struct xfs_ino_geometry *igeo = M_IGEO(mp);
  1672. bool ok_alloc = true;
  1673. bool low_space = false;
  1674. int flags;
  1675. xfs_ino_t ino = NULLFSINO;
  1676. /*
  1677. * Directories, symlinks, and regular files frequently allocate at least
  1678. * one block, so factor that potential expansion when we examine whether
  1679. * an AG has enough space for file creation.
  1680. */
  1681. if (S_ISDIR(mode))
  1682. start_agno = (atomic_inc_return(&mp->m_agirotor) - 1) %
  1683. mp->m_maxagi;
  1684. else {
  1685. start_agno = XFS_INO_TO_AGNO(mp, parent);
  1686. if (start_agno >= mp->m_maxagi)
  1687. start_agno = 0;
  1688. }
  1689. /*
  1690. * If we have already hit the ceiling of inode blocks then clear
  1691. * ok_alloc so we scan all available agi structures for a free
  1692. * inode.
  1693. *
1694. * Read a rough value of mp->m_icount via percpu_counter_read_positive,
1695. * which sacrifices precision for performance.
  1696. */
  1697. if (igeo->maxicount &&
  1698. percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
  1699. > igeo->maxicount) {
  1700. ok_alloc = false;
  1701. }
  1702. /*
  1703. * If we are near to ENOSPC, we want to prefer allocation from AGs that
  1704. * have free inodes in them rather than use up free space allocating new
  1705. * inode chunks. Hence we turn off allocation for the first non-blocking
  1706. * pass through the AGs if we are near ENOSPC to consume free inodes
  1707. * that we can immediately allocate, but then we allow allocation on the
  1708. * second pass if we fail to find an AG with free inodes in it.
  1709. */
  1710. if (percpu_counter_read_positive(&mp->m_fdblocks) <
  1711. mp->m_low_space[XFS_LOWSP_1_PCNT]) {
  1712. ok_alloc = false;
  1713. low_space = true;
  1714. }
  1715. /*
  1716. * Loop until we find an allocation group that either has free inodes
  1717. * or in which we can allocate some inodes. Iterate through the
  1718. * allocation groups upward, wrapping at the end.
  1719. */
  1720. flags = XFS_ALLOC_FLAG_TRYLOCK;
  1721. retry:
  1722. for_each_perag_wrap_at(mp, start_agno, mp->m_maxagi, agno, pag) {
  1723. if (xfs_dialloc_good_ag(pag, *tpp, mode, flags, ok_alloc)) {
  1724. error = xfs_dialloc_try_ag(pag, tpp, parent,
  1725. &ino, ok_alloc);
  1726. if (error != -EAGAIN)
  1727. break;
  1728. error = 0;
  1729. }
  1730. if (xfs_is_shutdown(mp)) {
  1731. error = -EFSCORRUPTED;
  1732. break;
  1733. }
  1734. }
  1735. if (pag)
  1736. xfs_perag_rele(pag);
  1737. if (error)
  1738. return error;
  1739. if (ino == NULLFSINO) {
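/*
 * Nothing was found. If this was the first (trylock, alignment-padded)
 * pass, retry without trylock and without the alignment slack, and
 * re-enable chunk allocation if it was only disabled because free space
 * is low.
 */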
  1740. if (flags) {
  1741. flags = 0;
  1742. if (low_space)
  1743. ok_alloc = true;
  1744. goto retry;
  1745. }
  1746. return -ENOSPC;
  1747. }
  1748. /*
  1749. * Protect against obviously corrupt allocation btree records. Later
  1750. * xfs_iget checks will catch re-allocation of other active in-memory
  1751. * and on-disk inodes. If we don't catch reallocating the parent inode
  1752. * here we will deadlock in xfs_iget() so we have to do these checks
  1753. * first.
  1754. */
  1755. if (ino == parent || !xfs_verify_dir_ino(mp, ino)) {
  1756. xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
  1757. xfs_agno_mark_sick(mp, XFS_INO_TO_AGNO(mp, ino),
  1758. XFS_SICK_AG_INOBT);
  1759. return -EFSCORRUPTED;
  1760. }
  1761. *new_ino = ino;
  1762. return 0;
  1763. }
  1764. /*
  1765. * Free the blocks of an inode chunk. We must consider that the inode chunk
  1766. * might be sparse and only free the regions that are allocated as part of the
  1767. * chunk.
  1768. */
  1769. static int
  1770. xfs_difree_inode_chunk(
  1771. struct xfs_trans *tp,
  1772. xfs_agnumber_t agno,
  1773. struct xfs_inobt_rec_incore *rec)
  1774. {
  1775. struct xfs_mount *mp = tp->t_mountp;
  1776. xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp,
  1777. rec->ir_startino);
  1778. int startidx, endidx;
  1779. int nextbit;
  1780. xfs_agblock_t agbno;
  1781. int contigblk;
  1782. DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
  1783. if (!xfs_inobt_issparse(rec->ir_holemask)) {
  1784. /* not sparse, calculate extent info directly */
  1785. return xfs_free_extent_later(tp,
  1786. XFS_AGB_TO_FSB(mp, agno, sagbno),
  1787. M_IGEO(mp)->ialloc_blks, &XFS_RMAP_OINFO_INODES,
  1788. XFS_AG_RESV_NONE, 0);
  1789. }
  1790. /* holemask is only 16-bits (fits in an unsigned long) */
  1791. ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
  1792. holemask[0] = rec->ir_holemask;
  1793. /*
  1794. * Find contiguous ranges of zeroes (i.e., allocated regions) in the
  1795. * holemask and convert the start/end index of each range to an extent.
  1796. * We start with the start and end index both pointing at the first 0 in
  1797. * the mask.
  1798. */
  1799. startidx = endidx = find_first_zero_bit(holemask,
  1800. XFS_INOBT_HOLEMASK_BITS);
  1801. nextbit = startidx + 1;
  1802. while (startidx < XFS_INOBT_HOLEMASK_BITS) {
  1803. int error;
  1804. nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
  1805. nextbit);
  1806. /*
  1807. * If the next zero bit is contiguous, update the end index of
  1808. * the current range and continue.
  1809. */
  1810. if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
  1811. nextbit == endidx + 1) {
  1812. endidx = nextbit;
  1813. goto next;
  1814. }
  1815. /*
  1816. * nextbit is not contiguous with the current end index. Convert
  1817. * the current start/end to an extent and add it to the free
  1818. * list.
  1819. */
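/*
 * Each holemask bit covers XFS_INODES_PER_HOLEMASK_BIT inodes, so convert
 * the run of zero (allocated) bits into an AG block offset and length
 * before handing it to the extent freeing code.
 */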
  1820. agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
  1821. mp->m_sb.sb_inopblock;
  1822. contigblk = ((endidx - startidx + 1) *
  1823. XFS_INODES_PER_HOLEMASK_BIT) /
  1824. mp->m_sb.sb_inopblock;
  1825. ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
  1826. ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
  1827. error = xfs_free_extent_later(tp,
  1828. XFS_AGB_TO_FSB(mp, agno, agbno), contigblk,
  1829. &XFS_RMAP_OINFO_INODES, XFS_AG_RESV_NONE, 0);
  1830. if (error)
  1831. return error;
  1832. /* reset range to current bit and carry on... */
  1833. startidx = endidx = nextbit;
  1834. next:
  1835. nextbit++;
  1836. }
  1837. return 0;
  1838. }
  1839. STATIC int
  1840. xfs_difree_inobt(
  1841. struct xfs_perag *pag,
  1842. struct xfs_trans *tp,
  1843. struct xfs_buf *agbp,
  1844. xfs_agino_t agino,
  1845. struct xfs_icluster *xic,
  1846. struct xfs_inobt_rec_incore *orec)
  1847. {
  1848. struct xfs_mount *mp = pag->pag_mount;
  1849. struct xfs_agi *agi = agbp->b_addr;
  1850. struct xfs_btree_cur *cur;
  1851. struct xfs_inobt_rec_incore rec;
  1852. int ilen;
  1853. int error;
  1854. int i;
  1855. int off;
  1856. ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
  1857. ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));
  1858. /*
  1859. * Initialize the cursor.
  1860. */
  1861. cur = xfs_inobt_init_cursor(pag, tp, agbp);
  1862. error = xfs_check_agi_freecount(cur);
  1863. if (error)
  1864. goto error0;
  1865. /*
  1866. * Look for the entry describing this inode.
  1867. */
  1868. if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
  1869. xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
  1870. __func__, error);
  1871. goto error0;
  1872. }
  1873. if (XFS_IS_CORRUPT(mp, i != 1)) {
  1874. xfs_btree_mark_sick(cur);
  1875. error = -EFSCORRUPTED;
  1876. goto error0;
  1877. }
  1878. error = xfs_inobt_get_rec(cur, &rec, &i);
  1879. if (error) {
  1880. xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
  1881. __func__, error);
  1882. goto error0;
  1883. }
  1884. if (XFS_IS_CORRUPT(mp, i != 1)) {
  1885. xfs_btree_mark_sick(cur);
  1886. error = -EFSCORRUPTED;
  1887. goto error0;
  1888. }
  1889. /*
  1890. * Get the offset in the inode chunk.
  1891. */
  1892. off = agino - rec.ir_startino;
  1893. ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
  1894. ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
  1895. /*
  1896. * Mark the inode free & increment the count.
  1897. */
  1898. rec.ir_free |= XFS_INOBT_MASK(off);
  1899. rec.ir_freecount++;
  1900. /*
  1901. * When an inode chunk is free, it becomes eligible for removal. Don't
  1902. * remove the chunk if the block size is large enough for multiple inode
  1903. * chunks (that might not be free).
  1904. */
  1905. if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE &&
  1906. mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
  1907. xic->deleted = true;
  1908. xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
  1909. rec.ir_startino);
  1910. xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
  1911. /*
  1912. * Remove the inode cluster from the AGI B+Tree, adjust the
  1913. * AGI and Superblock inode counts, and mark the disk space
  1914. * to be freed when the transaction is committed.
  1915. */
  1916. ilen = rec.ir_freecount;
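/*
 * The inode being freed here was never added to the AGI freecount, so
 * only the chunk's other ilen - 1 free inodes come off the freecounts,
 * while the full chunk comes off the inode counts.
 */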
  1917. be32_add_cpu(&agi->agi_count, -ilen);
  1918. be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
  1919. xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
  1920. pag->pagi_freecount -= ilen - 1;
  1921. pag->pagi_count -= ilen;
  1922. xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
  1923. xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
  1924. if ((error = xfs_btree_delete(cur, &i))) {
  1925. xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
  1926. __func__, error);
  1927. goto error0;
  1928. }
  1929. error = xfs_difree_inode_chunk(tp, pag->pag_agno, &rec);
  1930. if (error)
  1931. goto error0;
  1932. } else {
  1933. xic->deleted = false;
  1934. error = xfs_inobt_update(cur, &rec);
  1935. if (error) {
  1936. xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
  1937. __func__, error);
  1938. goto error0;
  1939. }
  1940. /*
  1941. * Change the inode free counts and log the ag/sb changes.
  1942. */
  1943. be32_add_cpu(&agi->agi_freecount, 1);
  1944. xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
  1945. pag->pagi_freecount++;
  1946. xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
  1947. }
  1948. error = xfs_check_agi_freecount(cur);
  1949. if (error)
  1950. goto error0;
  1951. *orec = rec;
  1952. xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
  1953. return 0;
  1954. error0:
  1955. xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
  1956. return error;
  1957. }
  1958. /*
  1959. * Free an inode in the free inode btree.
  1960. */
  1961. STATIC int
  1962. xfs_difree_finobt(
  1963. struct xfs_perag *pag,
  1964. struct xfs_trans *tp,
  1965. struct xfs_buf *agbp,
  1966. xfs_agino_t agino,
  1967. struct xfs_inobt_rec_incore *ibtrec) /* inobt record */
  1968. {
  1969. struct xfs_mount *mp = pag->pag_mount;
  1970. struct xfs_btree_cur *cur;
  1971. struct xfs_inobt_rec_incore rec;
  1972. int offset = agino - ibtrec->ir_startino;
  1973. int error;
  1974. int i;
  1975. cur = xfs_finobt_init_cursor(pag, tp, agbp);
  1976. error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
  1977. if (error)
  1978. goto error;
  1979. if (i == 0) {
  1980. /*
  1981. * If the record does not exist in the finobt, we must have just
  1982. * freed an inode in a previously fully allocated chunk. If not,
  1983. * something is out of sync.
  1984. */
  1985. if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
  1986. xfs_btree_mark_sick(cur);
  1987. error = -EFSCORRUPTED;
  1988. goto error;
  1989. }
  1990. error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
  1991. ibtrec->ir_count,
  1992. ibtrec->ir_freecount,
  1993. ibtrec->ir_free, &i);
  1994. if (error)
  1995. goto error;
  1996. ASSERT(i == 1);
  1997. goto out;
  1998. }
  1999. /*
  2000. * Read and update the existing record. We could just copy the ibtrec
  2001. * across here, but that would defeat the purpose of having redundant
  2002. * metadata. By making the modifications independently, we can catch
  2003. * corruptions that we wouldn't see if we just copied from one record
  2004. * to another.
  2005. */
  2006. error = xfs_inobt_get_rec(cur, &rec, &i);
  2007. if (error)
  2008. goto error;
  2009. if (XFS_IS_CORRUPT(mp, i != 1)) {
  2010. xfs_btree_mark_sick(cur);
  2011. error = -EFSCORRUPTED;
  2012. goto error;
  2013. }
  2014. rec.ir_free |= XFS_INOBT_MASK(offset);
  2015. rec.ir_freecount++;
  2016. if (XFS_IS_CORRUPT(mp,
  2017. rec.ir_free != ibtrec->ir_free ||
  2018. rec.ir_freecount != ibtrec->ir_freecount)) {
  2019. xfs_btree_mark_sick(cur);
  2020. error = -EFSCORRUPTED;
  2021. goto error;
  2022. }
  2023. /*
  2024. * The content of inobt records should always match between the inobt
  2025. * and finobt. The lifecycle of records in the finobt is different from
  2026. * the inobt in that the finobt only tracks records with at least one
  2027. * free inode. Hence, if all of the inodes are free and we aren't
  2028. * keeping inode chunks permanently on disk, remove the record.
  2029. * Otherwise, update the record with the new information.
  2030. *
  2031. * Note that we currently can't free chunks when the block size is large
2032. * enough for multiple chunks. Leave the finobt record in place so it stays
2033. * in sync with the inobt.
  2034. */
  2035. if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE &&
  2036. mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
  2037. error = xfs_btree_delete(cur, &i);
  2038. if (error)
  2039. goto error;
  2040. ASSERT(i == 1);
  2041. } else {
  2042. error = xfs_inobt_update(cur, &rec);
  2043. if (error)
  2044. goto error;
  2045. }
  2046. out:
  2047. error = xfs_check_agi_freecount(cur);
  2048. if (error)
  2049. goto error;
  2050. xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
  2051. return 0;
  2052. error:
  2053. xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
  2054. return error;
  2055. }
  2056. /*
2057. * Free disk inode. Carefully avoids touching the incore inode; all
2058. * incore manipulations are the caller's responsibility.
  2059. * The on-disk inode is not changed by this operation, only the
  2060. * btree (free inode mask) is changed.
  2061. */
  2062. int
  2063. xfs_difree(
  2064. struct xfs_trans *tp,
  2065. struct xfs_perag *pag,
  2066. xfs_ino_t inode,
  2067. struct xfs_icluster *xic)
  2068. {
  2069. /* REFERENCED */
  2070. xfs_agblock_t agbno; /* block number containing inode */
  2071. struct xfs_buf *agbp; /* buffer for allocation group header */
  2072. xfs_agino_t agino; /* allocation group inode number */
  2073. int error; /* error return value */
  2074. struct xfs_mount *mp = tp->t_mountp;
  2075. struct xfs_inobt_rec_incore rec;/* btree record */
  2076. /*
  2077. * Break up inode number into its components.
  2078. */
  2079. if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) {
  2080. xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).",
  2081. __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno);
  2082. ASSERT(0);
  2083. return -EINVAL;
  2084. }
  2085. agino = XFS_INO_TO_AGINO(mp, inode);
  2086. if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
  2087. xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
  2088. __func__, (unsigned long long)inode,
  2089. (unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
  2090. ASSERT(0);
  2091. return -EINVAL;
  2092. }
  2093. agbno = XFS_AGINO_TO_AGBNO(mp, agino);
  2094. if (agbno >= xfs_ag_block_count(mp, pag->pag_agno)) {
  2095. xfs_warn(mp, "%s: agbno >= xfs_ag_block_count (%d >= %d).",
  2096. __func__, agbno, xfs_ag_block_count(mp, pag->pag_agno));
  2097. ASSERT(0);
  2098. return -EINVAL;
  2099. }
  2100. /*
  2101. * Get the allocation group header.
  2102. */
  2103. error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
  2104. if (error) {
  2105. xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
  2106. __func__, error);
  2107. return error;
  2108. }
  2109. /*
  2110. * Fix up the inode allocation btree.
  2111. */
  2112. error = xfs_difree_inobt(pag, tp, agbp, agino, xic, &rec);
  2113. if (error)
  2114. goto error0;
  2115. /*
  2116. * Fix up the free inode btree.
  2117. */
  2118. if (xfs_has_finobt(mp)) {
  2119. error = xfs_difree_finobt(pag, tp, agbp, agino, &rec);
  2120. if (error)
  2121. goto error0;
  2122. }
  2123. return 0;
  2124. error0:
  2125. return error;
  2126. }
  2127. STATIC int
  2128. xfs_imap_lookup(
  2129. struct xfs_perag *pag,
  2130. struct xfs_trans *tp,
  2131. xfs_agino_t agino,
  2132. xfs_agblock_t agbno,
  2133. xfs_agblock_t *chunk_agbno,
  2134. xfs_agblock_t *offset_agbno,
  2135. int flags)
  2136. {
  2137. struct xfs_mount *mp = pag->pag_mount;
  2138. struct xfs_inobt_rec_incore rec;
  2139. struct xfs_btree_cur *cur;
  2140. struct xfs_buf *agbp;
  2141. int error;
  2142. int i;
  2143. error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
  2144. if (error) {
  2145. xfs_alert(mp,
  2146. "%s: xfs_ialloc_read_agi() returned error %d, agno %d",
  2147. __func__, error, pag->pag_agno);
  2148. return error;
  2149. }
  2150. /*
  2151. * Lookup the inode record for the given agino. If the record cannot be
  2152. * found, then it's an invalid inode number and we should abort. Once
  2153. * we have a record, we need to ensure it contains the inode number
  2154. * we are looking up.
  2155. */
  2156. cur = xfs_inobt_init_cursor(pag, tp, agbp);
  2157. error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
  2158. if (!error) {
  2159. if (i)
  2160. error = xfs_inobt_get_rec(cur, &rec, &i);
  2161. if (!error && i == 0)
  2162. error = -EINVAL;
  2163. }
  2164. xfs_trans_brelse(tp, agbp);
  2165. xfs_btree_del_cursor(cur, error);
  2166. if (error)
  2167. return error;
  2168. /* check that the returned record contains the required inode */
  2169. if (rec.ir_startino > agino ||
  2170. rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
  2171. return -EINVAL;
  2172. /* for untrusted inodes check it is allocated first */
  2173. if ((flags & XFS_IGET_UNTRUSTED) &&
  2174. (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
  2175. return -EINVAL;
  2176. *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
  2177. *offset_agbno = agbno - *chunk_agbno;
  2178. return 0;
  2179. }
  2180. /*
  2181. * Return the location of the inode in imap, for mapping it into a buffer.
  2182. */
  2183. int
  2184. xfs_imap(
  2185. struct xfs_perag *pag,
  2186. struct xfs_trans *tp,
  2187. xfs_ino_t ino, /* inode to locate */
  2188. struct xfs_imap *imap, /* location map structure */
  2189. uint flags) /* flags for inode btree lookup */
  2190. {
  2191. struct xfs_mount *mp = pag->pag_mount;
  2192. xfs_agblock_t agbno; /* block number of inode in the alloc group */
  2193. xfs_agino_t agino; /* inode number within alloc group */
  2194. xfs_agblock_t chunk_agbno; /* first block in inode chunk */
  2195. xfs_agblock_t cluster_agbno; /* first block in inode cluster */
  2196. int error; /* error code */
  2197. int offset; /* index of inode in its buffer */
  2198. xfs_agblock_t offset_agbno; /* blks from chunk start to inode */
  2199. ASSERT(ino != NULLFSINO);
  2200. /*
  2201. * Split up the inode number into its parts.
  2202. */
  2203. agino = XFS_INO_TO_AGINO(mp, ino);
  2204. agbno = XFS_AGINO_TO_AGBNO(mp, agino);
  2205. if (agbno >= xfs_ag_block_count(mp, pag->pag_agno) ||
  2206. ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
  2207. error = -EINVAL;
  2208. #ifdef DEBUG
  2209. /*
  2210. * Don't output diagnostic information for untrusted inodes
  2211. * as they can be invalid without implying corruption.
  2212. */
  2213. if (flags & XFS_IGET_UNTRUSTED)
  2214. return error;
  2215. if (agbno >= xfs_ag_block_count(mp, pag->pag_agno)) {
  2216. xfs_alert(mp,
  2217. "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
  2218. __func__, (unsigned long long)agbno,
  2219. (unsigned long)xfs_ag_block_count(mp,
  2220. pag->pag_agno));
  2221. }
  2222. if (ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
  2223. xfs_alert(mp,
  2224. "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
  2225. __func__, ino,
  2226. XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
  2227. }
  2228. xfs_stack_trace();
  2229. #endif /* DEBUG */
  2230. return error;
  2231. }
  2232. /*
  2233. * For bulkstat and handle lookups, we have an untrusted inode number
  2234. * that we have to verify is valid. We cannot do this just by reading
  2235. * the inode buffer as it may have been unlinked and removed leaving
  2236. * inodes in stale state on disk. Hence we have to do a btree lookup
  2237. * in all cases where an untrusted inode number is passed.
  2238. */
  2239. if (flags & XFS_IGET_UNTRUSTED) {
  2240. error = xfs_imap_lookup(pag, tp, agino, agbno,
  2241. &chunk_agbno, &offset_agbno, flags);
  2242. if (error)
  2243. return error;
  2244. goto out_map;
  2245. }
  2246. /*
2247. * If the inode cluster size is the same as the blocksize or
2248. * smaller, we get to the buffer by simple arithmetic.
  2249. */
  2250. if (M_IGEO(mp)->blocks_per_cluster == 1) {
  2251. offset = XFS_INO_TO_OFFSET(mp, ino);
  2252. ASSERT(offset < mp->m_sb.sb_inopblock);
  2253. imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno);
  2254. imap->im_len = XFS_FSB_TO_BB(mp, 1);
  2255. imap->im_boffset = (unsigned short)(offset <<
  2256. mp->m_sb.sb_inodelog);
  2257. return 0;
  2258. }
  2259. /*
  2260. * If the inode chunks are aligned then use simple maths to
  2261. * find the location. Otherwise we have to do a btree
  2262. * lookup to find the location.
  2263. */
  2264. if (M_IGEO(mp)->inoalign_mask) {
  2265. offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
  2266. chunk_agbno = agbno - offset_agbno;
  2267. } else {
  2268. error = xfs_imap_lookup(pag, tp, agino, agbno,
  2269. &chunk_agbno, &offset_agbno, flags);
  2270. if (error)
  2271. return error;
  2272. }
  2273. out_map:
  2274. ASSERT(agbno >= chunk_agbno);
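/*
 * Round the chunk-relative block offset down to the start of its inode
 * cluster, then compute the inode's byte offset within that cluster
 * buffer.
 */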
  2275. cluster_agbno = chunk_agbno +
  2276. ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
  2277. M_IGEO(mp)->blocks_per_cluster);
  2278. offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
  2279. XFS_INO_TO_OFFSET(mp, ino);
  2280. imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno);
  2281. imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
  2282. imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
  2283. /*
  2284. * If the inode number maps to a block outside the bounds
2285. * of the file system then return an error rather than calling
2286. * read_buf and panicking when we get an error from the
  2287. * driver.
  2288. */
  2289. if ((imap->im_blkno + imap->im_len) >
  2290. XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
  2291. xfs_alert(mp,
  2292. "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
  2293. __func__, (unsigned long long) imap->im_blkno,
  2294. (unsigned long long) imap->im_len,
  2295. XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
  2296. return -EINVAL;
  2297. }
  2298. return 0;
  2299. }
  2300. /*
  2301. * Log specified fields for the ag hdr (inode section). The growth of the agi
  2302. * structure over time requires that we interpret the buffer as two logical
  2303. * regions delineated by the end of the unlinked list. This is due to the size
  2304. * of the hash table and its location in the middle of the agi.
  2305. *
  2306. * For example, a request to log a field before agi_unlinked and a field after
  2307. * agi_unlinked could cause us to log the entire hash table and use an excessive
  2308. * amount of log space. To avoid this behavior, log the region up through
  2309. * agi_unlinked in one call and the region after agi_unlinked through the end of
  2310. * the structure in another.
  2311. */
  2312. void
  2313. xfs_ialloc_log_agi(
  2314. struct xfs_trans *tp,
  2315. struct xfs_buf *bp,
  2316. uint32_t fields)
  2317. {
  2318. int first; /* first byte number */
  2319. int last; /* last byte number */
  2320. static const short offsets[] = { /* field starting offsets */
  2321. /* keep in sync with bit definitions */
  2322. offsetof(xfs_agi_t, agi_magicnum),
  2323. offsetof(xfs_agi_t, agi_versionnum),
  2324. offsetof(xfs_agi_t, agi_seqno),
  2325. offsetof(xfs_agi_t, agi_length),
  2326. offsetof(xfs_agi_t, agi_count),
  2327. offsetof(xfs_agi_t, agi_root),
  2328. offsetof(xfs_agi_t, agi_level),
  2329. offsetof(xfs_agi_t, agi_freecount),
  2330. offsetof(xfs_agi_t, agi_newino),
  2331. offsetof(xfs_agi_t, agi_dirino),
  2332. offsetof(xfs_agi_t, agi_unlinked),
  2333. offsetof(xfs_agi_t, agi_free_root),
  2334. offsetof(xfs_agi_t, agi_free_level),
  2335. offsetof(xfs_agi_t, agi_iblocks),
  2336. sizeof(xfs_agi_t)
  2337. };
  2338. #ifdef DEBUG
  2339. struct xfs_agi *agi = bp->b_addr;
  2340. ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
  2341. #endif
  2342. /*
  2343. * Compute byte offsets for the first and last fields in the first
  2344. * region and log the agi buffer. This only logs up through
  2345. * agi_unlinked.
  2346. */
  2347. if (fields & XFS_AGI_ALL_BITS_R1) {
  2348. xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
  2349. &first, &last);
  2350. xfs_trans_log_buf(tp, bp, first, last);
  2351. }
  2352. /*
  2353. * Mask off the bits in the first region and calculate the first and
  2354. * last field offsets for any bits in the second region.
  2355. */
  2356. fields &= ~XFS_AGI_ALL_BITS_R1;
  2357. if (fields) {
  2358. xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
  2359. &first, &last);
  2360. xfs_trans_log_buf(tp, bp, first, last);
  2361. }
  2362. }
  2363. static xfs_failaddr_t
  2364. xfs_agi_verify(
  2365. struct xfs_buf *bp)
  2366. {
  2367. struct xfs_mount *mp = bp->b_mount;
  2368. struct xfs_agi *agi = bp->b_addr;
  2369. xfs_failaddr_t fa;
  2370. uint32_t agi_seqno = be32_to_cpu(agi->agi_seqno);
  2371. uint32_t agi_length = be32_to_cpu(agi->agi_length);
  2372. int i;
  2373. if (xfs_has_crc(mp)) {
  2374. if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
  2375. return __this_address;
  2376. if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn)))
  2377. return __this_address;
  2378. }
  2379. /*
  2380. * Validate the magic number of the agi block.
  2381. */
  2382. if (!xfs_verify_magic(bp, agi->agi_magicnum))
  2383. return __this_address;
  2384. if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
  2385. return __this_address;
  2386. fa = xfs_validate_ag_length(bp, agi_seqno, agi_length);
  2387. if (fa)
  2388. return fa;
  2389. if (be32_to_cpu(agi->agi_level) < 1 ||
  2390. be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels)
  2391. return __this_address;
  2392. if (xfs_has_finobt(mp) &&
  2393. (be32_to_cpu(agi->agi_free_level) < 1 ||
  2394. be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels))
  2395. return __this_address;
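/*
 * Each unlinked-list bucket must either be empty (NULLAGINO) or point at
 * a plausible inode number.
 */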
  2396. for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
  2397. if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
  2398. continue;
  2399. if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
  2400. return __this_address;
  2401. }
  2402. return NULL;
  2403. }
  2404. static void
  2405. xfs_agi_read_verify(
  2406. struct xfs_buf *bp)
  2407. {
  2408. struct xfs_mount *mp = bp->b_mount;
  2409. xfs_failaddr_t fa;
  2410. if (xfs_has_crc(mp) &&
  2411. !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
  2412. xfs_verifier_error(bp, -EFSBADCRC, __this_address);
  2413. else {
  2414. fa = xfs_agi_verify(bp);
  2415. if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
  2416. xfs_verifier_error(bp, -EFSCORRUPTED, fa);
  2417. }
  2418. }
  2419. static void
  2420. xfs_agi_write_verify(
  2421. struct xfs_buf *bp)
  2422. {
  2423. struct xfs_mount *mp = bp->b_mount;
  2424. struct xfs_buf_log_item *bip = bp->b_log_item;
  2425. struct xfs_agi *agi = bp->b_addr;
  2426. xfs_failaddr_t fa;
  2427. fa = xfs_agi_verify(bp);
  2428. if (fa) {
  2429. xfs_verifier_error(bp, -EFSCORRUPTED, fa);
  2430. return;
  2431. }
  2432. if (!xfs_has_crc(mp))
  2433. return;
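/*
 * Stamp the AGI with the LSN of the last modification (when we have a
 * log item) before recomputing the checksum, so log recovery can tell
 * whether the on-disk buffer already contains a given change.
 */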
  2434. if (bip)
  2435. agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
  2436. xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
  2437. }
  2438. const struct xfs_buf_ops xfs_agi_buf_ops = {
  2439. .name = "xfs_agi",
  2440. .magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
  2441. .verify_read = xfs_agi_read_verify,
  2442. .verify_write = xfs_agi_write_verify,
  2443. .verify_struct = xfs_agi_verify,
  2444. };
  2445. /*
  2446. * Read in the allocation group header (inode allocation section)
  2447. */
  2448. int
  2449. xfs_read_agi(
  2450. struct xfs_perag *pag,
  2451. struct xfs_trans *tp,
  2452. xfs_buf_flags_t flags,
  2453. struct xfs_buf **agibpp)
  2454. {
  2455. struct xfs_mount *mp = pag->pag_mount;
  2456. int error;
  2457. trace_xfs_read_agi(pag->pag_mount, pag->pag_agno);
  2458. error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
  2459. XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)),
  2460. XFS_FSS_TO_BB(mp, 1), flags, agibpp, &xfs_agi_buf_ops);
  2461. if (xfs_metadata_is_sick(error))
  2462. xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
  2463. if (error)
  2464. return error;
  2465. if (tp)
  2466. xfs_trans_buf_set_type(tp, *agibpp, XFS_BLFT_AGI_BUF);
  2467. xfs_buf_set_ref(*agibpp, XFS_AGI_REF);
  2468. return 0;
  2469. }
  2470. /*
2471. * Read in the agi and initialise the per-ag data. If the caller supplies
  2472. * @agibpp, return the locked AGI buffer to them, otherwise release it.
  2473. */
  2474. int
  2475. xfs_ialloc_read_agi(
  2476. struct xfs_perag *pag,
  2477. struct xfs_trans *tp,
  2478. int flags,
  2479. struct xfs_buf **agibpp)
  2480. {
  2481. struct xfs_buf *agibp;
  2482. struct xfs_agi *agi;
  2483. int error;
  2484. trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno);
  2485. error = xfs_read_agi(pag, tp,
  2486. (flags & XFS_IALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
  2487. &agibp);
  2488. if (error)
  2489. return error;
  2490. agi = agibp->b_addr;
  2491. if (!xfs_perag_initialised_agi(pag)) {
  2492. pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
  2493. pag->pagi_count = be32_to_cpu(agi->agi_count);
  2494. set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
  2495. }
  2496. /*
  2497. * It's possible for these to be out of sync if
  2498. * we are in the middle of a forced shutdown.
  2499. */
  2500. ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
  2501. xfs_is_shutdown(pag->pag_mount));
  2502. if (agibpp)
  2503. *agibpp = agibp;
  2504. else
  2505. xfs_trans_brelse(tp, agibp);
  2506. return 0;
  2507. }
  2508. /* How many inodes are backed by inode clusters ondisk? */
  2509. STATIC int
  2510. xfs_ialloc_count_ondisk(
  2511. struct xfs_btree_cur *cur,
  2512. xfs_agino_t low,
  2513. xfs_agino_t high,
  2514. unsigned int *allocated)
  2515. {
  2516. struct xfs_inobt_rec_incore irec;
  2517. unsigned int ret = 0;
  2518. int has_record;
  2519. int error;
  2520. error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
  2521. if (error)
  2522. return error;
  2523. while (has_record) {
  2524. unsigned int i, hole_idx;
  2525. error = xfs_inobt_get_rec(cur, &irec, &has_record);
  2526. if (error)
  2527. return error;
  2528. if (irec.ir_startino > high)
  2529. break;
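/*
 * Count each inode in the requested range as allocated unless its
 * holemask bit marks that part of the chunk as a sparse hole.
 */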
  2530. for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
  2531. if (irec.ir_startino + i < low)
  2532. continue;
  2533. if (irec.ir_startino + i > high)
  2534. break;
  2535. hole_idx = i / XFS_INODES_PER_HOLEMASK_BIT;
  2536. if (!(irec.ir_holemask & (1U << hole_idx)))
  2537. ret++;
  2538. }
  2539. error = xfs_btree_increment(cur, 0, &has_record);
  2540. if (error)
  2541. return error;
  2542. }
  2543. *allocated = ret;
  2544. return 0;
  2545. }
  2546. /* Is there an inode record covering a given extent? */
  2547. int
  2548. xfs_ialloc_has_inodes_at_extent(
  2549. struct xfs_btree_cur *cur,
  2550. xfs_agblock_t bno,
  2551. xfs_extlen_t len,
  2552. enum xbtree_recpacking *outcome)
  2553. {
  2554. xfs_agino_t agino;
  2555. xfs_agino_t last_agino;
  2556. unsigned int allocated;
  2557. int error;
  2558. agino = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
  2559. last_agino = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;
  2560. error = xfs_ialloc_count_ondisk(cur, agino, last_agino, &allocated);
  2561. if (error)
  2562. return error;
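/*
 * Classify the result: no inodes backed by clusters, every inode in the
 * range backed, or only part of it.
 */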
  2563. if (allocated == 0)
  2564. *outcome = XBTREE_RECPACKING_EMPTY;
  2565. else if (allocated == last_agino - agino + 1)
  2566. *outcome = XBTREE_RECPACKING_FULL;
  2567. else
  2568. *outcome = XBTREE_RECPACKING_SPARSE;
  2569. return 0;
  2570. }
  2571. struct xfs_ialloc_count_inodes {
  2572. xfs_agino_t count;
  2573. xfs_agino_t freecount;
  2574. };
  2575. /* Record inode counts across all inobt records. */
  2576. STATIC int
  2577. xfs_ialloc_count_inodes_rec(
  2578. struct xfs_btree_cur *cur,
  2579. const union xfs_btree_rec *rec,
  2580. void *priv)
  2581. {
  2582. struct xfs_inobt_rec_incore irec;
  2583. struct xfs_ialloc_count_inodes *ci = priv;
  2584. xfs_failaddr_t fa;
  2585. xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
  2586. fa = xfs_inobt_check_irec(cur->bc_ag.pag, &irec);
  2587. if (fa)
  2588. return xfs_inobt_complain_bad_rec(cur, fa, &irec);
  2589. ci->count += irec.ir_count;
  2590. ci->freecount += irec.ir_freecount;
  2591. return 0;
  2592. }
  2593. /* Count allocated and free inodes under an inobt. */
  2594. int
  2595. xfs_ialloc_count_inodes(
  2596. struct xfs_btree_cur *cur,
  2597. xfs_agino_t *count,
  2598. xfs_agino_t *freecount)
  2599. {
  2600. struct xfs_ialloc_count_inodes ci = {0};
  2601. int error;
  2602. ASSERT(xfs_btree_is_ino(cur->bc_ops));
  2603. error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
  2604. if (error)
  2605. return error;
  2606. *count = ci.count;
  2607. *freecount = ci.freecount;
  2608. return 0;
  2609. }
  2610. /*
  2611. * Initialize inode-related geometry information.
  2612. *
  2613. * Compute the inode btree min and max levels and set maxicount.
  2614. *
  2615. * Set the inode cluster size. This may still be overridden by the file
  2616. * system block size if it is larger than the chosen cluster size.
  2617. *
  2618. * For v5 filesystems, scale the cluster size with the inode size to keep a
  2619. * constant ratio of inode per cluster buffer, but only if mkfs has set the
  2620. * inode alignment value appropriately for larger cluster sizes.
  2621. *
  2622. * Then compute the inode cluster alignment information.
  2623. */
  2624. void
  2625. xfs_ialloc_setup_geometry(
  2626. struct xfs_mount *mp)
  2627. {
  2628. struct xfs_sb *sbp = &mp->m_sb;
  2629. struct xfs_ino_geometry *igeo = M_IGEO(mp);
  2630. uint64_t icount;
  2631. uint inodes;
  2632. igeo->new_diflags2 = 0;
  2633. if (xfs_has_bigtime(mp))
  2634. igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME;
  2635. if (xfs_has_large_extent_counts(mp))
  2636. igeo->new_diflags2 |= XFS_DIFLAG2_NREXT64;
  2637. /* Compute inode btree geometry. */
  2638. igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
  2639. igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, true);
  2640. igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, false);
  2641. igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
  2642. igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;
  2643. igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
  2644. sbp->sb_inopblock);
  2645. igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;
  2646. if (sbp->sb_spino_align)
  2647. igeo->ialloc_min_blks = sbp->sb_spino_align;
  2648. else
  2649. igeo->ialloc_min_blks = igeo->ialloc_blks;
  2650. /* Compute and fill in value of m_ino_geo.inobt_maxlevels. */
  2651. inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
  2652. igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
  2653. inodes);
  2654. ASSERT(igeo->inobt_maxlevels <= xfs_iallocbt_maxlevels_ondisk());
  2655. /*
  2656. * Set the maximum inode count for this filesystem, being careful not
  2657. * to use obviously garbage sb_inopblog/sb_inopblock values. Regular
  2658. * users should never get here due to failing sb verification, but
  2659. * certain users (xfs_db) need to be usable even with corrupt metadata.
  2660. */
  2661. if (sbp->sb_imax_pct && igeo->ialloc_blks) {
  2662. /*
  2663. * Make sure the maximum inode count is a multiple
  2664. * of the units we allocate inodes in.
  2665. */
  2666. icount = sbp->sb_dblocks * sbp->sb_imax_pct;
  2667. do_div(icount, 100);
  2668. do_div(icount, igeo->ialloc_blks);
  2669. igeo->maxicount = XFS_FSB_TO_INO(mp,
  2670. icount * igeo->ialloc_blks);
  2671. } else {
  2672. igeo->maxicount = 0;
  2673. }
  2674. /*
2675. * Compute the desired inode cluster buffer size, which
  2676. * starts at 8K and (on v5 filesystems) scales up with larger inode
  2677. * sizes.
  2678. *
  2679. * Preserve the desired inode cluster size because the sparse inodes
  2680. * feature uses that desired size (not the actual size) to compute the
  2681. * sparse inode alignment. The mount code validates this value, so we
  2682. * cannot change the behavior.
  2683. */
  2684. igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
  2685. if (xfs_has_v3inodes(mp)) {
  2686. int new_size = igeo->inode_cluster_size_raw;
  2687. new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
  2688. if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
  2689. igeo->inode_cluster_size_raw = new_size;
  2690. }
  2691. /* Calculate inode cluster ratios. */
  2692. if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize)
  2693. igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp,
  2694. igeo->inode_cluster_size_raw);
  2695. else
  2696. igeo->blocks_per_cluster = 1;
  2697. igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster);
  2698. igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);
  2699. /* Calculate inode cluster alignment. */
  2700. if (xfs_has_align(mp) &&
  2701. mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster)
  2702. igeo->cluster_align = mp->m_sb.sb_inoalignmt;
  2703. else
  2704. igeo->cluster_align = 1;
  2705. igeo->inoalign_mask = igeo->cluster_align - 1;
  2706. igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);
  2707. /*
  2708. * If we are using stripe alignment, check whether
  2709. * the stripe unit is a multiple of the inode alignment
  2710. */
  2711. if (mp->m_dalign && igeo->inoalign_mask &&
  2712. !(mp->m_dalign & igeo->inoalign_mask))
  2713. igeo->ialloc_align = mp->m_dalign;
  2714. else
  2715. igeo->ialloc_align = 0;
  2716. if (mp->m_sb.sb_blocksize > PAGE_SIZE)
  2717. igeo->min_folio_order = mp->m_sb.sb_blocklog - PAGE_SHIFT;
  2718. else
  2719. igeo->min_folio_order = 0;
  2720. }
  2721. /* Compute the location of the root directory inode that is laid out by mkfs. */
  2722. xfs_ino_t
  2723. xfs_ialloc_calc_rootino(
  2724. struct xfs_mount *mp,
  2725. int sunit)
  2726. {
  2727. struct xfs_ino_geometry *igeo = M_IGEO(mp);
  2728. xfs_agblock_t first_bno;
  2729. /*
  2730. * Pre-calculate the geometry of AG 0. We know what it looks like
  2731. * because libxfs knows how to create allocation groups now.
  2732. *
  2733. * first_bno is the first block in which mkfs could possibly have
  2734. * allocated the root directory inode, once we factor in the metadata
  2735. * that mkfs formats before it. Namely, the four AG headers...
  2736. */
  2737. first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);
  2738. /* ...the two free space btree roots... */
  2739. first_bno += 2;
  2740. /* ...the inode btree root... */
  2741. first_bno += 1;
  2742. /* ...the initial AGFL... */
  2743. first_bno += xfs_alloc_min_freelist(mp, NULL);
  2744. /* ...the free inode btree root... */
  2745. if (xfs_has_finobt(mp))
  2746. first_bno++;
  2747. /* ...the reverse mapping btree root... */
  2748. if (xfs_has_rmapbt(mp))
  2749. first_bno++;
  2750. /* ...the reference count btree... */
  2751. if (xfs_has_reflink(mp))
  2752. first_bno++;
  2753. /*
  2754. * ...and the log, if it is allocated in the first allocation group.
  2755. *
  2756. * This can happen with filesystems that only have a single
  2757. * allocation group, or very odd geometries created by old mkfs
  2758. * versions on very small filesystems.
  2759. */
  2760. if (xfs_ag_contains_log(mp, 0))
  2761. first_bno += mp->m_sb.sb_logblocks;
  2762. /*
  2763. * Now round first_bno up to whatever allocation alignment is given
  2764. * by the filesystem or was passed in.
  2765. */
  2766. if (xfs_has_dalign(mp) && igeo->ialloc_align > 0)
  2767. first_bno = roundup(first_bno, sunit);
  2768. else if (xfs_has_align(mp) &&
  2769. mp->m_sb.sb_inoalignmt > 1)
  2770. first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt);
  2771. return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
  2772. }
  2773. /*
2774. * Ensure there are no sparse inode clusters that cross the new EOAG.
  2775. *
  2776. * This is a no-op for non-spinode filesystems since clusters are always fully
  2777. * allocated and checking the bnobt suffices. However, a spinode filesystem
  2778. * could have a record where the upper inodes are free blocks. If those blocks
  2779. * were removed from the filesystem, the inode record would extend beyond EOAG,
  2780. * which will be flagged as corruption.
  2781. */
  2782. int
  2783. xfs_ialloc_check_shrink(
  2784. struct xfs_perag *pag,
  2785. struct xfs_trans *tp,
  2786. struct xfs_buf *agibp,
  2787. xfs_agblock_t new_length)
  2788. {
  2789. struct xfs_inobt_rec_incore rec;
  2790. struct xfs_btree_cur *cur;
  2791. xfs_agino_t agino;
  2792. int has;
  2793. int error;
  2794. if (!xfs_has_sparseinodes(pag->pag_mount))
  2795. return 0;
  2796. cur = xfs_inobt_init_cursor(pag, tp, agibp);
  2797. /* Look up the inobt record that would correspond to the new EOFS. */
  2798. agino = XFS_AGB_TO_AGINO(pag->pag_mount, new_length);
  2799. error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has);
  2800. if (error || !has)
  2801. goto out;
  2802. error = xfs_inobt_get_rec(cur, &rec, &has);
  2803. if (error)
  2804. goto out;
  2805. if (!has) {
  2806. xfs_ag_mark_sick(pag, XFS_SICK_AG_INOBT);
  2807. error = -EFSCORRUPTED;
  2808. goto out;
  2809. }
  2810. /* If the record covers inodes that would be beyond EOFS, bail out. */
  2811. if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) {
  2812. error = -ENOSPC;
  2813. goto out;
  2814. }
  2815. out:
  2816. xfs_btree_del_cursor(cur, error);
  2817. return error;
  2818. }