xfs_buf.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  4. * All Rights Reserved.
  5. */
  6. #include "xfs.h"
  7. #include <linux/backing-dev.h>
  8. #include <linux/dax.h>
  9. #include "xfs_shared.h"
  10. #include "xfs_format.h"
  11. #include "xfs_log_format.h"
  12. #include "xfs_trans_resv.h"
  13. #include "xfs_mount.h"
  14. #include "xfs_trace.h"
  15. #include "xfs_log.h"
  16. #include "xfs_log_recover.h"
  17. #include "xfs_log_priv.h"
  18. #include "xfs_trans.h"
  19. #include "xfs_buf_item.h"
  20. #include "xfs_errortag.h"
  21. #include "xfs_error.h"
  22. #include "xfs_ag.h"
  23. #include "xfs_buf_mem.h"
  24. struct kmem_cache *xfs_buf_cache;
  25. /*
  26. * Locking orders
  27. *
  28. * xfs_buf_ioacct_inc:
  29. * xfs_buf_ioacct_dec:
  30. * b_sema (caller holds)
  31. * b_lock
  32. *
  33. * xfs_buf_stale:
  34. * b_sema (caller holds)
  35. * b_lock
  36. * lru_lock
  37. *
  38. * xfs_buf_rele:
  39. * b_lock
  40. * pag_buf_lock
  41. * lru_lock
  42. *
  43. * xfs_buftarg_drain_rele
  44. * lru_lock
  45. * b_lock (trylock due to inversion)
  46. *
  47. * xfs_buftarg_isolate
  48. * lru_lock
  49. * b_lock (trylock due to inversion)
  50. */
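/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * a holder of the buffer lock nests b_lock inside b_sema, matching the
 * xfs_buf_ioacct_inc/xfs_buf_stale orderings listed above. The function
 * name is a placeholder.
 */
static void __maybe_unused
xfs_buf_lock_order_example(
        struct xfs_buf  *bp)
{
        ASSERT(xfs_buf_islocked(bp));   /* b_sema held by the caller */
        spin_lock(&bp->b_lock);         /* b_lock nests inside b_sema */
        /* ... b_state and b_lru_ref updates happen under b_lock ... */
        spin_unlock(&bp->b_lock);
}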
  51. static int __xfs_buf_submit(struct xfs_buf *bp, bool wait);
  52. static inline int
  53. xfs_buf_submit(
  54. struct xfs_buf *bp)
  55. {
  56. return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC));
  57. }
  58. static inline bool xfs_buf_is_uncached(struct xfs_buf *bp)
  59. {
  60. return bp->b_rhash_key == XFS_BUF_DADDR_NULL;
  61. }
  62. static inline int
  63. xfs_buf_is_vmapped(
  64. struct xfs_buf *bp)
  65. {
  66. /*
  67. * Return true if the buffer is vmapped.
  68. *
  69. * b_addr is null if the buffer is not mapped, but the code is clever
  70. * enough to know it doesn't have to map a single page, so the check has
  71. * to be both for b_addr and bp->b_page_count > 1.
  72. */
  73. return bp->b_addr && bp->b_page_count > 1;
  74. }
  75. static inline int
  76. xfs_buf_vmap_len(
  77. struct xfs_buf *bp)
  78. {
  79. return (bp->b_page_count * PAGE_SIZE);
  80. }
  81. /*
  82. * Bump the I/O in flight count on the buftarg if we haven't yet done so for
  83. * this buffer. The count is incremented once per buffer (per hold cycle)
  84. * because the corresponding decrement is deferred to buffer release. Buffers
  85. * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
  86. * tracking adds unnecessary overhead. This is used for synchronization purposes
  87. * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
  88. * in-flight buffers.
  89. *
  90. * Buffers that are never released (e.g., superblock, iclog buffers) must set
  91. * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
  92. * never reaches zero and unmount hangs indefinitely.
  93. */
  94. static inline void
  95. xfs_buf_ioacct_inc(
  96. struct xfs_buf *bp)
  97. {
  98. if (bp->b_flags & XBF_NO_IOACCT)
  99. return;
  100. ASSERT(bp->b_flags & XBF_ASYNC);
  101. spin_lock(&bp->b_lock);
  102. if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
  103. bp->b_state |= XFS_BSTATE_IN_FLIGHT;
  104. percpu_counter_inc(&bp->b_target->bt_io_count);
  105. }
  106. spin_unlock(&bp->b_lock);
  107. }
  108. /*
  109. * Clear the in-flight state on a buffer about to be released to the LRU or
  110. * freed and unaccount from the buftarg.
  111. */
  112. static inline void
  113. __xfs_buf_ioacct_dec(
  114. struct xfs_buf *bp)
  115. {
  116. lockdep_assert_held(&bp->b_lock);
  117. if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
  118. bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
  119. percpu_counter_dec(&bp->b_target->bt_io_count);
  120. }
  121. }
  122. static inline void
  123. xfs_buf_ioacct_dec(
  124. struct xfs_buf *bp)
  125. {
  126. spin_lock(&bp->b_lock);
  127. __xfs_buf_ioacct_dec(bp);
  128. spin_unlock(&bp->b_lock);
  129. }
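/*
 * Illustrative sketch (assumption, not part of the original file): a buffer
 * that is held for the life of the mount must opt out of this accounting,
 * otherwise xfs_buftarg_drain() never sees bt_io_count reach zero at
 * unmount. The length and function name are placeholders.
 */
static int __maybe_unused
xfs_buf_no_ioacct_example(
        struct xfs_buftarg      *btp,
        struct xfs_buf          **bpp)
{
        /* XBF_NO_IOACCT must be set before the first I/O is submitted */
        return xfs_buf_get_uncached(btp, 1, XBF_NO_IOACCT, bpp);
}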
  130. /*
  131. * When we mark a buffer stale, we remove the buffer from the LRU and clear the
  132. * b_lru_ref count so that the buffer is freed immediately when the buffer
  133. * reference count falls to zero. If the buffer is already on the LRU, we need
  134. * to remove the reference that LRU holds on the buffer.
  135. *
  136. * This prevents build-up of stale buffers on the LRU.
  137. */
  138. void
  139. xfs_buf_stale(
  140. struct xfs_buf *bp)
  141. {
  142. ASSERT(xfs_buf_islocked(bp));
  143. bp->b_flags |= XBF_STALE;
  144. /*
  145. * Clear the delwri status so that a delwri queue walker will not
  146. * flush this buffer to disk now that it is stale. The delwri queue has
  147. * a reference to the buffer, so this is safe to do.
  148. */
  149. bp->b_flags &= ~_XBF_DELWRI_Q;
  150. /*
  151. * Once the buffer is marked stale and unlocked, a subsequent lookup
  152. * could reset b_flags. There is no guarantee that the buffer is
  153. * unaccounted (released to LRU) before that occurs. Drop in-flight
  154. * status now to preserve accounting consistency.
  155. */
  156. spin_lock(&bp->b_lock);
  157. __xfs_buf_ioacct_dec(bp);
  158. atomic_set(&bp->b_lru_ref, 0);
  159. if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
  160. (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
  161. atomic_dec(&bp->b_hold);
  162. ASSERT(atomic_read(&bp->b_hold) >= 1);
  163. spin_unlock(&bp->b_lock);
  164. }
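/*
 * Illustrative sketch (assumption, not part of the original file): the usual
 * invalidation pattern for a locked buffer whose contents are no longer
 * valid; xfs_buf_stale() requires the buffer lock. The function name is a
 * placeholder.
 */
static void __maybe_unused
xfs_buf_invalidate_example(
        struct xfs_buf  *bp)
{
        ASSERT(xfs_buf_islocked(bp));
        bp->b_flags &= ~XBF_DONE;       /* contents are no longer valid */
        xfs_buf_stale(bp);              /* drop from the LRU, free on last release */
        xfs_buf_relse(bp);
}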
  165. static int
  166. xfs_buf_get_maps(
  167. struct xfs_buf *bp,
  168. int map_count)
  169. {
  170. ASSERT(bp->b_maps == NULL);
  171. bp->b_map_count = map_count;
  172. if (map_count == 1) {
  173. bp->b_maps = &bp->__b_map;
  174. return 0;
  175. }
  176. bp->b_maps = kzalloc(map_count * sizeof(struct xfs_buf_map),
  177. GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
  178. if (!bp->b_maps)
  179. return -ENOMEM;
  180. return 0;
  181. }
  182. /*
  183. * Frees b_maps if it was allocated separately from the embedded map.
  184. */
  185. static void
  186. xfs_buf_free_maps(
  187. struct xfs_buf *bp)
  188. {
  189. if (bp->b_maps != &bp->__b_map) {
  190. kfree(bp->b_maps);
  191. bp->b_maps = NULL;
  192. }
  193. }
  194. static int
  195. _xfs_buf_alloc(
  196. struct xfs_buftarg *target,
  197. struct xfs_buf_map *map,
  198. int nmaps,
  199. xfs_buf_flags_t flags,
  200. struct xfs_buf **bpp)
  201. {
  202. struct xfs_buf *bp;
  203. int error;
  204. int i;
  205. *bpp = NULL;
  206. bp = kmem_cache_zalloc(xfs_buf_cache,
  207. GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
  208. /*
  209. * We don't want certain flags to appear in b_flags unless they are
  210. * specifically set by later operations on the buffer.
  211. */
  212. flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
  213. atomic_set(&bp->b_hold, 1);
  214. atomic_set(&bp->b_lru_ref, 1);
  215. init_completion(&bp->b_iowait);
  216. INIT_LIST_HEAD(&bp->b_lru);
  217. INIT_LIST_HEAD(&bp->b_list);
  218. INIT_LIST_HEAD(&bp->b_li_list);
  219. sema_init(&bp->b_sema, 0); /* held, no waiters */
  220. spin_lock_init(&bp->b_lock);
  221. bp->b_target = target;
  222. bp->b_mount = target->bt_mount;
  223. bp->b_flags = flags;
  224. /*
  225. * Set length and io_length to the same value initially.
  226. * I/O routines should use io_length, which will be the same in
  227. * most cases but may be reset (e.g. XFS recovery).
  228. */
  229. error = xfs_buf_get_maps(bp, nmaps);
  230. if (error) {
  231. kmem_cache_free(xfs_buf_cache, bp);
  232. return error;
  233. }
  234. bp->b_rhash_key = map[0].bm_bn;
  235. bp->b_length = 0;
  236. for (i = 0; i < nmaps; i++) {
  237. bp->b_maps[i].bm_bn = map[i].bm_bn;
  238. bp->b_maps[i].bm_len = map[i].bm_len;
  239. bp->b_length += map[i].bm_len;
  240. }
  241. atomic_set(&bp->b_pin_count, 0);
  242. init_waitqueue_head(&bp->b_waiters);
  243. XFS_STATS_INC(bp->b_mount, xb_create);
  244. trace_xfs_buf_init(bp, _RET_IP_);
  245. *bpp = bp;
  246. return 0;
  247. }
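/*
 * Illustrative sketch (assumption, not part of the original file): a
 * discontiguous buffer is described by an array of maps, and b_length ends
 * up as the sum of the bm_len values. The extents and function name are
 * placeholders.
 */
static int __maybe_unused
xfs_buf_two_extent_example(
        struct xfs_buftarg      *btp,
        struct xfs_buf          **bpp)
{
        struct xfs_buf_map      map[2] = {
                { .bm_bn = 64,  .bm_len = 8 },
                { .bm_bn = 256, .bm_len = 8 },
        };

        /* the resulting buffer has b_length == 16 basic blocks */
        return _xfs_buf_alloc(btp, map, 2, 0, bpp);
}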
  248. static void
  249. xfs_buf_free_pages(
  250. struct xfs_buf *bp)
  251. {
  252. uint i;
  253. ASSERT(bp->b_flags & _XBF_PAGES);
  254. if (xfs_buf_is_vmapped(bp))
  255. vm_unmap_ram(bp->b_addr, bp->b_page_count);
  256. for (i = 0; i < bp->b_page_count; i++) {
  257. if (bp->b_pages[i])
  258. __free_page(bp->b_pages[i]);
  259. }
  260. mm_account_reclaimed_pages(bp->b_page_count);
  261. if (bp->b_pages != bp->b_page_array)
  262. kfree(bp->b_pages);
  263. bp->b_pages = NULL;
  264. bp->b_flags &= ~_XBF_PAGES;
  265. }
  266. static void
  267. xfs_buf_free_callback(
  268. struct callback_head *cb)
  269. {
  270. struct xfs_buf *bp = container_of(cb, struct xfs_buf, b_rcu);
  271. xfs_buf_free_maps(bp);
  272. kmem_cache_free(xfs_buf_cache, bp);
  273. }
  274. static void
  275. xfs_buf_free(
  276. struct xfs_buf *bp)
  277. {
  278. trace_xfs_buf_free(bp, _RET_IP_);
  279. ASSERT(list_empty(&bp->b_lru));
  280. if (xfs_buftarg_is_mem(bp->b_target))
  281. xmbuf_unmap_page(bp);
  282. else if (bp->b_flags & _XBF_PAGES)
  283. xfs_buf_free_pages(bp);
  284. else if (bp->b_flags & _XBF_KMEM)
  285. kfree(bp->b_addr);
  286. call_rcu(&bp->b_rcu, xfs_buf_free_callback);
  287. }
  288. static int
  289. xfs_buf_alloc_kmem(
  290. struct xfs_buf *bp,
  291. xfs_buf_flags_t flags)
  292. {
  293. gfp_t gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL;
  294. size_t size = BBTOB(bp->b_length);
  295. /* Assure zeroed buffer for non-read cases. */
  296. if (!(flags & XBF_READ))
  297. gfp_mask |= __GFP_ZERO;
  298. bp->b_addr = kmalloc(size, gfp_mask);
  299. if (!bp->b_addr)
  300. return -ENOMEM;
  301. if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
  302. ((unsigned long)bp->b_addr & PAGE_MASK)) {
  303. /* b_addr spans two pages - use alloc_page instead */
  304. kfree(bp->b_addr);
  305. bp->b_addr = NULL;
  306. return -ENOMEM;
  307. }
  308. bp->b_offset = offset_in_page(bp->b_addr);
  309. bp->b_pages = bp->b_page_array;
  310. bp->b_pages[0] = kmem_to_page(bp->b_addr);
  311. bp->b_page_count = 1;
  312. bp->b_flags |= _XBF_KMEM;
  313. return 0;
  314. }
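/*
 * Illustrative sketch (assumption, not part of the original file): the heap
 * fallback above only works when the allocation sits entirely inside one
 * page, because b_pages[0] can only describe a single page. This helper
 * restates the boundary test used above; the name is a placeholder.
 */
static inline bool __maybe_unused
xfs_buf_fits_in_one_page(
        void            *addr,
        size_t          size)
{
        return ((unsigned long)(addr + size - 1) & PAGE_MASK) ==
               ((unsigned long)addr & PAGE_MASK);
}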
  315. static int
  316. xfs_buf_alloc_pages(
  317. struct xfs_buf *bp,
  318. xfs_buf_flags_t flags)
  319. {
  320. gfp_t gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN;
  321. long filled = 0;
  322. if (flags & XBF_READ_AHEAD)
  323. gfp_mask |= __GFP_NORETRY;
  324. /* Make sure that we have a page list */
  325. bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
  326. if (bp->b_page_count <= XB_PAGES) {
  327. bp->b_pages = bp->b_page_array;
  328. } else {
  329. bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
  330. gfp_mask);
  331. if (!bp->b_pages)
  332. return -ENOMEM;
  333. }
  334. bp->b_flags |= _XBF_PAGES;
  335. /* Assure zeroed buffer for non-read cases. */
  336. if (!(flags & XBF_READ))
  337. gfp_mask |= __GFP_ZERO;
  338. /*
  339. * Bulk filling of pages can take multiple calls. Not filling the entire
  340. * array is not an allocation failure, so don't back off if we get at
  341. * least one extra page.
  342. */
  343. for (;;) {
  344. long last = filled;
  345. filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
  346. bp->b_pages);
  347. if (filled == bp->b_page_count) {
  348. XFS_STATS_INC(bp->b_mount, xb_page_found);
  349. break;
  350. }
  351. if (filled != last)
  352. continue;
  353. if (flags & XBF_READ_AHEAD) {
  354. xfs_buf_free_pages(bp);
  355. return -ENOMEM;
  356. }
  357. XFS_STATS_INC(bp->b_mount, xb_page_retries);
  358. memalloc_retry_wait(gfp_mask);
  359. }
  360. return 0;
  361. }
  362. /*
  363. * Map buffer into kernel address-space if necessary.
  364. */
  365. STATIC int
  366. _xfs_buf_map_pages(
  367. struct xfs_buf *bp,
  368. xfs_buf_flags_t flags)
  369. {
  370. ASSERT(bp->b_flags & _XBF_PAGES);
  371. if (bp->b_page_count == 1) {
  372. /* A single page buffer is always mappable */
  373. bp->b_addr = page_address(bp->b_pages[0]);
  374. } else if (flags & XBF_UNMAPPED) {
  375. bp->b_addr = NULL;
  376. } else {
  377. int retried = 0;
  378. unsigned nofs_flag;
  379. /*
  380. * vm_map_ram() will allocate auxiliary structures (e.g.
  381. * pagetables) with GFP_KERNEL, yet we are often under a scoped nofs
  382. * context here. Mixing GFP_KERNEL with GFP_NOFS allocations
  383. * from the same call site that can be run from both above and
  384. * below memory reclaim causes lockdep false positives. Hence we
  385. * always need to force this allocation to nofs context because
  386. * we can't pass __GFP_NOLOCKDEP down to auxiliary structures to
  387. * prevent false positive lockdep reports.
  388. *
  389. * XXX(dgc): I think dquot reclaim is the only place we can get
  390. * to this function from memory reclaim context now. If we fix
  391. * that like we've fixed inode reclaim to avoid writeback from
  392. * reclaim, this nofs wrapping can go away.
  393. */
  394. nofs_flag = memalloc_nofs_save();
  395. do {
  396. bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
  397. -1);
  398. if (bp->b_addr)
  399. break;
  400. vm_unmap_aliases();
  401. } while (retried++ <= 1);
  402. memalloc_nofs_restore(nofs_flag);
  403. if (!bp->b_addr)
  404. return -ENOMEM;
  405. }
  406. return 0;
  407. }
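/*
 * Illustrative sketch (assumption, not part of the original file): the
 * scoped-nofs pattern used above, shown in isolation. Allocations made
 * between save and restore are implicitly degraded from GFP_KERNEL to
 * GFP_NOFS. The function name is a placeholder.
 */
static __maybe_unused void *
xfs_buf_map_under_nofs_example(
        struct page     **pages,
        unsigned int    count)
{
        unsigned int    nofs_flag = memalloc_nofs_save();
        void            *addr = vm_map_ram(pages, count, -1);

        memalloc_nofs_restore(nofs_flag);
        return addr;
}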
  408. /*
  409. * Finding and Reading Buffers
  410. */
  411. static int
  412. _xfs_buf_obj_cmp(
  413. struct rhashtable_compare_arg *arg,
  414. const void *obj)
  415. {
  416. const struct xfs_buf_map *map = arg->key;
  417. const struct xfs_buf *bp = obj;
  418. /*
  419. * The key hashing in the lookup path depends on the key being the
  420. * first element of the compare_arg, make sure to assert this.
  421. * first element of the compare_arg; make sure to assert this.
  422. BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);
  423. if (bp->b_rhash_key != map->bm_bn)
  424. return 1;
  425. if (unlikely(bp->b_length != map->bm_len)) {
  426. /*
  427. * found a block number match. If the range doesn't
  428. * match, the only way this is allowed is if the buffer
  429. * in the cache is stale and the transaction that made
  430. * it stale has not yet committed. i.e. we are
  431. * reallocating a busy extent. Skip this buffer and
  432. * continue searching for an exact match.
  433. *
  434. * Note: If we're scanning for incore buffers to stale, don't
  435. * complain if we find non-stale buffers.
  436. */
  437. if (!(map->bm_flags & XBM_LIVESCAN))
  438. ASSERT(bp->b_flags & XBF_STALE);
  439. return 1;
  440. }
  441. return 0;
  442. }
  443. static const struct rhashtable_params xfs_buf_hash_params = {
  444. .min_size = 32, /* empty AGs have minimal footprint */
  445. .nelem_hint = 16,
  446. .key_len = sizeof(xfs_daddr_t),
  447. .key_offset = offsetof(struct xfs_buf, b_rhash_key),
  448. .head_offset = offsetof(struct xfs_buf, b_rhash_head),
  449. .automatic_shrinking = true,
  450. .obj_cmpfn = _xfs_buf_obj_cmp,
  451. };
  452. int
  453. xfs_buf_cache_init(
  454. struct xfs_buf_cache *bch)
  455. {
  456. spin_lock_init(&bch->bc_lock);
  457. return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params);
  458. }
  459. void
  460. xfs_buf_cache_destroy(
  461. struct xfs_buf_cache *bch)
  462. {
  463. rhashtable_destroy(&bch->bc_hash);
  464. }
  465. static int
  466. xfs_buf_map_verify(
  467. struct xfs_buftarg *btp,
  468. struct xfs_buf_map *map)
  469. {
  470. xfs_daddr_t eofs;
  471. /* Check for IOs smaller than the sector size / not sector aligned */
  472. ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
  473. ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
  474. /*
  475. * Corrupted block numbers can get through to here, unfortunately, so we
  476. * have to check that the buffer falls within the filesystem bounds.
  477. */
  478. eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
  479. if (map->bm_bn < 0 || map->bm_bn >= eofs) {
  480. xfs_alert(btp->bt_mount,
  481. "%s: daddr 0x%llx out of range, EOFS 0x%llx",
  482. __func__, map->bm_bn, eofs);
  483. WARN_ON(1);
  484. return -EFSCORRUPTED;
  485. }
  486. return 0;
  487. }
  488. static int
  489. xfs_buf_find_lock(
  490. struct xfs_buf *bp,
  491. xfs_buf_flags_t flags)
  492. {
  493. if (flags & XBF_TRYLOCK) {
  494. if (!xfs_buf_trylock(bp)) {
  495. XFS_STATS_INC(bp->b_mount, xb_busy_locked);
  496. return -EAGAIN;
  497. }
  498. } else {
  499. xfs_buf_lock(bp);
  500. XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
  501. }
  502. /*
  503. * if the buffer is stale, clear all the external state associated with
  504. * it. We need to keep flags such as how we allocated the buffer memory
  505. * intact here.
  506. */
  507. if (bp->b_flags & XBF_STALE) {
  508. if (flags & XBF_LIVESCAN) {
  509. xfs_buf_unlock(bp);
  510. return -ENOENT;
  511. }
  512. ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
  513. bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
  514. bp->b_ops = NULL;
  515. }
  516. return 0;
  517. }
  518. static inline int
  519. xfs_buf_lookup(
  520. struct xfs_buf_cache *bch,
  521. struct xfs_buf_map *map,
  522. xfs_buf_flags_t flags,
  523. struct xfs_buf **bpp)
  524. {
  525. struct xfs_buf *bp;
  526. int error;
  527. rcu_read_lock();
  528. bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params);
  529. if (!bp || !atomic_inc_not_zero(&bp->b_hold)) {
  530. rcu_read_unlock();
  531. return -ENOENT;
  532. }
  533. rcu_read_unlock();
  534. error = xfs_buf_find_lock(bp, flags);
  535. if (error) {
  536. xfs_buf_rele(bp);
  537. return error;
  538. }
  539. trace_xfs_buf_find(bp, flags, _RET_IP_);
  540. *bpp = bp;
  541. return 0;
  542. }
  543. /*
  544. * Insert the new_bp into the hash table. This consumes the perag reference
  545. * taken for the lookup regardless of the result of the insert.
  546. */
  547. static int
  548. xfs_buf_find_insert(
  549. struct xfs_buftarg *btp,
  550. struct xfs_buf_cache *bch,
  551. struct xfs_perag *pag,
  552. struct xfs_buf_map *cmap,
  553. struct xfs_buf_map *map,
  554. int nmaps,
  555. xfs_buf_flags_t flags,
  556. struct xfs_buf **bpp)
  557. {
  558. struct xfs_buf *new_bp;
  559. struct xfs_buf *bp;
  560. int error;
  561. error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
  562. if (error)
  563. goto out_drop_pag;
  564. if (xfs_buftarg_is_mem(new_bp->b_target)) {
  565. error = xmbuf_map_page(new_bp);
  566. } else if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
  567. xfs_buf_alloc_kmem(new_bp, flags) < 0) {
  568. /*
  569. * For buffers that fit entirely within a single page, first
  570. * attempt to allocate the memory from the heap to minimise
  571. * memory usage. If we can't get heap memory for these small
  572. * buffers, we fall back to using the page allocator.
  573. */
  574. error = xfs_buf_alloc_pages(new_bp, flags);
  575. }
  576. if (error)
  577. goto out_free_buf;
  578. spin_lock(&bch->bc_lock);
  579. bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
  580. &new_bp->b_rhash_head, xfs_buf_hash_params);
  581. if (IS_ERR(bp)) {
  582. error = PTR_ERR(bp);
  583. spin_unlock(&bch->bc_lock);
  584. goto out_free_buf;
  585. }
  586. if (bp && atomic_inc_not_zero(&bp->b_hold)) {
  587. /* found an existing buffer */
  588. spin_unlock(&bch->bc_lock);
  589. error = xfs_buf_find_lock(bp, flags);
  590. if (error)
  591. xfs_buf_rele(bp);
  592. else
  593. *bpp = bp;
  594. goto out_free_buf;
  595. }
  596. /* The new buffer keeps the perag reference until it is freed. */
  597. new_bp->b_pag = pag;
  598. spin_unlock(&bch->bc_lock);
  599. *bpp = new_bp;
  600. return 0;
  601. out_free_buf:
  602. xfs_buf_free(new_bp);
  603. out_drop_pag:
  604. if (pag)
  605. xfs_perag_put(pag);
  606. return error;
  607. }
  608. static inline struct xfs_perag *
  609. xfs_buftarg_get_pag(
  610. struct xfs_buftarg *btp,
  611. const struct xfs_buf_map *map)
  612. {
  613. struct xfs_mount *mp = btp->bt_mount;
  614. if (xfs_buftarg_is_mem(btp))
  615. return NULL;
  616. return xfs_perag_get(mp, xfs_daddr_to_agno(mp, map->bm_bn));
  617. }
  618. static inline struct xfs_buf_cache *
  619. xfs_buftarg_buf_cache(
  620. struct xfs_buftarg *btp,
  621. struct xfs_perag *pag)
  622. {
  623. if (pag)
  624. return &pag->pag_bcache;
  625. return btp->bt_cache;
  626. }
  627. /*
  628. * Assembles a buffer covering the specified range. The code is optimised for
  629. * cache hits, as metadata intensive workloads will see 3 orders of magnitude
  630. * more hits than misses.
  631. */
  632. int
  633. xfs_buf_get_map(
  634. struct xfs_buftarg *btp,
  635. struct xfs_buf_map *map,
  636. int nmaps,
  637. xfs_buf_flags_t flags,
  638. struct xfs_buf **bpp)
  639. {
  640. struct xfs_buf_cache *bch;
  641. struct xfs_perag *pag;
  642. struct xfs_buf *bp = NULL;
  643. struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
  644. int error;
  645. int i;
  646. if (flags & XBF_LIVESCAN)
  647. cmap.bm_flags |= XBM_LIVESCAN;
  648. for (i = 0; i < nmaps; i++)
  649. cmap.bm_len += map[i].bm_len;
  650. error = xfs_buf_map_verify(btp, &cmap);
  651. if (error)
  652. return error;
  653. pag = xfs_buftarg_get_pag(btp, &cmap);
  654. bch = xfs_buftarg_buf_cache(btp, pag);
  655. error = xfs_buf_lookup(bch, &cmap, flags, &bp);
  656. if (error && error != -ENOENT)
  657. goto out_put_perag;
  658. /* cache hits always outnumber misses by at least 10:1 */
  659. if (unlikely(!bp)) {
  660. XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
  661. if (flags & XBF_INCORE)
  662. goto out_put_perag;
  663. /* xfs_buf_find_insert() consumes the perag reference. */
  664. error = xfs_buf_find_insert(btp, bch, pag, &cmap, map, nmaps,
  665. flags, &bp);
  666. if (error)
  667. return error;
  668. } else {
  669. XFS_STATS_INC(btp->bt_mount, xb_get_locked);
  670. if (pag)
  671. xfs_perag_put(pag);
  672. }
  673. /* We do not hold a perag reference anymore. */
  674. if (!bp->b_addr) {
  675. error = _xfs_buf_map_pages(bp, flags);
  676. if (unlikely(error)) {
  677. xfs_warn_ratelimited(btp->bt_mount,
  678. "%s: failed to map %u pages", __func__,
  679. bp->b_page_count);
  680. xfs_buf_relse(bp);
  681. return error;
  682. }
  683. }
  684. /*
  685. * Clear b_error if this is a lookup from a caller that doesn't expect
  686. * valid data to be found in the buffer.
  687. */
  688. if (!(flags & XBF_READ))
  689. xfs_buf_ioerror(bp, 0);
  690. XFS_STATS_INC(btp->bt_mount, xb_get);
  691. trace_xfs_buf_get(bp, flags, _RET_IP_);
  692. *bpp = bp;
  693. return 0;
  694. out_put_perag:
  695. if (pag)
  696. xfs_perag_put(pag);
  697. return error;
  698. }
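/*
 * Illustrative sketch (assumption, not part of the original file): a typical
 * single-extent cached lookup. Real callers usually go through the
 * xfs_buf_get()/xfs_buf_read() wrappers; the extent and function name here
 * are placeholders.
 */
static int __maybe_unused
xfs_buf_get_example(
        struct xfs_buftarg      *btp,
        struct xfs_buf          **bpp)
{
        DEFINE_SINGLE_BUF_MAP(map, 128, 16);    /* placeholder extent */

        /* XBF_TRYLOCK returns -EAGAIN instead of sleeping on a held buffer */
        return xfs_buf_get_map(btp, &map, 1, XBF_TRYLOCK, bpp);
}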
  699. int
  700. _xfs_buf_read(
  701. struct xfs_buf *bp,
  702. xfs_buf_flags_t flags)
  703. {
  704. ASSERT(!(flags & XBF_WRITE));
  705. ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
  706. bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
  707. bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
  708. return xfs_buf_submit(bp);
  709. }
  710. /*
  711. * Reverify a buffer found in cache without an attached ->b_ops.
  712. *
  713. * If the caller passed an ops structure and the buffer doesn't have ops
  714. * assigned, set the ops and use it to verify the contents. If verification
  715. * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is
  716. * already in XBF_DONE state on entry.
  717. *
  718. * Under normal operations, every in-core buffer is verified on read I/O
  719. * completion. There are two scenarios that can lead to in-core buffers without
  720. * an assigned ->b_ops. The first is during log recovery of buffers on a V4
  721. * filesystem, though these buffers are purged at the end of recovery. The
  722. * other is online repair, which intentionally reads with a NULL buffer ops to
  723. * run several verifiers across an in-core buffer in order to establish buffer
  724. * type. If repair can't establish that, the buffer will be left in memory
  725. * with NULL buffer ops.
  726. */
  727. int
  728. xfs_buf_reverify(
  729. struct xfs_buf *bp,
  730. const struct xfs_buf_ops *ops)
  731. {
  732. ASSERT(bp->b_flags & XBF_DONE);
  733. ASSERT(bp->b_error == 0);
  734. if (!ops || bp->b_ops)
  735. return 0;
  736. bp->b_ops = ops;
  737. bp->b_ops->verify_read(bp);
  738. if (bp->b_error)
  739. bp->b_flags &= ~XBF_DONE;
  740. return bp->b_error;
  741. }
  742. int
  743. xfs_buf_read_map(
  744. struct xfs_buftarg *target,
  745. struct xfs_buf_map *map,
  746. int nmaps,
  747. xfs_buf_flags_t flags,
  748. struct xfs_buf **bpp,
  749. const struct xfs_buf_ops *ops,
  750. xfs_failaddr_t fa)
  751. {
  752. struct xfs_buf *bp;
  753. int error;
  754. flags |= XBF_READ;
  755. *bpp = NULL;
  756. error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
  757. if (error)
  758. return error;
  759. trace_xfs_buf_read(bp, flags, _RET_IP_);
  760. if (!(bp->b_flags & XBF_DONE)) {
  761. /* Initiate the buffer read and wait. */
  762. XFS_STATS_INC(target->bt_mount, xb_get_read);
  763. bp->b_ops = ops;
  764. error = _xfs_buf_read(bp, flags);
  765. /* Readahead iodone already dropped the buffer, so exit. */
  766. if (flags & XBF_ASYNC)
  767. return 0;
  768. } else {
  769. /* Buffer already read; all we need to do is check it. */
  770. error = xfs_buf_reverify(bp, ops);
  771. /* Readahead already finished; drop the buffer and exit. */
  772. if (flags & XBF_ASYNC) {
  773. xfs_buf_relse(bp);
  774. return 0;
  775. }
  776. /* We do not want read in the flags */
  777. bp->b_flags &= ~XBF_READ;
  778. ASSERT(bp->b_ops != NULL || ops == NULL);
  779. }
  780. /*
  781. * If we've had a read error, then the contents of the buffer are
  782. * invalid and should not be used. To ensure that a followup read tries
  783. * to pull the buffer from disk again, we clear the XBF_DONE flag and
  784. * mark the buffer stale. This ensures that anyone who has a current
  785. * reference to the buffer will interpret its contents correctly and
  786. * future cache lookups will also treat it as an empty, uninitialised
  787. * buffer.
  788. */
  789. if (error) {
  790. /*
  791. * Check against log shutdown for error reporting because
  792. * metadata writeback may require a read first and we need to
  793. * report errors in metadata writeback until the log is shut
  794. * down. High level transaction read functions already check
  795. * against mount shutdown, anyway, so we only need to be
  796. * concerned about low level IO interactions here.
  797. */
  798. if (!xlog_is_shutdown(target->bt_mount->m_log))
  799. xfs_buf_ioerror_alert(bp, fa);
  800. bp->b_flags &= ~XBF_DONE;
  801. xfs_buf_stale(bp);
  802. xfs_buf_relse(bp);
  803. /* bad CRC means corrupted metadata */
  804. if (error == -EFSBADCRC)
  805. error = -EFSCORRUPTED;
  806. return error;
  807. }
  808. *bpp = bp;
  809. return 0;
  810. }
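/*
 * Illustrative sketch (assumption, not part of the original file): a
 * synchronous metadata read with a verifier attached. The extent and
 * function name are placeholders; transactional callers normally use
 * xfs_trans_read_buf() instead.
 */
static int __maybe_unused
xfs_buf_read_example(
        struct xfs_buftarg              *btp,
        const struct xfs_buf_ops        *ops)
{
        DEFINE_SINGLE_BUF_MAP(map, 512, 8);     /* placeholder extent */
        struct xfs_buf                  *bp;
        int                             error;

        error = xfs_buf_read_map(btp, &map, 1, 0, &bp, ops, __this_address);
        if (error)
                return error;   /* -EFSBADCRC already mapped to -EFSCORRUPTED */

        /* ... consume bp->b_addr while holding the buffer lock ... */
        xfs_buf_relse(bp);
        return 0;
}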
  811. /*
  812. * If we are not low on memory then do the readahead in a deadlock
  813. * safe manner.
  814. */
  815. void
  816. xfs_buf_readahead_map(
  817. struct xfs_buftarg *target,
  818. struct xfs_buf_map *map,
  819. int nmaps,
  820. const struct xfs_buf_ops *ops)
  821. {
  822. struct xfs_buf *bp;
  823. /*
  824. * Currently we don't have a good means or justification for performing
  825. * xmbuf_map_page asynchronously, so we don't do readahead.
  826. */
  827. if (xfs_buftarg_is_mem(target))
  828. return;
  829. xfs_buf_read_map(target, map, nmaps,
  830. XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
  831. __this_address);
  832. }
  833. /*
  834. * Read an uncached buffer from disk. Allocates and returns a locked
  835. * buffer containing the disk contents or nothing. Uncached buffers always have
  836. * a cache index of XFS_BUF_DADDR_NULL so we can easily determine if the buffer
  837. * is cached or uncached during fault diagnosis.
  838. */
  839. int
  840. xfs_buf_read_uncached(
  841. struct xfs_buftarg *target,
  842. xfs_daddr_t daddr,
  843. size_t numblks,
  844. xfs_buf_flags_t flags,
  845. struct xfs_buf **bpp,
  846. const struct xfs_buf_ops *ops)
  847. {
  848. struct xfs_buf *bp;
  849. int error;
  850. *bpp = NULL;
  851. error = xfs_buf_get_uncached(target, numblks, flags, &bp);
  852. if (error)
  853. return error;
  854. /* set up the buffer for a read IO */
  855. ASSERT(bp->b_map_count == 1);
  856. bp->b_rhash_key = XFS_BUF_DADDR_NULL;
  857. bp->b_maps[0].bm_bn = daddr;
  858. bp->b_flags |= XBF_READ;
  859. bp->b_ops = ops;
  860. xfs_buf_submit(bp);
  861. if (bp->b_error) {
  862. error = bp->b_error;
  863. xfs_buf_relse(bp);
  864. return error;
  865. }
  866. *bpp = bp;
  867. return 0;
  868. }
  869. int
  870. xfs_buf_get_uncached(
  871. struct xfs_buftarg *target,
  872. size_t numblks,
  873. xfs_buf_flags_t flags,
  874. struct xfs_buf **bpp)
  875. {
  876. int error;
  877. struct xfs_buf *bp;
  878. DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
  879. *bpp = NULL;
  880. /* flags might contain irrelevant bits, pass only what we care about */
  881. error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
  882. if (error)
  883. return error;
  884. if (xfs_buftarg_is_mem(bp->b_target))
  885. error = xmbuf_map_page(bp);
  886. else
  887. error = xfs_buf_alloc_pages(bp, flags);
  888. if (error)
  889. goto fail_free_buf;
  890. error = _xfs_buf_map_pages(bp, 0);
  891. if (unlikely(error)) {
  892. xfs_warn(target->bt_mount,
  893. "%s: failed to map pages", __func__);
  894. goto fail_free_buf;
  895. }
  896. trace_xfs_buf_get_uncached(bp, _RET_IP_);
  897. *bpp = bp;
  898. return 0;
  899. fail_free_buf:
  900. xfs_buf_free(bp);
  901. return error;
  902. }
  903. /*
  904. * Increment reference count on buffer, to hold the buffer concurrently
  905. * with another thread which may release (free) the buffer asynchronously.
  906. * Must hold the buffer already to call this function.
  907. */
  908. void
  909. xfs_buf_hold(
  910. struct xfs_buf *bp)
  911. {
  912. trace_xfs_buf_hold(bp, _RET_IP_);
  913. atomic_inc(&bp->b_hold);
  914. }
  915. static void
  916. xfs_buf_rele_uncached(
  917. struct xfs_buf *bp)
  918. {
  919. ASSERT(list_empty(&bp->b_lru));
  920. if (atomic_dec_and_test(&bp->b_hold)) {
  921. xfs_buf_ioacct_dec(bp);
  922. xfs_buf_free(bp);
  923. }
  924. }
  925. static void
  926. xfs_buf_rele_cached(
  927. struct xfs_buf *bp)
  928. {
  929. struct xfs_buftarg *btp = bp->b_target;
  930. struct xfs_perag *pag = bp->b_pag;
  931. struct xfs_buf_cache *bch = xfs_buftarg_buf_cache(btp, pag);
  932. bool release;
  933. bool freebuf = false;
  934. trace_xfs_buf_rele(bp, _RET_IP_);
  935. ASSERT(atomic_read(&bp->b_hold) > 0);
  936. /*
  937. * We grab the b_lock here first to serialise racing xfs_buf_rele()
  938. * calls. The pag_buf_lock being taken on the last reference only
  939. * serialises against racing lookups in xfs_buf_find(). IOWs, the second
  940. * to last reference we drop here is not serialised against the last
  941. * reference until we take bp->b_lock. Hence if we don't grab b_lock
  942. * first, the last "release" reference can win the race to the lock and
  943. * free the buffer before the second-to-last reference is processed,
  944. * leading to a use-after-free scenario.
  945. */
  946. spin_lock(&bp->b_lock);
  947. release = atomic_dec_and_lock(&bp->b_hold, &bch->bc_lock);
  948. if (!release) {
  949. /*
  950. * Drop the in-flight state if the buffer is already on the LRU
  951. * and it holds the only reference. This is racy because we
  952. * haven't acquired the pag lock, but the use of XFS_BSTATE_IN_FLIGHT
  953. * ensures the decrement occurs only once per-buf.
  954. */
  955. if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
  956. __xfs_buf_ioacct_dec(bp);
  957. goto out_unlock;
  958. }
  959. /* the last reference has been dropped ... */
  960. __xfs_buf_ioacct_dec(bp);
  961. if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
  962. /*
  963. * If the buffer is added to the LRU take a new reference to the
  964. * buffer for the LRU and clear the (now stale) dispose list
  965. * state flag
  966. */
  967. if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru)) {
  968. bp->b_state &= ~XFS_BSTATE_DISPOSE;
  969. atomic_inc(&bp->b_hold);
  970. }
  971. spin_unlock(&bch->bc_lock);
  972. } else {
  973. /*
  974. * most of the time buffers will already be removed from the
  975. * LRU, so optimise that case by checking for the
  976. * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
  977. * was on was the disposal list
  978. */
  979. if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
  980. list_lru_del_obj(&btp->bt_lru, &bp->b_lru);
  981. } else {
  982. ASSERT(list_empty(&bp->b_lru));
  983. }
  984. ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
  985. rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
  986. xfs_buf_hash_params);
  987. spin_unlock(&bch->bc_lock);
  988. if (pag)
  989. xfs_perag_put(pag);
  990. freebuf = true;
  991. }
  992. out_unlock:
  993. spin_unlock(&bp->b_lock);
  994. if (freebuf)
  995. xfs_buf_free(bp);
  996. }
  997. /*
  998. * Release a hold on the specified buffer.
  999. */
  1000. void
  1001. xfs_buf_rele(
  1002. struct xfs_buf *bp)
  1003. {
  1004. trace_xfs_buf_rele(bp, _RET_IP_);
  1005. if (xfs_buf_is_uncached(bp))
  1006. xfs_buf_rele_uncached(bp);
  1007. else
  1008. xfs_buf_rele_cached(bp);
  1009. }
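/*
 * Illustrative sketch (assumption, not part of the original file): take an
 * extra hold when a buffer must outlive a window in which another context
 * may drop its own reference. The function name is a placeholder.
 */
static void __maybe_unused
xfs_buf_hold_example(
        struct xfs_buf  *bp)
{
        xfs_buf_hold(bp);       /* keep bp alive across the window */
        /* ... hand bp to another thread or async completion ... */
        xfs_buf_rele(bp);       /* drop the extra reference */
}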
  1010. /*
  1011. * Lock a buffer object, if it is not already locked.
  1012. *
  1013. * If we come across a stale, pinned, locked buffer, we know that we are
  1014. * being asked to lock a buffer that has been reallocated. Because it is
  1015. * pinned, we know that the log has not been pushed to disk and hence it
  1016. * will still be locked. Rather than continuing to have trylock attempts
  1017. * fail until someone else pushes the log, push it ourselves before
  1018. * returning. This means that the xfsaild will not get stuck trying
  1019. * to push on stale inode buffers.
  1020. */
  1021. int
  1022. xfs_buf_trylock(
  1023. struct xfs_buf *bp)
  1024. {
  1025. int locked;
  1026. locked = down_trylock(&bp->b_sema) == 0;
  1027. if (locked)
  1028. trace_xfs_buf_trylock(bp, _RET_IP_);
  1029. else
  1030. trace_xfs_buf_trylock_fail(bp, _RET_IP_);
  1031. return locked;
  1032. }
  1033. /*
  1034. * Lock a buffer object.
  1035. *
  1036. * If we come across a stale, pinned, locked buffer, we know that we
  1037. * are being asked to lock a buffer that has been reallocated. Because
  1038. * it is pinned, we know that the log has not been pushed to disk and
  1039. * hence it will still be locked. Rather than sleeping until someone
  1040. * else pushes the log, push it ourselves before trying to get the lock.
  1041. */
  1042. void
  1043. xfs_buf_lock(
  1044. struct xfs_buf *bp)
  1045. {
  1046. trace_xfs_buf_lock(bp, _RET_IP_);
  1047. if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
  1048. xfs_log_force(bp->b_mount, 0);
  1049. down(&bp->b_sema);
  1050. trace_xfs_buf_lock_done(bp, _RET_IP_);
  1051. }
  1052. void
  1053. xfs_buf_unlock(
  1054. struct xfs_buf *bp)
  1055. {
  1056. ASSERT(xfs_buf_islocked(bp));
  1057. up(&bp->b_sema);
  1058. trace_xfs_buf_unlock(bp, _RET_IP_);
  1059. }
  1060. STATIC void
  1061. xfs_buf_wait_unpin(
  1062. struct xfs_buf *bp)
  1063. {
  1064. DECLARE_WAITQUEUE (wait, current);
  1065. if (atomic_read(&bp->b_pin_count) == 0)
  1066. return;
  1067. add_wait_queue(&bp->b_waiters, &wait);
  1068. for (;;) {
  1069. set_current_state(TASK_UNINTERRUPTIBLE);
  1070. if (atomic_read(&bp->b_pin_count) == 0)
  1071. break;
  1072. io_schedule();
  1073. }
  1074. remove_wait_queue(&bp->b_waiters, &wait);
  1075. set_current_state(TASK_RUNNING);
  1076. }
  1077. static void
  1078. xfs_buf_ioerror_alert_ratelimited(
  1079. struct xfs_buf *bp)
  1080. {
  1081. static unsigned long lasttime;
  1082. static struct xfs_buftarg *lasttarg;
  1083. if (bp->b_target != lasttarg ||
  1084. time_after(jiffies, (lasttime + 5*HZ))) {
  1085. lasttime = jiffies;
  1086. xfs_buf_ioerror_alert(bp, __this_address);
  1087. }
  1088. lasttarg = bp->b_target;
  1089. }
  1090. /*
  1091. * Account for this latest trip around the retry handler, and decide if
  1092. * we've failed enough times to constitute a permanent failure.
  1093. */
  1094. static bool
  1095. xfs_buf_ioerror_permanent(
  1096. struct xfs_buf *bp,
  1097. struct xfs_error_cfg *cfg)
  1098. {
  1099. struct xfs_mount *mp = bp->b_mount;
  1100. if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
  1101. ++bp->b_retries > cfg->max_retries)
  1102. return true;
  1103. if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
  1104. time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
  1105. return true;
  1106. /* At unmount we may treat errors differently */
  1107. if (xfs_is_unmounting(mp) && mp->m_fail_unmount)
  1108. return true;
  1109. return false;
  1110. }
  1111. /*
  1112. * On a sync write or shutdown we just want to stale the buffer and let the
  1113. * caller handle the error in bp->b_error appropriately.
  1114. *
  1115. * If the write was asynchronous then no one will be looking for the error. If
  1116. * this is the first failure of this type, clear the error state and write the
  1117. * buffer out again. This means we always retry an async write failure at least
  1118. * once, but we also need to set the buffer up to behave correctly now for
  1119. * repeated failures.
  1120. *
  1121. * If we get repeated async write failures, then we take action according to the
  1122. * error configuration we have been set up to use.
  1123. *
  1124. * Returns true if this function took care of error handling and the caller must
  1125. * not touch the buffer again. Return false if the caller should proceed with
  1126. * normal I/O completion handling.
  1127. */
  1128. static bool
  1129. xfs_buf_ioend_handle_error(
  1130. struct xfs_buf *bp)
  1131. {
  1132. struct xfs_mount *mp = bp->b_mount;
  1133. struct xfs_error_cfg *cfg;
  1134. /*
  1135. * If we've already shutdown the journal because of I/O errors, there's
  1136. * no point in giving this a retry.
  1137. */
  1138. if (xlog_is_shutdown(mp->m_log))
  1139. goto out_stale;
  1140. xfs_buf_ioerror_alert_ratelimited(bp);
  1141. /*
  1142. * We're not going to bother about retrying this during recovery.
  1143. * One strike!
  1144. */
  1145. if (bp->b_flags & _XBF_LOGRECOVERY) {
  1146. xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
  1147. return false;
  1148. }
  1149. /*
  1150. * Synchronous writes will have callers process the error.
  1151. */
  1152. if (!(bp->b_flags & XBF_ASYNC))
  1153. goto out_stale;
  1154. trace_xfs_buf_iodone_async(bp, _RET_IP_);
  1155. cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
  1156. if (bp->b_last_error != bp->b_error ||
  1157. !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) {
  1158. bp->b_last_error = bp->b_error;
  1159. if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
  1160. !bp->b_first_retry_time)
  1161. bp->b_first_retry_time = jiffies;
  1162. goto resubmit;
  1163. }
  1164. /*
  1165. * Permanent error - we need to trigger a shutdown if we haven't already
  1166. * to indicate that inconsistency will result from this action.
  1167. */
  1168. if (xfs_buf_ioerror_permanent(bp, cfg)) {
  1169. xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
  1170. goto out_stale;
  1171. }
  1172. /* Still considered a transient error. Caller will schedule retries. */
  1173. if (bp->b_flags & _XBF_INODES)
  1174. xfs_buf_inode_io_fail(bp);
  1175. else if (bp->b_flags & _XBF_DQUOTS)
  1176. xfs_buf_dquot_io_fail(bp);
  1177. else
  1178. ASSERT(list_empty(&bp->b_li_list));
  1179. xfs_buf_ioerror(bp, 0);
  1180. xfs_buf_relse(bp);
  1181. return true;
  1182. resubmit:
  1183. xfs_buf_ioerror(bp, 0);
  1184. bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
  1185. xfs_buf_submit(bp);
  1186. return true;
  1187. out_stale:
  1188. xfs_buf_stale(bp);
  1189. bp->b_flags |= XBF_DONE;
  1190. bp->b_flags &= ~XBF_WRITE;
  1191. trace_xfs_buf_error_relse(bp, _RET_IP_);
  1192. return false;
  1193. }
  1194. static void
  1195. xfs_buf_ioend(
  1196. struct xfs_buf *bp)
  1197. {
  1198. trace_xfs_buf_iodone(bp, _RET_IP_);
  1199. /*
  1200. * Pull in IO completion errors now. We are guaranteed to be running
  1201. * single threaded, so we don't need the lock to read b_io_error.
  1202. */
  1203. if (!bp->b_error && bp->b_io_error)
  1204. xfs_buf_ioerror(bp, bp->b_io_error);
  1205. if (bp->b_flags & XBF_READ) {
  1206. if (!bp->b_error && bp->b_ops)
  1207. bp->b_ops->verify_read(bp);
  1208. if (!bp->b_error)
  1209. bp->b_flags |= XBF_DONE;
  1210. } else {
  1211. if (!bp->b_error) {
  1212. bp->b_flags &= ~XBF_WRITE_FAIL;
  1213. bp->b_flags |= XBF_DONE;
  1214. }
  1215. if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
  1216. return;
  1217. /* clear the retry state */
  1218. bp->b_last_error = 0;
  1219. bp->b_retries = 0;
  1220. bp->b_first_retry_time = 0;
  1221. /*
  1222. * Note that for things like remote attribute buffers, there may
  1223. * not be a buffer log item here, so processing the buffer log
  1224. * item must remain optional.
  1225. */
  1226. if (bp->b_log_item)
  1227. xfs_buf_item_done(bp);
  1228. if (bp->b_flags & _XBF_INODES)
  1229. xfs_buf_inode_iodone(bp);
  1230. else if (bp->b_flags & _XBF_DQUOTS)
  1231. xfs_buf_dquot_iodone(bp);
  1232. }
  1233. bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
  1234. _XBF_LOGRECOVERY);
  1235. if (bp->b_flags & XBF_ASYNC)
  1236. xfs_buf_relse(bp);
  1237. else
  1238. complete(&bp->b_iowait);
  1239. }
  1240. static void
  1241. xfs_buf_ioend_work(
  1242. struct work_struct *work)
  1243. {
  1244. struct xfs_buf *bp =
  1245. container_of(work, struct xfs_buf, b_ioend_work);
  1246. xfs_buf_ioend(bp);
  1247. }
  1248. static void
  1249. xfs_buf_ioend_async(
  1250. struct xfs_buf *bp)
  1251. {
  1252. INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
  1253. queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
  1254. }
  1255. void
  1256. __xfs_buf_ioerror(
  1257. struct xfs_buf *bp,
  1258. int error,
  1259. xfs_failaddr_t failaddr)
  1260. {
  1261. ASSERT(error <= 0 && error >= -1000);
  1262. bp->b_error = error;
  1263. trace_xfs_buf_ioerror(bp, error, failaddr);
  1264. }
  1265. void
  1266. xfs_buf_ioerror_alert(
  1267. struct xfs_buf *bp,
  1268. xfs_failaddr_t func)
  1269. {
  1270. xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
  1271. "metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
  1272. func, (uint64_t)xfs_buf_daddr(bp),
  1273. bp->b_length, -bp->b_error);
  1274. }
  1275. /*
  1276. * To simulate an I/O failure, the buffer must be locked and held with at least
  1277. * three references. The LRU reference is dropped by the stale call. The buf
  1278. * item reference is dropped via ioend processing. The third reference is owned
  1279. * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC.
  1280. */
  1281. void
  1282. xfs_buf_ioend_fail(
  1283. struct xfs_buf *bp)
  1284. {
  1285. bp->b_flags &= ~XBF_DONE;
  1286. xfs_buf_stale(bp);
  1287. xfs_buf_ioerror(bp, -EIO);
  1288. xfs_buf_ioend(bp);
  1289. }
  1290. int
  1291. xfs_bwrite(
  1292. struct xfs_buf *bp)
  1293. {
  1294. int error;
  1295. ASSERT(xfs_buf_islocked(bp));
  1296. bp->b_flags |= XBF_WRITE;
  1297. bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
  1298. XBF_DONE);
  1299. error = xfs_buf_submit(bp);
  1300. if (error)
  1301. xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
  1302. return error;
  1303. }
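/*
 * Illustrative sketch (assumption, not part of the original file): a
 * synchronous write of a locked, modified buffer. xfs_bwrite() clears the
 * async/delwri state itself and returns with the buffer still locked and
 * held, so the caller releases it afterwards. The function name is a
 * placeholder.
 */
static int __maybe_unused
xfs_bwrite_example(
        struct xfs_buf  *bp)
{
        int             error;

        ASSERT(xfs_buf_islocked(bp));
        /* ... bp->b_addr has been updated by the caller ... */
        error = xfs_bwrite(bp);
        xfs_buf_relse(bp);
        return error;
}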
  1304. static void
  1305. xfs_buf_bio_end_io(
  1306. struct bio *bio)
  1307. {
  1308. struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
  1309. if (!bio->bi_status &&
  1310. (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
  1311. XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
  1312. bio->bi_status = BLK_STS_IOERR;
  1313. /*
  1314. * don't overwrite existing errors - otherwise we can lose errors on
  1315. * buffers that require multiple bios to complete.
  1316. */
  1317. if (bio->bi_status) {
  1318. int error = blk_status_to_errno(bio->bi_status);
  1319. cmpxchg(&bp->b_io_error, 0, error);
  1320. }
  1321. if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
  1322. invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
  1323. if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
  1324. xfs_buf_ioend_async(bp);
  1325. bio_put(bio);
  1326. }
  1327. static void
  1328. xfs_buf_ioapply_map(
  1329. struct xfs_buf *bp,
  1330. int map,
  1331. int *buf_offset,
  1332. int *count,
  1333. blk_opf_t op)
  1334. {
  1335. int page_index;
  1336. unsigned int total_nr_pages = bp->b_page_count;
  1337. int nr_pages;
  1338. struct bio *bio;
  1339. sector_t sector = bp->b_maps[map].bm_bn;
  1340. int size;
  1341. int offset;
  1342. /* skip the pages in the buffer before the start offset */
  1343. page_index = 0;
  1344. offset = *buf_offset;
  1345. while (offset >= PAGE_SIZE) {
  1346. page_index++;
  1347. offset -= PAGE_SIZE;
  1348. }
  1349. /*
  1350. * Limit the IO size to the length of the current vector, and update the
  1351. * remaining IO count for the next time around.
  1352. */
  1353. size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
  1354. *count -= size;
  1355. *buf_offset += size;
  1356. next_chunk:
  1357. atomic_inc(&bp->b_io_remaining);
  1358. nr_pages = bio_max_segs(total_nr_pages);
  1359. bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO);
  1360. bio->bi_iter.bi_sector = sector;
  1361. bio->bi_end_io = xfs_buf_bio_end_io;
  1362. bio->bi_private = bp;
  1363. for (; size && nr_pages; nr_pages--, page_index++) {
  1364. int rbytes, nbytes = PAGE_SIZE - offset;
  1365. if (nbytes > size)
  1366. nbytes = size;
  1367. rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
  1368. offset);
  1369. if (rbytes < nbytes)
  1370. break;
  1371. offset = 0;
  1372. sector += BTOBB(nbytes);
  1373. size -= nbytes;
  1374. total_nr_pages--;
  1375. }
  1376. if (likely(bio->bi_iter.bi_size)) {
  1377. if (xfs_buf_is_vmapped(bp)) {
  1378. flush_kernel_vmap_range(bp->b_addr,
  1379. xfs_buf_vmap_len(bp));
  1380. }
  1381. submit_bio(bio);
  1382. if (size)
  1383. goto next_chunk;
  1384. } else {
  1385. /*
  1386. * This is guaranteed not to be the last io reference count
  1387. * because the caller (xfs_buf_submit) holds a count itself.
  1388. */
  1389. atomic_dec(&bp->b_io_remaining);
  1390. xfs_buf_ioerror(bp, -EIO);
  1391. bio_put(bio);
  1392. }
  1393. }
STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	blk_opf_t	op;
	int		offset;
	int		size;
	int		i;

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

	if (bp->b_flags & XBF_WRITE) {
		op = REQ_OP_WRITE;

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error and
		 * the IO should not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		} else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
			struct xfs_mount *mp = bp->b_mount;

			/*
			 * non-crc filesystems don't attach verifiers during
			 * log recovery, so don't warn for such filesystems.
			 */
			if (xfs_has_crc(mp)) {
				xfs_warn(mp,
					"%s: no buf ops on daddr 0x%llx len %d",
					__func__, xfs_buf_daddr(bp),
					bp->b_length);
				xfs_hex_dump(bp->b_addr,
						XFS_CORRUPTION_DUMP_LEN);
				dump_stack();
			}
		}
	} else {
		op = REQ_OP_READ;
		if (bp->b_flags & XBF_READ_AHEAD)
			op |= REQ_RAHEAD;
	}

	/* we only use the buffer cache for meta-data */
	op |= REQ_META;

	/* in-memory targets are directly mapped, no IO required. */
	if (xfs_buftarg_is_mem(bp->b_target)) {
		xfs_buf_ioend(bp);
		return;
	}

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * xfs_buf_ioapply_map() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, op);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
}

/*
 * Wait for I/O completion of a sync buffer and return the I/O error code.
 */
static int
xfs_buf_iowait(
	struct xfs_buf	*bp)
{
	ASSERT(!(bp->b_flags & XBF_ASYNC));

	trace_xfs_buf_iowait(bp, _RET_IP_);
	wait_for_completion(&bp->b_iowait);
	trace_xfs_buf_iowait_done(bp, _RET_IP_);

	return bp->b_error;
}

/*
 * Buffer I/O submission path, read or write. Asynchronous submission transfers
 * the buffer lock ownership and the current reference to the IO. It is not
 * safe to reference the buffer after a call to this function unless the caller
 * holds an additional reference itself.
 */
static int
__xfs_buf_submit(
	struct xfs_buf	*bp,
	bool		wait)
{
	int		error = 0;

	trace_xfs_buf_submit(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	/*
	 * On log shutdown we stale and complete the buffer immediately. We can
	 * be called to read the superblock before the log has been set up, so
	 * be careful checking the log state.
	 *
	 * Checking the mount shutdown state here can result in the log tail
	 * moving inappropriately on disk as the log may not yet be shut down.
	 * i.e. failing this buffer on mount shutdown can remove it from the AIL
	 * and move the tail of the log forwards without having written this
	 * buffer to disk. This corrupts the log tail state in memory, and
	 * because the log may not be shut down yet, it can then be propagated
	 * to disk before the log is shutdown. Hence we check log shutdown
	 * state here rather than mount state to avoid corrupting the log tail
	 * on shutdown.
	 */
	if (bp->b_mount->m_log &&
	    xlog_is_shutdown(bp->b_mount->m_log)) {
		xfs_buf_ioend_fail(bp);
		return -EIO;
	}

	/*
	 * Grab a reference so the buffer does not go away underneath us. For
	 * async buffers, I/O completion drops the callers reference, which
	 * could occur before submission returns.
	 */
	xfs_buf_hold(bp);

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	if (bp->b_flags & XBF_ASYNC)
		xfs_buf_ioacct_inc(bp);
	_xfs_buf_ioapply(bp);

	/*
	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
	 * reference we took above. If we drop it to zero, run completion so
	 * that we don't return to the caller with completion still pending.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
			xfs_buf_ioend(bp);
		else
			xfs_buf_ioend_async(bp);
	}

	if (wait)
		error = xfs_buf_iowait(bp);

	/*
	 * Release the hold that keeps the buffer referenced for the entire
	 * I/O. Note that if the buffer is async, it is not safe to reference
	 * after this release.
	 */
	xfs_buf_rele(bp);
	return error;
}
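
/*
 * Return a kernel virtual address for the given byte offset into the buffer.
 * Uses the contiguous mapping when one exists, otherwise indexes directly
 * into the backing page array.
 */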
void *
xfs_buf_offset(
	struct xfs_buf	*bp,
	size_t		offset)
{
	struct page	*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	page = bp->b_pages[offset >> PAGE_SHIFT];
	return page_address(page) + (offset & (PAGE_SIZE-1));
}
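
/*
 * Zero a byte range of the buffer, walking the backing pages one at a time
 * as the range may span page boundaries.
 */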
void
xfs_buf_zero(
	struct xfs_buf	*bp,
	size_t		boff,
	size_t		bsize)
{
	size_t		bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		memset(page_address(page) + page_offset, 0, csize);

		boff += csize;
	}
}

/*
 * Log a message about and stale a buffer that a caller has decided is corrupt.
 *
 * This function should be called for the kinds of metadata corruption that
 * cannot be detected from a verifier, such as incorrect inter-block
 * relationship data. Do /not/ call this function from a verifier function.
 *
 * The buffer must be XBF_DONE prior to the call. Afterwards, the buffer will
 * be marked stale, but b_error will not be set. The caller is responsible for
 * releasing the buffer or fixing it.
 */
void
__xfs_buf_mark_corrupt(
	struct xfs_buf	*bp,
	xfs_failaddr_t	fa)
{
	ASSERT(bp->b_flags & XBF_DONE);

	xfs_buf_corruption_error(bp, fa);
	xfs_buf_stale(bp);
}

/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
static enum lru_status
xfs_buftarg_drain_rele(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	if (atomic_read(&bp->b_hold) > 1) {
		/* need to wait, so skip it this pass */
		trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
		return LRU_SKIP;
	}
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;

	/*
	 * clear the LRU reference count so the buffer doesn't get
	 * ignored in xfs_buf_rele().
	 */
	atomic_set(&bp->b_lru_ref, 0);
	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}

/*
 * Wait for outstanding I/O on the buftarg to complete.
 */
void
xfs_buftarg_wait(
	struct xfs_buftarg	*btp)
{
	/*
	 * First wait on the buftarg I/O count for all in-flight buffers to be
	 * released. This is critical as new buffers do not make the LRU until
	 * they are released.
	 *
	 * Next, flush the buffer workqueue to ensure all completion processing
	 * has finished. Just waiting on buffer locks is not sufficient for
	 * async IO as the reference count held over IO is not released until
	 * after the buffer lock is dropped. Hence we need to ensure here that
	 * all reference counts have been dropped before we start walking the
	 * LRU list.
	 */
	while (percpu_counter_sum(&btp->bt_io_count))
		delay(100);
	flush_workqueue(btp->bt_mount->m_buf_workqueue);
}
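
/*
 * Drain all buffers from the buftarg LRU once outstanding I/O has completed,
 * typically as part of unmounting or quiescing the filesystem. Buffers still
 * held elsewhere are skipped and retried on the next pass of the loop.
 */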
void
xfs_buftarg_drain(
	struct xfs_buftarg	*btp)
{
	LIST_HEAD(dispose);
	int			loop = 0;
	bool			write_fail = false;

	xfs_buftarg_wait(btp);

	/* loop until there is nothing left on the lru list. */
	while (list_lru_count(&btp->bt_lru)) {
		list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
			if (bp->b_flags & XBF_WRITE_FAIL) {
				write_fail = true;
				xfs_buf_alert_ratelimited(bp,
					"XFS: Corruption Alert",
"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
					(long long)xfs_buf_daddr(bp));
			}
			xfs_buf_rele(bp);
		}
		if (loop++ != 0)
			delay(100);
	}

	/*
	 * If one or more failed buffers were freed, that means dirty metadata
	 * was thrown away. This should only ever happen after I/O completion
	 * handling has elevated I/O error(s) to permanent failures and shut
	 * down the journal.
	 */
	if (write_fail) {
		ASSERT(xlog_is_shutdown(btp->bt_mount->m_log));
		xfs_alert(btp->bt_mount,
	      "Please run xfs_repair to determine the extent of the problem.");
	}
}
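
/*
 * Isolate callback for the buftarg shrinker. Buffers with a remaining LRU
 * reference are rotated back through the LRU; the rest are moved to the
 * dispose list for freeing by the caller.
 */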
static enum lru_status
xfs_buftarg_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	/*
	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;
	/*
	 * Decrement the b_lru_ref count unless the value is already
	 * zero. If the value is already zero, we need to reclaim the
	 * buffer, otherwise it gets another trip through the LRU.
	 */
	if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
		spin_unlock(&bp->b_lock);
		return LRU_ROTATE;
	}

	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}
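
/*
 * Shrinker callbacks for the buftarg LRU: ->scan_objects walks the LRU under
 * memory pressure and releases buffers whose LRU reference has expired, while
 * ->count_objects reports how many buffers are currently reclaimable.
 */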
static unsigned long
xfs_buftarg_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = shrink->private_data;
	LIST_HEAD(dispose);
	unsigned long		freed;

	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
				     xfs_buftarg_isolate, &dispose);

	while (!list_empty(&dispose)) {
		struct xfs_buf *bp;
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return freed;
}

static unsigned long
xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = shrink->private_data;
	return list_lru_shrink_count(&btp->bt_lru, sc);
}
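
/*
 * Tear down the generic buftarg infrastructure: the shrinker, the I/O counter
 * and the LRU. All buffers must already have been released.
 */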
void
xfs_destroy_buftarg(
	struct xfs_buftarg	*btp)
{
	shrinker_free(btp->bt_shrinker);
	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
	percpu_counter_destroy(&btp->bt_io_count);
	list_lru_destroy(&btp->bt_lru);
}

void
xfs_free_buftarg(
	struct xfs_buftarg	*btp)
{
	xfs_destroy_buftarg(btp);
	fs_put_dax(btp->bt_daxdev, btp->bt_mount);
	/* the main block device is closed by kill_block_super */
	if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
		bdev_fput(btp->bt_bdev_file);
	kfree(btp);
}
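
/*
 * Configure the metadata sector size for the buftarg and propagate it to the
 * underlying block device.
 */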
int
xfs_setsize_buftarg(
	struct xfs_buftarg	*btp,
	unsigned int		sectorsize)
{
	/* Set up metadata sector size info */
	btp->bt_meta_sectorsize = sectorsize;
	btp->bt_meta_sectormask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev_file, sectorsize)) {
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %pg",
			sectorsize, btp->bt_bdev);
		return -EINVAL;
	}

	return 0;
}
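
/*
 * Initialise the parts of the buftarg that are independent of the backing
 * block device: sector size masks, error rate limiting, the LRU, the I/O
 * counter and the shrinker.
 */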
int
xfs_init_buftarg(
	struct xfs_buftarg	*btp,
	size_t			logical_sectorsize,
	const char		*descr)
{
	/* Set up device logical sector size mask */
	btp->bt_logical_sectorsize = logical_sectorsize;
	btp->bt_logical_sectormask = logical_sectorsize - 1;

	/*
	 * Buffer IO error rate limiting. Limit it to no more than 10 messages
	 * per 30 seconds so as to not spam logs too much on repeated errors.
	 */
	ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
			     DEFAULT_RATELIMIT_BURST);

	if (list_lru_init(&btp->bt_lru))
		return -ENOMEM;
	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
		goto out_destroy_lru;

	btp->bt_shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-buf:%s", descr);
	if (!btp->bt_shrinker)
		goto out_destroy_io_count;
	btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count;
	btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan;
	btp->bt_shrinker->private_data = btp;
	shrinker_register(btp->bt_shrinker);
	return 0;

out_destroy_io_count:
	percpu_counter_destroy(&btp->bt_io_count);
out_destroy_lru:
	list_lru_destroy(&btp->bt_lru);
	return -ENOMEM;
}
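
/*
 * Allocate and initialise a buftarg for a block device based target, taking a
 * DAX device reference if one is associated with the block device.
 */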
struct xfs_buftarg *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct file		*bdev_file)
{
	struct xfs_buftarg	*btp;
	const struct dax_holder_operations *ops = NULL;

#if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE)
	ops = &xfs_dax_holder_operations;
#endif
	btp = kzalloc(sizeof(*btp), GFP_KERNEL | __GFP_NOFAIL);

	btp->bt_mount = mp;
	btp->bt_bdev_file = bdev_file;
	btp->bt_bdev = file_bdev(bdev_file);
	btp->bt_dev = btp->bt_bdev->bd_dev;
	btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
					    mp, ops);

	/*
	 * When allocating the buftargs we have not yet read the super block
	 * and thus don't know the file system sector size yet.
	 */
	if (xfs_setsize_buftarg(btp, bdev_logical_block_size(btp->bt_bdev)))
		goto error_free;
	if (xfs_init_buftarg(btp, bdev_logical_block_size(btp->bt_bdev),
			     mp->m_super->s_id))
		goto error_free;

	return btp;

error_free:
	kfree(btp);
	return NULL;
}
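
/*
 * Remove a buffer from a delwri queue and wake up anyone waiting for the
 * buffer to leave the list (see xfs_buf_delwri_queue_here()).
 */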
static inline void
xfs_buf_list_del(
	struct xfs_buf		*bp)
{
	list_del_init(&bp->b_list);
	wake_up_var(&bp->b_list);
}

/*
 * Cancel a delayed write list.
 *
 * Remove each buffer from the list, clear the delwri queue flag and drop the
 * associated buffer reference.
 */
void
xfs_buf_delwri_cancel(
	struct list_head	*list)
{
	struct xfs_buf		*bp;

	while (!list_empty(list)) {
		bp = list_first_entry(list, struct xfs_buf, b_list);

		xfs_buf_lock(bp);
		bp->b_flags &= ~_XBF_DELWRI_Q;
		xfs_buf_list_del(bp);
		xfs_buf_relse(bp);
	}
}

/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been. Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization. It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it was already on
 * the buffer list.
 */
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it is already queued up
	 * by someone else for immediate writeout. Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list. In a rare corner case it
	 * might get readded to a delwri list after the synchronous writeout,
	 * in which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}

/*
 * Queue a buffer to this delwri list as part of a data integrity operation.
 * If the buffer is on any other delwri list, we'll wait for that to clear
 * so that the caller can submit the buffer for IO and wait for the result.
 * Callers must ensure the buffer is not already on the list.
 */
void
xfs_buf_delwri_queue_here(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	/*
	 * We need this buffer to end up on the /caller's/ delwri list, not any
	 * old list. This can happen if the buffer is marked stale (which
	 * clears DELWRI_Q) after the AIL queues the buffer to its list but
	 * before the AIL has a chance to submit the list.
	 */
	while (!list_empty(&bp->b_list)) {
		xfs_buf_unlock(bp);
		wait_var_event(&bp->b_list, list_empty(&bp->b_list));
		xfs_buf_lock(bp);
	}

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	xfs_buf_delwri_queue(bp, buffer_list);
}

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

/*
 * Submit buffers for write. If wait_list is specified, the buffers are
 * submitted using sync I/O and placed on the wait list such that the caller can
 * iowait each buffer. Otherwise async I/O is used and the buffers are released
 * at I/O completion time. In either case, buffers remain locked until I/O
 * completes and the buffer is released from the queue.
 */
static int
xfs_buf_delwri_submit_buffers(
	struct list_head	*buffer_list,
	struct list_head	*wait_list)
{
	struct xfs_buf		*bp, *n;
	int			pinned = 0;
	struct blk_plug		plug;

	list_sort(NULL, buffer_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!wait_list) {
			if (!xfs_buf_trylock(bp))
				continue;
			if (xfs_buf_ispinned(bp)) {
				xfs_buf_unlock(bp);
				pinned++;
				continue;
			}
		} else {
			xfs_buf_lock(bp);
		}

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime. In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			xfs_buf_list_del(bp);
			xfs_buf_relse(bp);
			continue;
		}

		trace_xfs_buf_delwri_split(bp, _RET_IP_);

		/*
		 * If we have a wait list, each buffer (and associated delwri
		 * queue reference) transfers to it and is submitted
		 * synchronously. Otherwise, drop the buffer from the delwri
		 * queue and submit async.
		 */
		bp->b_flags &= ~_XBF_DELWRI_Q;
		bp->b_flags |= XBF_WRITE;
		if (wait_list) {
			bp->b_flags &= ~XBF_ASYNC;
			list_move_tail(&bp->b_list, wait_list);
		} else {
			bp->b_flags |= XBF_ASYNC;
			xfs_buf_list_del(bp);
		}
		__xfs_buf_submit(bp, false);
	}
	blk_finish_plug(&plug);

	return pinned;
}

/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers. This interface
 * is only safely usable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
 * function.
 *
 * Note: this function will skip buffers it would block on, and in doing so
 * leaves them on @buffer_list so they can be retried on a later pass. As such,
 * it is up to the caller to ensure that the buffer list is fully submitted or
 * cancelled appropriately when they are finished with the list. Failure to
 * cancel or resubmit the list until it is empty will result in leaked buffers
 * at unmount time.
 */
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
}

/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(wait_list);
	int			error = 0, error2;
	struct xfs_buf		*bp;

	xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);

	/* Wait for IO to complete. */
	while (!list_empty(&wait_list)) {
		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

		xfs_buf_list_del(bp);

		/*
		 * Wait on the locked buffer, check for errors and unlock and
		 * release the delwri queue reference.
		 */
		error2 = xfs_buf_iowait(bp);
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}

/*
 * Push a single buffer on a delwri queue.
 *
 * The purpose of this function is to submit a single buffer of a delwri queue
 * and return with the buffer still on the original queue. The waiting delwri
 * buffer submission infrastructure guarantees transfer of the delwri queue
 * buffer reference to a temporary wait list. We reuse this infrastructure to
 * transfer the buffer back to the original queue.
 *
 * Note the buffer transitions from the queued state, to the submitted and wait
 * listed state and back to the queued state during this call. The buffer
 * locking and queue management logic between _delwri_pushbuf() and
 * _delwri_queue() guarantee that the buffer cannot be queued to another list
 * before returning.
 */
int
xfs_buf_delwri_pushbuf(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	LIST_HEAD		(submit_list);
	int			error;

	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);

	/*
	 * Isolate the buffer to a new local list so we can submit it for I/O
	 * independently from the rest of the original list.
	 */
	xfs_buf_lock(bp);
	list_move(&bp->b_list, &submit_list);
	xfs_buf_unlock(bp);

	/*
	 * Delwri submission clears the DELWRI_Q buffer flag and returns with
	 * the buffer on the wait list with the original reference. Rather than
	 * bounce the buffer from a local wait list back to the original list
	 * after I/O completion, reuse the original list as the wait list.
	 */
	xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);

	/*
	 * The buffer is now locked, under I/O and wait listed on the original
	 * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and
	 * return with the buffer unlocked and on the original queue.
	 */
	error = xfs_buf_iowait(bp);
	bp->b_flags |= _XBF_DELWRI_Q;
	xfs_buf_unlock(bp);

	return error;
}
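
/*
 * Set the number of trips through the LRU a buffer gets before it is
 * reclaimed, i.e. how aggressively the buffer is cached once released.
 */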
void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
	/*
	 * Set the lru reference count to 0 based on the error injection tag.
	 * This allows userspace to disrupt buffer caching for debug/testing
	 * purposes.
	 */
	if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
		lru_ref = 0;

	atomic_set(&bp->b_lru_ref, lru_ref);
}

/*
 * Verify an on-disk magic value against the magic value specified in the
 * verifier structure. The verifier magic is in disk byte order so the caller
 * is expected to pass the value directly from disk.
 */
bool
xfs_verify_magic(
	struct xfs_buf		*bp,
	__be32			dmagic)
{
	struct xfs_mount	*mp = bp->b_mount;
	int			idx;

	idx = xfs_has_crc(mp);
	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
		return false;
	return dmagic == bp->b_ops->magic[idx];
}

/*
 * Verify an on-disk magic value against the magic value specified in the
 * verifier structure. The verifier magic is in disk byte order so the caller
 * is expected to pass the value directly from disk.
 */
bool
xfs_verify_magic16(
	struct xfs_buf		*bp,
	__be16			dmagic)
{
	struct xfs_mount	*mp = bp->b_mount;
	int			idx;

	idx = xfs_has_crc(mp);
	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
		return false;
	return dmagic == bp->b_ops->magic16[idx];
}