xfs_log_cil.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
  4. */
  5. #include "xfs.h"
  6. #include "xfs_fs.h"
  7. #include "xfs_format.h"
  8. #include "xfs_log_format.h"
  9. #include "xfs_shared.h"
  10. #include "xfs_trans_resv.h"
  11. #include "xfs_mount.h"
  12. #include "xfs_extent_busy.h"
  13. #include "xfs_trans.h"
  14. #include "xfs_trans_priv.h"
  15. #include "xfs_log.h"
  16. #include "xfs_log_priv.h"
  17. #include "xfs_trace.h"
  18. #include "xfs_discard.h"
  19. /*
  20. * Allocate a new ticket. Failing to get a new ticket makes it really hard to
  21. * recover, so we don't allow failure here. Also, we allocate in a context that
  22. * we don't want to be issuing transactions from, so we need to tell the
  23. * allocation code this as well.
  24. *
  25. * We don't reserve any space for the ticket - we are going to steal whatever
  26. * space we require from transactions as they commit. To ensure we reserve all
  27. * the space required, we need to set the current reservation of the ticket to
  28. * zero so that we know to steal the initial transaction overhead from the
  29. * first transaction commit.
  30. */
  31. static struct xlog_ticket *
  32. xlog_cil_ticket_alloc(
  33. struct xlog *log)
  34. {
  35. struct xlog_ticket *tic;
  36. tic = xlog_ticket_alloc(log, 0, 1, 0);
  37. /*
  38. * set the current reservation to zero so we know to steal the basic
  39. * transaction overhead reservation from the first transaction commit.
  40. */
  41. tic->t_curr_res = 0;
  42. tic->t_iclog_hdrs = 0;
  43. return tic;
  44. }
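/*
 * Illustration of the stealing described above (not part of the original
 * file): the CIL ticket starts with t_curr_res == 0, and the first
 * transaction to commit into the new checkpoint donates the unit
 * reservation (see the XLOG_CIL_EMPTY test in xlog_cil_insert_items()),
 * charging ctx_res to its own ticket. The CIL ticket therefore never
 * draws on the log grant heads directly.
 */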
  45. static inline void
  46. xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
  47. {
  48. struct xlog *log = cil->xc_log;
  49. atomic_set(&cil->xc_iclog_hdrs,
  50. (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
  51. (log->l_iclog_size - log->l_iclog_hsize)));
  52. }
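/*
 * Worked example for the helper above (illustrative numbers, not a real
 * log geometry): with 32 KiB iclogs (l_iclog_size = 32768), a 512 byte
 * iclog header (l_iclog_hsize = 512) and a hypothetical blocking space
 * limit of 8 MiB, this primes xc_iclog_hdrs with the number of iclogs a
 * maximally sized checkpoint could span: 8388608 / (32768 - 512) = 260.
 */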
  53. /*
  54. * Check if the current log item was first committed in this sequence.
  55. * We can't rely on just the log item being in the CIL, we have to check
  56. * the recorded commit sequence number.
  57. *
  58. * Note: for this to be used in a non-racy manner, it has to be called with
  59. * CIL flushing locked out. As a result, it should only be used during the
  60. * transaction commit process when deciding what to format into the item.
  61. */
  62. static bool
  63. xlog_item_in_current_chkpt(
  64. struct xfs_cil *cil,
  65. struct xfs_log_item *lip)
  66. {
  67. if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
  68. return false;
  69. /*
  70. * li_seq is written on the first commit of a log item to record the
  71. * first checkpoint it is written to. Hence if it is different to the
  72. * current sequence, we're in a new checkpoint.
  73. */
  74. return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
  75. }
  76. bool
  77. xfs_log_item_in_current_chkpt(
  78. struct xfs_log_item *lip)
  79. {
  80. return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
  81. }
  82. /*
  83. * Unavoidable forward declaration - xlog_cil_push_work() calls
  84. * xlog_cil_ctx_alloc() itself.
  85. */
  86. static void xlog_cil_push_work(struct work_struct *work);
  87. static struct xfs_cil_ctx *
  88. xlog_cil_ctx_alloc(void)
  89. {
  90. struct xfs_cil_ctx *ctx;
  91. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
  92. INIT_LIST_HEAD(&ctx->committing);
  93. INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
  94. INIT_LIST_HEAD(&ctx->log_items);
  95. INIT_LIST_HEAD(&ctx->lv_chain);
  96. INIT_WORK(&ctx->push_work, xlog_cil_push_work);
  97. return ctx;
  98. }
  99. /*
  100. * Aggregate the CIL per cpu structures into global counts, lists, etc and
  101. * clear the percpu state ready for the next context to use. This is called
  102. * from the push code with the context lock held exclusively, hence nothing else
  103. * will be accessing or modifying the per-cpu counters.
  104. */
  105. static void
  106. xlog_cil_push_pcp_aggregate(
  107. struct xfs_cil *cil,
  108. struct xfs_cil_ctx *ctx)
  109. {
  110. struct xlog_cil_pcp *cilpcp;
  111. int cpu;
  112. for_each_cpu(cpu, &ctx->cil_pcpmask) {
  113. cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
  114. ctx->ticket->t_curr_res += cilpcp->space_reserved;
  115. cilpcp->space_reserved = 0;
  116. if (!list_empty(&cilpcp->busy_extents)) {
  117. list_splice_init(&cilpcp->busy_extents,
  118. &ctx->busy_extents.extent_list);
  119. }
  120. if (!list_empty(&cilpcp->log_items))
  121. list_splice_init(&cilpcp->log_items, &ctx->log_items);
  122. /*
  123. * We're in the middle of switching cil contexts. Reset the
  124. * counter we use to detect when the current context is nearing
  125. * full.
  126. */
  127. cilpcp->space_used = 0;
  128. }
  129. }
  130. /*
  131. * Aggregate the CIL per-cpu space used counters into the global atomic value.
  132. * This is called when the per-cpu counter aggregation will first pass the soft
  133. * limit threshold so we can switch to atomic counter aggregation for accurate
  134. * detection of hard limit traversal.
  135. */
  136. static void
  137. xlog_cil_insert_pcp_aggregate(
  138. struct xfs_cil *cil,
  139. struct xfs_cil_ctx *ctx)
  140. {
  141. int cpu;
  142. int count = 0;
  143. /* Trigger atomic updates then aggregate only for the first caller */
  144. if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
  145. return;
  146. /*
  147. * We can race with other cpus setting cil_pcpmask. However, we've
  148. * atomically cleared PCP_SPACE which forces other threads to add to
  149. * the global space used count. cil_pcpmask is a superset of cilpcp
  150. * structures that could have a nonzero space_used.
  151. */
  152. for_each_cpu(cpu, &ctx->cil_pcpmask) {
  153. struct xlog_cil_pcp *cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
  154. int old = READ_ONCE(cilpcp->space_used);
  155. while (!try_cmpxchg(&cilpcp->space_used, &old, 0))
  156. ;
  157. count += old;
  158. }
  159. atomic_add(count, &ctx->space_used);
  160. }
  161. static void
  162. xlog_cil_ctx_switch(
  163. struct xfs_cil *cil,
  164. struct xfs_cil_ctx *ctx)
  165. {
  166. xlog_cil_set_iclog_hdr_count(cil);
  167. set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
  168. set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
  169. ctx->sequence = ++cil->xc_current_sequence;
  170. ctx->cil = cil;
  171. cil->xc_ctx = ctx;
  172. }
  173. /*
  174. * After the first stage of log recovery is done, we know where the head and
  175. * tail of the log are. We need this log initialisation done before we can
  176. * initialise the first CIL checkpoint context.
  177. *
  178. * Here we allocate a log ticket to track space usage during a CIL push. This
  179. * ticket is passed to xlog_write() directly so that we don't slowly leak log
  180. * space by failing to account for space used by log headers and additional
  181. * region headers for split regions.
  182. */
  183. void
  184. xlog_cil_init_post_recovery(
  185. struct xlog *log)
  186. {
  187. log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
  188. log->l_cilp->xc_ctx->sequence = 1;
  189. xlog_cil_set_iclog_hdr_count(log->l_cilp);
  190. }
  191. static inline int
  192. xlog_cil_iovec_space(
  193. uint niovecs)
  194. {
  195. return round_up((sizeof(struct xfs_log_vec) +
  196. niovecs * sizeof(struct xfs_log_iovec)),
  197. sizeof(uint64_t));
  198. }
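/*
 * Worked example for the helper above (struct sizes are illustrative
 * assumptions for a 64-bit build, not guaranteed): with a 56 byte
 * xfs_log_vec and a 16 byte xfs_log_iovec, a call such as
 *
 *	buf_size = nbytes + xlog_cil_iovec_space(3);
 *
 * reserves round_up(56 + 3 * 16, 8) = 104 bytes for the vector header
 * and iovec array ahead of the 64-bit aligned data region.
 */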
  199. /*
  200. * Allocate or pin log vector buffers for CIL insertion.
  201. *
  202. * The CIL currently uses disposable buffers for copying a snapshot of the
  203. * modified items into the log during a push. The biggest problem with this is
  204. * the requirement to allocate the disposable buffer during the commit if:
  205. * a) it does not exist; or
  206. * b) it is too small
  207. *
  208. * If we do this allocation within xlog_cil_insert_format_items(), it is done
  209. * under the xc_ctx_lock, which means that a CIL push cannot occur during
  210. * the memory allocation. This means that we have a potential deadlock situation
  211. * under low memory conditions when we have lots of dirty metadata pinned in
  212. * the CIL and we need a CIL commit to occur to free memory.
  213. *
  214. * To avoid this, we need to move the memory allocation outside the
  215. * xc_ctx_lock, but because the log vector buffers are disposable, that opens
  216. * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
  217. * vector buffers between the check and the formatting of the item into the
  218. * log vector buffer within the xc_ctx_lock.
  219. *
  220. * Because the log vector buffer needs to be unchanged during the CIL push
  221. * process, we cannot share the buffer between the transaction commit (which
  222. * modifies the buffer) and the CIL push context that is writing the changes
  223. * into the log. This means skipping preallocation of buffer space is
  224. * unreliable, but we most definitely do not want to be allocating and freeing
  225. * buffers unnecessarily during commits when overwrites can be done safely.
  226. *
  227. * The simplest solution to this problem is to allocate a shadow buffer when a
  228. * log item is committed for the second time, and then to only use this buffer
  229. * if necessary. The buffer can remain attached to the log item until such time
  230. * it is needed, and this is the buffer that is reallocated to match the size of
  231. * the incoming modification. Then during the formatting of the item we can swap
  232. * the active buffer with the new one if we can't reuse the existing buffer. We
  233. * don't free the old buffer as it may be reused on the next modification if
  234. * its size is right, otherwise we'll free and reallocate it at that point.
  235. *
  236. * This function builds a vector for the changes in each log item in the
  237. * transaction. It then works out the length of the buffer needed for each log
  238. * item, allocates them and attaches the vector to the log item in preparation
  239. * for the formatting step which occurs under the xc_ctx_lock.
  240. *
  241. * While this means the memory footprint goes up, it avoids the repeated
  242. * alloc/free pattern that repeated modifications of an item would otherwise
  243. * cause, and hence minimises the CPU overhead of such behaviour.
  244. */
  245. static void
  246. xlog_cil_alloc_shadow_bufs(
  247. struct xlog *log,
  248. struct xfs_trans *tp)
  249. {
  250. struct xfs_log_item *lip;
  251. list_for_each_entry(lip, &tp->t_items, li_trans) {
  252. struct xfs_log_vec *lv;
  253. int niovecs = 0;
  254. int nbytes = 0;
  255. int buf_size;
  256. bool ordered = false;
  257. /* Skip items which aren't dirty in this transaction. */
  258. if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
  259. continue;
  260. /* get number of vecs and size of data to be stored */
  261. lip->li_ops->iop_size(lip, &niovecs, &nbytes);
  262. /*
  263. * Ordered items need to be tracked but we do not wish to write
  264. * them. We need a logvec to track the object, but we do not
  265. * need an iovec or buffer to be allocated for copying data.
  266. */
  267. if (niovecs == XFS_LOG_VEC_ORDERED) {
  268. ordered = true;
  269. niovecs = 0;
  270. nbytes = 0;
  271. }
  272. /*
  273. * We 64-bit align the length of each iovec so that the start of
  274. * the next one is naturally aligned. We'll need to account for
  275. * that slack space here.
  276. *
  277. * We also add the xlog_op_header to each region when
  278. * formatting, but that's not accounted to the size of the item
  279. * at this point. Hence we'll need an additional number of bytes
  280. * for each vector to hold an opheader.
  281. *
  282. * Then round nbytes up to 64-bit alignment so that the initial
  283. * buffer alignment is easy to calculate and verify.
  284. */
  285. nbytes += niovecs *
  286. (sizeof(uint64_t) + sizeof(struct xlog_op_header));
  287. nbytes = round_up(nbytes, sizeof(uint64_t));
  288. /*
  289. * The data buffer needs to start 64-bit aligned, so round up
  290. * that space to ensure we can align it appropriately and not
  291. * overrun the buffer.
  292. */
  293. buf_size = nbytes + xlog_cil_iovec_space(niovecs);
  294. /*
  295. * if we have no shadow buffer, or it is too small, we need to
  296. * reallocate it.
  297. */
  298. if (!lip->li_lv_shadow ||
  299. buf_size > lip->li_lv_shadow->lv_size) {
  300. /*
  301. * We free and allocate here as a realloc would copy
  302. * unnecessary data. We don't use kvzalloc() for the
  303. * same reason - we don't need to zero the data area in
  304. * the buffer, only the log vector header and the iovec
  305. * storage.
  306. */
  307. kvfree(lip->li_lv_shadow);
  308. lv = xlog_kvmalloc(buf_size);
  309. memset(lv, 0, xlog_cil_iovec_space(niovecs));
  310. INIT_LIST_HEAD(&lv->lv_list);
  311. lv->lv_item = lip;
  312. lv->lv_size = buf_size;
  313. if (ordered)
  314. lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
  315. else
  316. lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
  317. lip->li_lv_shadow = lv;
  318. } else {
  319. /* same or smaller, optimise common overwrite case */
  320. lv = lip->li_lv_shadow;
  321. if (ordered)
  322. lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
  323. else
  324. lv->lv_buf_len = 0;
  325. lv->lv_bytes = 0;
  326. }
  327. /* Ensure the lv is set up according to ->iop_size */
  328. lv->lv_niovecs = niovecs;
  329. /* The allocated data region lies beyond the iovec region */
  330. lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
  331. }
  332. }
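/*
 * Worked sizing example for the loop above (a hypothetical item, taking
 * the on-disk xlog_op_header as 12 bytes): for niovecs = 2 and
 * nbytes = 200 from ->iop_size, the opheader and alignment slack adds
 * 2 * (8 + 12) = 40 bytes, so nbytes = round_up(240, 8) = 240 and
 * buf_size = 240 + xlog_cil_iovec_space(2). Only the log vector header
 * and iovec region of a freshly allocated shadow buffer is zeroed; the
 * data area is left uninitialised for ->iop_format() to fill.
 */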
  333. /*
  334. * Prepare the log item for insertion into the CIL. Calculate the difference in
  335. * log space it will consume, and if it is a new item pin it as well.
  336. */
  337. STATIC void
  338. xfs_cil_prepare_item(
  339. struct xlog *log,
  340. struct xfs_log_vec *lv,
  341. struct xfs_log_vec *old_lv,
  342. int *diff_len)
  343. {
  344. /* Account for the new LV being passed in */
  345. if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
  346. *diff_len += lv->lv_bytes;
  347. /*
  348. * If there is no old LV, this is the first time we've seen the item in
  349. * this CIL context and so we need to pin it. If we are replacing the
  350. * old_lv, then remove the space it accounts for and make it the shadow
  351. * buffer for later freeing. In both cases we are now switching to the
  352. * shadow buffer, so update the pointer to it appropriately.
  353. */
  354. if (!old_lv) {
  355. if (lv->lv_item->li_ops->iop_pin)
  356. lv->lv_item->li_ops->iop_pin(lv->lv_item);
  357. lv->lv_item->li_lv_shadow = NULL;
  358. } else if (old_lv != lv) {
  359. ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);
  360. *diff_len -= old_lv->lv_bytes;
  361. lv->lv_item->li_lv_shadow = old_lv;
  362. }
  363. /* attach new log vector to log item */
  364. lv->lv_item->li_lv = lv;
  365. /*
  366. * If this is the first time the item is being committed to the
  367. * CIL, store the sequence number on the log item so we can
  368. * tell in future commits whether this is the first checkpoint
  369. * the item is being committed into.
  370. */
  371. if (!lv->lv_item->li_seq)
  372. lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
  373. }
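/*
 * Lifecycle trace for the function above (a hypothetical item): on its
 * first commit into this checkpoint old_lv is NULL, so the item is
 * pinned, li_seq records the current sequence and the formatted lv is
 * attached. If a later relog in the same checkpoint had to switch to the
 * shadow buffer, old_lv != lv: the old buffer's bytes are removed from
 * *diff_len and the old lv becomes the new shadow for reuse or freeing.
 */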
  374. /*
  375. * Format log item into a flat buffer
  376. *
  377. * For delayed logging, we need to hold a formatted buffer containing all the
  378. * changes on the log item. This enables us to relog the item in memory and
  379. * write it out asynchronously without needing to relock the object that was
  380. * modified at the time it gets written into the iclog.
  381. *
  382. * This function takes the prepared log vectors attached to each log item, and
  383. * formats the changes into the log vector buffer. The buffer it uses is
  384. * dependent on the current state of the vector in the CIL - the shadow lv is
  385. * guaranteed to be large enough for the current modification, but we will only
  386. * use that if we can't reuse the existing lv. If we can't reuse the existing
  387. * lv, then simply swap it out for the shadow lv. We don't free it - that is
  388. * done lazily either by the next modification or the freeing of the log item.
  389. *
  390. * We don't set up region headers during this process; we simply copy the
  391. * regions into the flat buffer. We can do this because we still have to do a
  392. * formatting step to write the regions into the iclog buffer. Writing the
  393. * ophdrs during the iclog write means that we can support splitting large
  394. * regions across iclog boundaries without needing a change in the format of the
  395. * item/region encapsulation.
  396. *
  397. * Hence what we need to do now is rewrite the vector array to point
  398. * to the copied region inside the buffer we just allocated. This allows us to
  399. * format the regions into the iclog as though they are being formatted
  400. * directly out of the objects themselves.
  401. */
  402. static void
  403. xlog_cil_insert_format_items(
  404. struct xlog *log,
  405. struct xfs_trans *tp,
  406. int *diff_len)
  407. {
  408. struct xfs_log_item *lip;
  409. /* Bail out if we didn't find a log item. */
  410. if (list_empty(&tp->t_items)) {
  411. ASSERT(0);
  412. return;
  413. }
  414. list_for_each_entry(lip, &tp->t_items, li_trans) {
  415. struct xfs_log_vec *lv;
  416. struct xfs_log_vec *old_lv = NULL;
  417. struct xfs_log_vec *shadow;
  418. bool ordered = false;
  419. /* Skip items which aren't dirty in this transaction. */
  420. if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
  421. continue;
  422. /*
  423. * The formatting size information is already attached to
  424. * the shadow lv on the log item.
  425. */
  426. shadow = lip->li_lv_shadow;
  427. if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
  428. ordered = true;
  429. /* Skip items that do not have any vectors for writing */
  430. if (!shadow->lv_niovecs && !ordered)
  431. continue;
  432. /* compare to existing item size */
  433. old_lv = lip->li_lv;
  434. if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
  435. /* same or smaller, optimise common overwrite case */
  436. lv = lip->li_lv;
  437. if (ordered)
  438. goto insert;
  439. /*
  440. * set the item up as though it is a new insertion so
  441. * that the space reservation accounting is correct.
  442. */
  443. *diff_len -= lv->lv_bytes;
  444. /* Ensure the lv is set up according to ->iop_size */
  445. lv->lv_niovecs = shadow->lv_niovecs;
  446. /* reset the lv buffer information for new formatting */
  447. lv->lv_buf_len = 0;
  448. lv->lv_bytes = 0;
  449. lv->lv_buf = (char *)lv +
  450. xlog_cil_iovec_space(lv->lv_niovecs);
  451. } else {
  452. /* switch to shadow buffer! */
  453. lv = shadow;
  454. lv->lv_item = lip;
  455. if (ordered) {
  456. /* track as an ordered logvec */
  457. ASSERT(lip->li_lv == NULL);
  458. goto insert;
  459. }
  460. }
  461. ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
  462. lip->li_ops->iop_format(lip, lv);
  463. insert:
  464. xfs_cil_prepare_item(log, lv, old_lv, diff_len);
  465. }
  466. }
  467. /*
  468. * The use of lockless waitqueue_active() requires that the caller has
  469. * serialised itself against the wakeup call in xlog_cil_push_work(). That
  470. * can be done by either holding the push lock or the context lock.
  471. */
  472. static inline bool
  473. xlog_cil_over_hard_limit(
  474. struct xlog *log,
  475. int32_t space_used)
  476. {
  477. if (waitqueue_active(&log->l_cilp->xc_push_wait))
  478. return true;
  479. if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
  480. return true;
  481. return false;
  482. }
  483. /*
  484. * Insert the log items into the CIL and calculate the difference in space
  485. * consumed by the item. Add the space to the checkpoint ticket and calculate
  486. * if the change requires additional log metadata. If it does, take that space
  487. * as well. Remove the amount of space we added to the checkpoint ticket from
  488. * the current transaction ticket so that the accounting works out correctly.
  489. */
  490. static void
  491. xlog_cil_insert_items(
  492. struct xlog *log,
  493. struct xfs_trans *tp,
  494. uint32_t released_space)
  495. {
  496. struct xfs_cil *cil = log->l_cilp;
  497. struct xfs_cil_ctx *ctx = cil->xc_ctx;
  498. struct xfs_log_item *lip;
  499. int len = 0;
  500. int iovhdr_res = 0, split_res = 0, ctx_res = 0;
  501. int space_used;
  502. int order;
  503. unsigned int cpu_nr;
  504. struct xlog_cil_pcp *cilpcp;
  505. ASSERT(tp);
  506. /*
  507. * We can do this safely because the context can't checkpoint until we
  508. * are done so it doesn't matter exactly how we update the CIL.
  509. */
  510. xlog_cil_insert_format_items(log, tp, &len);
  511. /*
  512. * Subtract the space released by intent cancelation from the space we
  513. * consumed so that we remove it from the CIL space and add it back to
  514. * the current transaction reservation context.
  515. */
  516. len -= released_space;
  517. /*
  518. * Grab the per-cpu pointer for the CIL before we start any accounting.
  519. * That ensures that we are running with pre-emption disabled and so we
  520. * can't be scheduled away between split sample/update operations that
  521. * are done without outside locking to serialise them.
  522. */
  523. cpu_nr = get_cpu();
  524. cilpcp = this_cpu_ptr(cil->xc_pcp);
  525. /* Tell the future push that there was work added by this CPU. */
  526. if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
  527. cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);
  528. /*
  529. * We need to take the CIL checkpoint unit reservation on the first
  530. * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
  531. * unnecessarily do an atomic op in the fast path here. We can clear the
  532. * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
  533. * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
  534. */
  535. if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
  536. test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
  537. ctx_res = ctx->ticket->t_unit_res;
  538. /*
  539. * Check if we need to steal iclog headers. atomic_read() is not a
  540. * locked atomic operation, so we can check the value before we do any
  541. * real atomic ops in the fast path. If we've already taken the CIL unit
  542. * reservation from this commit, we've already got one iclog header
  543. * space reserved so we have to account for that otherwise we risk
  544. * overrunning the reservation on this ticket.
  545. *
  546. * If the CIL is already at the hard limit, we might need more header
  547. * space than was originally reserved. So steal more header space from every
  548. * commit that occurs once we are over the hard limit to ensure the CIL
  549. * push won't run out of reservation space.
  550. *
  551. * This can steal more than we need, but that's OK.
  552. *
  553. * The cil->xc_ctx_lock provides the serialisation necessary for safely
  554. * calling xlog_cil_over_hard_limit() in this context.
  555. */
  556. space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
  557. if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
  558. xlog_cil_over_hard_limit(log, space_used)) {
  559. split_res = log->l_iclog_hsize +
  560. sizeof(struct xlog_op_header);
  561. if (ctx_res)
  562. ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
  563. else
  564. ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
  565. atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
  566. }
  567. cilpcp->space_reserved += ctx_res;
  568. /*
  569. * Accurately account when over the soft limit, otherwise fold the
  570. * percpu count into the global count if over the per-cpu threshold.
  571. */
  572. if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
  573. atomic_add(len, &ctx->space_used);
  574. } else if (cilpcp->space_used + len >
  575. (XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
  576. space_used = atomic_add_return(cilpcp->space_used + len,
  577. &ctx->space_used);
  578. cilpcp->space_used = 0;
  579. /*
  580. * If we just transitioned over the soft limit, we need to
  581. * transition to the global atomic counter.
  582. */
  583. if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
  584. xlog_cil_insert_pcp_aggregate(cil, ctx);
  585. } else {
  586. cilpcp->space_used += len;
  587. }
  588. /* attach the transaction to the CIL if it has any busy extents */
  589. if (!list_empty(&tp->t_busy))
  590. list_splice_init(&tp->t_busy, &cilpcp->busy_extents);
  591. /*
  592. * Now update the order of everything modified in the transaction
  593. * and insert items into the CIL if they aren't already there.
  594. * We do this here so we only need to take the CIL lock once during
  595. * the transaction commit.
  596. */
  597. order = atomic_inc_return(&ctx->order_id);
  598. list_for_each_entry(lip, &tp->t_items, li_trans) {
  599. /* Skip items which aren't dirty in this transaction. */
  600. if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
  601. continue;
  602. lip->li_order_id = order;
  603. if (!list_empty(&lip->li_cil))
  604. continue;
  605. list_add_tail(&lip->li_cil, &cilpcp->log_items);
  606. }
  607. put_cpu();
  608. /*
  609. * If we've overrun the reservation, dump the tx details before we move
  610. * the log items. Shutdown is imminent...
  611. */
  612. tp->t_ticket->t_curr_res -= ctx_res + len;
  613. if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
  614. xfs_warn(log->l_mp, "Transaction log reservation overrun:");
  615. xfs_warn(log->l_mp,
  616. " log items: %d bytes (iov hdrs: %d bytes)",
  617. len, iovhdr_res);
  618. xfs_warn(log->l_mp, " split region headers: %d bytes",
  619. split_res);
  620. xfs_warn(log->l_mp, " ctx ticket: %d bytes", ctx_res);
  621. xlog_print_trans(tp);
  622. xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
  623. }
  624. }
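/*
 * Worked example of the space accounting thresholds above (numbers are
 * illustrative only): were XLOG_CIL_SPACE_LIMIT(log) 8 MiB on a machine
 * with 8 online CPUs, each CPU would accumulate space_used privately
 * until a commit pushed its local count over 8 MiB / 8 = 1 MiB, then
 * fold it into ctx->space_used. Once the folded total passes the 8 MiB
 * soft limit, xlog_cil_insert_pcp_aggregate() clears XLOG_CIL_PCP_SPACE
 * and all later commits add to the global atomic counter directly.
 */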
  625. static inline void
  626. xlog_cil_ail_insert_batch(
  627. struct xfs_ail *ailp,
  628. struct xfs_ail_cursor *cur,
  629. struct xfs_log_item **log_items,
  630. int nr_items,
  631. xfs_lsn_t commit_lsn)
  632. {
  633. int i;
  634. spin_lock(&ailp->ail_lock);
  635. /* xfs_trans_ail_update_bulk drops ailp->ail_lock */
  636. xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
  637. for (i = 0; i < nr_items; i++) {
  638. struct xfs_log_item *lip = log_items[i];
  639. if (lip->li_ops->iop_unpin)
  640. lip->li_ops->iop_unpin(lip, 0);
  641. }
  642. }
  643. /*
  644. * Take the checkpoint's log vector chain of items and insert the attached log
  645. * items into the AIL. This uses bulk insertion techniques to minimise AIL lock
  646. * traffic.
  647. *
  648. * The AIL tracks log items via the start record LSN of the checkpoint,
  649. * not the commit record LSN. This is because we can pipeline multiple
  650. * checkpoints, and so the start record of checkpoint N+1 can be
  651. * written before the commit record of checkpoint N. i.e:
  652. *
  653. *    start N                   commit N
  654. * +-------------+------------+----------------+
  655. *        start N+1                   commit N+1
  656. *
  657. * The tail of the log cannot be moved to the LSN of commit N when all
  658. * the items of that checkpoint are written back, because then the
  659. * start record for N+1 is no longer in the active portion of the log
  660. * and recovery will fail/corrupt the filesystem.
  661. *
  662. * Hence when all the log items in checkpoint N are written back, the
  663. * tail of the log must now only move as far forwards as the start LSN
  664. * of checkpoint N+1.
  665. *
  666. * If we are called with the aborted flag set, it is because a log write during
  667. * a CIL checkpoint commit has failed. In this case, all the items in the
  668. * checkpoint have already gone through iop_committed and iop_committing, which
  669. * means that checkpoint commit abort handling is treated exactly the same as an
  670. * iclog write error even though we haven't started any IO yet. Hence in this
  671. * case all we need to do is iop_committed processing, followed by an
  672. * iop_unpin(aborted) call.
  673. *
  674. * The AIL cursor is used to optimise the insert process. If commit_lsn is not
  675. * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
  676. * find the insertion point on every xfs_log_item_batch_insert() call. This
  677. * saves a lot of needless list walking and is a net win, even though it
  678. * slightly increases the amount of AIL lock traffic to set it up and tear it
  679. * down.
  680. */
  681. static void
  682. xlog_cil_ail_insert(
  683. struct xfs_cil_ctx *ctx,
  684. bool aborted)
  685. {
  686. #define LOG_ITEM_BATCH_SIZE 32
  687. struct xfs_ail *ailp = ctx->cil->xc_log->l_ailp;
  688. struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
  689. struct xfs_log_vec *lv;
  690. struct xfs_ail_cursor cur;
  691. xfs_lsn_t old_head;
  692. int i = 0;
  693. /*
  694. * Update the AIL head LSN with the commit record LSN of this
  695. * checkpoint. As iclogs are always completed in order, this should
  696. * always be the same (as iclogs can contain multiple commit records) or
  697. * higher LSN than the current head. We do this before insertion of the
  698. * items so that log space checks during insertion will reflect the
  699. * space that this checkpoint has already consumed. We call
  700. * xfs_ail_update_finish() so that tail space and space-based wakeups
  701. * will be recalculated appropriately.
  702. */
  703. ASSERT(XFS_LSN_CMP(ctx->commit_lsn, ailp->ail_head_lsn) >= 0 ||
  704. aborted);
  705. spin_lock(&ailp->ail_lock);
  706. xfs_trans_ail_cursor_last(ailp, &cur, ctx->start_lsn);
  707. old_head = ailp->ail_head_lsn;
  708. ailp->ail_head_lsn = ctx->commit_lsn;
  709. /* xfs_ail_update_finish() drops the ail_lock */
  710. xfs_ail_update_finish(ailp, NULLCOMMITLSN);
  711. /*
  712. * We move the AIL head forwards to account for the space used in the
  713. * log before we remove that space from the grant heads. This prevents a
  714. * transient condition where reservation space appears to become
  715. * available on return, only for it to disappear again immediately as
  716. * the AIL head update accounts in the log tail space.
  717. */
  718. smp_wmb(); /* paired with smp_rmb in xlog_grant_space_left */
  719. xlog_grant_return_space(ailp->ail_log, old_head, ailp->ail_head_lsn);
  720. /* unpin all the log items */
  721. list_for_each_entry(lv, &ctx->lv_chain, lv_list) {
  722. struct xfs_log_item *lip = lv->lv_item;
  723. xfs_lsn_t item_lsn;
  724. if (aborted)
  725. set_bit(XFS_LI_ABORTED, &lip->li_flags);
  726. if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
  727. lip->li_ops->iop_release(lip);
  728. continue;
  729. }
  730. if (lip->li_ops->iop_committed)
  731. item_lsn = lip->li_ops->iop_committed(lip,
  732. ctx->start_lsn);
  733. else
  734. item_lsn = ctx->start_lsn;
  735. /* item_lsn of -1 means the item needs no further processing */
  736. if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
  737. continue;
  738. /*
  739. * if we are aborting the operation, no point in inserting the
  740. * object into the AIL as we are in a shutdown situation.
  741. */
  742. if (aborted) {
  743. ASSERT(xlog_is_shutdown(ailp->ail_log));
  744. if (lip->li_ops->iop_unpin)
  745. lip->li_ops->iop_unpin(lip, 1);
  746. continue;
  747. }
  748. if (item_lsn != ctx->start_lsn) {
  749. /*
  750. * Not a bulk update option due to unusual item_lsn.
  751. * Push into AIL immediately, rechecking the lsn once
  752. * we have the ail lock. Then unpin the item. This does
  753. * not affect the AIL cursor the bulk insert path is
  754. * using.
  755. */
  756. spin_lock(&ailp->ail_lock);
  757. if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
  758. xfs_trans_ail_update(ailp, lip, item_lsn);
  759. else
  760. spin_unlock(&ailp->ail_lock);
  761. if (lip->li_ops->iop_unpin)
  762. lip->li_ops->iop_unpin(lip, 0);
  763. continue;
  764. }
  765. /* Item is a candidate for bulk AIL insert. */
  766. log_items[i++] = lv->lv_item;
  767. if (i >= LOG_ITEM_BATCH_SIZE) {
  768. xlog_cil_ail_insert_batch(ailp, &cur, log_items,
  769. LOG_ITEM_BATCH_SIZE, ctx->start_lsn);
  770. i = 0;
  771. }
  772. }
  773. /* make sure we insert the remainder! */
  774. if (i)
  775. xlog_cil_ail_insert_batch(ailp, &cur, log_items, i,
  776. ctx->start_lsn);
  777. spin_lock(&ailp->ail_lock);
  778. xfs_trans_ail_cursor_done(&cur);
  779. spin_unlock(&ailp->ail_lock);
  780. }
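/*
 * Concrete illustration of the pipelining rule documented above
 * xlog_cil_ail_insert() (the LSNs are invented): checkpoint N might have
 * start LSN 0x100 and commit LSN 0x180, while the pipelined checkpoint
 * N+1 has start LSN 0x140, written before commit N. When every item of N
 * has been written back, the tail may advance only to 0x140, not 0x180;
 * moving it to 0x180 would push N+1's start record out of the active
 * portion of the log and break recovery.
 */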
  781. static void
  782. xlog_cil_free_logvec(
  783. struct list_head *lv_chain)
  784. {
  785. struct xfs_log_vec *lv;
  786. while (!list_empty(lv_chain)) {
  787. lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
  788. list_del_init(&lv->lv_list);
  789. kvfree(lv);
  790. }
  791. }
  792. /*
  793. * Mark all items committed and clear busy extents. We free the log vector
  794. * chains in a separate pass so that we unpin the log items as quickly as
  795. * possible.
  796. */
  797. static void
  798. xlog_cil_committed(
  799. struct xfs_cil_ctx *ctx)
  800. {
  801. struct xfs_mount *mp = ctx->cil->xc_log->l_mp;
  802. bool abort = xlog_is_shutdown(ctx->cil->xc_log);
  803. /*
  804. * If the I/O failed, we're aborting the commit and already shutdown.
  805. * Wake any commit waiters before aborting the log items so we don't
  806. * block async log pushers on callbacks. Async log pushers explicitly do
  807. * not wait on log force completion because they may be holding locks
  808. * required to unpin items.
  809. */
  810. if (abort) {
  811. spin_lock(&ctx->cil->xc_push_lock);
  812. wake_up_all(&ctx->cil->xc_start_wait);
  813. wake_up_all(&ctx->cil->xc_commit_wait);
  814. spin_unlock(&ctx->cil->xc_push_lock);
  815. }
  816. xlog_cil_ail_insert(ctx, abort);
  817. xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
  818. xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list,
  819. xfs_has_discard(mp) && !abort);
  820. spin_lock(&ctx->cil->xc_push_lock);
  821. list_del(&ctx->committing);
  822. spin_unlock(&ctx->cil->xc_push_lock);
  823. xlog_cil_free_logvec(&ctx->lv_chain);
  824. if (!list_empty(&ctx->busy_extents.extent_list)) {
  825. ctx->busy_extents.mount = mp;
  826. ctx->busy_extents.owner = ctx;
  827. xfs_discard_extents(mp, &ctx->busy_extents);
  828. return;
  829. }
  830. kfree(ctx);
  831. }
  832. void
  833. xlog_cil_process_committed(
  834. struct list_head *list)
  835. {
  836. struct xfs_cil_ctx *ctx;
  837. while ((ctx = list_first_entry_or_null(list,
  838. struct xfs_cil_ctx, iclog_entry))) {
  839. list_del(&ctx->iclog_entry);
  840. xlog_cil_committed(ctx);
  841. }
  842. }
  843. /*
  844. * Record the LSN of the iclog we were just granted space to start writing into.
  845. * If the context doesn't have a start_lsn recorded, then this iclog will
  846. * contain the start record for the checkpoint. Otherwise this write contains
  847. * the commit record for the checkpoint.
  848. */
  849. void
  850. xlog_cil_set_ctx_write_state(
  851. struct xfs_cil_ctx *ctx,
  852. struct xlog_in_core *iclog)
  853. {
  854. struct xfs_cil *cil = ctx->cil;
  855. xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);
  856. ASSERT(!ctx->commit_lsn);
  857. if (!ctx->start_lsn) {
  858. spin_lock(&cil->xc_push_lock);
  859. /*
  860. * The LSN we need to pass to the log items on transaction
  861. * commit is the LSN reported by the first log vector write, not
  862. * the commit lsn. If we use the commit record lsn then we can
  863. * move the grant write head beyond the tail LSN and overwrite
  864. * it.
  865. */
  866. ctx->start_lsn = lsn;
  867. wake_up_all(&cil->xc_start_wait);
  868. spin_unlock(&cil->xc_push_lock);
  869. /*
  870. * Make sure the metadata we are about to overwrite in the log
  871. * has been flushed to stable storage before this iclog is
  872. * issued.
  873. */
  874. spin_lock(&cil->xc_log->l_icloglock);
  875. iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
  876. spin_unlock(&cil->xc_log->l_icloglock);
  877. return;
  878. }
  879. /*
  880. * Take a reference to the iclog for the context so that we still hold
  881. * it when xlog_write is done and has released it. This means the
  882. * context controls when the iclog is released for IO.
  883. */
  884. atomic_inc(&iclog->ic_refcnt);
  885. /*
  886. * xlog_state_get_iclog_space() guarantees there is enough space in the
  887. * iclog for an entire commit record, so we can attach the context
  888. * callbacks now. This needs to be done before we make the commit_lsn
  889. * visible to waiters so that checkpoints with commit records in the
  890. * same iclog order their IO completion callbacks in the same order that
  891. * the commit records appear in the iclog.
  892. */
  893. spin_lock(&cil->xc_log->l_icloglock);
  894. list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
  895. spin_unlock(&cil->xc_log->l_icloglock);
  896. /*
  897. * Now we can record the commit LSN and wake anyone waiting for this
  898. * sequence to have the ordered commit record assigned to a physical
  899. * location in the log.
  900. */
  901. spin_lock(&cil->xc_push_lock);
  902. ctx->commit_iclog = iclog;
  903. ctx->commit_lsn = lsn;
  904. wake_up_all(&cil->xc_commit_wait);
  905. spin_unlock(&cil->xc_push_lock);
  906. }
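/*
 * Call sequence for the function above: it runs twice per checkpoint.
 * The first call (start_lsn still zero) publishes the start record LSN,
 * wakes xc_start_wait and flags the iclog with XLOG_ICL_NEED_FLUSH; the
 * second call takes an iclog reference, queues the context on
 * ic_callbacks and publishes commit_lsn for any waiters ordering their
 * commit records behind this one.
 */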
  907. /*
  908. * Ensure that the order of log writes follows checkpoint sequence order. This
  909. * relies on the context LSN being zero until the log write has guaranteed the
  910. * LSN that the log write will start at via xlog_state_get_iclog_space().
  911. */
  912. enum _record_type {
  913. _START_RECORD,
  914. _COMMIT_RECORD,
  915. };
  916. static int
  917. xlog_cil_order_write(
  918. struct xfs_cil *cil,
  919. xfs_csn_t sequence,
  920. enum _record_type record)
  921. {
  922. struct xfs_cil_ctx *ctx;
  923. restart:
  924. spin_lock(&cil->xc_push_lock);
  925. list_for_each_entry(ctx, &cil->xc_committing, committing) {
  926. /*
  927. * Avoid getting stuck in this loop because we were woken by the
  928. * shutdown, but then went back to sleep once already in the
  929. * shutdown state.
  930. */
  931. if (xlog_is_shutdown(cil->xc_log)) {
  932. spin_unlock(&cil->xc_push_lock);
  933. return -EIO;
  934. }
  935. /*
  936. * Higher sequences will wait for this one so skip them.
  937. * Don't wait for our own sequence, either.
  938. */
  939. if (ctx->sequence >= sequence)
  940. continue;
  941. /* Wait until the LSN for the record has been recorded. */
  942. switch (record) {
  943. case _START_RECORD:
  944. if (!ctx->start_lsn) {
  945. xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
  946. goto restart;
  947. }
  948. break;
  949. case _COMMIT_RECORD:
  950. if (!ctx->commit_lsn) {
  951. xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
  952. goto restart;
  953. }
  954. break;
  955. }
  956. }
  957. spin_unlock(&cil->xc_push_lock);
  958. return 0;
  959. }
  960. /*
  961. * Write out the log vector change now attached to the CIL context. This will
  962. * write a start record that needs to be strictly ordered in ascending CIL
  963. * sequence order so that log recovery will always use in-order start LSNs when
  964. * replaying checkpoints.
  965. */
  966. static int
  967. xlog_cil_write_chain(
  968. struct xfs_cil_ctx *ctx,
  969. uint32_t chain_len)
  970. {
  971. struct xlog *log = ctx->cil->xc_log;
  972. int error;
  973. error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
  974. if (error)
  975. return error;
  976. return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
  977. }
  978. /*
  979. * Write out the commit record of a checkpoint transaction to close off a
  980. * running log write. These commit records are strictly ordered in ascending CIL
  981. * sequence order so that log recovery will always replay the checkpoints in the
  982. * correct order.
  983. */
  984. static int
  985. xlog_cil_write_commit_record(
  986. struct xfs_cil_ctx *ctx)
  987. {
  988. struct xlog *log = ctx->cil->xc_log;
  989. struct xlog_op_header ophdr = {
  990. .oh_clientid = XFS_TRANSACTION,
  991. .oh_tid = cpu_to_be32(ctx->ticket->t_tid),
  992. .oh_flags = XLOG_COMMIT_TRANS,
  993. };
  994. struct xfs_log_iovec reg = {
  995. .i_addr = &ophdr,
  996. .i_len = sizeof(struct xlog_op_header),
  997. .i_type = XLOG_REG_TYPE_COMMIT,
  998. };
  999. struct xfs_log_vec vec = {
  1000. .lv_niovecs = 1,
  1001. .lv_iovecp = &reg,
  1002. };
  1003. int error;
  1004. LIST_HEAD(lv_chain);
  1005. list_add(&vec.lv_list, &lv_chain);
  1006. if (xlog_is_shutdown(log))
  1007. return -EIO;
  1008. error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
  1009. if (error)
  1010. return error;
  1011. /* account for space used by record data */
  1012. ctx->ticket->t_curr_res -= reg.i_len;
  1013. error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
  1014. if (error)
  1015. xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
  1016. return error;
  1017. }
  1018. struct xlog_cil_trans_hdr {
  1019. struct xlog_op_header oph[2];
  1020. struct xfs_trans_header thdr;
  1021. struct xfs_log_iovec lhdr[2];
  1022. };
  1023. /*
  1024. * Build a checkpoint transaction header to begin the journal transaction. We
  1025. * need to account for the space used by the transaction header here as it is
  1026. * not accounted for in xlog_write().
  1027. *
  1028. * This is the only place we write a transaction header, so we also build the
  1029. * log opheaders that indicate the start of a log transaction and wrap the
  1030. * transaction header. We keep the start record in its own log vector rather
  1031. * than compacting them into a single region as this ends up making the logic
  1032. * in xlog_write() for handling empty opheaders for start, commit and unmount
  1033. * records much simpler.
  1034. */
  1035. static void
  1036. xlog_cil_build_trans_hdr(
  1037. struct xfs_cil_ctx *ctx,
  1038. struct xlog_cil_trans_hdr *hdr,
  1039. struct xfs_log_vec *lvhdr,
  1040. int num_iovecs)
  1041. {
  1042. struct xlog_ticket *tic = ctx->ticket;
  1043. __be32 tid = cpu_to_be32(tic->t_tid);
  1044. memset(hdr, 0, sizeof(*hdr));
  1045. /* Log start record */
  1046. hdr->oph[0].oh_tid = tid;
  1047. hdr->oph[0].oh_clientid = XFS_TRANSACTION;
  1048. hdr->oph[0].oh_flags = XLOG_START_TRANS;
  1049. /* log iovec region pointer */
  1050. hdr->lhdr[0].i_addr = &hdr->oph[0];
  1051. hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
  1052. hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;
  1053. /* log opheader */
  1054. hdr->oph[1].oh_tid = tid;
  1055. hdr->oph[1].oh_clientid = XFS_TRANSACTION;
  1056. hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));
  1057. /* transaction header in host byte order format */
  1058. hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
  1059. hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
  1060. hdr->thdr.th_tid = tic->t_tid;
  1061. hdr->thdr.th_num_items = num_iovecs;
  1062. /* log iovec region pointer */
  1063. hdr->lhdr[1].i_addr = &hdr->oph[1];
  1064. hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
  1065. sizeof(struct xfs_trans_header);
  1066. hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;
  1067. lvhdr->lv_niovecs = 2;
  1068. lvhdr->lv_iovecp = &hdr->lhdr[0];
  1069. lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;
  1070. tic->t_curr_res -= lvhdr->lv_bytes;
  1071. }
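/*
 * Byte accounting for the header built above, taking the on-disk
 * xlog_op_header as 12 bytes and xfs_trans_header as 16 bytes: lhdr[0]
 * carries the start-record opheader (12 bytes) and lhdr[1] the second
 * opheader plus the transaction header (12 + 16 = 28 bytes), so
 * lv_bytes = 40 and the ticket reservation is reduced by 40 bytes.
 */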
  1072. /*
  1073. * CIL item reordering compare function. We want to order in ascending ID order,
  1074. * but we want to leave items with the same ID in the order they were added to
  1075. * the list. This is important for operations like reflink where we log 4
  1076. * order-dependent intents in a single transaction when we overwrite an existing
  1077. * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
  1078. * CUI (inc), BUI(remap)...
  1079. */
  1080. static int
  1081. xlog_cil_order_cmp(
  1082. void *priv,
  1083. const struct list_head *a,
  1084. const struct list_head *b)
  1085. {
  1086. struct xfs_log_vec *l1 = container_of(a, struct xfs_log_vec, lv_list);
  1087. struct xfs_log_vec *l2 = container_of(b, struct xfs_log_vec, lv_list);
  1088. return l1->lv_order_id > l2->lv_order_id;
  1089. }
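/*
 * Note on the comparison above: list_sort() only needs to know whether
 * l1 sorts after l2, and it is a stable sort, so returning the boolean
 * (l1->lv_order_id > l2->lv_order_id) leaves equal order ids in their
 * insertion order. All items in one transaction share an order id, so
 * for a hypothetical chain added as {2a, 1, 2b} the result is
 * {1, 2a, 2b}, preserving the intent ordering the reflink example
 * above depends on.
 */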
  1090. /*
  1091. * Pull all the log vectors off the items in the CIL, and remove the items from
  1092. * the CIL. We don't need the CIL lock here because it's only needed on the
  1093. * transaction commit side which is currently locked out by the flush lock.
  1094. *
  1095. * If a log item is marked with a whiteout, we do not need to write it to the
  1096. * journal and so we just move it to the whiteout list for the caller to
  1097. * dispose of appropriately.
  1098. */
  1099. static void
  1100. xlog_cil_build_lv_chain(
  1101. struct xfs_cil_ctx *ctx,
  1102. struct list_head *whiteouts,
  1103. uint32_t *num_iovecs,
  1104. uint32_t *num_bytes)
  1105. {
  1106. while (!list_empty(&ctx->log_items)) {
  1107. struct xfs_log_item *item;
  1108. struct xfs_log_vec *lv;
  1109. item = list_first_entry(&ctx->log_items,
  1110. struct xfs_log_item, li_cil);
  1111. if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
  1112. list_move(&item->li_cil, whiteouts);
  1113. trace_xfs_cil_whiteout_skip(item);
  1114. continue;
  1115. }
  1116. lv = item->li_lv;
  1117. lv->lv_order_id = item->li_order_id;
  1118. /* we don't write ordered log vectors */
  1119. if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
  1120. *num_bytes += lv->lv_bytes;
  1121. *num_iovecs += lv->lv_niovecs;
  1122. list_add_tail(&lv->lv_list, &ctx->lv_chain);
  1123. list_del_init(&item->li_cil);
  1124. item->li_order_id = 0;
  1125. item->li_lv = NULL;
  1126. }
  1127. }
  1128. static void
  1129. xlog_cil_cleanup_whiteouts(
  1130. struct list_head *whiteouts)
  1131. {
  1132. while (!list_empty(whiteouts)) {
  1133. struct xfs_log_item *item = list_first_entry(whiteouts,
  1134. struct xfs_log_item, li_cil);
  1135. list_del_init(&item->li_cil);
  1136. trace_xfs_cil_whiteout_unpin(item);
  1137. item->li_ops->iop_unpin(item, 1);
  1138. }
  1139. }
  1140. /*
  1141. * Push the Committed Item List to the log.
  1142. *
  1143. * If the current sequence is the same as xc_push_seq we need to do a flush. If
  1144. * xc_push_seq is less than the current sequence, then it has already been
  1145. * flushed and we don't need to do anything - the caller will wait for it to
  1146. * complete if necessary.
  1147. *
  1148. * xc_push_seq is checked unlocked against the sequence number for a match.
  1149. * Hence we can allow log forces to run racily and not issue pushes for the
  1150. * same sequence twice. If we get a race between multiple pushes for the same
  1151. * sequence they will block on the first one and then abort, hence avoiding
  1152. * needless pushes.
  1153. *
  1154. * This runs from a workqueue so it does not inherit any specific memory
  1155. * allocation context. However, we do not want to block on memory reclaim
  1156. * recursing back into the filesystem because this push may have been triggered
  1157. * by memory reclaim itself. Hence we really need to run under full GFP_NOFS
  1158. * constraints here.
  1159. */
  1160. static void
  1161. xlog_cil_push_work(
  1162. struct work_struct *work)
  1163. {
  1164. unsigned int nofs_flags = memalloc_nofs_save();
  1165. struct xfs_cil_ctx *ctx =
  1166. container_of(work, struct xfs_cil_ctx, push_work);
  1167. struct xfs_cil *cil = ctx->cil;
  1168. struct xlog *log = cil->xc_log;
  1169. struct xfs_cil_ctx *new_ctx;
  1170. int num_iovecs = 0;
  1171. int num_bytes = 0;
  1172. int error = 0;
  1173. struct xlog_cil_trans_hdr thdr;
  1174. struct xfs_log_vec lvhdr = {};
  1175. xfs_csn_t push_seq;
  1176. bool push_commit_stable;
  1177. LIST_HEAD(whiteouts);
  1178. struct xlog_ticket *ticket;
  1179. new_ctx = xlog_cil_ctx_alloc();
  1180. new_ctx->ticket = xlog_cil_ticket_alloc(log);
  1181. down_write(&cil->xc_ctx_lock);
  1182. spin_lock(&cil->xc_push_lock);
  1183. push_seq = cil->xc_push_seq;
  1184. ASSERT(push_seq <= ctx->sequence);
  1185. push_commit_stable = cil->xc_push_commit_stable;
  1186. cil->xc_push_commit_stable = false;
  1187. /*
  1188. * As we are about to switch to a new, empty CIL context, we no longer
  1189. * need to throttle tasks on CIL space overruns. Wake any waiters that
  1190. * the hard push throttle may have caught so they can start committing
  1191. * to the new context. The cil->xc_push_lock provides the serialisation
  1192. * necessary for safely using the lockless waitqueue_active() check in
  1193. * this context.
  1194. */
  1195. if (waitqueue_active(&cil->xc_push_wait))
  1196. wake_up_all(&cil->xc_push_wait);
  1197. xlog_cil_push_pcp_aggregate(cil, ctx);
  1198. /*
  1199. * Check if we've anything to push. If there is nothing, then we don't
  1200. * move on to a new sequence number and so we have to be able to push
  1201. * this sequence again later.
  1202. */
  1203. if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
  1204. cil->xc_push_seq = 0;
  1205. spin_unlock(&cil->xc_push_lock);
  1206. goto out_skip;
  1207. }
  1208. /* check for a previously pushed sequence */
  1209. if (push_seq < ctx->sequence) {
  1210. spin_unlock(&cil->xc_push_lock);
  1211. goto out_skip;
  1212. }
	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the waiter makes to
	 * wait on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);
	/*
	 * Switch the contexts so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	xlog_cil_ctx_switch(cil, new_ctx);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Sort the log vector chain before we add the transaction headers.
	 * This ensures we always have the transaction headers at the start
	 * of the chain.
	 */
	list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
	 * it gets written into the iclog first.
	 */
	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
	num_bytes += lvhdr.lv_bytes;
	list_add(&lvhdr.lv_list, &ctx->lv_chain);

	/*
	 * Take the lvhdr back off the lv_chain immediately after calling
	 * xlog_cil_write_chain() as it should not be passed to log IO
	 * completion.
	 */
	error = xlog_cil_write_chain(ctx, num_bytes);
	list_del(&lvhdr.lv_list);
	if (error)
		goto out_abort_free_ticket;

	error = xlog_cil_write_commit_record(ctx);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * Grab the ticket from the ctx so we can ungrant it after releasing the
	 * commit_iclog. The ctx may be freed by the time we return from
	 * releasing the commit_iclog (i.e. checkpoint has been completed and
	 * callback run) so we can't reference the ctx after the call to
	 * xlog_state_release_iclog().
	 */
	ticket = ctx->ticket;
	/*
	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
	 * to complete before we submit the commit_iclog. We can't use state
	 * checks for this - ACTIVE can be either a past completed iclog or a
	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
	 * past or future iclog awaiting IO or ordered IO completion to be run.
	 * In the latter case, if it's a future iclog and we wait on it, then we
	 * will hang because it won't get processed through to ic_force_wait
	 * wakeup until this commit_iclog is written to disk. Hence we use the
	 * iclog header lsn and compare it to the commit lsn to determine if we
	 * need to wait on iclogs or not.
	 */
	spin_lock(&log->l_icloglock);
	if (ctx->start_lsn != ctx->commit_lsn) {
		xfs_lsn_t	plsn;

		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
			/*
			 * Waiting on ic_force_wait orders the completion of
			 * iclogs older than ic_prev. Hence we only need to wait
			 * on the most recent older iclog here.
			 */
			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
			spin_lock(&log->l_icloglock);
		}

		/*
		 * We need to issue a pre-flush so that the ordering for this
		 * checkpoint is correctly preserved down to stable storage.
		 */
		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
	}
	/*
	 * The commit iclog must be written to stable storage to guarantee
	 * journal IO vs metadata writeback IO is correctly ordered on stable
	 * storage.
	 *
	 * If the push caller needs the commit to be immediately stable and the
	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
	 * will be written when released, switch its state to WANT_SYNC right
	 * now.
	 */
	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
	if (push_commit_stable &&
	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);

	/* Not safe to reference ctx now! */

	spin_unlock(&log->l_icloglock);
	xlog_cil_cleanup_whiteouts(&whiteouts);
	xfs_log_ticket_ungrant(log, ticket);
	memalloc_nofs_restore(nofs_flags);
	return;
out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kfree(new_ctx);
	memalloc_nofs_restore(nofs_flags);
	return;

out_abort_free_ticket:
	ASSERT(xlog_is_shutdown(log));
	xlog_cil_cleanup_whiteouts(&whiteouts);
	if (!ctx->commit_iclog) {
		xfs_log_ticket_ungrant(log, ctx->ticket);
		xlog_cil_committed(ctx);
		memalloc_nofs_restore(nofs_flags);
		return;
	}
	spin_lock(&log->l_icloglock);
	ticket = ctx->ticket;
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);

	/* Not safe to reference ctx now! */

	spin_unlock(&log->l_icloglock);
	xfs_log_ticket_ungrant(log, ticket);
	memalloc_nofs_restore(nofs_flags);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	int		space_used = atomic_read(&cil->xc_ctx->space_used);

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));

	/*
	 * We are done if:
	 * - we haven't used up all the space available yet; or
	 * - we've already queued up a push; and
	 * - we're not over the hard limit; and
	 * - nothing has been over the hard limit.
	 *
	 * If so, we don't need to take the push lock as there's nothing to do.
	 */
	if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
	    (cil->xc_push_seq == cil->xc_current_sequence &&
	     space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
	     !waitqueue_active(&cil->xc_push_wait))) {
		up_read(&cil->xc_ctx_lock);
		return;
	}

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	}

	/*
	 * Drop the context lock now, we can't hold that if we need to sleep
	 * because we are over the blocking threshold. The push_lock is still
	 * held, so blocking threshold sleep/wakeup is still correctly
	 * serialised here.
	 */
	up_read(&cil->xc_ctx_lock);

	/*
	 * If we are well over the space limit, throttle the work that is being
	 * done until the push work on this context has begun. Enforce the hard
	 * throttle on all transaction commits once it has been activated, even
	 * if the committing transactions have resulted in the space usage
	 * dipping back down under the hard limit.
	 *
	 * The cil->xc_push_lock provides the serialisation necessary for
	 * safely calling xlog_cil_over_hard_limit() in this context.
	 */
	if (xlog_cil_over_hard_limit(log, space_used)) {
		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
		ASSERT(space_used < log->l_logsize);
		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
		return;
	}

	spin_unlock(&cil->xc_push_lock);
}
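
/*
 * For reference, the two limits used above come from xfs_log_priv.h. A
 * sketch of their shape (simplified; the real definitions also clamp
 * against the available iclog buffer space):
 *
 *	#define XLOG_CIL_SPACE_LIMIT(log)	((log)->l_logsize >> 3)
 *	#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
 *		(XLOG_CIL_SPACE_LIMIT(log) * 2)
 *
 * i.e. background pushes start at roughly 1/8 of the log size and commits
 * start blocking at roughly twice that.
 */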

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the workqueue
 * to get previously queued work moving to minimise the time the caller spends
 * waiting for all outstanding pushes to complete. The caller is expected to do
 * the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
 * don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_csn_t	push_seq,
	bool		async)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	if (!async)
		flush_workqueue(cil->xc_push_wq);

	spin_lock(&cil->xc_push_lock);

	/*
	 * If this is an async flush request, we always need to set the
	 * xc_push_commit_stable flag even if something else has already queued
	 * a push. The flush caller is asking for the CIL to be on stable
	 * storage when the next push completes, so regardless of who has queued
	 * the push, the flush requires stable semantics from it.
	 */
	cil->xc_push_commit_stable = async;

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no more work that we need to do.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
	    push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	spin_unlock(&cil->xc_push_lock);
}
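
/*
 * Both call modes appear later in this file: xlog_cil_flush() issues an
 * async push that must make the commit record stable by itself:
 *
 *	xlog_cil_push_now(log, seq, true);
 *
 * while xlog_cil_force_seq() issues a synchronous push and then does its
 * own waiting on the committing list:
 *
 *	xlog_cil_push_now(log, sequence, false);
 */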

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * If there are intent done items in this transaction and the related intent was
 * committed in the current (same) CIL checkpoint, we don't need to write either
 * the intent or intent done item to the journal as the change will be
 * journalled atomically within this checkpoint. As we cannot remove items from
 * the CIL here, mark the related intent with a whiteout so that the CIL push
 * can remove it rather than writing it to the journal. Then remove the intent
 * done item from the current transaction and release it so it doesn't get put
 * into the CIL at all.
 */
static uint32_t
xlog_cil_process_intents(
	struct xfs_cil		*cil,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip, *ilip, *next;
	uint32_t		len = 0;

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
			continue;

		ilip = lip->li_ops->iop_intent(lip);
		if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
			continue;
		set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
		trace_xfs_cil_whiteout_mark(ilip);
		len += ilip->li_lv->lv_bytes;
		kvfree(ilip->li_lv);
		ilip->li_lv = NULL;

		xfs_trans_del_item(lip);
		lip->li_ops->iop_release(lip);
	}
	return len;
}
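
/*
 * For illustration, an intent done item's ->iop_intent() method simply
 * returns the log item of the intent it completes. A sketch modelled on
 * the extent free (EFI/EFD) pair in fs/xfs/xfs_extfree_item.c:
 *
 *	static struct xfs_log_item *
 *	xfs_efd_item_intent(
 *		struct xfs_log_item	*lip)
 *	{
 *		return &EFD_ITEM(lip)->efd_efip->efi_item;
 *	}
 */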

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xlog_cil_commit(
	struct xlog		*log,
	struct xfs_trans	*tp,
	xfs_csn_t		*commit_seq,
	bool			regrant)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_item	*lip, *next;
	uint32_t		released_space = 0;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
		released_space = xlog_cil_process_intents(cil, tp);

	xlog_cil_insert_items(log, tp, released_space);

	if (regrant && !xlog_is_shutdown(log))
		xfs_log_ticket_regrant(log, tp->t_ticket);
	else
		xfs_log_ticket_ungrant(log, tp->t_ticket);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and possibly freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
	}
	if (commit_seq)
		*commit_seq = cil->xc_ctx->sequence;

	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
	xlog_cil_push_background(log);
}
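
/*
 * Usage sketch: the transaction commit path hands the transaction to the
 * CIL rather than writing it to the log directly. Simplified from
 * __xfs_trans_commit() in fs/xfs/xfs_trans.c (error paths elided):
 *
 *	xfs_csn_t	commit_seq = 0;
 *
 *	xlog_cil_commit(log, tp, &commit_seq, regrant);
 *	xfs_trans_free(tp);
 *
 * The returned commit sequence can later be passed to xlog_cil_force_seq()
 * to wait for this checkpoint to reach stable storage.
 */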

/*
 * Flush the CIL to stable storage but don't wait for it to complete. This
 * requires the CIL push to ensure the commit record for the push hits the disk,
 * but otherwise is no different to a push done from a log force.
 */
void
xlog_cil_flush(
	struct xlog	*log)
{
	xfs_csn_t	seq = log->l_cilp->xc_current_sequence;

	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
	xlog_cil_push_now(log, seq, true);

	/*
	 * If the CIL is empty, make sure that any previous checkpoint that may
	 * still be in an active iclog is pushed to stable storage.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
		xfs_log_force(log->l_mp, 0);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number given.
 * Hence the only time we will trigger a push here is if the push sequence is
 * the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
	struct xlog	*log,
	xfs_csn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	if (!sequence)
		sequence = cil->xc_current_sequence;
	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence, false);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not empty,
	 * it means we haven't yet started the push, because if it had started
	 * we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
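
/*
 * Usage sketch for the return value semantics above, modelled on
 * xfs_log_force_seq() in fs/xfs/xfs_log.c (simplified; statistics and
 * already-slept handling elided):
 *
 *	lsn = xlog_cil_force_seq(log, seq);
 *	if (lsn == NULLCOMMITLSN)
 *		return 0;
 *
 * A NULLCOMMITLSN return means the sequence is already stable and there is
 * nothing to force, while a zero LSN (shutdown) still flows into the iclog
 * force so the iclog state machine error handling gets to run.
 */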

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;
	struct xlog_cil_pcp *cilpcp;
	int		cpu;

	cil = kzalloc(sizeof(*cil), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!cil)
		return -ENOMEM;
	/*
	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
	 * concurrency the log spinlocks will be exposed to.
	 */
	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			4, log->l_mp->m_super->s_id);
	if (!cil->xc_push_wq)
		goto out_destroy_cil;

	cil->xc_log = log;
	cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
	if (!cil->xc_pcp)
		goto out_destroy_wq;

	for_each_possible_cpu(cpu) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		INIT_LIST_HEAD(&cilpcp->busy_extents);
		INIT_LIST_HEAD(&cilpcp->log_items);
	}

	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_start_wait);
	init_waitqueue_head(&cil->xc_commit_wait);
	log->l_cilp = cil;

	ctx = xlog_cil_ctx_alloc();
	xlog_cil_ctx_switch(cil, ctx);
	return 0;

out_destroy_wq:
	destroy_workqueue(cil->xc_push_wq);
out_destroy_cil:
	kfree(cil);
	return -ENOMEM;
}
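
/*
 * Illustrative pairing (a sketch, assuming the usual log setup/teardown
 * paths in fs/xfs/xfs_log.c; the error label here is hypothetical):
 * xlog_cil_init() is called once while the log is being allocated and must
 * be matched by xlog_cil_destroy() on teardown:
 *
 *	error = xlog_cil_init(log);
 *	if (error)
 *		goto out_free_log;
 *	...
 *	xlog_cil_destroy(log);
 */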

void
xlog_cil_destroy(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (cil->xc_ctx) {
		if (cil->xc_ctx->ticket)
			xfs_log_ticket_put(cil->xc_ctx->ticket);
		kfree(cil->xc_ctx);
	}

	ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
	free_percpu(cil->xc_pcp);
	destroy_workqueue(cil->xc_push_wq);
	kfree(cil);
}