xfs_trans_dquot.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2000-2002 Silicon Graphics, Inc.
  4. * All Rights Reserved.
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_log_format.h"
  11. #include "xfs_trans_resv.h"
  12. #include "xfs_mount.h"
  13. #include "xfs_inode.h"
  14. #include "xfs_trans.h"
  15. #include "xfs_trans_priv.h"
  16. #include "xfs_quota.h"
  17. #include "xfs_qm.h"
  18. #include "xfs_trace.h"
  19. #include "xfs_error.h"
  20. #include "xfs_health.h"
  21. STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	/* The log item's back pointer must reference this dquot. */
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}
/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Upgrade the dquot to bigtime format if possible.  The root dquot
	 * (id 0) carries the default limits and is never upgraded here.
	 */
	if (dqp->q_id != 0 &&
	    xfs_has_bigtime(tp->t_mountp) &&
	    !(dqp->q_type & XFS_DQTYPE_BIGTIME))
		dqp->q_type |= XFS_DQTYPE_BIGTIME;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}
/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction
 */
void
xfs_trans_dup_dqinfo(
	struct xfs_trans	*otp,
	struct xfs_trans	*ntp)
{
	struct xfs_dqtrx	*oq, *nq;
	int			i, j;
	struct xfs_dqtrx	*oqa, *nqa;
	uint64_t		blk_res_used;

	/* Nothing to carry forward if the old transaction tracked no dquots. */
	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			/* The slot arrays are filled densely; first NULL ends the scan. */
			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			/*
			 * Blocks already allocated in the old transaction
			 * consume part of its reservation; only the remainder
			 * moves to the new transaction.
			 */
			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;
		}
	}
}
#ifdef CONFIG_XFS_LIVE_HOOKS
/*
 * Use a static key here to reduce the overhead of quota live updates.  If the
 * compiler supports jump labels, the static branch will be replaced by a nop
 * sled when there are no hook users.  Online fsck is currently the only
 * caller, so this is a reasonable tradeoff.
 *
 * Note: Patching the kernel code requires taking the cpu hotplug lock.  Other
 * parts of the kernel allocate memory with that lock held, which means that
 * XFS callers cannot hold any locks that might be used by memory reclaim or
 * writeback when calling the static_branch_{inc,dec} functions.
 */
DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_dqtrx_hooks_switch);

/* Stop delivering dqtrx hook calls to registered consumers. */
void
xfs_dqtrx_hook_disable(void)
{
	xfs_hooks_switch_off(&xfs_dqtrx_hooks_switch);
}
/* Start delivering dqtrx hook calls to registered consumers. */
void
xfs_dqtrx_hook_enable(void)
{
	xfs_hooks_switch_on(&xfs_dqtrx_hooks_switch);
}
/* Schedule a transactional dquot update on behalf of an inode. */
void
xfs_trans_mod_ino_dquot(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*dqp,
	unsigned int		field,
	int64_t			delta)
{
	/* Record the delta in the transaction's dqtrx slot first... */
	xfs_trans_mod_dquot(tp, dqp, field, delta);

	/* ...then notify any live-update consumers (e.g. online fsck). */
	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_mod_ino_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.ino		= ip->i_ino,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
			.delta		= delta
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_mod_ino_dqtrx_hooks, field, &p);
	}
}
/*
 * Call the specified functions during a dquot counter update.
 *
 * Returns 0 on success or a negative errno if either hook could not be
 * registered; on failure neither hook remains installed.
 */
int
xfs_dqtrx_hook_add(
	struct xfs_quotainfo	*qi,
	struct xfs_dqtrx_hook	*hook)
{
	int			error;

	/*
	 * Transactional dquot updates first call the mod hook when changes
	 * are attached to the transaction and then call the apply hook when
	 * those changes are committed (or canceled).
	 *
	 * The apply hook must be installed before the mod hook so that we
	 * never fail to catch the end of a quota update sequence.
	 */
	error = xfs_hooks_add(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
	if (error)
		goto out;
	error = xfs_hooks_add(&qi->qi_mod_ino_dqtrx_hooks, &hook->mod_hook);
	if (error)
		goto out_apply;

	return 0;

out_apply:
	/* Undo the apply-hook registration so we leave nothing half-installed. */
	xfs_hooks_del(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
out:
	return error;
}
/* Stop calling the specified function during a dquot counter update. */
void
xfs_dqtrx_hook_del(
	struct xfs_quotainfo	*qi,
	struct xfs_dqtrx_hook	*hook)
{
	/*
	 * The mod hook must be removed before the apply hook to avoid giving
	 * the hook consumer an incomplete update.  No hooks should be running
	 * after these functions return.
	 */
	xfs_hooks_del(&qi->qi_mod_ino_dqtrx_hooks, &hook->mod_hook);
	xfs_hooks_del(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
}
  192. /* Configure dquot update hook functions. */
  193. void
  194. xfs_dqtrx_hook_setup(
  195. struct xfs_dqtrx_hook *hook,
  196. notifier_fn_t mod_fn,
  197. notifier_fn_t apply_fn)
  198. {
  199. xfs_hook_setup(&hook->mod_hook, mod_fn);
  200. xfs_hook_setup(&hook->apply_hook, apply_fn);
  201. }
  202. #endif /* CONFIG_XFS_LIVE_HOOKS */
/*
 * Wrap around mod_dquot to account for both user and group quotas.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	int64_t		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	/* Quota inodes themselves are never charged against quotas. */
	if (!XFS_IS_QUOTA_ON(mp) ||
	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return;

	/* Apply the delta to each attached dquot whose quota type is enabled. */
	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_udquot, field, delta);
	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_gdquot, field, delta);
	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_pdquot, field, delta);
}
/*
 * Find the dqtrx slot in the transaction that tracks @dqp, or the first
 * free slot in the per-type array if @dqp has no slot yet.  Returns NULL
 * for an unrecognized dquot type or if the array is full.
 */
STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	int			i;
	struct xfs_dqtrx	*qa;

	/* Pick the slot array for this dquot's quota type. */
	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_USER:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
		break;
	case XFS_DQTYPE_GROUP:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
		break;
	case XFS_DQTYPE_PROJ:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
		break;
	default:
		return NULL;
	}

	/* Slots are filled densely, so a NULL entry is the first free one. */
	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
	}

	return NULL;
}
/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	uint			field,
	int64_t			delta)
{
	struct xfs_dqtrx	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_ON(tp->t_mountp));
	qtrx = NULL;

	/* A zero delta changes nothing; don't allocate a slot for it. */
	if (!delta)
		return;

	/* Lazily allocate the per-transaction dquot delta tracking. */
	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	trace_xfs_trans_mod_dquot_before(qtrx);
	trace_xfs_trans_mod_dquot(tp, dqp, field, delta);

	switch (field) {
	/* regular disk blk reservation */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += delta;
		break;

	/* inode reservation */
	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += delta;
		break;

	/* disk blocks used. */
	case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	/* Inode Count */
	case XFS_TRANS_DQ_ICOUNT:
		/*
		 * Newly allocated inodes consume any inode reservation
		 * made earlier in this transaction.
		 */
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	/* rtblk reservation */
	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += delta;
		break;

	/* rtblk count */
	case XFS_TRANS_DQ_RTBCOUNT:
		/* Same consumption accounting as icount, for realtime blocks. */
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}

	trace_xfs_trans_mod_dquot_after(qtrx);
}
/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.
 */
STATIC void
xfs_trans_dqlockedjoin(
	struct xfs_trans	*tp,
	struct xfs_dqtrx	*q)
{
	unsigned int		i;
	ASSERT(q[0].qt_dquot != NULL);

	/*
	 * Use the cheapest multi-dquot locking helper that fits the number
	 * of populated slots; the helpers establish a consistent lock order
	 * to avoid deadlock.
	 */
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else if (q[2].qt_dquot == NULL) {
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	} else {
		xfs_dqlockn(q);
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			if (q[i].qt_dquot == NULL)
				break;
			xfs_trans_dqjoin(tp, q[i].qt_dquot);
		}
	}
}
/*
 * Apply dqtrx changes to the quota reservation counters.
 *
 * @reserved is the transaction's reservation against this resource,
 * @res_used the portion of it actually consumed, and @count_delta the
 * change in usage for blocks that bypassed the reservation path.
 */
static inline void
xfs_apply_quota_reservation_deltas(
	struct xfs_dquot_res	*res,
	uint64_t		reserved,
	int64_t			res_used,
	int64_t			count_delta)
{
	if (reserved != 0) {
		/*
		 * Subtle math here: If reserved > res_used (the normal case),
		 * we're simply subtracting the unused transaction quota
		 * reservation from the dquot reservation.
		 *
		 * If, however, res_used > reserved, then we have allocated
		 * more quota blocks than were reserved for the transaction.
		 * We must add that excess to the dquot reservation since it
		 * tracks (usage + resv) and by definition we didn't reserve
		 * that excess.
		 */
		res->reserved -= abs(reserved - res_used);
	} else if (count_delta != 0) {
		/*
		 * These blks were never reserved, either inside a transaction
		 * or outside one (in a delayed allocation). Also, this isn't
		 * always a negative number since we sometimes deliberately
		 * skip quota reservations.
		 */
		res->reserved += count_delta;
	}
}
#ifdef CONFIG_XFS_LIVE_HOOKS
/* Call downstream hooks now that it's time to apply dquot deltas. */
static inline void
xfs_trans_apply_dquot_deltas_hook(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	/* Static branch keeps this a no-op when no hooks are registered. */
	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_apply_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_apply_dqtrx_hooks,
				XFS_APPLY_DQTRX_COMMIT, &p);
	}
}
#else
# define xfs_trans_apply_dquot_deltas_hook(tp, dqp)	((void)0)
#endif /* CONFIG_XFS_LIVE_HOOKS */
  405. /*
  406. * Called by xfs_trans_commit() and similar in spirit to
  407. * xfs_trans_apply_sb_deltas().
  408. * Go thru all the dquots belonging to this transaction and modify the
  409. * INCORE dquot to reflect the actual usages.
  410. * Unreserve just the reservations done by this transaction.
  411. * dquot is still left locked at exit.
  412. */
  413. void
  414. xfs_trans_apply_dquot_deltas(
  415. struct xfs_trans *tp)
  416. {
  417. int i, j;
  418. struct xfs_dquot *dqp;
  419. struct xfs_dqtrx *qtrx, *qa;
  420. int64_t totalbdelta;
  421. int64_t totalrtbdelta;
  422. if (!tp->t_dqinfo)
  423. return;
  424. ASSERT(tp->t_dqinfo);
  425. for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
  426. qa = tp->t_dqinfo->dqs[j];
  427. if (qa[0].qt_dquot == NULL)
  428. continue;
  429. /*
  430. * Lock all of the dquots and join them to the transaction.
  431. */
  432. xfs_trans_dqlockedjoin(tp, qa);
  433. for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
  434. uint64_t blk_res_used;
  435. qtrx = &qa[i];
  436. /*
  437. * The array of dquots is filled
  438. * sequentially, not sparsely.
  439. */
  440. if ((dqp = qtrx->qt_dquot) == NULL)
  441. break;
  442. ASSERT(XFS_DQ_IS_LOCKED(dqp));
  443. xfs_trans_apply_dquot_deltas_hook(tp, dqp);
  444. /*
  445. * adjust the actual number of blocks used
  446. */
  447. /*
  448. * The issue here is - sometimes we don't make a blkquota
  449. * reservation intentionally to be fair to users
  450. * (when the amount is small). On the other hand,
  451. * delayed allocs do make reservations, but that's
  452. * outside of a transaction, so we have no
  453. * idea how much was really reserved.
  454. * So, here we've accumulated delayed allocation blks and
  455. * non-delay blks. The assumption is that the
  456. * delayed ones are always reserved (outside of a
  457. * transaction), and the others may or may not have
  458. * quota reservations.
  459. */
  460. totalbdelta = qtrx->qt_bcount_delta +
  461. qtrx->qt_delbcnt_delta;
  462. totalrtbdelta = qtrx->qt_rtbcount_delta +
  463. qtrx->qt_delrtb_delta;
  464. if (totalbdelta != 0 || totalrtbdelta != 0 ||
  465. qtrx->qt_icount_delta != 0) {
  466. trace_xfs_trans_apply_dquot_deltas_before(dqp);
  467. trace_xfs_trans_apply_dquot_deltas(qtrx);
  468. }
  469. #ifdef DEBUG
  470. if (totalbdelta < 0)
  471. ASSERT(dqp->q_blk.count >= -totalbdelta);
  472. if (totalrtbdelta < 0)
  473. ASSERT(dqp->q_rtb.count >= -totalrtbdelta);
  474. if (qtrx->qt_icount_delta < 0)
  475. ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
  476. #endif
  477. if (totalbdelta)
  478. dqp->q_blk.count += totalbdelta;
  479. if (qtrx->qt_icount_delta)
  480. dqp->q_ino.count += qtrx->qt_icount_delta;
  481. if (totalrtbdelta)
  482. dqp->q_rtb.count += totalrtbdelta;
  483. if (totalbdelta != 0 || totalrtbdelta != 0 ||
  484. qtrx->qt_icount_delta != 0)
  485. trace_xfs_trans_apply_dquot_deltas_after(dqp);
  486. /*
  487. * Get any default limits in use.
  488. * Start/reset the timer(s) if needed.
  489. */
  490. if (dqp->q_id) {
  491. xfs_qm_adjust_dqlimits(dqp);
  492. xfs_qm_adjust_dqtimers(dqp);
  493. }
  494. dqp->q_flags |= XFS_DQFLAG_DIRTY;
  495. /*
  496. * add this to the list of items to get logged
  497. */
  498. xfs_trans_log_dquot(tp, dqp);
  499. /*
  500. * Take off what's left of the original reservation.
  501. * In case of delayed allocations, there's no
  502. * reservation that a transaction structure knows of.
  503. */
  504. blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
  505. xfs_apply_quota_reservation_deltas(&dqp->q_blk,
  506. qtrx->qt_blk_res, blk_res_used,
  507. qtrx->qt_bcount_delta);
  508. /*
  509. * Adjust the RT reservation.
  510. */
  511. xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
  512. qtrx->qt_rtblk_res,
  513. qtrx->qt_rtblk_res_used,
  514. qtrx->qt_rtbcount_delta);
  515. /*
  516. * Adjust the inode reservation.
  517. */
  518. ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
  519. xfs_apply_quota_reservation_deltas(&dqp->q_ino,
  520. qtrx->qt_ino_res,
  521. qtrx->qt_ino_res_used,
  522. qtrx->qt_icount_delta);
  523. ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
  524. ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
  525. ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
  526. /*
  527. * We've applied the count changes and given back
  528. * whatever reservation we didn't use. Zero out the
  529. * dqtrx fields.
  530. */
  531. qtrx->qt_blk_res = 0;
  532. qtrx->qt_bcount_delta = 0;
  533. qtrx->qt_delbcnt_delta = 0;
  534. qtrx->qt_rtblk_res = 0;
  535. qtrx->qt_rtblk_res_used = 0;
  536. qtrx->qt_rtbcount_delta = 0;
  537. qtrx->qt_delrtb_delta = 0;
  538. qtrx->qt_ino_res = 0;
  539. qtrx->qt_ino_res_used = 0;
  540. qtrx->qt_icount_delta = 0;
  541. }
  542. }
  543. }
#ifdef CONFIG_XFS_LIVE_HOOKS
/* Call downstream hooks now that it's time to cancel dquot deltas. */
static inline void
xfs_trans_unreserve_and_mod_dquots_hook(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	/* Static branch keeps this a no-op when no hooks are registered. */
	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_apply_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_apply_dqtrx_hooks,
				XFS_APPLY_DQTRX_UNRESERVE, &p);
	}
}
#else
# define xfs_trans_unreserve_and_mod_dquots_hook(tp, dqp)	((void)0)
#endif /* CONFIG_XFS_LIVE_HOOKS */
/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 *
 * @already_locked indicates the caller holds the dquot locks; otherwise
 * each dquot is locked only while its reservations are backed out.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	struct xfs_trans	*tp,
	bool			already_locked)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	bool			locked;

	if (!tp->t_dqinfo)
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			xfs_trans_unreserve_and_mod_dquots_hook(tp, dqp);

			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 * The lock is taken lazily, only if any reservation
			 * actually needs to be released.
			 */
			locked = already_locked;
			if (qtrx->qt_blk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_blk.reserved -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}

			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_ino.reserved -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_rtb.reserved -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			/* Only drop the lock if we took it here. */
			if (locked && !already_locked)
				xfs_dqunlock(dqp);
		}
	}
}
/*
 * Send a QUOTA_NL_* warning of the given @type to userspace via the
 * quota netlink interface, translating the XFS dquot type to the VFS
 * quota_type.  Unknown dquot types are silently ignored.
 */
STATIC void
xfs_quota_warn(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int			type)
{
	enum quota_type		qtype;

	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_PROJ:
		qtype = PRJQUOTA;
		break;
	case XFS_DQTYPE_USER:
		qtype = USRQUOTA;
		break;
	case XFS_DQTYPE_GROUP:
		qtype = GRPQUOTA;
		break;
	default:
		return;
	}

	quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
			   mp->m_super->s_dev, type);
}
/*
 * Decide if we can make an additional reservation against a quota resource.
 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
 *
 * Note that we assume that the numeric difference between the inode and block
 * warning codes will always be 3 since it's userspace ABI now, and will never
 * decrease the quota reservation, so the *BELOW messages are irrelevant.
 */
static inline int
xfs_dqresv_check(
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim,
	int64_t			delta,
	bool			*fatal)
{
	xfs_qcnt_t		hardlimit = res->hardlimit;
	xfs_qcnt_t		softlimit = res->softlimit;
	xfs_qcnt_t		total_count = res->reserved + delta;

	BUILD_BUG_ON(QUOTA_NL_BHARDWARN     != QUOTA_NL_IHARDWARN     + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTWARN     != QUOTA_NL_ISOFTWARN     + 3);

	*fatal = false;
	/* Releasing resources (or a zero delta) can never exceed a limit. */
	if (delta <= 0)
		return QUOTA_NL_NOWARN;

	/* A zero per-dquot limit means "use the filesystem default". */
	if (!hardlimit)
		hardlimit = qlim->hard;
	if (!softlimit)
		softlimit = qlim->soft;

	if (hardlimit && total_count > hardlimit) {
		*fatal = true;
		return QUOTA_NL_IHARDWARN;
	}

	if (softlimit && total_count > softlimit) {
		time64_t	now = ktime_get_real_seconds();

		/* Past the grace period, a soft overage becomes fatal too. */
		if (res->timer != 0 && now > res->timer) {
			*fatal = true;
			return QUOTA_NL_ISOFTLONGWARN;
		}

		return QUOTA_NL_ISOFTWARN;
	}

	return QUOTA_NL_NOWARN;
}
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 *
 * Returns 0 on success, -EDQUOT/-ENOSPC if the reservation would exceed
 * an enforced limit, or -EFSCORRUPTED if the incore counters are found
 * to be inconsistent (which also shuts down the filesystem).
 */
STATIC int
xfs_trans_dqresv(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot_res	*blkres;
	struct xfs_quota_limits	*qlim;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

	/* The block reservation targets either the data or realtime device. */
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		blkres = &dqp->q_blk;
		qlim = &defq->blk;
	} else {
		blkres = &dqp->q_rtb;
		qlim = &defq->rtb;
	}

	/*
	 * Enforce limits unless the caller forces the reservation, this is
	 * the default dquot (id 0), or enforcement is off for this type.
	 */
	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
	    xfs_dquot_is_enforced(dqp)) {
		int		quota_nl;
		bool		fatal;

		/*
		 * dquot is locked already. See if we'd go over the hardlimit
		 * or exceed the timelimit if we'd reserve resources.
		 */
		quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			/*
			 * Quota block warning codes are 3 more than the inode
			 * codes, which we check above.
			 */
			xfs_quota_warn(mp, dqp, quota_nl + 3);
			if (fatal)
				goto error_return;
		}

		quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
				&fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			xfs_quota_warn(mp, dqp, quota_nl);
			if (fatal)
				goto error_return;
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_blk.reserved = q_blk.count + resv
	 */
	blkres->reserved += (xfs_qcnt_t)nblks;
	dqp->q_ino.reserved += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		xfs_trans_mod_dquot(tp, dqp, flags & XFS_QMOPT_RESBLK_MASK,
				    nblks);
		xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos);
	}

	/* Sanity-check the invariant reserved >= count for every resource. */
	if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
		goto error_corrupt;

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	/* Project quota failures are reported as ENOSPC, not EDQUOT. */
	if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
		return -ENOSPC;
	return -EDQUOT;
error_corrupt:
	xfs_dqunlock(dqp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
	return -EFSCORRUPTED;
}
/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows a all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	int		error;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;

	/*
	 * Unwind in reverse order; unwind_grp deliberately falls through
	 * into unwind_usr.  Reservations are backed out by reserving the
	 * negated amounts with FORCE_RES so the removal cannot fail.
	 */
unwind_grp:
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}
/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 *
 * @dblocks/@rblocks are the data and realtime block counts to reserve;
 * @force bypasses limit enforcement.  If the realtime reservation fails,
 * the data reservation is backed out before returning the error.
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int64_t			dblocks,
	int64_t			rblocks,
	bool			force)
{
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		qflags = 0;
	int			error;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	if (force)
		qflags |= XFS_QMOPT_FORCE_RES;

	/* Reserve data device quota against the inode's dquots. */
	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
			ip->i_gdquot, ip->i_pdquot, dblocks, 0,
			XFS_QMOPT_RES_REGBLKS | qflags);
	if (error)
		return error;

	/* Do the same but for realtime blocks. */
	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
			ip->i_gdquot, ip->i_pdquot, rblocks, 0,
			XFS_QMOPT_RES_RTBLKS | qflags);
	if (error) {
		/* Back out the data reservation; forced, so it cannot fail. */
		xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot, -dblocks, 0,
				XFS_QMOPT_RES_REGBLKS);
		return error;
	}

	return 0;
}
/*
 * Change the quota reservations for an inode creation activity.
 * Reserves @dblocks data blocks plus one inode against each supplied dquot.
 */
int
xfs_trans_reserve_quota_icreate(
	struct xfs_trans	*tp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			dblocks)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	return xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
			dblocks, 1, XFS_QMOPT_RES_REGBLKS);
}
/*
 * Allocate the per-transaction dquot delta tracking structure.  Uses
 * __GFP_NOFAIL, so the allocation cannot fail.
 */
STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_cache_zalloc(xfs_dqtrx_cache,
					 GFP_KERNEL | __GFP_NOFAIL);
}
  902. void
  903. xfs_trans_free_dqinfo(
  904. xfs_trans_t *tp)
  905. {
  906. if (!tp->t_dqinfo)
  907. return;
  908. kmem_cache_free(xfs_dqtrx_cache, tp->t_dqinfo);
  909. tp->t_dqinfo = NULL;
  910. }