// SPDX-License-Identifier: GPL-2.0-only
/*
 *	vfsv0 quota IO operations on file
 */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>

#include <asm/byteorder.h>

#include "quota_tree.h"

MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");

/*
 * Maximum quota tree depth we support. Only to limit recursion when working
 * with the tree.
 */
#define MAX_QTREE_DEPTH 6

#define __QUOTA_QT_PARANOIA
  25. static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
  26. {
  27. unsigned int epb = info->dqi_usable_bs >> 2;
  28. depth = info->dqi_qtree_depth - depth - 1;
  29. while (depth--)
  30. id /= epb;
  31. return id % epb;
  32. }
  33. static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
  34. {
  35. qid_t id = from_kqid(&init_user_ns, qid);
  36. return __get_index(info, id, depth);
  37. }
  38. /* Number of entries in one blocks */
  39. static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
  40. {
  41. return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
  42. / info->dqi_entry_size;
  43. }
  44. static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
  45. {
  46. struct super_block *sb = info->dqi_sb;
  47. memset(buf, 0, info->dqi_usable_bs);
  48. return sb->s_op->quota_read(sb, info->dqi_type, buf,
  49. info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
  50. }
  51. static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
  52. {
  53. struct super_block *sb = info->dqi_sb;
  54. ssize_t ret;
  55. ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
  56. info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
  57. if (ret != info->dqi_usable_bs) {
  58. quota_error(sb, "dquota write failed");
  59. if (ret >= 0)
  60. ret = -EIO;
  61. }
  62. return ret;
  63. }
  64. static inline int do_check_range(struct super_block *sb, const char *val_name,
  65. uint val, uint min_val, uint max_val)
  66. {
  67. if (val < min_val || val > max_val) {
  68. quota_error(sb, "Getting %s %u out of range %u-%u",
  69. val_name, val, min_val, max_val);
  70. return -EUCLEAN;
  71. }
  72. return 0;
  73. }
  74. static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
  75. struct qt_disk_dqdbheader *dh)
  76. {
  77. int err = 0;
  78. err = do_check_range(info->dqi_sb, "dqdh_next_free",
  79. le32_to_cpu(dh->dqdh_next_free), 0,
  80. info->dqi_blocks - 1);
  81. if (err)
  82. return err;
  83. err = do_check_range(info->dqi_sb, "dqdh_prev_free",
  84. le32_to_cpu(dh->dqdh_prev_free), 0,
  85. info->dqi_blocks - 1);
  86. if (err)
  87. return err;
  88. err = do_check_range(info->dqi_sb, "dqdh_entries",
  89. le16_to_cpu(dh->dqdh_entries), 0,
  90. qtree_dqstr_in_blk(info));
  91. return err;
  92. }
/*
 * Get a free data block: pop the head of the free-block list when one is
 * available, otherwise grow the file by one block.  Returns the block number
 * on success or a negative errno.
 */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int ret, blk;

	if (!buf)
		return -ENOMEM;
	if (info->dqi_free_blk) {
		blk = info->dqi_free_blk;
		ret = read_blk(info, blk, buf);
		if (ret < 0)
			goto out_buf;
		/* Validate the on-disk header before trusting its link. */
		ret = check_dquot_block_header(info, dh);
		if (ret)
			goto out_buf;
		/* Advance the list head to the block's successor. */
		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
	} else {
		memset(buf, 0, info->dqi_usable_bs);
		/* Assure block allocation... */
		ret = write_blk(info, info->dqi_blocks, buf);
		if (ret < 0)
			goto out_buf;
		blk = info->dqi_blocks++;
	}
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	ret = blk;
out_buf:
	kfree(buf);
	return ret;
}
  125. /* Insert empty block to the list */
  126. static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
  127. {
  128. struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
  129. int err;
  130. dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
  131. dh->dqdh_prev_free = cpu_to_le32(0);
  132. dh->dqdh_entries = cpu_to_le16(0);
  133. err = write_blk(info, blk, buf);
  134. if (err < 0)
  135. return err;
  136. info->dqi_free_blk = blk;
  137. mark_info_dirty(info->dqi_sb, info->dqi_type);
  138. return 0;
  139. }
/*
 * Remove given block from the list of blocks with free entries.
 *
 * The list is doubly linked via the dqdh_next_free/dqdh_prev_free fields of
 * the on-disk block headers; the in-memory head is info->dqi_free_entry.
 */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	/* Point the successor's prev link past the removed block. */
	if (nextblk) {
		err = read_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							dh->dqdh_prev_free;
		err = write_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	/*
	 * Point the predecessor's next link past the removed block, or, when
	 * @blk was the list head, advance the in-memory head instead.
	 */
	if (prevblk) {
		err = read_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
							dh->dqdh_next_free;
		err = write_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	} else {
		info->dqi_free_entry = nextblk;
		mark_info_dirty(info->dqi_sb, info->dqi_type);
	}
	kfree(tmpbuf);
	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether write succeeds block is out of list */
	if (write_blk(info, blk, buf) < 0)
		quota_error(info->dqi_sb, "Can't write block (%u) "
			    "with free entries", blk);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}
/*
 * Insert given block to the beginning of list with free entries.
 *
 * @blk becomes the new head; the old head (if any) gets its prev link
 * updated to point back at @blk.
 */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	/* Link @blk in front of the current head and write it out first. */
	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
	dh->dqdh_prev_free = cpu_to_le32(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		goto out_buf;
	/* Fix the back link of the old head, if the list was non-empty. */
	if (info->dqi_free_entry) {
		err = read_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							cpu_to_le32(blk);
		err = write_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	kfree(tmpbuf);
	info->dqi_free_entry = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}
  217. /* Is the entry in the block free? */
  218. int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
  219. {
  220. int i;
  221. for (i = 0; i < info->dqi_entry_size; i++)
  222. if (disk[i])
  223. return 0;
  224. return 1;
  225. }
  226. EXPORT_SYMBOL(qtree_entry_unused);
/*
 * Find space for dquot.
 *
 * Picks a data block with a free entry (head of the free-entry list, or a
 * newly allocated block), claims the first unused slot in it and stores the
 * resulting file offset in dquot->dq_off.  Returns the block number on
 * success; on failure returns 0 with the error code in *err.
 */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			      struct dquot *dquot, int *err)
{
	uint blk, i;
	struct qt_disk_dqdbheader *dh;
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	char *ddquot;

	*err = 0;
	if (!buf) {
		*err = -ENOMEM;
		return 0;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	if (info->dqi_free_entry) {
		blk = info->dqi_free_entry;
		*err = read_blk(info, blk, buf);
		if (*err < 0)
			goto out_buf;
		*err = check_dquot_block_header(info, dh);
		if (*err)
			goto out_buf;
	} else {
		blk = get_free_dqblk(info);
		/* get_free_dqblk() returns a negative errno on failure. */
		if ((int)blk < 0) {
			*err = blk;
			kfree(buf);
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);
		/* This is enough as the block is already zeroed and the entry
		 * list is empty... */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
	}
	/* Block will be full? */
	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
		*err = remove_free_dqentry(info, buf, blk);
		if (*err < 0) {
			quota_error(dquot->dq_sb, "Can't remove block (%u) "
				    "from entry free list", blk);
			goto out_buf;
		}
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Find free structure in block */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	/* Header claimed a free entry but none was found — corruption. */
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
		*err = -EIO;
		goto out_buf;
	}
#endif
	*err = write_blk(info, blk, buf);
	if (*err < 0) {
		quota_error(dquot->dq_sb, "Can't write quota data block %u",
			    blk);
		goto out_buf;
	}
	/* Remember where this dquot now lives inside the quota file. */
	dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
			sizeof(struct qt_disk_dqdbheader) +
			i * info->dqi_entry_size;
	kfree(buf);
	return blk;
out_buf:
	kfree(buf);
	return 0;
}
/*
 * Insert reference to structure into the trie.
 *
 * Recursively walks (and, where needed, allocates) tree blocks from @depth
 * down to the leaf level, then lets find_free_dqentry() place the dquot in a
 * data block.  blks[] records the chain of visited block numbers and is used
 * to detect cycles and reuse of a block already on the path.
 */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			  uint *blks, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;
	int i;

	if (!buf)
		return -ENOMEM;
	if (!blks[depth]) {
		/* This tree level does not exist yet — allocate it. */
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		for (i = 0; i < depth; i++)
			if (ret == blks[i]) {
				quota_error(dquot->dq_sb,
					"Free block already used in tree: block %u",
					ret);
				ret = -EIO;
				goto out_buf;
			}
		blks[depth] = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;	/* we own this fresh block; free it on error */
	} else {
		ret = read_blk(info, blks[depth], buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", blks[depth]);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	ret = do_check_range(dquot->dq_sb, "block", newblk, 0,
			     info->dqi_blocks - 1);
	if (ret)
		goto out_buf;
	if (!newblk) {
		newson = 1;	/* reference must be written back below */
	} else {
		/* A child must not point back at any block on the path. */
		for (i = 0; i <= depth; i++)
			if (newblk == blks[i]) {
				quota_error(dquot->dq_sb,
					"Cycle in quota tree detected: block %u index %u",
					blks[depth],
					get_index(info, dquot->dq_id, depth));
				ret = -EIO;
				goto out_buf;
			}
	}
	blks[depth + 1] = newblk;
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				    "quota entry (block %u)",
				    le32_to_cpu(ref[get_index(info,
						dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		blks[depth + 1] = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, blks, depth + 1);
	}
	if (newson && ret >= 0) {
		/* Hook the (new) child into this tree block. */
		ref[get_index(info, dquot->dq_id, depth)] =
			cpu_to_le32(blks[depth + 1]);
		ret = write_blk(info, blks[depth], buf);
	} else if (newact && ret < 0) {
		/* Roll back: the block we allocated above is unused. */
		put_free_dqblk(info, buf, blks[depth]);
	}
out_buf:
	kfree(buf);
	return ret;
}
  381. /* Wrapper for inserting quota structure into tree */
  382. static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
  383. struct dquot *dquot)
  384. {
  385. uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
  386. #ifdef __QUOTA_QT_PARANOIA
  387. if (info->dqi_blocks <= QT_TREEOFF) {
  388. quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
  389. return -EIO;
  390. }
  391. #endif
  392. if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
  393. quota_error(dquot->dq_sb, "Quota tree depth too big!");
  394. return -EIO;
  395. }
  396. return do_insert_tree(info, dquot, blks, 0);
  397. }
/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL);

	if (!ddquot)
		return -ENOMEM;

	/* dq_off is guarded by dqio_sem */
	if (!dquot->dq_off) {
		/* First write: allocate a slot in the tree for the dquot. */
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			quota_error(sb, "Error %zd occurred while creating "
				    "quota", ret);
			kfree(ddquot);
			return ret;
		}
	}
	/* Serialize the in-memory usage/limits into the on-disk format. */
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dquot->dq_dqb_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -ENOSPC;
	} else {
		ret = 0;
	}
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);
	return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);
/*
 * Free dquot entry in data block.
 *
 * Zeroes the entry at dquot->dq_off inside data block @blk and maintains the
 * free lists: a block that becomes empty goes to the free-block list, a block
 * that just gained its first free entry goes to the free-entry list.
 */
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			uint blk)
{
	struct qt_disk_dqdbheader *dh;
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	/* The stored offset must fall inside the block the tree points at. */
	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
		quota_error(dquot->dq_sb, "Quota structure has offset to "
			    "other block (%u) than it should (%u)", blk,
			    (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
		ret = -EIO;
		goto out_buf;
	}
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    blk);
		goto out_buf;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	ret = check_dquot_block_header(info, dh);
	if (ret)
		goto out_buf;
	le16_add_cpu(&dh->dqdh_entries, -1);
	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
		ret = remove_free_dqentry(info, buf, blk);
		if (ret >= 0)
			ret = put_free_dqblk(info, buf, blk);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't move quota data block "
				    "(%u) to free list", blk);
			goto out_buf;
		}
	} else {
		/* Zero just this entry within the block. */
		memset(buf +
		       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
		       0, info->dqi_entry_size);
		/* Did the block just get its first free slot? */
		if (le16_to_cpu(dh->dqdh_entries) ==
		    qtree_dqstr_in_blk(info) - 1) {
			/* Insert will write block itself */
			ret = insert_free_dqentry(info, buf, blk);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't insert quota "
				    "data block (%u) to free entry list", blk);
				goto out_buf;
			}
		} else {
			ret = write_blk(info, blk, buf);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't write quota "
					    "data block %u", blk);
				goto out_buf;
			}
		}
	}
	dquot->dq_off = 0;	/* Quota is now unattached */
out_buf:
	kfree(buf);
	return ret;
}
/*
 * Remove reference to dquot from tree.
 *
 * Recurses to the leaf, frees the data-block entry there, then walks back up
 * clearing references and releasing tree blocks that became empty.  A block
 * freed at some level is signalled upwards by blks[depth + 1] == 0.
 */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		       uint *blks, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;
	int i;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blks[depth], buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    blks[depth]);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	ret = do_check_range(dquot->dq_sb, "block", newblk, QT_TREEOFF,
			     info->dqi_blocks - 1);
	if (ret)
		goto out_buf;
	/* The child must not point back at any block already on the path. */
	for (i = 0; i <= depth; i++)
		if (newblk == blks[i]) {
			quota_error(dquot->dq_sb,
				"Cycle in quota tree detected: block %u index %u",
				blks[depth],
				get_index(info, dquot->dq_id, depth));
			ret = -EIO;
			goto out_buf;
		}
	if (depth == info->dqi_qtree_depth - 1) {
		ret = free_dqentry(info, dquot, newblk);
		blks[depth + 1] = 0;
	} else {
		blks[depth + 1] = newblk;
		ret = remove_tree(info, dquot, blks, depth + 1);
	}
	if (ret >= 0 && !blks[depth + 1]) {
		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Block got empty? */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Don't put the root block into the free block list */
		if (i == (info->dqi_usable_bs >> 2)
		    && blks[depth] != QT_TREEOFF) {
			put_free_dqblk(info, buf, blks[depth]);
			blks[depth] = 0;
		} else {
			ret = write_blk(info, blks[depth], buf);
			if (ret < 0)
				quota_error(dquot->dq_sb,
					    "Can't write quota tree block %u",
					    blks[depth]);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}
  560. /* Delete dquot from tree */
  561. int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
  562. {
  563. uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
  564. if (!dquot->dq_off) /* Even not allocated? */
  565. return 0;
  566. if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
  567. quota_error(dquot->dq_sb, "Quota tree depth too big!");
  568. return -EIO;
  569. }
  570. return remove_tree(info, dquot, blks, 0);
  571. }
  572. EXPORT_SYMBOL(qtree_delete_dquot);
/*
 * Find entry in block.
 *
 * Scans data block @blk for the entry matching @dquot's id and returns the
 * entry's file offset, or a negative errno if it is absent.
 */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot, uint blk)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	loff_t ret = 0;
	int i;
	char *ddquot;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree "
			    "block %u", blk);
		goto out_buf;
	}
	/* Walk the entries, asking the format driver to match the id. */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(ddquot, dquot))
			break;
		ddquot += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		/* Tree pointed here but the entry is missing — corruption. */
		quota_error(dquot->dq_sb,
			    "Quota for id %u referenced but not present",
			    from_kqid(&init_user_ns, dquot->dq_id));
		ret = -EIO;
		goto out_buf;
	} else {
		ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
		      qt_disk_dqdbheader) + i * info->dqi_entry_size;
	}
out_buf:
	kfree(buf);
	return ret;
}
/*
 * Find entry for given id in the tree.
 *
 * Follows the reference for @dquot's id one level down; blks[] records the
 * visited path for cycle detection.  Returns the entry's file offset, 0 when
 * no entry exists, or a negative errno.
 */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
				struct dquot *dquot, uint *blks, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	loff_t ret = 0;
	__le32 *ref = (__le32 *)buf;
	uint blk;
	int i;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blks[depth], buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
			    blks[depth]);
		goto out_buf;
	}
	ret = 0;
	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!blk)	/* No reference? */
		goto out_buf;
	ret = do_check_range(dquot->dq_sb, "block", blk, QT_TREEOFF,
			     info->dqi_blocks - 1);
	if (ret)
		goto out_buf;
	/* Check for cycles in the tree */
	for (i = 0; i <= depth; i++)
		if (blk == blks[i]) {
			quota_error(dquot->dq_sb,
				"Cycle in quota tree detected: block %u index %u",
				blks[depth],
				get_index(info, dquot->dq_id, depth));
			ret = -EIO;
			goto out_buf;
		}
	blks[depth + 1] = blk;
	if (depth < info->dqi_qtree_depth - 1)
		ret = find_tree_dqentry(info, dquot, blks, depth + 1);
	else
		ret = find_block_dqentry(info, dquot, blk);
out_buf:
	kfree(buf);
	return ret;
}
  653. /* Find entry for given id in the tree - wrapper function */
  654. static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
  655. struct dquot *dquot)
  656. {
  657. uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
  658. if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
  659. quota_error(dquot->dq_sb, "Quota tree depth too big!");
  660. return -EIO;
  661. }
  662. return find_tree_dqentry(info, dquot, blks, 0);
  663. }
/*
 * Read a dquot from the quota file into memory.  An absent entry is not an
 * error: the dquot is marked fake and its usage zeroed instead.
 */
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Invalidated quota? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
		return -EIO;
	}
#endif
	/* Do we know offset of the dquot entry in the quota file? */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {	/* Entry not present? */
			if (offset < 0)
				quota_error(sb, "Can't read quota structure "
					    "for id %u",
					    from_kqid(&init_user_ns,
						      dquot->dq_id));
			/* Treat as a fake (limit-less, usage-less) dquot. */
			dquot->dq_off = 0;
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		if (ret >= 0)
			ret = -EIO;
		quota_error(sb, "Error while reading quota structure for id %u",
			    from_kqid(&init_user_ns, dquot->dq_id));
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	/* An entry with no limits at all is a fake one. */
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	kfree(ddquot);
out:
	dqstats_inc(DQST_READS);
	return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);
  724. /* Check whether dquot should not be deleted. We know we are
  725. * the only one operating on dquot (thanks to dq_lock) */
  726. int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
  727. {
  728. if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
  729. !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
  730. return qtree_delete_dquot(info, dquot);
  731. return 0;
  732. }
  733. EXPORT_SYMBOL(qtree_release_dquot);
/*
 * Find the smallest id >= *id that has an entry in the subtree rooted at
 * @blk (a tree block at level @depth).  On success *id holds that id and 0
 * is returned; -ENOENT means the subtree holds no id >= *id.
 */
static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
			unsigned int blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
	__le32 *ref = (__le32 *)buf;
	ssize_t ret;
	unsigned int epb = info->dqi_usable_bs >> 2;
	unsigned int level_inc = 1;
	int i;

	if (!buf)
		return -ENOMEM;
	/* How many ids does one reference at this level span? (epb^levels) */
	for (i = depth; i < info->dqi_qtree_depth - 1; i++)
		level_inc *= epb;

	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(info->dqi_sb,
			    "Can't read quota tree block %u", blk);
		goto out_buf;
	}
	for (i = __get_index(info, *id, depth); i < epb; i++) {
		uint blk_no = le32_to_cpu(ref[i]);

		if (blk_no == 0) {
			/* Empty subtree: skip the whole id range it covers. */
			*id += level_inc;
			continue;
		}
		ret = do_check_range(info->dqi_sb, "block", blk_no, 0,
				     info->dqi_blocks - 1);
		if (ret)
			goto out_buf;
		if (depth == info->dqi_qtree_depth - 1) {
			/* Leaf level: *id already points at a present id. */
			ret = 0;
			goto out_buf;
		}
		ret = find_next_id(info, id, blk_no, depth + 1);
		/* Stop on success or a real error; -ENOENT keeps scanning. */
		if (ret != -ENOENT)
			break;
	}
	if (i == epb) {
		ret = -ENOENT;
		goto out_buf;
	}
out_buf:
	kfree(buf);
	return ret;
}
  779. int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
  780. {
  781. qid_t id = from_kqid(&init_user_ns, *qid);
  782. int ret;
  783. ret = find_next_id(info, &id, QT_TREEOFF, 0);
  784. if (ret < 0)
  785. return ret;
  786. *qid = make_kqid(&init_user_ns, qid->type, id);
  787. return 0;
  788. }
  789. EXPORT_SYMBOL(qtree_get_next_id);