xattr.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2017-2018 HUAWEI, Inc.
  4. * https://www.huawei.com/
  5. * Copyright (C) 2021-2022, Alibaba Cloud
  6. */
  7. #include <linux/security.h>
  8. #include <linux/xxhash.h>
  9. #include "xattr.h"
/*
 * Shared iteration state for both getxattr and listxattr walks over an
 * inode's inline and shared xattr areas.
 */
struct erofs_xattr_iter {
	struct super_block *sb;
	struct erofs_buf buf;		/* metadata buffer for on-disk reads */
	erofs_off_t pos;		/* current on-disk byte position */
	void *kaddr;			/* mapped address corresponding to @pos */
	char *buffer;			/* output buffer; NULL means size query */
	int buffer_size, buffer_ofs;	/* capacity and current fill offset */

	/* getxattr */
	int index, infix_len;		/* requested name index and infix length */
	struct qstr name;		/* requested name (without prefix) */

	/* listxattr */
	struct dentry *dentry;		/* dentry used for listability checks */
};
/*
 * Lazily load the xattr ibody header and the shared xattr id array for
 * @inode.  Concurrent initializers are serialized on EROFS_I_BL_XATTR_BIT;
 * once EROFS_I_EA_INITED_BIT is observed set, all derived fields are
 * stable and this is a no-op.
 *
 * Returns 0 on success, -ENOATTR if the inode carries no xattrs, or a
 * negative errno on failure.
 */
static int erofs_init_inode_xattrs(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_xattr_iter it;
	unsigned int i;
	struct erofs_xattr_ibody_header *ih;
	struct super_block *sb = inode->i_sb;
	int ret = 0;

	/* the most case is that xattrs of this inode are initialized. */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* someone has initialized xattrs for us? */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
		goto out_unlock;

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header), in detail:
	 * 1) it is not enough to contain erofs_xattr_ibody_header then
	 *    ->xattr_isize should be 0 (it means no xattr);
	 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
	 *    undefined right now (maybe use later with some new sb feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		erofs_err(sb,
			  "xattr_isize %d of nid %llu is not supported yet",
			  vi->xattr_isize, vi->nid);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (vi->xattr_isize) {
			erofs_err(sb, "bogus xattr ibody @ nid %llu", vi->nid);
			DBG_BUGON(1);
			ret = -EFSCORRUPTED;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		ret = -ENOATTR;
		goto out_unlock;
	}

	it.buf = __EROFS_BUF_INITIALIZER;
	erofs_init_metabuf(&it.buf, sb);
	it.pos = erofs_iloc(inode) + vi->inode_isize;

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP);
	if (IS_ERR(it.kaddr)) {
		ret = PTR_ERR(it.kaddr);
		goto out_unlock;
	}

	ih = it.kaddr;
	vi->xattr_name_filter = le32_to_cpu(ih->h_name_filter);
	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		erofs_put_metabuf(&it.buf);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* let's skip ibody header */
	it.pos += sizeof(struct erofs_xattr_ibody_header);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP);
		if (IS_ERR(it.kaddr)) {
			kfree(vi->xattr_shared_xattrs);
			vi->xattr_shared_xattrs = NULL;
			ret = PTR_ERR(it.kaddr);
			goto out_unlock;
		}
		vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *)it.kaddr);
		it.pos += sizeof(__le32);
	}
	erofs_put_metabuf(&it.buf);

	/* paired with smp_mb() at the beginning of the function. */
	smp_mb();
	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
	return ret;
}
  109. static bool erofs_xattr_user_list(struct dentry *dentry)
  110. {
  111. return test_opt(&EROFS_SB(dentry->d_sb)->opt, XATTR_USER);
  112. }
/* trusted.* xattrs are only listable for CAP_SYS_ADMIN-capable callers */
static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}
  117. static int erofs_xattr_generic_get(const struct xattr_handler *handler,
  118. struct dentry *unused, struct inode *inode,
  119. const char *name, void *buffer, size_t size)
  120. {
  121. if (handler->flags == EROFS_XATTR_INDEX_USER &&
  122. !test_opt(&EROFS_I_SB(inode)->opt, XATTR_USER))
  123. return -EOPNOTSUPP;
  124. return erofs_getxattr(inode, handler->flags, name, buffer, size);
  125. }
/* user.* namespace: gated by the XATTR_USER mount option */
const struct xattr_handler erofs_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= EROFS_XATTR_INDEX_USER,
	.list	= erofs_xattr_user_list,
	.get	= erofs_xattr_generic_get,
};

/* trusted.* namespace: restricted to CAP_SYS_ADMIN */
const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.flags	= EROFS_XATTR_INDEX_TRUSTED,
	.list	= erofs_xattr_trusted_list,
	.get	= erofs_xattr_generic_get,
};

#ifdef CONFIG_EROFS_FS_SECURITY
/* security.* namespace: no .list callback, entries are always listable */
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.flags	= EROFS_XATTR_INDEX_SECURITY,
	.get	= erofs_xattr_generic_get,
};
#endif

/* NULL-terminated handler table wired into the superblock */
const struct xattr_handler * const erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};
  153. static int erofs_xattr_copy_to_buffer(struct erofs_xattr_iter *it,
  154. unsigned int len)
  155. {
  156. unsigned int slice, processed;
  157. struct super_block *sb = it->sb;
  158. void *src;
  159. for (processed = 0; processed < len; processed += slice) {
  160. it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
  161. if (IS_ERR(it->kaddr))
  162. return PTR_ERR(it->kaddr);
  163. src = it->kaddr;
  164. slice = min_t(unsigned int, sb->s_blocksize -
  165. erofs_blkoff(sb, it->pos), len - processed);
  166. memcpy(it->buffer + it->buffer_ofs, src, slice);
  167. it->buffer_ofs += slice;
  168. it->pos += slice;
  169. }
  170. return 0;
  171. }
/*
 * Emit one xattr name ("prefix" + optional long-name infix + name + NUL)
 * into it->buffer for listxattr.  On entry, it->kaddr points at the
 * on-disk struct erofs_xattr_entry located at it->pos.
 *
 * Entries whose long-name prefix index is out of range, or whose
 * namespace is not listable for it->dentry, are silently skipped
 * (return 0 without emitting anything).  With a NULL it->buffer, only
 * the required size is accumulated into it->buffer_ofs.
 *
 * Returns 0 on success/skip, -ERANGE if the buffer is too small, or a
 * negative errno from metadata reads.
 */
static int erofs_listxattr_foreach(struct erofs_xattr_iter *it)
{
	struct erofs_xattr_entry entry;
	unsigned int base_index, name_total, prefix_len, infix_len = 0;
	const char *prefix, *infix = NULL;
	int err;

	/* 1. handle xattr entry */
	entry = *(struct erofs_xattr_entry *)it->kaddr;
	it->pos += sizeof(struct erofs_xattr_entry);

	base_index = entry.e_name_index;
	if (entry.e_name_index & EROFS_XATTR_LONG_PREFIX) {
		struct erofs_sb_info *sbi = EROFS_SB(it->sb);
		struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
			(entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);

		/* prefix index beyond the loaded table: skip the entry */
		if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
			return 0;
		infix = pf->prefix->infix;
		infix_len = pf->infix_len;
		base_index = pf->prefix->base_index;
	}

	/* NULL prefix means this namespace is not listable here */
	prefix = erofs_xattr_prefix(base_index, it->dentry);
	if (!prefix)
		return 0;
	prefix_len = strlen(prefix);
	name_total = prefix_len + infix_len + entry.e_name_len + 1;

	/* size-only query: account for the full name and move on */
	if (!it->buffer) {
		it->buffer_ofs += name_total;
		return 0;
	}

	if (it->buffer_ofs + name_total > it->buffer_size)
		return -ERANGE;

	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
	memcpy(it->buffer + it->buffer_ofs + prefix_len, infix, infix_len);
	it->buffer_ofs += prefix_len + infix_len;

	/* 2. handle xattr name */
	err = erofs_xattr_copy_to_buffer(it, entry.e_name_len);
	if (err)
		return err;

	it->buffer[it->buffer_ofs++] = '\0';
	return 0;
}
/*
 * Match one on-disk xattr entry against the lookup key (it->index,
 * it->name) and, on match, copy its value into it->buffer.  On entry,
 * it->kaddr points at the struct erofs_xattr_entry at it->pos.
 *
 * Returns -ENOATTR on mismatch, -ERANGE if the value does not fit the
 * buffer, 0 on success, or a negative errno from metadata reads.  With a
 * NULL it->buffer, the value size is reported via it->buffer_ofs instead
 * of being copied.
 */
static int erofs_getxattr_foreach(struct erofs_xattr_iter *it)
{
	struct super_block *sb = it->sb;
	struct erofs_xattr_entry entry;
	unsigned int slice, processed, value_sz;

	/* 1. handle xattr entry */
	entry = *(struct erofs_xattr_entry *)it->kaddr;
	it->pos += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* should also match the infix for long name prefixes */
	if (entry.e_name_index & EROFS_XATTR_LONG_PREFIX) {
		struct erofs_sb_info *sbi = EROFS_SB(sb);
		struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
			(entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);

		/* prefix index beyond the loaded table: cannot match */
		if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
			return -ENOATTR;

		if (it->index != pf->prefix->base_index ||
		    it->name.len != entry.e_name_len + pf->infix_len)
			return -ENOATTR;

		if (memcmp(it->name.name, pf->prefix->infix, pf->infix_len))
			return -ENOATTR;

		it->infix_len = pf->infix_len;
	} else {
		if (it->index != entry.e_name_index ||
		    it->name.len != entry.e_name_len)
			return -ENOATTR;
		it->infix_len = 0;
	}

	/* 2. handle xattr name: compare block-sized slices in place */
	for (processed = 0; processed < entry.e_name_len; processed += slice) {
		it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
		if (IS_ERR(it->kaddr))
			return PTR_ERR(it->kaddr);

		slice = min_t(unsigned int,
			      sb->s_blocksize - erofs_blkoff(sb, it->pos),
			      entry.e_name_len - processed);
		/* skip the already-matched infix in the caller's name */
		if (memcmp(it->name.name + it->infix_len + processed,
			   it->kaddr, slice))
			return -ENOATTR;
		it->pos += slice;
	}

	/* 3. handle xattr value */
	if (!it->buffer) {
		it->buffer_ofs = value_sz;
		return 0;
	}

	if (it->buffer_size < value_sz)
		return -ERANGE;

	return erofs_xattr_copy_to_buffer(it, value_sz);
}
/*
 * Walk the inline xattr area of @inode (the ibody after the header and
 * shared-id array), applying the getxattr or listxattr handler to each
 * entry.  For getxattr, stops at the first result other than -ENOATTR;
 * for listxattr, stops at the first non-zero result.
 *
 * Returns the last handler result, or -EFSCORRUPTED if an entry extends
 * beyond xattr_isize, or -ENOATTR if the inline area is empty.
 */
static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it,
				   struct inode *inode, bool getxattr)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	unsigned int xattr_header_sz, remaining, entry_sz;
	erofs_off_t next_pos;
	int ret;

	xattr_header_sz = sizeof(struct erofs_xattr_ibody_header) +
			  sizeof(u32) * vi->xattr_shared_count;
	/* header alone filling (or overflowing) xattr_isize: no inline entries */
	if (xattr_header_sz >= vi->xattr_isize) {
		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	remaining = vi->xattr_isize - xattr_header_sz;
	it->pos = erofs_iloc(inode) + vi->inode_isize + xattr_header_sz;

	while (remaining) {
		it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
		if (IS_ERR(it->kaddr))
			return PTR_ERR(it->kaddr);

		entry_sz = erofs_xattr_entry_size(it->kaddr);
		/* xattr on-disk corruption: xattr entry beyond xattr_isize */
		if (remaining < entry_sz) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		remaining -= entry_sz;
		/* save the next entry position before the handler moves it->pos */
		next_pos = it->pos + entry_sz;

		if (getxattr)
			ret = erofs_getxattr_foreach(it);
		else
			ret = erofs_listxattr_foreach(it);
		if ((getxattr && ret != -ENOATTR) || (!getxattr && ret))
			break;

		it->pos = next_pos;
	}
	return ret;
}
/*
 * Walk the shared xattr entries referenced by @inode's shared-id array,
 * applying the getxattr or listxattr handler to each.  Stop conditions
 * mirror erofs_xattr_iter_inline().
 *
 * Returns the last handler result, or -ENOATTR when the inode has no
 * shared xattrs.
 */
static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it,
				   struct inode *inode, bool getxattr)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = it->sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		/* shared ids are 4-byte offsets from the xattr meta block */
		it->pos = erofs_pos(sb, sbi->xattr_blkaddr) +
			  vi->xattr_shared_xattrs[i] * sizeof(__le32);
		it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
		if (IS_ERR(it->kaddr))
			return PTR_ERR(it->kaddr);

		if (getxattr)
			ret = erofs_getxattr_foreach(it);
		else
			ret = erofs_listxattr_foreach(it);
		if ((getxattr && ret != -ENOATTR) || (!getxattr && ret))
			break;
	}
	return ret;
}
/*
 * Look up xattr (@index, @name) on @inode and copy its value into
 * @buffer.  Searches the inline area first, then the shared area.
 *
 * Returns the value size on success (size query when @buffer is NULL),
 * -ENOATTR if absent, -ERANGE if @name is too long or the value does not
 * fit, or another negative errno.
 */
int erofs_getxattr(struct inode *inode, int index, const char *name,
		   void *buffer, size_t buffer_size)
{
	int ret;
	unsigned int hashbit;
	struct erofs_xattr_iter it;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

	if (!name)
		return -EINVAL;

	ret = erofs_init_inode_xattrs(inode);
	if (ret)
		return ret;

	/* reserved flag is non-zero if there's any change of on-disk format */
	if (erofs_sb_has_xattr_filter(sbi) && !sbi->xattr_filter_reserved) {
		/*
		 * per-inode bloom filter: a clear bit proves the name is
		 * absent, so the on-disk walk can be skipped entirely.
		 */
		hashbit = xxh32(name, strlen(name),
				EROFS_XATTR_FILTER_SEED + index);
		hashbit &= EROFS_XATTR_FILTER_BITS - 1;
		if (vi->xattr_name_filter & (1U << hashbit))
			return -ENOATTR;
	}

	it.index = index;
	it.name = QSTR(name);
	if (it.name.len > EROFS_NAME_LEN)
		return -ERANGE;

	it.sb = inode->i_sb;
	it.buf = __EROFS_BUF_INITIALIZER;
	erofs_init_metabuf(&it.buf, it.sb);
	it.buffer = buffer;
	it.buffer_size = buffer_size;
	it.buffer_ofs = 0;

	ret = erofs_xattr_iter_inline(&it, inode, true);
	if (ret == -ENOATTR)
		ret = erofs_xattr_iter_shared(&it, inode, true);
	erofs_put_metabuf(&it.buf);
	return ret ? ret : it.buffer_ofs;
}
/*
 * List all xattr names of @dentry's inode into @buffer as consecutive
 * NUL-terminated strings.  With a NULL @buffer, returns the required
 * size.  -ENOATTR from either the init path or the walks means "no
 * xattrs" and is reported as 0.
 */
ssize_t erofs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	int ret;
	struct erofs_xattr_iter it;
	struct inode *inode = d_inode(dentry);

	ret = erofs_init_inode_xattrs(inode);
	if (ret == -ENOATTR)
		return 0;
	if (ret)
		return ret;

	it.sb = dentry->d_sb;
	it.buf = __EROFS_BUF_INITIALIZER;
	erofs_init_metabuf(&it.buf, it.sb);
	it.dentry = dentry;
	it.buffer = buffer;
	it.buffer_size = buffer_size;
	it.buffer_ofs = 0;

	/* unlike getxattr, both areas are always walked to list every name */
	ret = erofs_xattr_iter_inline(&it, inode, false);
	if (!ret || ret == -ENOATTR)
		ret = erofs_xattr_iter_shared(&it, inode, false);
	if (ret == -ENOATTR)
		ret = 0;
	erofs_put_metabuf(&it.buf);
	return ret ? ret : it.buffer_ofs;
}
  385. void erofs_xattr_prefixes_cleanup(struct super_block *sb)
  386. {
  387. struct erofs_sb_info *sbi = EROFS_SB(sb);
  388. int i;
  389. if (sbi->xattr_prefixes) {
  390. for (i = 0; i < sbi->xattr_prefix_count; i++)
  391. kfree(sbi->xattr_prefixes[i].prefix);
  392. kfree(sbi->xattr_prefixes);
  393. sbi->xattr_prefixes = NULL;
  394. }
  395. }
  396. int erofs_xattr_prefixes_init(struct super_block *sb)
  397. {
  398. struct erofs_sb_info *sbi = EROFS_SB(sb);
  399. struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
  400. erofs_off_t pos = (erofs_off_t)sbi->xattr_prefix_start << 2;
  401. struct erofs_xattr_prefix_item *pfs;
  402. int ret = 0, i, len;
  403. if (!sbi->xattr_prefix_count)
  404. return 0;
  405. pfs = kzalloc(sbi->xattr_prefix_count * sizeof(*pfs), GFP_KERNEL);
  406. if (!pfs)
  407. return -ENOMEM;
  408. if (sbi->packed_inode)
  409. buf.mapping = sbi->packed_inode->i_mapping;
  410. else
  411. erofs_init_metabuf(&buf, sb);
  412. for (i = 0; i < sbi->xattr_prefix_count; i++) {
  413. void *ptr = erofs_read_metadata(sb, &buf, &pos, &len);
  414. if (IS_ERR(ptr)) {
  415. ret = PTR_ERR(ptr);
  416. break;
  417. } else if (len < sizeof(*pfs->prefix) ||
  418. len > EROFS_NAME_LEN + sizeof(*pfs->prefix)) {
  419. kfree(ptr);
  420. ret = -EFSCORRUPTED;
  421. break;
  422. }
  423. pfs[i].prefix = ptr;
  424. pfs[i].infix_len = len - sizeof(struct erofs_xattr_long_prefix);
  425. }
  426. erofs_put_metabuf(&buf);
  427. sbi->xattr_prefixes = pfs;
  428. if (ret)
  429. erofs_xattr_prefixes_cleanup(sb);
  430. return ret;
  431. }
#ifdef CONFIG_EROFS_FS_POSIX_ACL
/*
 * Read the POSIX ACL of @type for @inode from its system.posix_acl_*
 * xattr.  EROFS is read-only, so ACLs come straight from disk.
 *
 * Returns the parsed ACL, NULL if none is stored, or an ERR_PTR.
 * -ECHILD under RCU walk tells the VFS to retry in ref-walk mode.
 */
struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu)
{
	struct posix_acl *acl;
	int prefix, rc;
	char *value = NULL;

	if (rcu)
		return ERR_PTR(-ECHILD);

	switch (type) {
	case ACL_TYPE_ACCESS:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/* first query the size, then read the raw value if non-empty */
	rc = erofs_getxattr(inode, prefix, "", NULL, 0);
	if (rc > 0) {
		value = kmalloc(rc, GFP_KERNEL);
		if (!value)
			return ERR_PTR(-ENOMEM);
		rc = erofs_getxattr(inode, prefix, "", value, rc);
	}

	if (rc == -ENOATTR)
		acl = NULL;	/* no ACL stored for this inode */
	else if (rc < 0)
		acl = ERR_PTR(rc);
	else
		acl = posix_acl_from_xattr(&init_user_ns, value, rc);
	kfree(value);
	return acl;
}
#endif