/* fs/ntfs3/record.c - MFT record read/write and attribute manipulation. */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. *
  4. * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
  5. *
  6. */
  7. #include <linux/fs.h>
  8. #include "debug.h"
  9. #include "ntfs.h"
  10. #include "ntfs_fs.h"
  11. static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
  12. const __le16 *name, u8 name_len,
  13. const u16 *upcase)
  14. {
  15. /* First, compare the type codes. */
  16. int diff = le32_to_cpu(left->type) - le32_to_cpu(type);
  17. if (diff)
  18. return diff;
  19. /* They have the same type code, so we have to compare the names. */
  20. return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
  21. upcase, true);
  22. }
/*
 * mi_new_attt_id
 *
 * Return: Unused attribute id that is less than mrec->next_attr_id.
 */
static __le16 mi_new_attt_id(struct mft_inode *mi)
{
	u16 free_id, max_id, t16;
	struct MFT_REC *rec = mi->mrec;
	struct ATTRIB *attr;
	__le16 id;

	id = rec->next_attr_id;
	free_id = le16_to_cpu(id);
	if (free_id < 0x7FFF) {
		/* Fast path: the counter has not saturated, just bump it. */
		rec->next_attr_id = cpu_to_le16(free_id + 1);
		return id;
	}

	/* One record can store up to 1024/24 ~= 42 attributes. */
	free_id = 0;
	max_id = 0;

	attr = NULL;

	/*
	 * Slow path: the counter saturated. Scan the (few) attributes for
	 * the smallest id not currently in use. Whenever the candidate
	 * free_id collides with an existing id, bump it and restart the
	 * enumeration from the beginning (attr = NULL), because an earlier
	 * attribute may carry the new candidate.
	 */
	for (;;) {
		attr = mi_enum_attr(mi, attr);
		if (!attr) {
			/* Reset the counter past the largest id seen. */
			rec->next_attr_id = cpu_to_le16(max_id + 1);
			mi->dirty = true;
			return cpu_to_le16(free_id);
		}

		t16 = le16_to_cpu(attr->id);
		if (t16 == free_id) {
			free_id += 1;
			attr = NULL;
		} else if (max_id < t16)
			max_id = t16;
	}
}
  59. int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
  60. {
  61. int err;
  62. struct mft_inode *m = kzalloc(sizeof(struct mft_inode), GFP_NOFS);
  63. if (!m)
  64. return -ENOMEM;
  65. err = mi_init(m, sbi, rno);
  66. if (err) {
  67. kfree(m);
  68. return err;
  69. }
  70. err = mi_read(m, false);
  71. if (err) {
  72. mi_put(m);
  73. return err;
  74. }
  75. *mi = m;
  76. return 0;
  77. }
/*
 * mi_put - Release an mft_inode obtained via mi_get().
 *
 * mi_clear() tears down the record's internal state (buffer-head list,
 * record memory — see its definition elsewhere in fs/ntfs3); afterwards
 * the container itself is freed.
 */
void mi_put(struct mft_inode *mi)
{
	mi_clear(mi);
	kfree(mi);
}
  83. int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
  84. {
  85. mi->sbi = sbi;
  86. mi->rno = rno;
  87. mi->mrec = kmalloc(sbi->record_size, GFP_NOFS);
  88. if (!mi->mrec)
  89. return -ENOMEM;
  90. return 0;
  91. }
/*
 * mi_read - Read MFT data.
 *
 * Reads record mi->rno into mi->mrec. @is_mft signals that we are reading
 * the $MFT file itself, in which case the $MFT run lock must not be taken
 * (we may be called while it is already held).
 *
 * Return: 0 on success (record's 'total' field verified), negative errno
 * otherwise. -E_NTFS_FIXUP from the reader is treated as success but marks
 * the record dirty so the fixups get rewritten.
 */
int mi_read(struct mft_inode *mi, bool is_mft)
{
	int err;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 bpr = sbi->record_size;
	u64 vbo = (u64)mi->rno << sbi->record_bits;
	struct ntfs_inode *mft_ni = sbi->mft.ni;
	struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
	struct rw_semaphore *rw_lock = NULL;

	/* Guard the $MFT run map only when fully mounted and not self-reading. */
	if (is_mounted(sbi)) {
		if (!is_mft && mft_ni) {
			rw_lock = &mft_ni->file.run_lock;
			down_read(rw_lock);
		}
	}

	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);
	if (!err)
		goto ok;

	if (err == -E_NTFS_FIXUP) {
		/* Usable record, but fixups need rewriting: mark dirty. */
		mi->dirty = true;
		goto ok;
	}

	if (err != -ENOENT)
		goto out;

	/*
	 * -ENOENT: the vcn is not mapped yet. Load the run fragment under
	 * the inode lock and the write side of the run lock, then retry.
	 */
	if (rw_lock) {
		ni_lock(mft_ni);
		down_write(rw_lock);
	}
	err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
				 vbo >> sbi->cluster_bits);
	if (rw_lock) {
		up_write(rw_lock);
		ni_unlock(mft_ni);
	}
	if (err)
		goto out;

	if (rw_lock)
		down_read(rw_lock);
	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}
	if (err)
		goto out;

ok:
	/* Check field 'total' only here. */
	if (le32_to_cpu(rec->total) != bpr) {
		err = -EINVAL;
		goto out;
	}

	return 0;

out:
	if (err == -E_NTFS_CORRUPT) {
		/* Corruption is reported once and downgraded to -EINVAL. */
		ntfs_err(sbi->sb, "mft corrupted");
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		err = -EINVAL;
	}

	return err;
}
/*
 * mi_enum_attr - start/continue attributes enumeration in record.
 *
 * Pass @attr == NULL to get the first attribute; pass the previous return
 * value to get the next one. Returns NULL at the end of the list OR when
 * any on-disk field fails validation — callers treat both the same, so
 * a corrupted record simply looks shorter than it claims to be.
 *
 * NOTE: mi->mrec - memory of size sbi->record_size
 * here we sure that mi->mrec->total == sbi->record_size (see mi_read)
 */
struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
{
	const struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 t32, off, asize, prev_type;
	u16 t16;
	u64 data_size, alloc_size, tot_size;

	if (!attr) {
		u32 total = le32_to_cpu(rec->total);

		off = le16_to_cpu(rec->attr_off);

		/* 'used' may not exceed the record's declared total size. */
		if (used > total)
			return NULL;

		/* First attribute offset must be in-bounds and 4-aligned. */
		if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
		    !IS_ALIGNED(off, 4)) {
			return NULL;
		}

		/* Skip non-resident records. */
		if (!is_rec_inuse(rec))
			return NULL;

		prev_type = 0;
		attr = Add2Ptr(rec, off);
	} else {
		/*
		 * We don't need to check previous attr here. There is
		 * a bounds checking in the previous round.
		 */
		off = PtrOffset(rec, attr);

		asize = le32_to_cpu(attr->size);

		prev_type = le32_to_cpu(attr->type);
		attr = Add2Ptr(attr, asize);
		off += asize;
	}

	/* Can we use the first field (attr->type). */
	/* NOTE: this code also checks attr->size availability. */
	if (off + 8 > used) {
		static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
		return NULL;
	}

	if (attr->type == ATTR_END) {
		/* End of enumeration. */
		return NULL;
	}

	/* 0x100 is last known attribute for now. */
	t32 = le32_to_cpu(attr->type);
	if (!t32 || (t32 & 0xf) || (t32 > 0x100))
		return NULL;

	/* attributes in record must be ordered by type */
	if (t32 < prev_type)
		return NULL;

	asize = le32_to_cpu(attr->size);

	/* Check overflow and boundary. */
	if (off + asize < off || off + asize > used)
		return NULL;

	/* Check size of attribute. */
	if (!attr->non_res) {
		/* Check resident fields. */
		if (asize < SIZEOF_RESIDENT)
			return NULL;

		/* Resident payload must lie fully inside the attribute. */
		t16 = le16_to_cpu(attr->res.data_off);
		if (t16 > asize)
			return NULL;

		if (le32_to_cpu(attr->res.data_size) > asize - t16)
			return NULL;

		/* The (optional) name must end before the payload starts. */
		t32 = sizeof(short) * attr->name_len;
		if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
			return NULL;

		return attr;
	}

	/* Check nonresident fields. */
	if (attr->non_res != 1)
		return NULL;

	/* Can we use memory including attr->nres.valid_size? */
	if (asize < SIZEOF_NONRESIDENT)
		return NULL;

	/* Run list must start inside the attribute. */
	t16 = le16_to_cpu(attr->nres.run_off);
	if (t16 > asize)
		return NULL;

	/* The (optional) name must end before the run list starts. */
	t32 = sizeof(short) * attr->name_len;
	if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
		return NULL;

	/* Check start/end vcn. */
	if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
		return NULL;

	/* Size invariants: valid <= data <= alloc. */
	data_size = le64_to_cpu(attr->nres.data_size);
	if (le64_to_cpu(attr->nres.valid_size) > data_size)
		return NULL;

	alloc_size = le64_to_cpu(attr->nres.alloc_size);
	if (data_size > alloc_size)
		return NULL;

	/* Allocated size must be cluster-aligned. */
	t32 = mi->sbi->cluster_mask;
	if (alloc_size & t32)
		return NULL;

	if (!attr->nres.svcn && is_attr_ext(attr)) {
		/* First segment of sparse/compressed attribute */
		/* Can we use memory including attr->nres.total_size? */
		if (asize < SIZEOF_NONRESIDENT_EX)
			return NULL;

		tot_size = le64_to_cpu(attr->nres.total_size);
		if (tot_size & t32)
			return NULL;

		if (tot_size > alloc_size)
			return NULL;
	} else {
		/* Non-extended attrs must not carry a compression unit. */
		if (attr->nres.c_unit)
			return NULL;

		/* Sanity: cannot allocate more than the whole volume. */
		if (alloc_size > mi->sbi->volume.size)
			return NULL;
	}

	return attr;
}
  276. /*
  277. * mi_find_attr - Find the attribute by type and name and id.
  278. */
  279. struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
  280. enum ATTR_TYPE type, const __le16 *name,
  281. u8 name_len, const __le16 *id)
  282. {
  283. u32 type_in = le32_to_cpu(type);
  284. u32 atype;
  285. next_attr:
  286. attr = mi_enum_attr(mi, attr);
  287. if (!attr)
  288. return NULL;
  289. atype = le32_to_cpu(attr->type);
  290. if (atype > type_in)
  291. return NULL;
  292. if (atype < type_in)
  293. goto next_attr;
  294. if (attr->name_len != name_len)
  295. goto next_attr;
  296. if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
  297. goto next_attr;
  298. if (id && *id != attr->id)
  299. goto next_attr;
  300. return attr;
  301. }
  302. int mi_write(struct mft_inode *mi, int wait)
  303. {
  304. struct MFT_REC *rec;
  305. int err;
  306. struct ntfs_sb_info *sbi;
  307. if (!mi->dirty)
  308. return 0;
  309. sbi = mi->sbi;
  310. rec = mi->mrec;
  311. err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
  312. if (err)
  313. return err;
  314. if (mi->rno < sbi->mft.recs_mirr)
  315. sbi->flags |= NTFS_FLAGS_MFTMIRR;
  316. mi->dirty = false;
  317. return 0;
  318. }
/*
 * mi_format_new - Format record @rno as a fresh in-use MFT record.
 *
 * Picks a sequence number, stamps the template record (sbi->new_rec) into
 * mi->mrec, and makes sure buffer heads are attached so the record can be
 * written back. @is_mft must be true when formatting $MFT's own records.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
		  __le16 flags, bool is_mft)
{
	int err;
	u16 seq = 1;
	struct MFT_REC *rec;
	u64 vbo = (u64)rno << sbi->record_bits;

	err = mi_init(mi, sbi, rno);
	if (err)
		return err;

	rec = mi->mrec;

	/*
	 * Sequence number selection, in precedence order:
	 *  - record 0 ($MFT itself): always 1;
	 *  - reserved system records: seq == rno by convention;
	 *  - records beyond the used range, or unreadable ones: 1;
	 *  - a readable, previously-used record: old seq + 1 (skipping 0).
	 */
	if (rno == MFT_REC_MFT) {
		;
	} else if (rno < MFT_REC_FREE) {
		seq = rno;
	} else if (rno >= sbi->mft.used) {
		;
	} else if (mi_read(mi, is_mft)) {
		;
	} else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
		/* Record is reused. Update its sequence number. */
		seq = le16_to_cpu(rec->seq) + 1;
		if (!seq)
			seq = 1;
	}

	memcpy(rec, sbi->new_rec, sbi->record_size);

	rec->seq = cpu_to_le16(seq);
	rec->flags = RECORD_FLAG_IN_USE | flags;
	/* Newer record layouts store their own number inside the record. */
	if (MFTRECORD_FIXUP_OFFSET == MFTRECORD_FIXUP_OFFSET_3)
		rec->mft_record = cpu_to_le32(rno);

	mi->dirty = true;

	if (!mi->nb.nbufs) {
		/* No buffers yet (record was never read): map them now. */
		struct ntfs_inode *ni = sbi->mft.ni;
		bool lock = false;

		if (is_mounted(sbi) && !is_mft) {
			down_read(&ni->file.run_lock);
			lock = true;
		}

		err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
				  &mi->nb);
		if (lock)
			up_read(&ni->file.run_lock);
	}

	return err;
}
/*
 * mi_insert_attr - Reserve space for new attribute.
 *
 * Finds the type/name-sorted insertion point, shifts the tail of the
 * record to open a gap of @asize bytes, and initializes the new header
 * (type, size, name, fresh id). The caller fills in the rest.
 *
 * Return: Not full constructed attribute or NULL if not possible to create.
 */
struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
			      const __le16 *name, u8 name_len, u32 asize,
			      u16 name_off)
{
	size_t tail;
	struct ATTRIB *attr;
	__le16 id;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 used = le32_to_cpu(rec->used);
	const u16 *upcase = sbi->upcase;

	/* Can we insert mi attribute? */
	if (used + asize > sbi->record_size)
		return NULL;

	/*
	 * Scan through the list of attributes to find the point
	 * at which we should insert it.
	 */
	attr = NULL;
	while ((attr = mi_enum_attr(mi, attr))) {
		int diff = compare_attr(attr, type, name, name_len, upcase);

		if (diff < 0)
			continue;

		/* Duplicate non-indexed attributes are not allowed. */
		if (!diff && !is_attr_indexed(attr))
			return NULL;
		break;
	}

	if (!attr) {
		/* Append. */
		/* 'used' includes the 8-byte ATTR_END marker; keep it last. */
		tail = 8;
		attr = Add2Ptr(rec, used - 8);
	} else {
		/* Insert before 'attr'. */
		tail = used - PtrOffset(rec, attr);
	}

	id = mi_new_attt_id(mi);

	/* Open a gap of asize bytes and zero-initialize the new header. */
	memmove(Add2Ptr(attr, asize), attr, tail);
	memset(attr, 0, asize);

	attr->type = type;
	attr->size = cpu_to_le32(asize);
	attr->name_len = name_len;
	attr->name_off = cpu_to_le16(name_off);
	attr->id = id;

	memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
	rec->used = cpu_to_le32(used + asize);

	mi->dirty = true;

	return attr;
}
/*
 * mi_remove_attr - Remove the attribute from record.
 *
 * @ni may be NULL; when given and the removed attribute is an indexed
 * file-name, the inode's hard-link count is decremented as well.
 *
 * NOTE: The source attr will point to next attribute.
 */
bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
		    struct ATTRIB *attr)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);

	/* Refuse if the attribute does not fit inside the used area. */
	if (aoff + asize > used)
		return false;

	if (ni && is_attr_indexed(attr) && attr->type == ATTR_NAME) {
		/* Removing an indexed name drops one hard link. */
		u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
		if (!links) {
			/* minor error. Not critical. */
		} else {
			ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
			ni->mi.dirty = true;
		}
	}

	/* Close the gap by sliding the tail down over the attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);

	mi->dirty = true;

	return true;
}
/* bytes = "new attribute size" - "old attribute size" */
/*
 * mi_resize_attr - Grow or shrink an attribute in place by @bytes.
 *
 * The change is rounded up to 8-byte alignment; the record tail after the
 * attribute is shifted accordingly. For resident attributes res.data_size
 * tracks the change too (for non-resident ones rsize is read but unused).
 *
 * Return: true on success, false if the request does not fit / is invalid.
 */
bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 total, used = le32_to_cpu(rec->used);
	u32 nsize, asize = le32_to_cpu(attr->size);
	/* Only meaningful for resident attrs; written back below if so. */
	u32 rsize = le32_to_cpu(attr->res.data_size);
	int tail = (int)(used - aoff - asize);
	int dsize;
	char *next;

	/* The attribute must lie entirely inside the used area. */
	if (tail < 0 || aoff >= used)
		return false;

	if (!bytes)
		return true;

	total = le32_to_cpu(rec->total);
	next = Add2Ptr(attr, asize);

	if (bytes > 0) {
		dsize = ALIGN(bytes, 8);
		/* Growing: the record must still fit its declared total. */
		if (used + dsize > total)
			return false;
		nsize = asize + dsize;
		/* Move tail */
		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
		used += dsize;
		rsize += dsize;
	} else {
		dsize = ALIGN(-bytes, 8);
		/* Shrinking: cannot remove more than the attribute holds. */
		if (dsize > asize)
			return false;
		nsize = asize - dsize;
		memmove(next - dsize, next, tail);
		used -= dsize;
		rsize -= dsize;
	}

	rec->used = cpu_to_le32(used);
	attr->size = cpu_to_le32(nsize);
	if (!attr->non_res)
		attr->res.data_size = cpu_to_le32(rsize);

	mi->dirty = true;

	return true;
}
/*
 * Pack runs in MFT record.
 * If failed record is not changed.
 *
 * Repacks the run list of non-resident @attr for [svcn, svcn + len).
 * Strategy: temporarily shift the record tail to the end of the record to
 * expose the maximum space, pack as many runs as fit, then move the tail
 * back flush against the (possibly resized) run list. On run_pack failure
 * the tail is restored, leaving the record byte-identical.
 */
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
		 struct runs_tree *run, CLST len)
{
	int err = 0;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 new_run_size;
	CLST plen;
	struct MFT_REC *rec = mi->mrec;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	u32 used = le32_to_cpu(rec->used);
	u32 aoff = PtrOffset(rec, attr);
	u32 asize = le32_to_cpu(attr->size);
	char *next = Add2Ptr(attr, asize);
	u16 run_off = le16_to_cpu(attr->nres.run_off);
	u32 run_size = asize - run_off;
	u32 tail = used - aoff - asize;
	/* Free space between 'used' and the end of the record. */
	u32 dsize = sbi->record_size - used;

	/* Make a maximum gap in current record. */
	memmove(next + dsize, next, tail);

	/* Pack as much as possible. */
	err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
		       &plen);
	if (err < 0) {
		/* Failure: slide the tail back; record is unchanged. */
		memmove(next, next + dsize, tail);
		return err;
	}

	/* On success run_pack returns the packed byte count; align it. */
	new_run_size = ALIGN(err, 8);

	/* Close the gap: place the tail right after the new run list. */
	memmove(next + new_run_size - run_size, next + dsize, tail);

	attr->size = cpu_to_le32(asize + new_run_size - run_size);
	attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
	rec->used = cpu_to_le32(used + new_run_size - run_size);
	mi->dirty = true;

	return 0;
}