file-item.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2007 Oracle. All rights reserved.
  4. */
  5. #include <linux/bio.h>
  6. #include <linux/slab.h>
  7. #include <linux/pagemap.h>
  8. #include <linux/highmem.h>
  9. #include "ctree.h"
  10. #include "disk-io.h"
  11. #include "transaction.h"
  12. #include "volumes.h"
  13. #include "print-tree.h"
  14. #include "compression.h"
/*
 * Maximum number of checksums of size @size that fit into one csum item,
 * leaving room for two struct btrfs_item headers in the leaf (so the item
 * can later be split in place by btrfs_del_csums).
 */
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				   size) - 1))

/* Csum-item capacity additionally clamped to PAGE_SIZE entries. */
#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

/*
 * Number of data bytes covered by the checksums that fit in one page-sized
 * btrfs_ordered_sum allocation (one u32 checksum slot per sector).
 */
#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (fs_info)->sectorsize)
  23. int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
  24. struct btrfs_root *root,
  25. u64 objectid, u64 pos,
  26. u64 disk_offset, u64 disk_num_bytes,
  27. u64 num_bytes, u64 offset, u64 ram_bytes,
  28. u8 compression, u8 encryption, u16 other_encoding)
  29. {
  30. int ret = 0;
  31. struct btrfs_file_extent_item *item;
  32. struct btrfs_key file_key;
  33. struct btrfs_path *path;
  34. struct extent_buffer *leaf;
  35. path = btrfs_alloc_path();
  36. if (!path)
  37. return -ENOMEM;
  38. file_key.objectid = objectid;
  39. file_key.offset = pos;
  40. file_key.type = BTRFS_EXTENT_DATA_KEY;
  41. path->leave_spinning = 1;
  42. ret = btrfs_insert_empty_item(trans, root, path, &file_key,
  43. sizeof(*item));
  44. if (ret < 0)
  45. goto out;
  46. BUG_ON(ret); /* Can't happen */
  47. leaf = path->nodes[0];
  48. item = btrfs_item_ptr(leaf, path->slots[0],
  49. struct btrfs_file_extent_item);
  50. btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
  51. btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
  52. btrfs_set_file_extent_offset(leaf, item, offset);
  53. btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
  54. btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
  55. btrfs_set_file_extent_generation(leaf, item, trans->transid);
  56. btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
  57. btrfs_set_file_extent_compression(leaf, item, compression);
  58. btrfs_set_file_extent_encryption(leaf, item, encryption);
  59. btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
  60. btrfs_mark_buffer_dirty(leaf);
  61. out:
  62. btrfs_free_path(path);
  63. return ret;
  64. }
/*
 * Find the csum item in @root covering disk byte @bytenr and return a
 * pointer to the checksum slot for @bytenr inside it.
 *
 * On failure returns an ERR_PTR:
 *   -EFBIG  - an item exists ending exactly at @bytenr (it could be grown
 *             to cover it; btrfs_csum_file_blocks relies on this),
 *   -ENOENT - no item covers @bytenr,
 *   other   - error from btrfs_search_slot.
 *
 * @cow is passed through to btrfs_search_slot; with cow == 0 the caller
 * may search without modifying the tree.
 */
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		/* No exact match: the previous item may still cover bytenr */
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		/* Index of bytenr's checksum within the previous item */
		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			/* Item ends exactly at bytenr: could be extended */
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			/* Item ends before bytenr: not covered */
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}
  115. int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
  116. struct btrfs_root *root,
  117. struct btrfs_path *path, u64 objectid,
  118. u64 offset, int mod)
  119. {
  120. int ret;
  121. struct btrfs_key file_key;
  122. int ins_len = mod < 0 ? -1 : 0;
  123. int cow = mod != 0;
  124. file_key.objectid = objectid;
  125. file_key.offset = offset;
  126. file_key.type = BTRFS_EXTENT_DATA_KEY;
  127. ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
  128. return ret;
  129. }
  130. static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
  131. {
  132. kfree(bio->csum_allocated);
  133. }
/*
 * Look up the stored checksums for every sector of a read bio.
 *
 * @logical_offset: file offset of the bio start, used only when @dio != 0
 *		    (direct I/O pages cannot supply it via page_offset()).
 * @dst:	    if non-NULL, checksums are written there; otherwise they
 *		    are attached to the btrfs_io_bio (inline buffer when they
 *		    fit, kmalloc'ed otherwise, freed via the end_io hook).
 *
 * Checksums are taken from in-flight ordered extents first
 * (btrfs_find_ordered_sum), then from the csum tree.  Sectors with no
 * checksum get a zeroed slot; for the data reloc tree the range is marked
 * EXTENT_NODATASUM instead of logging a message.
 *
 * Returns 0 or BLK_STS_RESOURCE on allocation failure.
 */
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
					    u64 logical_offset, u32 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;		/* write cursor into the checksum buffer */
	u64 offset = 0;		/* current file offset */
	u64 item_start_offset = 0;	/* disk range covered by cached item */
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;		/* sectors still missing a checksum */
	int count = 0;		/* sectors resolved in the current step */
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		/* Attach the checksum buffer to the bio itself */
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return BLK_STS_RESOURCE;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	/* Large bios will walk many csum items; enable readahead */
	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment(bvec, bio, iter) {
		page_bytes_left = bvec.bv_len;
		/* Sectors resolved by a previous step may span segments */
		if (count)
			goto next;

		if (!dio)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
		/* Prefer checksums still attached to an ordered extent */
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		/* (Re)load the csum item if the cached one doesn't cover us */
		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				/* No csum: emit one zeroed slot and move on */
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						   "no csum found for inode %llu start %llu",
					       btrfs_ino(BTRFS_I(inode)), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		/* Advance per-sector cursors through this segment */
		while (count > 0) {
			count--;
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}
/*
 * Look up checksums for a buffered read bio; per-sector file offsets are
 * derived from the bio's pages.  Results are stored via @dst (or attached
 * to the bio when @dst is NULL).
 */
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}
/*
 * Look up checksums for a direct I/O read bio.  @offset is the starting
 * file offset of the bio, supplied explicitly because it cannot be derived
 * from the (user) pages; checksums are attached to the bio.
 */
blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}
/*
 * Collect all checksums stored for the disk byte range [start, end]
 * (inclusive; both bounds must be sector aligned) into newly allocated
 * btrfs_ordered_sum entries appended to @list.
 *
 * With @search_commit set, the commit root is searched without locking
 * (used when the current transaction must not be involved).
 *
 * Returns 0 on success or a negative errno; on error the entries built so
 * far are freed and @list is left unchanged.
 */
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		/* The previous item may still cover 'start'; step back if so */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	/* Walk csum items forward until the range is exhausted */
	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			/* Item ends before our range; try the next one */
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		/* Split this item's coverage into ordered_sum-sized chunks */
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	/* On error, free everything gathered so far */
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);
	btrfs_free_path(path);
	return ret;
}
/*
 * Compute a checksum for every sector of a write bio and attach the
 * resulting btrfs_ordered_sum(s) to the matching ordered extent(s).
 *
 * @file_start: file offset of the bio when @contig is set; with @contig
 *		clear, per-page offsets are taken from the pages themselves.
 *
 * When the bio crosses an ordered extent boundary, the sums gathered so
 * far are handed to the current ordered extent and a fresh allocation is
 * started for the next one.
 *
 * Returns 0 or BLK_STS_RESOURCE if the initial allocation fails.
 * NOTE(review): the mid-loop kzalloc only BUG()s on failure rather than
 * unwinding - pre-existing behavior, kept as-is.
 */
blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
		       u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;		/* next free slot in sums->sums */
	int nr_sectors;
	unsigned long total_bytes = 0;	/* bytes checksummed overall */
	unsigned long this_sum_bytes = 0; /* bytes in the current sums */
	int i;
	u64 offset;

	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	bio_for_each_segment(bvec, bio, iter) {
		if (!contig)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		data = kmap_atomic(bvec.bv_page);

		/* round the segment length up to whole sectors */
		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
				offset < ordered->file_offset) {
				/*
				 * Crossed into a different ordered extent:
				 * hand off the sums collected so far and
				 * start a new accumulation.
				 */
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
					       GFP_NOFS);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				data = kmap_atomic(bvec.bv_page);
			}

			/* crc32c seed ~0, then finalize into the slot */
			sums->sums[index] = ~(u32)0;
			sums->sums[index]
				= btrfs_csum_data(data + bvec.bv_offset
						+ (i * fs_info->sectorsize),
						sums->sums[index],
						fs_info->sectorsize);
			btrfs_csum_final(sums->sums[index],
					(char *)(sums->sums + index));
			index++;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

		kunmap_atomic(data);
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}
  457. /*
  458. * helper function for csum removal, this expects the
  459. * key to describe the csum pointed to by the path, and it expects
  460. * the csum to overlap the range [bytenr, len]
  461. *
  462. * The csum should not be entirely contained in the range and the
  463. * range should not be entirely contained in the csum.
  464. *
  465. * This calls btrfs_truncate_item with the correct args based on the
  466. * overlap, and fixes up the key as required.
  467. */
  468. static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
  469. struct btrfs_path *path,
  470. struct btrfs_key *key,
  471. u64 bytenr, u64 len)
  472. {
  473. struct extent_buffer *leaf;
  474. u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
  475. u64 csum_end;
  476. u64 end_byte = bytenr + len;
  477. u32 blocksize_bits = fs_info->sb->s_blocksize_bits;
  478. leaf = path->nodes[0];
  479. csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
  480. csum_end <<= fs_info->sb->s_blocksize_bits;
  481. csum_end += key->offset;
  482. if (key->offset < bytenr && csum_end <= end_byte) {
  483. /*
  484. * [ bytenr - len ]
  485. * [ ]
  486. * [csum ]
  487. * A simple truncate off the end of the item
  488. */
  489. u32 new_size = (bytenr - key->offset) >> blocksize_bits;
  490. new_size *= csum_size;
  491. btrfs_truncate_item(fs_info, path, new_size, 1);
  492. } else if (key->offset >= bytenr && csum_end > end_byte &&
  493. end_byte > key->offset) {
  494. /*
  495. * [ bytenr - len ]
  496. * [ ]
  497. * [csum ]
  498. * we need to truncate from the beginning of the csum
  499. */
  500. u32 new_size = (csum_end - end_byte) >> blocksize_bits;
  501. new_size *= csum_size;
  502. btrfs_truncate_item(fs_info, path, new_size, 0);
  503. key->offset = end_byte;
  504. btrfs_set_item_key_safe(fs_info, path, key);
  505. } else {
  506. BUG();
  507. }
  508. }
  509. /*
  510. * deletes the csum items from the csum tree for a given
  511. * range of bytes.
  512. */
  513. int btrfs_del_csums(struct btrfs_trans_handle *trans,
  514. struct btrfs_root *root, u64 bytenr, u64 len)
  515. {
  516. struct btrfs_fs_info *fs_info = trans->fs_info;
  517. struct btrfs_path *path;
  518. struct btrfs_key key;
  519. u64 end_byte = bytenr + len;
  520. u64 csum_end;
  521. struct extent_buffer *leaf;
  522. int ret;
  523. u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
  524. int blocksize_bits = fs_info->sb->s_blocksize_bits;
  525. ASSERT(root == fs_info->csum_root ||
  526. root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
  527. path = btrfs_alloc_path();
  528. if (!path)
  529. return -ENOMEM;
  530. while (1) {
  531. key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
  532. key.offset = end_byte - 1;
  533. key.type = BTRFS_EXTENT_CSUM_KEY;
  534. path->leave_spinning = 1;
  535. ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
  536. if (ret > 0) {
  537. if (path->slots[0] == 0)
  538. break;
  539. path->slots[0]--;
  540. } else if (ret < 0) {
  541. break;
  542. }
  543. leaf = path->nodes[0];
  544. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  545. if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
  546. key.type != BTRFS_EXTENT_CSUM_KEY) {
  547. break;
  548. }
  549. if (key.offset >= end_byte)
  550. break;
  551. csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
  552. csum_end <<= blocksize_bits;
  553. csum_end += key.offset;
  554. /* this csum ends before we start, we're done */
  555. if (csum_end <= bytenr)
  556. break;
  557. /* delete the entire item, it is inside our range */
  558. if (key.offset >= bytenr && csum_end <= end_byte) {
  559. int del_nr = 1;
  560. /*
  561. * Check how many csum items preceding this one in this
  562. * leaf correspond to our range and then delete them all
  563. * at once.
  564. */
  565. if (key.offset > bytenr && path->slots[0] > 0) {
  566. int slot = path->slots[0] - 1;
  567. while (slot >= 0) {
  568. struct btrfs_key pk;
  569. btrfs_item_key_to_cpu(leaf, &pk, slot);
  570. if (pk.offset < bytenr ||
  571. pk.type != BTRFS_EXTENT_CSUM_KEY ||
  572. pk.objectid !=
  573. BTRFS_EXTENT_CSUM_OBJECTID)
  574. break;
  575. path->slots[0] = slot;
  576. del_nr++;
  577. key.offset = pk.offset;
  578. slot--;
  579. }
  580. }
  581. ret = btrfs_del_items(trans, root, path,
  582. path->slots[0], del_nr);
  583. if (ret)
  584. goto out;
  585. if (key.offset == bytenr)
  586. break;
  587. } else if (key.offset < bytenr && csum_end > end_byte) {
  588. unsigned long offset;
  589. unsigned long shift_len;
  590. unsigned long item_offset;
  591. /*
  592. * [ bytenr - len ]
  593. * [csum ]
  594. *
  595. * Our bytes are in the middle of the csum,
  596. * we need to split this item and insert a new one.
  597. *
  598. * But we can't drop the path because the
  599. * csum could change, get removed, extended etc.
  600. *
  601. * The trick here is the max size of a csum item leaves
  602. * enough room in the tree block for a single
  603. * item header. So, we split the item in place,
  604. * adding a new header pointing to the existing
  605. * bytes. Then we loop around again and we have
  606. * a nicely formed csum item that we can neatly
  607. * truncate.
  608. */
  609. offset = (bytenr - key.offset) >> blocksize_bits;
  610. offset *= csum_size;
  611. shift_len = (len >> blocksize_bits) * csum_size;
  612. item_offset = btrfs_item_ptr_offset(leaf,
  613. path->slots[0]);
  614. memzero_extent_buffer(leaf, item_offset + offset,
  615. shift_len);
  616. key.offset = bytenr;
  617. /*
  618. * btrfs_split_item returns -EAGAIN when the
  619. * item changed size or key
  620. */
  621. ret = btrfs_split_item(trans, root, path, &key, offset);
  622. if (ret && ret != -EAGAIN) {
  623. btrfs_abort_transaction(trans, ret);
  624. goto out;
  625. }
  626. key.offset = end_byte - 1;
  627. } else {
  628. truncate_one_csum(fs_info, path, &key, bytenr, len);
  629. if (key.offset < bytenr)
  630. break;
  631. }
  632. btrfs_release_path(path);
  633. }
  634. ret = 0;
  635. out:
  636. btrfs_free_path(path);
  637. return ret;
  638. }
/*
 * Write the checksums in @sums into the csum tree of @root, looping until
 * the whole range described by @sums has been stored.
 *
 * Per iteration it tries, in order:
 *   1. an existing item already covering the current bytenr (write into it),
 *   2. growing an item that ends exactly at bytenr (-EFBIG from
 *      btrfs_lookup_csum) via btrfs_extend_item,
 *   3. inserting a new item, sized up to the next existing item
 *      (found_next/next_offset) and capped at MAX_CSUM_ITEMS.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;	/* start of the next existing csum item */
	u64 total_bytes = 0;	/* bytes of @sums already written */
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;		/* next unwritten slot in sums->sums */
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		/* Case 1: an existing item covers bytenr already */
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		/* -ENOENT: find the next item to bound the new insertion */
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				found_next = 1;
				goto insert;
			}
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		/* Case 2: item ends exactly at bytenr - extend it in place */
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(fs_info, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(fs_info, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		/* round down to a whole number of checksum slots */
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(fs_info, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	/* Case 3: insert a brand new csum item */
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		/* don't overlap the next existing item */
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sb->s_blocksize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	/* Copy as many checksums as fit between item and item_end */
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		/* More checksums remain: restart the search at the new bytenr */
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;
fail_unlock:
	goto out;
}
  819. void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
  820. const struct btrfs_path *path,
  821. struct btrfs_file_extent_item *fi,
  822. const bool new_inline,
  823. struct extent_map *em)
  824. {
  825. struct btrfs_fs_info *fs_info = inode->root->fs_info;
  826. struct btrfs_root *root = inode->root;
  827. struct extent_buffer *leaf = path->nodes[0];
  828. const int slot = path->slots[0];
  829. struct btrfs_key key;
  830. u64 extent_start, extent_end;
  831. u64 bytenr;
  832. u8 type = btrfs_file_extent_type(leaf, fi);
  833. int compress_type = btrfs_file_extent_compression(leaf, fi);
  834. em->bdev = fs_info->fs_devices->latest_bdev;
  835. btrfs_item_key_to_cpu(leaf, &key, slot);
  836. extent_start = key.offset;
  837. if (type == BTRFS_FILE_EXTENT_REG ||
  838. type == BTRFS_FILE_EXTENT_PREALLOC) {
  839. extent_end = extent_start +
  840. btrfs_file_extent_num_bytes(leaf, fi);
  841. } else if (type == BTRFS_FILE_EXTENT_INLINE) {
  842. size_t size;
  843. size = btrfs_file_extent_ram_bytes(leaf, fi);
  844. extent_end = ALIGN(extent_start + size,
  845. fs_info->sectorsize);
  846. }
  847. em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
  848. if (type == BTRFS_FILE_EXTENT_REG ||
  849. type == BTRFS_FILE_EXTENT_PREALLOC) {
  850. em->start = extent_start;
  851. em->len = extent_end - extent_start;
  852. em->orig_start = extent_start -
  853. btrfs_file_extent_offset(leaf, fi);
  854. em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
  855. bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
  856. if (bytenr == 0) {
  857. em->block_start = EXTENT_MAP_HOLE;
  858. return;
  859. }
  860. if (compress_type != BTRFS_COMPRESS_NONE) {
  861. set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
  862. em->compress_type = compress_type;
  863. em->block_start = bytenr;
  864. em->block_len = em->orig_block_len;
  865. } else {
  866. bytenr += btrfs_file_extent_offset(leaf, fi);
  867. em->block_start = bytenr;
  868. em->block_len = em->len;
  869. if (type == BTRFS_FILE_EXTENT_PREALLOC)
  870. set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
  871. }
  872. } else if (type == BTRFS_FILE_EXTENT_INLINE) {
  873. em->block_start = EXTENT_MAP_INLINE;
  874. em->start = extent_start;
  875. em->len = extent_end - extent_start;
  876. /*
  877. * Initialize orig_start and block_len with the same values
  878. * as in inode.c:btrfs_get_extent().
  879. */
  880. em->orig_start = EXTENT_MAP_HOLE;
  881. em->block_len = (u64)-1;
  882. if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
  883. set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
  884. em->compress_type = compress_type;
  885. }
  886. } else {
  887. btrfs_err(fs_info,
  888. "unknown file extent item type %d, inode %llu, offset %llu, "
  889. "root %llu", type, btrfs_ino(inode), extent_start,
  890. root->root_key.objectid);
  891. }
  892. }