t10-pi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 * Information.
 */

#include <linux/t10-pi.h>
#include <linux/blk-integrity.h>
#include <linux/crc-t10dif.h>
#include <linux/crc64.h>
#include <net/checksum.h>
#include <linux/unaligned.h>
#include "blk.h"

struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;
};

static __be16 t10_pi_csum(__be16 csum, void *data, unsigned int len,
		unsigned char csum_type)
{
	if (csum_type == BLK_INTEGRITY_CSUM_IP)
		return (__force __be16)ip_compute_csum(data, len);
	return cpu_to_be16(crc_t10dif_update(be16_to_cpu(csum), data, len));
}

/*
 * Type 1 and Type 2 protection use the same format: 16-bit guard tag,
 * 16-bit app tag, 32-bit reference tag. Type 3 does not define the ref
 * tag.
 */
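/*
 * For reference, the 8-byte tuple these helpers fill in, as declared in
 * <linux/t10-pi.h>:
 *
 *	struct t10_pi_tuple {
 *		__be16 guard_tag;	-- checksum of the data interval
 *		__be16 app_tag;		-- opaque application storage
 *		__be32 ref_tag;		-- target LBA (lower 32 bits)
 *	};
 *
 * When bi->pi_offset is non-zero the tuple sits behind other metadata in
 * the same integrity interval, and the guard checksum is folded over those
 * preceding metadata bytes as well (the "if (offset)" branches below).
 */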
static void t10_pi_generate(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf + offset;

		pi->guard_tag = t10_pi_csum(0, iter->data_buf, iter->interval,
					    bi->csum_type);
		if (offset)
			pi->guard_tag = t10_pi_csum(pi->guard_tag,
					iter->prot_buf, offset, bi->csum_type);
		pi->app_tag = 0;

		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
		else
			pi->ref_tag = 0;

		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}
}

static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf + offset;
		__be16 csum;

		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			if (be32_to_cpu(pi->ref_tag) !=
			    lower_32_bits(iter->seed)) {
				pr_err("%s: ref tag error at location %llu (rcvd %u)\n",
				       iter->disk_name,
				       (unsigned long long)iter->seed,
				       be32_to_cpu(pi->ref_tag));
				return BLK_STS_PROTECTION;
			}
		} else {
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    pi->ref_tag == T10_PI_REF_ESCAPE)
				goto next;
		}

		csum = t10_pi_csum(0, iter->data_buf, iter->interval,
				   bi->csum_type);
		if (offset)
			csum = t10_pi_csum(csum, iter->prot_buf, offset,
					   bi->csum_type);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
			       iter->disk_name,
			       (unsigned long long)iter->seed,
			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}

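/*
 * The escape values tested above come from <linux/t10-pi.h>: an app tag of
 * T10_PI_APP_ESCAPE (0xffff) disables checking of an interval for Type 1/2,
 * while for Type 3 (which defines no ref tag) the interval is skipped only
 * when the ref tag is also T10_PI_REF_ESCAPE (0xffffffff).
 */
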
/**
 * t10_pi_type1_prepare - prepare PI prior to submitting request to device
 * @rq:              request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc., the actual physical start sector is
 * likely to be different. Remap protection information to match the
 * physical LBA.
 */
static void t10_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	const int tuple_sz = bi->tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}

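/*
 * A worked example of the remapping above, with hypothetical numbers: a bio
 * submitted at virtual sector 0 of a partition starting at physical sector
 * 2048 carries tuples generated with ref tags 0, 1, 2, ...
 * t10_pi_type1_prepare() rewrites each tuple still carrying its virtual
 * value to 2048, 2049, 2050, ..., so a device checking against the physical
 * LBA accepts them. t10_pi_type1_complete() below performs the inverse on
 * the completed portion of the request.
 */
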
/**
 * t10_pi_type1_complete - prepare PI prior to returning request to the blk layer
 * @rq:              request with PI that should be prepared
 * @nr_bytes:        total bytes to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc., the actual physical start sector is
 * likely to be different. Since the physical start sector was submitted
 * to the device, we should remap it back to virtual values expected by the
 * block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	unsigned intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}

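/*
 * Note the extra "intervals" bound in the loop above: on a partial
 * completion only nr_bytes worth of data has actually finished, so only
 * that many intervals are remapped back to virtual values; the remainder
 * keeps its physical ref tags for when the rest of the request completes.
 */
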
static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len)
{
	return cpu_to_be64(crc64_rocksoft_update(crc, data, len));
}

static void ext_pi_crc64_generate(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf + offset;

		pi->guard_tag = ext_pi_crc64(0, iter->data_buf, iter->interval);
		if (offset)
			pi->guard_tag = ext_pi_crc64(be64_to_cpu(pi->guard_tag),
					iter->prot_buf, offset);
		pi->app_tag = 0;

		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			put_unaligned_be48(iter->seed, pi->ref_tag);
		else
			put_unaligned_be48(0ULL, pi->ref_tag);

		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}
}

static bool ext_pi_ref_escape(const u8 ref_tag[6])
{
	static const u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0;
}

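/*
 * The 16-byte extended-PI tuple used by the CRC64 helpers, as declared in
 * <linux/t10-pi.h>: a 64-bit guard, a 16-bit app tag, and a 48-bit ref tag
 * stored as a byte array, hence the get/put_unaligned_be48() accessors and
 * the all-ones ref-escape check above:
 *
 *	struct crc64_pi_tuple {
 *		__be64 guard_tag;
 *		__be16 app_tag;
 *		__u8   ref_tag[6];
 *	};
 */
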
static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf + offset;
		u64 ref, seed;
		__be64 csum;

		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			ref = get_unaligned_be48(pi->ref_tag);
			seed = lower_48_bits(iter->seed);
			if (ref != seed) {
				pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
					iter->disk_name, seed, ref);
				return BLK_STS_PROTECTION;
			}
		} else {
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    ext_pi_ref_escape(pi->ref_tag))
				goto next;
		}

		csum = ext_pi_crc64(0, iter->data_buf, iter->interval);
		if (offset)
			csum = ext_pi_crc64(be64_to_cpu(csum), iter->prot_buf,
					    offset);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %016llx, want %016llx)\n",
			       iter->disk_name, (unsigned long long)iter->seed,
			       be64_to_cpu(pi->guard_tag), be64_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}

static void ext_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	const int tuple_sz = bi->tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				if (ref == virt)
					put_unaligned_be48(ref_tag, pi->ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}

static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	unsigned intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				if (ref == ref_tag)
					put_unaligned_be48(virt, pi->ref_tag);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}

void blk_integrity_generate(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity_iter iter;
	struct bvec_iter bviter;
	struct bio_vec bv;

	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
	iter.interval = 1 << bi->interval_exp;
	iter.seed = bio->bi_iter.bi_sector;
	iter.prot_buf = bvec_virt(bip->bip_vec);
	bio_for_each_segment(bv, bio, bviter) {
		void *kaddr = bvec_kmap_local(&bv);

		iter.data_buf = kaddr;
		iter.data_size = bv.bv_len;
		switch (bi->csum_type) {
		case BLK_INTEGRITY_CSUM_CRC64:
			ext_pi_crc64_generate(&iter, bi);
			break;
		case BLK_INTEGRITY_CSUM_CRC:
		case BLK_INTEGRITY_CSUM_IP:
			t10_pi_generate(&iter, bi);
			break;
		default:
			break;
		}
		kunmap_local(kaddr);
	}
}

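/*
 * Example of the iteration above: with the common interval_exp of 9 the
 * integrity interval is 1 << 9 = 512 bytes, so one tuple protects each
 * 512-byte sector. The data pointer walks the bio segments while prot_buf
 * advances bi->tuple_size bytes per interval through the (virtually
 * contiguous) integrity buffer mapped from bip->bip_vec.
 */
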
void blk_integrity_verify(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity_iter iter;
	struct bvec_iter bviter;
	struct bio_vec bv;

	/*
	 * At the moment verify is called, bi_iter has been advanced during
	 * split and completion, so use the copy created during submission
	 * here.
	 */
	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
	iter.interval = 1 << bi->interval_exp;
	iter.seed = bip->bio_iter.bi_sector;
	iter.prot_buf = bvec_virt(bip->bip_vec);
	__bio_for_each_segment(bv, bio, bviter, bip->bio_iter) {
		void *kaddr = bvec_kmap_local(&bv);
		blk_status_t ret = BLK_STS_OK;

		iter.data_buf = kaddr;
		iter.data_size = bv.bv_len;
		switch (bi->csum_type) {
		case BLK_INTEGRITY_CSUM_CRC64:
			ret = ext_pi_crc64_verify(&iter, bi);
			break;
		case BLK_INTEGRITY_CSUM_CRC:
		case BLK_INTEGRITY_CSUM_IP:
			ret = t10_pi_verify(&iter, bi);
			break;
		default:
			break;
		}
		kunmap_local(kaddr);
		if (ret) {
			bio->bi_status = ret;
			return;
		}
	}
}

void blk_integrity_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;

	if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
		return;

	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
		ext_pi_type1_prepare(rq);
	else
		t10_pi_type1_prepare(rq);
}

void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;

	if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
		return;

	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
		ext_pi_type1_complete(rq, nr_bytes);
	else
		t10_pi_type1_complete(rq, nr_bytes);
}
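
/*
 * A minimal sketch of how a driver might describe its PI format so the
 * helpers above are used. The field names are taken from the accesses in
 * this file (q->limits.integrity and its members); the exact setup path is
 * driver-specific and these values are illustrative only:
 *
 *	lim.integrity.csum_type    = BLK_INTEGRITY_CSUM_CRC;
 *	lim.integrity.tuple_size   = sizeof(struct t10_pi_tuple);
 *	lim.integrity.interval_exp = 9;			-- 512-byte intervals
 *	lim.integrity.pi_offset    = 0;			-- PI first in metadata
 *	lim.integrity.flags       |= BLK_INTEGRITY_REF_TAG;	-- Type 1
 *
 * With that in place, blk_integrity_generate()/blk_integrity_verify()
 * compute and check guard and ref tags per interval, and
 * blk_integrity_prepare()/blk_integrity_complete() remap Type 1 ref tags
 * across partition offsets.
 */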