/* null_blk_zoned.c — zoned block device emulation for the null_blk driver */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/vmalloc.h>
  3. #include <linux/sizes.h>
  4. #include "null_blk.h"
  5. #define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT)
  6. static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
  7. {
  8. return sect >> ilog2(dev->zone_size_sects);
  9. }
  10. int null_zone_init(struct nullb_device *dev)
  11. {
  12. sector_t dev_capacity_sects;
  13. sector_t sector = 0;
  14. unsigned int i;
  15. if (!is_power_of_2(dev->zone_size)) {
  16. pr_err("null_blk: zone_size must be power-of-two\n");
  17. return -EINVAL;
  18. }
  19. if (dev->zone_size > dev->size) {
  20. pr_err("Zone size larger than device capacity\n");
  21. return -EINVAL;
  22. }
  23. dev_capacity_sects = MB_TO_SECTS(dev->size);
  24. dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
  25. dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
  26. if (dev_capacity_sects & (dev->zone_size_sects - 1))
  27. dev->nr_zones++;
  28. dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
  29. GFP_KERNEL | __GFP_ZERO);
  30. if (!dev->zones)
  31. return -ENOMEM;
  32. for (i = 0; i < dev->nr_zones; i++) {
  33. struct blk_zone *zone = &dev->zones[i];
  34. zone->start = zone->wp = sector;
  35. if (zone->start + dev->zone_size_sects > dev_capacity_sects)
  36. zone->len = dev_capacity_sects - zone->start;
  37. else
  38. zone->len = dev->zone_size_sects;
  39. zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
  40. zone->cond = BLK_ZONE_COND_EMPTY;
  41. sector += dev->zone_size_sects;
  42. }
  43. return 0;
  44. }
  45. void null_zone_exit(struct nullb_device *dev)
  46. {
  47. kvfree(dev->zones);
  48. dev->zones = NULL;
  49. }
/*
 * Copy a zone report — one blk_zone_report_hdr followed by @nr_zones
 * struct blk_zone descriptors starting at zone @zno — into the data
 * pages of @bio, segment by segment.
 *
 * The header is written exactly once, at the start of the first segment,
 * and consumes one blk_zone-sized slot of that segment's capacity.
 * The caller (null_zone_report()) is responsible for clamping @nr_zones
 * to what the bio can hold.
 */
static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
unsigned int zno, unsigned int nr_zones)
{
struct blk_zone_report_hdr *hdr = NULL;
struct bio_vec bvec;
struct bvec_iter iter;
void *addr;
unsigned int zones_to_cpy;

bio_for_each_segment(bvec, bio, iter) {
addr = kmap_atomic(bvec.bv_page);

/* Whole zone descriptors that fit in this segment. */
zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);

if (!hdr) {
/* First segment: emit the report header once. */
hdr = (struct blk_zone_report_hdr *)addr;
hdr->nr_zones = nr_zones;
/* The header occupies one descriptor-sized slot.
 * NOTE(review): if bv_len < sizeof(struct blk_zone) this
 * unsigned decrement wraps — presumably segments are
 * page-sized here; confirm against callers. */
zones_to_cpy--;
addr += sizeof(struct blk_zone_report_hdr);
}

/* Don't copy more zones than remain to be reported. */
zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones);

memcpy(addr, &dev->zones[zno],
zones_to_cpy * sizeof(struct blk_zone));

/* kunmap_atomic() takes any address within the mapped page,
 * so the header offset applied to addr above is harmless. */
kunmap_atomic(addr);

nr_zones -= zones_to_cpy;
zno += zones_to_cpy;
if (!nr_zones)
break;
}
}
  77. blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
  78. {
  79. struct nullb_device *dev = nullb->dev;
  80. unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
  81. unsigned int nr_zones = dev->nr_zones - zno;
  82. unsigned int max_zones;
  83. max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
  84. nr_zones = min_t(unsigned int, nr_zones, max_zones);
  85. null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
  86. return BLK_STS_OK;
  87. }
  88. void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
  89. unsigned int nr_sectors)
  90. {
  91. struct nullb_device *dev = cmd->nq->dev;
  92. unsigned int zno = null_zone_no(dev, sector);
  93. struct blk_zone *zone = &dev->zones[zno];
  94. switch (zone->cond) {
  95. case BLK_ZONE_COND_FULL:
  96. /* Cannot write to a full zone */
  97. cmd->error = BLK_STS_IOERR;
  98. break;
  99. case BLK_ZONE_COND_EMPTY:
  100. case BLK_ZONE_COND_IMP_OPEN:
  101. /* Writes must be at the write pointer position */
  102. if (sector != zone->wp) {
  103. cmd->error = BLK_STS_IOERR;
  104. break;
  105. }
  106. if (zone->cond == BLK_ZONE_COND_EMPTY)
  107. zone->cond = BLK_ZONE_COND_IMP_OPEN;
  108. zone->wp += nr_sectors;
  109. if (zone->wp == zone->start + zone->len)
  110. zone->cond = BLK_ZONE_COND_FULL;
  111. break;
  112. default:
  113. /* Invalid zone condition */
  114. cmd->error = BLK_STS_IOERR;
  115. break;
  116. }
  117. }
  118. void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
  119. {
  120. struct nullb_device *dev = cmd->nq->dev;
  121. unsigned int zno = null_zone_no(dev, sector);
  122. struct blk_zone *zone = &dev->zones[zno];
  123. zone->cond = BLK_ZONE_COND_EMPTY;
  124. zone->wp = zone->start;
  125. }