  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (C) 2011-2017 Red Hat, Inc.
  4. *
  5. * This file is released under the GPL.
  6. */
  7. #ifndef DM_BIO_PRISON_H
  8. #define DM_BIO_PRISON_H
  9. #include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
  10. #include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */
  11. #include <linux/bio.h>
  12. #include <linux/rbtree.h>
  13. /*----------------------------------------------------------------*/
/*
 * Sometimes we can't deal with a bio straight away. We put them in prison
 * where they can't cause any mischief. Bios are put in a cell identified
 * by a key, multiple bios can be in the same cell. When the cell is
 * subsequently unlocked the bios become available.
 */
struct dm_bio_prison;	/* opaque; obtained from dm_bio_prison_create() */
/*
 * Keys define a range of blocks within either a virtual or physical
 * device.
 */
struct dm_cell_key {
	int virtual;		/* nonzero => range addresses the virtual (thin) device, else the physical data device */
	dm_thin_id dev;		/* device the range belongs to */
	dm_block_t block_begin, block_end;	/* half-open [begin, end): range size is block_end - block_begin */
};
  30. /*
  31. * The range of a key (block_end - block_begin) must not
  32. * exceed BIO_PRISON_MAX_RANGE. Also the range must not
  33. * cross a similarly sized boundary.
  34. *
  35. * Must be a power of 2.
  36. */
  37. #define BIO_PRISON_MAX_RANGE 1024
  38. #define BIO_PRISON_MAX_RANGE_SHIFT 10
/*
 * Treat this as opaque, only in header so callers can manage allocation
 * themselves.
 */
struct dm_bio_prison_cell {
	struct list_head user_list;	/* for client use */
	struct rb_node node;		/* linkage in the prison's tree of held cells */
	struct dm_cell_key key;		/* block range this cell locks */
	struct bio *holder;		/* bio holding the cell (see dm_bio_detain()) */
	struct bio_list bios;		/* further bios detained behind the holder ("inmates") */
};
/*
 * Constructor/destructor for a prison.  dm_bio_prison_create() presumably
 * returns NULL on allocation failure — NOTE(review): confirm in the .c file.
 */
struct dm_bio_prison *dm_bio_prison_create(void);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);

/*
 * These two functions just wrap a mempool. This is a transitory step:
 * Eventually all bio prison clients should manage their own cell memory.
 *
 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
 * in interrupt context or passed GFP_NOWAIT.
 */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
						    gfp_t gfp);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell);
/*
 * Creates, or retrieves a cell that overlaps the given key.
 *
 * Returns 1 if pre-existing cell returned, zero if new cell created using
 * @cell_prealloc.  Either way *cell_result points to the live cell.
 */
int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result);

/*
 * Returns false if key is beyond BIO_PRISON_MAX_RANGE or spans a boundary.
 */
bool dm_cell_key_has_valid_range(struct dm_cell_key *key);
/*
 * An atomic op that combines retrieving or creating a cell, and adding a
 * bio to it.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result);

/*
 * Unlock @cell, handing its detained bios (holder included) back to the
 * caller on @bios.
 */
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios);

/*
 * As dm_cell_release(), but the holder is excluded from @inmates;
 * the caller is expected to deal with the holder separately.
 */
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates);

/*
 * Release @cell, failing all bios held in it with @error.
 */
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, blk_status_t error);
/*
 * Visits the cell and then releases. Guarantees no new inmates are
 * inserted between the visit and release.
 *
 * @context is passed as the first argument to @visit_fn.
 */
void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context, struct dm_bio_prison_cell *cell);

/*
 * Rather than always releasing the prisoners in a cell, the client may
 * want to promote one of them to be the new holder. There is a race here
 * though between releasing an empty cell, and other threads adding new
 * inmates. So this function makes the decision with its lock held.
 *
 * This function can have two outcomes:
 * i) An inmate is promoted to be the holder of the cell (return value of 0).
 * ii) The cell has no inmate for promotion and is released (return value of 1).
 */
int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell);
  115. /*----------------------------------------------------------------*/
/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed. Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */
struct dm_deferred_set;		/* opaque */
struct dm_deferred_entry;	/* opaque; returned by dm_deferred_entry_inc() */

struct dm_deferred_set *dm_deferred_set_create(void);
void dm_deferred_set_destroy(struct dm_deferred_set *ds);

/*
 * Register a pending operation (e.g. a read of a shared block); the
 * returned entry must later be passed to dm_deferred_entry_dec().
 */
struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
/*
 * Complete @entry; work items whose prerequisites are now satisfied are
 * presumably moved onto @head — NOTE(review): confirm in the .c file.
 */
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
/*
 * Queue @work to run once currently-pending entries complete.
 * NOTE(review): confirm return-value semantics against the implementation.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
/*----------------------------------------------------------------*/

#endif	/* DM_BIO_PRISON_H */