zpool.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * zpool memory storage api
 *
 * Copyright (C) 2014 Dan Streetman
 *
 * This is a common frontend for memory storage pool implementations.
 * Typically, this is used to store compressed memory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/zpool.h>

struct zpool {
        struct zpool_driver *driver;
        void *pool;
};

static LIST_HEAD(drivers_head);
static DEFINE_SPINLOCK(drivers_lock);

/**
 * zpool_register_driver() - register a zpool implementation.
 * @driver: driver to register
 */
void zpool_register_driver(struct zpool_driver *driver)
{
        spin_lock(&drivers_lock);
        atomic_set(&driver->refcount, 0);
        list_add(&driver->list, &drivers_head);
        spin_unlock(&drivers_lock);
}
EXPORT_SYMBOL(zpool_register_driver);

/**
 * zpool_unregister_driver() - unregister a zpool implementation.
 * @driver: driver to unregister.
 *
 * Module usage counting is used to prevent using a driver
 * while/after unloading, so calling this from a module's exit
 * function should never fail; if it is called from anywhere else
 * and returns failure, the driver is still in use and must remain
 * available.
 *
 * Returns: 0 on success, -EBUSY if the driver is still in use.
 */
int zpool_unregister_driver(struct zpool_driver *driver)
{
        int ret = 0, refcount;

        spin_lock(&drivers_lock);
        refcount = atomic_read(&driver->refcount);
        WARN_ON(refcount < 0);
        if (refcount > 0)
                ret = -EBUSY;
        else
                list_del(&driver->list);
        spin_unlock(&drivers_lock);

        return ret;
}
EXPORT_SYMBOL(zpool_unregister_driver);
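
/*
 * A minimal registration sketch for a hypothetical backend module (the
 * "example" name and callbacks below are illustrative, not part of this
 * file); real backends such as zsmalloc follow the same pattern, and also
 * declare MODULE_ALIAS("zpool-example") so the request_module() calls
 * below can find them:
 *
 *        static struct zpool_driver example_zpool_driver = {
 *                .type        = "example",
 *                .owner       = THIS_MODULE,
 *                .create      = example_create,
 *                .destroy     = example_destroy,
 *                .malloc      = example_malloc,
 *                .free        = example_free,
 *                .map         = example_map,
 *                .unmap       = example_unmap,
 *                .total_pages = example_total_pages,
 *        };
 *
 *        static int __init example_init(void)
 *        {
 *                zpool_register_driver(&example_zpool_driver);
 *                return 0;
 *        }
 *
 *        static void __exit example_exit(void)
 *        {
 *                zpool_unregister_driver(&example_zpool_driver);
 *        }
 */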

/* this assumes @type is null-terminated. */
static struct zpool_driver *zpool_get_driver(const char *type)
{
        struct zpool_driver *driver;

        spin_lock(&drivers_lock);
        list_for_each_entry(driver, &drivers_head, list) {
                if (!strcmp(driver->type, type)) {
                        bool got = try_module_get(driver->owner);

                        if (got)
                                atomic_inc(&driver->refcount);
                        spin_unlock(&drivers_lock);
                        return got ? driver : NULL;
                }
        }

        spin_unlock(&drivers_lock);
        return NULL;
}

static void zpool_put_driver(struct zpool_driver *driver)
{
        atomic_dec(&driver->refcount);
        module_put(driver->owner);
}

/**
 * zpool_has_pool() - Check if the pool driver is available
 * @type: The type of the zpool to check (e.g. zbud, zsmalloc)
 *
 * This checks whether the @type pool driver is available. It will try to
 * load the backend module if needed, but there is no guarantee the module
 * will still be loaded and available immediately after calling. If this
 * returns true, the caller should assume the pool is available, but must
 * still be prepared for zpool_create_pool() to fail. If this returns
 * false, the requested pool type is not available: the backend module
 * either does not exist or could not be loaded, and zpool_create_pool()
 * with that type will fail.
 *
 * The @type string must be null-terminated.
 *
 * Returns: true if @type pool is available, false if not
 */
bool zpool_has_pool(char *type)
{
        struct zpool_driver *driver = zpool_get_driver(type);

        if (!driver) {
                request_module("zpool-%s", type);
                driver = zpool_get_driver(type);
        }

        if (!driver)
                return false;

        zpool_put_driver(driver);
        return true;
}
EXPORT_SYMBOL(zpool_has_pool);
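
/*
 * Caller-side sketch (hypothetical): validate a configured backend before
 * committing to it, e.g. when checking a module parameter:
 *
 *        if (!zpool_has_pool("zsmalloc"))
 *                return -ENODEV;
 */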

/**
 * zpool_create_pool() - Create a new zpool
 * @type: The type of the zpool to create (e.g. zbud, zsmalloc)
 * @name: The name of the zpool (e.g. zram0, zswap)
 * @gfp: The GFP flags to use when allocating the pool.
 *
 * This creates a new zpool of the specified type. The gfp flags will be
 * used when allocating memory, if the implementation supports it.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * The @type and @name strings must be null-terminated.
 *
 * Returns: New zpool on success, NULL on failure.
 */
struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp)
{
        struct zpool_driver *driver;
        struct zpool *zpool;

        pr_debug("creating pool type %s\n", type);

        driver = zpool_get_driver(type);
        if (!driver) {
                request_module("zpool-%s", type);
                driver = zpool_get_driver(type);
        }

        if (!driver) {
                pr_err("no driver for type %s\n", type);
                return NULL;
        }

        zpool = kmalloc(sizeof(*zpool), gfp);
        if (!zpool) {
                pr_err("couldn't create zpool - out of memory\n");
                zpool_put_driver(driver);
                return NULL;
        }

        zpool->driver = driver;
        zpool->pool = driver->create(name, gfp);

        if (!zpool->pool) {
                pr_err("couldn't create %s pool\n", type);
                zpool_put_driver(driver);
                kfree(zpool);
                return NULL;
        }

        pr_debug("created pool type %s\n", type);

        return zpool;
}
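
/*
 * Creation sketch (hypothetical caller): the pool name and the choice of
 * the zsmalloc backend are only examples:
 *
 *        struct zpool *pool;
 *
 *        pool = zpool_create_pool("zsmalloc", "example", GFP_KERNEL);
 *        if (!pool)
 *                return -ENOMEM;
 *        ...
 *        zpool_destroy_pool(pool);
 */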

/**
 * zpool_destroy_pool() - Destroy a zpool
 * @zpool: The zpool to destroy.
 *
 * Implementations must guarantee this to be thread-safe,
 * however only when destroying different pools. The same
 * pool should only be destroyed once, and should not be used
 * after it is destroyed.
 *
 * This destroys an existing zpool. The zpool should not be in use.
 */
void zpool_destroy_pool(struct zpool *zpool)
{
        pr_debug("destroying pool type %s\n", zpool->driver->type);

        zpool->driver->destroy(zpool->pool);
        zpool_put_driver(zpool->driver);
        kfree(zpool);
}

/**
 * zpool_get_type() - Get the type of the zpool
 * @zpool: The zpool to check
 *
 * This returns the type of the pool.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: The type of zpool.
 */
const char *zpool_get_type(struct zpool *zpool)
{
        return zpool->driver->type;
}

/**
 * zpool_malloc_support_movable() - Check if the zpool supports
 *        allocating movable memory
 * @zpool: The zpool to check
 *
 * This returns whether the zpool supports allocating movable memory.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: true if the zpool supports allocating movable memory, false if not
 */
bool zpool_malloc_support_movable(struct zpool *zpool)
{
        return zpool->driver->malloc_support_movable;
}

/**
 * zpool_malloc() - Allocate memory
 * @zpool: The zpool to allocate from.
 * @size: The amount of memory to allocate.
 * @gfp: The GFP flags to use when allocating memory.
 * @handle: Pointer to the handle to set
 *
 * This allocates the requested amount of memory from the pool.
 * The gfp flags will be used when allocating memory, if the
 * implementation supports it. The provided @handle will be
 * set to the allocated object handle.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: 0 on success, negative value on error.
 */
int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
                 unsigned long *handle)
{
        return zpool->driver->malloc(zpool->pool, size, gfp, handle);
}
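
/*
 * Allocation sketch (hypothetical caller): objects are identified by an
 * opaque handle, not a pointer, and must be mapped before being accessed:
 *
 *        unsigned long handle;
 *        int ret;
 *
 *        ret = zpool_malloc(pool, len, GFP_KERNEL, &handle);
 *        if (ret)
 *                return ret;
 *        ...
 *        zpool_free(pool, handle);
 */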

/**
 * zpool_free() - Free previously allocated memory
 * @zpool: The zpool that allocated the memory.
 * @handle: The handle to the memory to free.
 *
 * This frees previously allocated memory. This does not guarantee
 * that the pool will actually free memory, only that the memory
 * in the pool will become available for use by the pool.
 *
 * Implementations must guarantee this to be thread-safe,
 * however only when freeing different handles. The same
 * handle should only be freed once, and should not be used
 * after freeing.
 */
void zpool_free(struct zpool *zpool, unsigned long handle)
{
        zpool->driver->free(zpool->pool, handle);
}

/**
 * zpool_map_handle() - Map a previously allocated handle into memory
 * @zpool: The zpool that the handle was allocated from
 * @handle: The handle to map
 * @mapmode: How the memory should be mapped
 *
 * This maps a previously allocated handle into memory. The @mapmode
 * param indicates to the implementation how the memory will be
 * used, i.e. read-only, write-only or read-write. If the
 * implementation does not support it, the memory will be treated
 * as read-write.
 *
 * This may take locks and disable interrupts and/or preemption;
 * zpool_unmap_handle() must be called to undo those actions. The
 * code that uses the mapped handle should complete its operations
 * on the mapped handle memory quickly and unmap as soon as
 * possible. As the implementation may use per-cpu data, multiple
 * handles should not be mapped concurrently on any cpu.
 *
 * Returns: A pointer to the handle's mapped memory area.
 */
void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
                       enum zpool_mapmode mapmode)
{
        return zpool->driver->map(zpool->pool, handle, mapmode);
}

/**
 * zpool_unmap_handle() - Unmap a previously mapped handle
 * @zpool: The zpool that the handle was allocated from
 * @handle: The handle to unmap
 *
 * This unmaps a previously mapped handle. Any locks or other
 * actions that the implementation took in zpool_map_handle()
 * will be undone here. The memory area returned from
 * zpool_map_handle() should no longer be used after this.
 */
void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
{
        zpool->driver->unmap(zpool->pool, handle);
}
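
/*
 * Mapping sketch (hypothetical caller): fill a freshly allocated object.
 * The map/unmap window must stay short, and nothing that may sleep should
 * run inside it unless zpool_can_sleep_mapped() says it is safe:
 *
 *        void *dst;
 *
 *        dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
 *        memcpy(dst, src, len);
 *        zpool_unmap_handle(pool, handle);
 */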

/**
 * zpool_get_total_pages() - The total size of the pool
 * @zpool: The zpool to check
 *
 * This returns the total size in pages of the pool.
 *
 * Returns: Total size of the zpool in pages.
 */
u64 zpool_get_total_pages(struct zpool *zpool)
{
        return zpool->driver->total_pages(zpool->pool);
}
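
/*
 * Accounting sketch (hypothetical caller): the size is reported in pages,
 * so shift by PAGE_SHIFT when a byte count is needed:
 *
 *        u64 bytes = zpool_get_total_pages(pool) << PAGE_SHIFT;
 */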

/**
 * zpool_can_sleep_mapped() - Test whether the zpool backend can sleep
 *        while a handle is mapped
 * @zpool: The zpool to test
 *
 * Some allocators enter a non-preemptible context in the ->map() callback
 * (e.g. disable pagefaults) and exit that context in ->unmap(), which
 * limits what we can do with the mapped object. For instance, we cannot
 * wait for the asynchronous crypto API to decompress such an object or
 * take mutexes, since those will call into the scheduler. This function
 * tells us whether the pool uses such an allocator.
 *
 * Returns: true if zpool can sleep; false otherwise.
 */
bool zpool_can_sleep_mapped(struct zpool *zpool)
{
        return zpool->driver->sleep_mapped;
}
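
/*
 * Caller-side sketch (hypothetical) of working around a backend that
 * cannot sleep while mapped: copy the object to a private buffer and
 * unmap it before doing anything that may schedule:
 *
 *        src = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
 *        if (!zpool_can_sleep_mapped(pool)) {
 *                memcpy(tmp, src, len);
 *                src = tmp;
 *                zpool_unmap_handle(pool, handle);
 *        }
 *        ... work that may sleep on src ...
 *        if (zpool_can_sleep_mapped(pool))
 *                zpool_unmap_handle(pool, handle);
 */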

MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");