/* zcomp.c — per-CPU compression stream management for zram */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. #include <linux/kernel.h>
  3. #include <linux/string.h>
  4. #include <linux/err.h>
  5. #include <linux/slab.h>
  6. #include <linux/wait.h>
  7. #include <linux/sched.h>
  8. #include <linux/cpu.h>
  9. #include <linux/crypto.h>
  10. #include <linux/vmalloc.h>
  11. #include "zcomp.h"
  12. #include "backend_lzo.h"
  13. #include "backend_lzorle.h"
  14. #include "backend_lz4.h"
  15. #include "backend_lz4hc.h"
  16. #include "backend_zstd.h"
  17. #include "backend_deflate.h"
  18. #include "backend_842.h"
/*
 * Table of compiled-in compression backends, terminated by a NULL
 * sentinel so it can be walked without a separate length.  Each entry
 * is gated on its CONFIG_ZRAM_BACKEND_* option; lzorle is listed
 * before lzo so it is preferred when both are enabled.
 */
static const struct zcomp_ops *backends[] = {
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
	&backend_lzorle,
	&backend_lzo,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
	&backend_lz4,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
	&backend_lz4hc,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
	&backend_zstd,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
	&backend_deflate,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
	&backend_842,
#endif
	NULL
};
  41. static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
  42. {
  43. comp->ops->destroy_ctx(&zstrm->ctx);
  44. vfree(zstrm->buffer);
  45. zstrm->buffer = NULL;
  46. }
  47. static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
  48. {
  49. int ret;
  50. ret = comp->ops->create_ctx(comp->params, &zstrm->ctx);
  51. if (ret)
  52. return ret;
  53. /*
  54. * allocate 2 pages. 1 for compressed data, plus 1 extra for the
  55. * case when compressed size is larger than the original one
  56. */
  57. zstrm->buffer = vzalloc(2 * PAGE_SIZE);
  58. if (!zstrm->buffer) {
  59. zcomp_strm_free(comp, zstrm);
  60. return -ENOMEM;
  61. }
  62. return 0;
  63. }
  64. static const struct zcomp_ops *lookup_backend_ops(const char *comp)
  65. {
  66. int i = 0;
  67. while (backends[i]) {
  68. if (sysfs_streq(comp, backends[i]->name))
  69. break;
  70. i++;
  71. }
  72. return backends[i];
  73. }
  74. bool zcomp_available_algorithm(const char *comp)
  75. {
  76. return lookup_backend_ops(comp) != NULL;
  77. }
  78. /* show available compressors */
  79. ssize_t zcomp_available_show(const char *comp, char *buf)
  80. {
  81. ssize_t sz = 0;
  82. int i;
  83. for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
  84. if (!strcmp(comp, backends[i]->name)) {
  85. sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
  86. "[%s] ", backends[i]->name);
  87. } else {
  88. sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
  89. "%s ", backends[i]->name);
  90. }
  91. }
  92. sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
  93. return sz;
  94. }
  95. struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
  96. {
  97. local_lock(&comp->stream->lock);
  98. return this_cpu_ptr(comp->stream);
  99. }
/* Release the per-CPU stream acquired by zcomp_stream_get(). */
void zcomp_stream_put(struct zcomp *comp)
{
	local_unlock(&comp->stream->lock);
}
  104. int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
  105. const void *src, unsigned int *dst_len)
  106. {
  107. struct zcomp_req req = {
  108. .src = src,
  109. .dst = zstrm->buffer,
  110. .src_len = PAGE_SIZE,
  111. .dst_len = 2 * PAGE_SIZE,
  112. };
  113. int ret;
  114. ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
  115. if (!ret)
  116. *dst_len = req.dst_len;
  117. return ret;
  118. }
  119. int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
  120. const void *src, unsigned int src_len, void *dst)
  121. {
  122. struct zcomp_req req = {
  123. .src = src,
  124. .dst = dst,
  125. .src_len = src_len,
  126. .dst_len = PAGE_SIZE,
  127. };
  128. return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
  129. }
  130. int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
  131. {
  132. struct zcomp *comp = hlist_entry(node, struct zcomp, node);
  133. struct zcomp_strm *zstrm;
  134. int ret;
  135. zstrm = per_cpu_ptr(comp->stream, cpu);
  136. local_lock_init(&zstrm->lock);
  137. ret = zcomp_strm_init(comp, zstrm);
  138. if (ret)
  139. pr_err("Can't allocate a compression stream\n");
  140. return ret;
  141. }
  142. int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
  143. {
  144. struct zcomp *comp = hlist_entry(node, struct zcomp, node);
  145. struct zcomp_strm *zstrm;
  146. zstrm = per_cpu_ptr(comp->stream, cpu);
  147. zcomp_strm_free(comp, zstrm);
  148. return 0;
  149. }
  150. static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
  151. {
  152. int ret;
  153. comp->stream = alloc_percpu(struct zcomp_strm);
  154. if (!comp->stream)
  155. return -ENOMEM;
  156. comp->params = params;
  157. ret = comp->ops->setup_params(comp->params);
  158. if (ret)
  159. goto cleanup;
  160. ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
  161. if (ret < 0)
  162. goto cleanup;
  163. return 0;
  164. cleanup:
  165. comp->ops->release_params(comp->params);
  166. free_percpu(comp->stream);
  167. return ret;
  168. }
/*
 * Free everything owned by @comp.  Ordering matters: removing the
 * hotplug instance first tears down each per-CPU stream (via
 * zcomp_cpu_dead()) before the per-CPU storage itself is freed.
 */
void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	kfree(comp);
}
  176. struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params)
  177. {
  178. struct zcomp *comp;
  179. int error;
  180. /*
  181. * The backends array has a sentinel NULL value, so the minimum
  182. * size is 1. In order to be valid the array, apart from the
  183. * sentinel NULL element, should have at least one compression
  184. * backend selected.
  185. */
  186. BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1);
  187. comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
  188. if (!comp)
  189. return ERR_PTR(-ENOMEM);
  190. comp->ops = lookup_backend_ops(alg);
  191. if (!comp->ops) {
  192. kfree(comp);
  193. return ERR_PTR(-EINVAL);
  194. }
  195. error = zcomp_init(comp, params);
  196. if (error) {
  197. kfree(comp);
  198. return ERR_PTR(error);
  199. }
  200. return comp;
  201. }