scompress.c

/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"
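
/*
 * All scomp-backed acomp requests are staged through per-CPU scratch
 * buffers: the source scatterlist is linearised into a scratch area,
 * the synchronous algorithm runs on it, and the result is copied back
 * out.  The buffers are shared by all registered scomp algorithms and
 * reference-counted by scomp_scratch_users under scomp_lock.
 */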
static const struct crypto_type crypto_scomp_type;
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
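
/*
 * Report scomp algorithm details to userspace; .report is invoked via
 * the crypto_user netlink interface.
 */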
#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	strncpy(rscomp.type, "scomp", sizeof(rscomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rscomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	return 0;
}
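
/* Free the per-CPU scratch areas and the per-CPU pointer array itself. */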
static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
	int i;

	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

static void * __percpu *crypto_scomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			goto error;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;

error:
	crypto_scomp_free_scratches(scratches);
	return NULL;
}
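
/*
 * The global scratch buffers are shared by all registered scomp
 * algorithms and reference-counted; both helpers below expect the
 * caller to hold scomp_lock.
 */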
static void crypto_scomp_free_all_scratches(void)
{
	if (!--scomp_scratch_users) {
		crypto_scomp_free_scratches(scomp_src_scratches);
		crypto_scomp_free_scratches(scomp_dst_scratches);
		scomp_src_scratches = NULL;
		scomp_dst_scratches = NULL;
	}
}

static int crypto_scomp_alloc_all_scratches(void)
{
	if (!scomp_scratch_users++) {
		scomp_src_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_src_scratches)
			return -ENOMEM;
		scomp_dst_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_dst_scratches)
			return -ENOMEM;
	}
	return 0;
}
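
/*
 * Helpers to build and tear down a scatterlist of freshly allocated
 * pages, used when the caller did not supply a destination buffer.
 */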
static void crypto_scomp_sg_free(struct scatterlist *sgl)
{
	int i, n;
	struct page *page;

	if (!sgl)
		return;

	n = sg_nents(sgl);
	for_each_sg(sgl, sgl, n, i) {
		page = sg_page(sgl);
		if (page)
			__free_page(page);
	}

	kfree(sgl);
}

static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
{
	struct scatterlist *sgl;
	struct page *page;
	int i, n;

	n = ((size - 1) >> PAGE_SHIFT) + 1;

	sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, n);

	for (i = 0; i < n; i++) {
		page = alloc_page(gfp);
		if (!page)
			goto err;
		sg_set_page(sgl + i, page, PAGE_SIZE, 0);
	}

	return sgl;

err:
	sg_mark_end(sgl + i);
	crypto_scomp_sg_free(sgl);
	return NULL;
}
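
/*
 * Core bridge between the asynchronous acomp API and a synchronous
 * scomp implementation: linearise the source scatterlist into this
 * CPU's scratch buffer, run the algorithm, then copy the result into
 * the destination scatterlist (allocating one if the caller passed
 * none).
 */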
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	const int cpu = get_cpu();
	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	if (req->dst && !req->dlen) {
		ret = -EINVAL;
		goto out;
	}

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
					    scratch_dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
					      scratch_dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = crypto_scomp_sg_alloc(req->dlen,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				   GFP_KERNEL : GFP_ATOMIC);
			if (!req->dst) {
				/* Don't report success with no output. */
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	put_cpu();
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}
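
/*
 * Glue that lets the acomp API drive a synchronous scomp algorithm:
 * the acomp tfm context stores a crypto_scomp handle, created in
 * crypto_init_scomp_ops_async() and released in the exit hook below.
 */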
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = crypto_scomp_sg_free;
	crt->reqsize = sizeof(void *);

	return 0;
}
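
/*
 * Per-request context management for scomp-backed acomp requests: the
 * request's private area holds the algorithm-specific scomp context.
 */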
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}
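
/* Type glue that plugs scomp algorithms into the crypto core. */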
static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};
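
/*
 * Illustrative sketch (not part of this file): a driver exposing a
 * synchronous compressor fills in a struct scomp_alg and registers it.
 * The "my_*" names below are hypothetical placeholders.
 *
 *	static struct scomp_alg my_scomp = {
 *		.alloc_ctx  = my_alloc_ctx,
 *		.free_ctx   = my_free_ctx,
 *		.compress   = my_compress,
 *		.decompress = my_decompress,
 *		.base       = {
 *			.cra_name        = "my-alg",
 *			.cra_driver_name = "my-alg-generic",
 *			.cra_module      = THIS_MODULE,
 *		},
 *	};
 *
 *	ret = crypto_register_scomp(&my_scomp);
 */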
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int ret = -ENOMEM;

	mutex_lock(&scomp_lock);
	if (crypto_scomp_alloc_all_scratches())
		goto error;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	ret = crypto_register_alg(base);
	if (ret)
		goto error;

	mutex_unlock(&scomp_lock);
	return ret;

error:
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

int crypto_unregister_scomp(struct scomp_alg *alg)
{
	int ret;

	mutex_lock(&scomp_lock);
	ret = crypto_unregister_alg(&alg->base);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");