crypto.c

/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
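/*
 * Usage sketch (hypothetical caller, not part of this file): a filesystem's
 * bio completion handler typically packages post-read decryption into a
 * work_struct and defers it to the fscrypt workqueue, since bio end_io runs
 * in a context where sleeping crypto work is not allowed. The "my_read_ctx"
 * type and the helpers below are assumptions for illustration only.
 */
#if 0	/* example only, not compiled */
static void my_fs_decrypt_work(struct work_struct *work)
{
	struct my_read_ctx *rctx = container_of(work, struct my_read_ctx,
						work);

	/* Decrypt each page in rctx->bio, then unlock the pages. */
	my_fs_decrypt_bio_pages(rctx);
}

static void my_fs_read_end_io(struct bio *bio)
{
	struct my_read_ctx *rctx = bio->bi_private;

	INIT_WORK(&rctx->work, my_fs_decrypt_work);
	fscrypt_enqueue_decrypt_work(&rctx->work);
}
#endif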
/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode:     The inode for which we are doing the crypto
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
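/*
 * Pairing sketch (hypothetical, for illustration only): every successfully
 * acquired context must eventually be handed back to fscrypt_release_ctx(),
 * which either recycles it onto fscrypt_free_ctxs or frees it, depending on
 * FS_CTX_REQUIRES_FREE_ENCRYPT_FL.
 */
#if 0	/* example only, not compiled */
struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode, GFP_NOFS);

if (IS_ERR(ctx))
	return PTR_ERR(ctx);
/* ... use ctx, e.g. to carry a bounce page across a write ... */
fscrypt_release_ctx(ctx);
#endif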
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
			 const struct fscrypt_info *ci)
{
	memset(iv, 0, ci->ci_mode->ivsize);
	iv->lblk_num = cpu_to_le64(lblk_num);

	if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY)
		memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);

	if (ci->ci_essiv_tfm != NULL)
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
}
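/*
 * For reference (a sketch of the declaration in fscrypt_private.h; field
 * names assumed per that header): the IV union overlays a little-endian
 * logical block number and, for DIRECT_KEY policies, the per-file nonce,
 * over a raw byte view that the optional ESSIV transform encrypts in place:
 *
 *	union fscrypt_iv {
 *		struct {
 *			__le64 lblk_num;
 *			u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
 *		};
 *		u8 raw[FSCRYPT_MAX_IV_SIZE];
 *	};
 */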
/* Encrypt or decrypt a single filesystem block of file contents. */
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	fscrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode->i_sb,
			    "%scryption failed for inode %lu, block %llu: %d",
			    (rw == FS_DECRYPT ? "de" : "en"),
			    inode->i_ino, lblk_num, res);
		return res;
	}
	return 0;
}

/* Allocate a bounce page from the mempool and attach it to @ctx. */
struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
				       gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:     The inode for which the encryption should take place
 * @page:      The page to encrypt. Must be locked for bounce-page
 *             encryption.
 * @len:       Length of data to encrypt in @page and encrypted
 *             data in returned page.
 * @offs:      Offset of data within @page and returned
 *             page holding encrypted data.
 * @lblk_num:  Logical block number. This must be unique for multiple
 *             calls with same inode, except when overwriting
 *             previously written data.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Encrypts @page using the ctx encryption context. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input page is returned with its content
 * encrypted.
 *
 * Return: A page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				  struct page *page,
				  unsigned int len,
				  unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	BUG_ON(!PageLocked(page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
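/*
 * Usage sketch (hypothetical writeback path, for illustration only): in the
 * default bounce-page mode, the returned ciphertext page is what gets
 * submitted for I/O, and fscrypt_restore_control_page() must be called on it
 * once the write has completed. "my_fs_submit_page" is an assumption.
 */
#if 0	/* example only, not compiled */
static int my_fs_write_block(struct inode *inode, struct page *page,
			     u64 lblk_num)
{
	struct page *ciphertext_page;
	int err;

	ciphertext_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
					       lblk_num, GFP_NOFS);
	if (IS_ERR(ciphertext_page))
		return PTR_ERR(ciphertext_page);

	err = my_fs_submit_page(ciphertext_page);	/* write ciphertext */

	fscrypt_restore_control_page(ciphertext_page);
	return err;
}
#endif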
/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:    The corresponding inode for the page to decrypt.
 * @page:     The page to decrypt. Must be locked in case
 *            it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:      Number of bytes in @page to be decrypted.
 * @offs:     Start of data in @page.
 * @lblk_num: Logical block number.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			 unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
		BUG_ON(!PageLocked(page));

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
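/*
 * Usage sketch (hypothetical read-completion path, for illustration only):
 * decryption happens in place on the page that was just read, one
 * filesystem block at a time.
 */
#if 0	/* example only, not compiled */
static int my_fs_post_read(struct inode *inode, struct page *page,
			   u64 lblk_num)
{
	int err;

	err = fscrypt_decrypt_page(inode, page, PAGE_SIZE, 0, lblk_num);
	if (err)
		return err;
	SetPageUptodate(page);
	return 0;
}
#endif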
/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!IS_ENCRYPTED(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
			(!cached_with_key && dir_has_key) ||
			(cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
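/*
 * Attachment sketch (hedged; the exact hook point varies by kernel version):
 * these ops take effect once they are set on a dentry created during lookup
 * in an encrypted directory, e.g.:
 */
#if 0	/* example only, not compiled */
d_set_d_op(dentry, &fscrypt_d_ops);	/* in the lookup path */
#endif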
void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags: fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}

void fscrypt_msg(struct super_block *sb, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sfscrypt (%s): %pV\n", level, sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}
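/*
 * For reference (a sketch; the real macros are assumed to live in
 * fscrypt_private.h): the fscrypt_err() helper used earlier in this file is
 * presumably a thin wrapper that passes a KERN_* level string to
 * fscrypt_msg(), along the lines of:
 *
 *	#define fscrypt_warn(sb, fmt, ...) \
 *		fscrypt_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__)
 *	#define fscrypt_err(sb, fmt, ...) \
 *		fscrypt_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__)
 */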
/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU. This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");