/*
 * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * PFK Key Cache
 *
 * Key Cache used internally in PFK.
 * The purpose of the cache is to save access time to QSEE when loading keys.
 * Currently the cache is the same size as the total number of keys that can
 * be loaded to ICE. Since this number is relatively small, the algorithms for
 * cache eviction are simple, linear and based on last usage timestamp, i.e.
 * the node that will be evicted is the one with the oldest timestamp.
 * Empty entries always have the oldest timestamp.
 */
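
/*
 * Illustrative example of the eviction order described above (informational
 * only, not part of the driver logic): given entries
 *   { FREE, t=0 }, { INACTIVE, t=100 }, { INACTIVE, t=250 }, { ACTIVE, t=50 }
 * a FREE entry is always picked first; if no FREE entry exists, the INACTIVE
 * entry with the oldest timestamp (t=100 here) is evicted. ACTIVE and
 * INACTIVE_INVALIDATING entries are never evicted
 * (see kc_find_oldest_entry_non_locked()).
 */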
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <crypto/ice.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/sched.h>

#include "pfk_kc.h"
#include "pfk_ice.h"

/** the first available index in ice engine */
#define PFK_KC_STARTING_INDEX 2

/** currently the only supported key and salt sizes */
#define PFK_KC_KEY_SIZE 32
#define PFK_KC_SALT_SIZE 32

/** Table size */
/* TODO replace by some constant from ice.h */
#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))

/** The maximum key and salt size */
#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
#define PFK_UFS "ufs"

static DEFINE_SPINLOCK(kc_lock);
static unsigned long flags;
static bool kc_ready;
static char *s_type = "sdcc";
/**
 * enum pfk_kc_entry_state - state of the entry inside kc table
 *
 * @FREE:                  entry is free
 * @ACTIVE_ICE_PRELOAD:    entry is actively used by ICE engine
 *                         and cannot be used by others. SCM call
 *                         to load key to ICE is pending to be performed
 * @ACTIVE_ICE_LOADED:     entry is actively used by ICE engine and
 *                         cannot be used by others. SCM call to load the
 *                         key to ICE was successfully executed and key is
 *                         now loaded
 * @INACTIVE_INVALIDATING: entry is being invalidated during file close
 *                         and cannot be used by others until invalidation
 *                         is complete
 * @INACTIVE:              entry's key is already loaded, but is not
 *                         currently being used. It can be re-used for
 *                         optimization and to avoid SCM call cost or
 *                         it can be taken by another key if there are
 *                         no FREE entries
 * @SCM_ERROR:             error occurred while scm call was performed to
 *                         load the key to ICE
 */
enum pfk_kc_entry_state {
        FREE,
        ACTIVE_ICE_PRELOAD,
        ACTIVE_ICE_LOADED,
        INACTIVE_INVALIDATING,
        INACTIVE,
        SCM_ERROR
};
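
/*
 * Summary of the state transitions as implemented below (informational
 * comment only; the functions further down are the authoritative logic):
 *
 *   FREE/INACTIVE --(kc_update_entry)--------------> ACTIVE_ICE_PRELOAD
 *   ACTIVE_ICE_PRELOAD --(SCM call succeeds)-------> ACTIVE_ICE_LOADED
 *   ACTIVE_ICE_PRELOAD --(SCM call fails)----------> SCM_ERROR
 *   INACTIVE --(cache hit in pfk_kc_load_key_start)-> ACTIVE_ICE_LOADED
 *   ACTIVE_ICE_LOADED --(pfk_kc_load_key_end, ref cnt reaches 0)--> INACTIVE
 *   INACTIVE --(pfk_kc_remove_key(), pfk_kc_remove_key_with_salt() or
 *               pfk_kc_clear())--> INACTIVE_INVALIDATING --> FREE
 *   SCM_ERROR --(next pfk_kc_load_key_start for the key)--> FREE
 */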
struct kc_entry {
        unsigned char key[PFK_MAX_KEY_SIZE];
        size_t key_size;
        unsigned char salt[PFK_MAX_SALT_SIZE];
        size_t salt_size;
        u64 time_stamp;
        u32 key_index;
        struct task_struct *thread_pending;
        enum pfk_kc_entry_state state;
        /* ref count for the number of requests in the HW queue for this key */
        int loaded_ref_cnt;
        int scm_error;
};

static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
/**
 * kc_is_ready() - driver is initialized and ready.
 *
 * Return: true if the key cache is ready.
 */
static inline bool kc_is_ready(void)
{
        return kc_ready;
}

static inline void kc_spin_lock(void)
{
        spin_lock_irqsave(&kc_lock, flags);
}

static inline void kc_spin_unlock(void)
{
        spin_unlock_irqrestore(&kc_lock, flags);
}

/**
 * pfk_kc_get_storage_type() - return the hardware storage type.
 *
 * Return: storage type queried during bootup.
 */
const char *pfk_kc_get_storage_type(void)
{
        return s_type;
}
/**
 * kc_entry_is_available() - checks whether the entry is available
 *
 * Return true if it is available, false otherwise or if the entry is invalid
 * Should be invoked under spinlock
 */
static bool kc_entry_is_available(const struct kc_entry *entry)
{
        if (!entry)
                return false;

        return (entry->state == FREE || entry->state == INACTIVE);
}
/**
 * kc_entry_wait_till_available() - waits till entry is available
 *
 * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
 * by signal
 *
 * Should be invoked under spinlock
 */
static int kc_entry_wait_till_available(struct kc_entry *entry)
{
        int res = 0;

        while (!kc_entry_is_available(entry)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current)) {
                        res = -ERESTARTSYS;
                        break;
                }
                /* assuming only one thread can try to invalidate
                 * the same entry
                 */
                entry->thread_pending = current;
                kc_spin_unlock();
                schedule();
                kc_spin_lock();
        }
        set_current_state(TASK_RUNNING);

        return res;
}
/**
 * kc_entry_start_invalidating() - moves entry to state
 *                                 INACTIVE_INVALIDATING.
 *                                 If entry is in use, waits till
 *                                 it gets available
 * @entry: pointer to entry
 *
 * Return 0 in case of success, otherwise error
 * Should be invoked under spinlock
 */
static int kc_entry_start_invalidating(struct kc_entry *entry)
{
        int res;

        res = kc_entry_wait_till_available(entry);
        if (res)
                return res;

        entry->state = INACTIVE_INVALIDATING;

        return 0;
}
/**
 * kc_entry_finish_invalidating() - moves entry to state FREE so that
 *                                  it can be reused
 *
 * @entry: pointer to entry
 *
 * Should be invoked under spinlock
 */
static void kc_entry_finish_invalidating(struct kc_entry *entry)
{
        if (!entry)
                return;

        if (entry->state != INACTIVE_INVALIDATING)
                return;

        entry->state = FREE;
}
/**
 * kc_min_entry() - compare two entries to find one with minimal time
 * @a: ptr to the first entry. If NULL the other entry will be returned
 * @b: pointer to the second entry
 *
 * Return the entry whose timestamp is minimal, or b if a is NULL
 */
static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
                struct kc_entry *b)
{
        if (!a)
                return b;

        if (time_before64(b->time_stamp, a->time_stamp))
                return b;

        return a;
}
/**
 * kc_entry_at_index() - return entry at specific index
 * @index: index of entry to be accessed
 *
 * Return entry
 * Should be invoked under spinlock
 */
static struct kc_entry *kc_entry_at_index(int index)
{
        return &(kc_table[index]);
}
/**
 * kc_find_key_at_index() - find kc entry starting at specific index
 * @key: key to look for
 * @key_size: the key size
 * @salt: salt to look for
 * @salt_size: the salt size
 * @starting_index: index to start search with, if entry found, updated with
 *                  index of that entry
 *
 * Return entry or NULL in case of error
 * Should be invoked under spinlock
 */
static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
        size_t key_size, const unsigned char *salt, size_t salt_size,
        int *starting_index)
{
        struct kc_entry *entry = NULL;
        int i = 0;

        for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
                entry = kc_entry_at_index(i);

                if (salt != NULL) {
                        if (entry->salt_size != salt_size)
                                continue;

                        if (memcmp(entry->salt, salt, salt_size) != 0)
                                continue;
                }

                if (entry->key_size != key_size)
                        continue;

                if (memcmp(entry->key, key, key_size) == 0) {
                        *starting_index = i;
                        return entry;
                }
        }

        return NULL;
}
/**
 * kc_find_key() - find kc entry
 * @key: key to look for
 * @key_size: the key size
 * @salt: salt to look for
 * @salt_size: the salt size
 *
 * Return entry or NULL in case of error
 * Should be invoked under spinlock
 */
static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
                const unsigned char *salt, size_t salt_size)
{
        int index = 0;

        return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
}
/**
 * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
 *                                     that is not locked
 *
 * Returns entry with minimal timestamp. Empty entries have timestamp
 * of 0, therefore they are returned first.
 * If all the entries are locked, will return NULL
 * Should be invoked under spin lock
 */
static struct kc_entry *kc_find_oldest_entry_non_locked(void)
{
        struct kc_entry *curr_min_entry = NULL;
        struct kc_entry *entry = NULL;
        int i = 0;

        for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
                entry = kc_entry_at_index(i);

                if (entry->state == FREE)
                        return entry;

                if (entry->state == INACTIVE)
                        curr_min_entry = kc_min_entry(curr_min_entry, entry);
        }

        return curr_min_entry;
}
/**
 * kc_update_timestamp() - updates timestamp of entry to current
 *
 * @entry: entry to update
 */
static void kc_update_timestamp(struct kc_entry *entry)
{
        if (!entry)
                return;

        entry->time_stamp = get_jiffies_64();
}
/**
 * kc_clear_entry() - clear the key from entry and mark entry not in use
 *
 * @entry: pointer to entry
 *
 * Should be invoked under spinlock
 */
static void kc_clear_entry(struct kc_entry *entry)
{
        if (!entry)
                return;

        memset(entry->key, 0, entry->key_size);
        memset(entry->salt, 0, entry->salt_size);

        entry->key_size = 0;
        entry->salt_size = 0;

        entry->time_stamp = 0;
        entry->scm_error = 0;

        entry->state = FREE;

        entry->loaded_ref_cnt = 0;
        entry->thread_pending = NULL;
}
/**
 * kc_update_entry() - replaces the key in given entry and
 *                     loads the new key to ICE
 *
 * @entry: entry to replace key in
 * @key: key
 * @key_size: key_size
 * @salt: salt
 * @salt_size: salt_size
 * @data_unit: dun size
 *
 * The previous key is securely released and wiped, the new one is loaded
 * to ICE.
 * Should be invoked under spinlock
 *
 * Return 0 in case of success, error otherwise
 */
static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
        size_t key_size, const unsigned char *salt, size_t salt_size,
        unsigned int data_unit)
{
        int ret;

        kc_clear_entry(entry);

        memcpy(entry->key, key, key_size);
        entry->key_size = key_size;

        memcpy(entry->salt, salt, salt_size);
        entry->salt_size = salt_size;

        /* Mark entry as no longer free before releasing the lock */
        entry->state = ACTIVE_ICE_PRELOAD;
        kc_spin_unlock();

        ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
                        entry->salt, s_type, data_unit);

        kc_spin_lock();
        return ret;
}
/**
 * pfk_kc_init() - init function
 *
 * Return 0 in case of success, error otherwise
 */
int pfk_kc_init(void)
{
        int i = 0;
        struct kc_entry *entry = NULL;

        kc_spin_lock();
        for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
                entry = kc_entry_at_index(i);
                entry->key_index = PFK_KC_STARTING_INDEX + i;
        }
        kc_ready = true;
        kc_spin_unlock();

        return 0;
}
/**
 * pfk_kc_deinit() - deinit function
 *
 * Return 0 in case of success, error otherwise
 */
int pfk_kc_deinit(void)
{
        int res = pfk_kc_clear();

        kc_ready = false;

        return res;
}
/**
 * pfk_kc_load_key_start() - retrieve the key from cache or add it if
 *                           it's not there and return the ICE hw key
 *                           index in @key_index.
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 * @key_index: the pointer to key_index where the output will be stored
 * @async: whether scm calls are allowed in the caller context
 * @data_unit: dun size
 *
 * If the key is present in cache, the key_index will be retrieved from cache.
 * If it is not present, the oldest entry from the kc table will be evicted,
 * the key will be loaded to ICE via QSEE to the index that is the evicted
 * entry number and stored in cache.
 * The entry that is going to be used is marked as being used; it will be
 * marked as not being used when ICE finishes using it and pfk_kc_load_key_end
 * is invoked.
 * As QSEE calls can only be done from a non-atomic context, when @async is
 * set to 'false' it is ok to make the calls in the current context.
 * Otherwise, when @async is set, the caller should retry the call again from
 * a different context, and -EAGAIN error will be returned.
 *
 * Return 0 in case of success, error otherwise
 */
int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
                const unsigned char *salt, size_t salt_size, u32 *key_index,
                bool async, unsigned int data_unit)
{
        int ret = 0;
        struct kc_entry *entry = NULL;
        bool entry_exists = false;

        if (!kc_is_ready())
                return -ENODEV;

        if (!key || !salt || !key_index) {
                pr_err("%s key/salt/key_index NULL\n", __func__);
                return -EINVAL;
        }

        if (key_size != PFK_KC_KEY_SIZE) {
                pr_err("unsupported key size %zu\n", key_size);
                return -EINVAL;
        }

        if (salt_size != PFK_KC_SALT_SIZE) {
                pr_err("unsupported salt size %zu\n", salt_size);
                return -EINVAL;
        }

        kc_spin_lock();

        entry = kc_find_key(key, key_size, salt, salt_size);
        if (!entry) {
                if (async) {
                        pr_debug("%s task will populate entry\n", __func__);
                        kc_spin_unlock();
                        return -EAGAIN;
                }

                entry = kc_find_oldest_entry_non_locked();
                if (!entry) {
                        /* could not find a single non locked entry,
                         * return EBUSY to upper layers so that the
                         * request will be rescheduled
                         */
                        kc_spin_unlock();
                        return -EBUSY;
                }
        } else {
                entry_exists = true;
        }

        pr_debug("entry with index %d is in state %d\n",
                entry->key_index, entry->state);

        switch (entry->state) {
        case (INACTIVE):
                if (entry_exists) {
                        kc_update_timestamp(entry);
                        entry->state = ACTIVE_ICE_LOADED;

                        if (!strcmp(s_type, (char *)PFK_UFS)) {
                                if (async)
                                        entry->loaded_ref_cnt++;
                        } else {
                                entry->loaded_ref_cnt++;
                        }
                        break;
                }
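                /* fall through: a reclaimed INACTIVE entry (holding a
                 * different key) is reprogrammed the same way as a FREE one
                 */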
        case (FREE):
                ret = kc_update_entry(entry, key, key_size, salt, salt_size,
                                        data_unit);
                if (ret) {
                        entry->state = SCM_ERROR;
                        entry->scm_error = ret;
                        pr_err("%s: key load error (%d)\n", __func__, ret);
                } else {
                        kc_update_timestamp(entry);
                        entry->state = ACTIVE_ICE_LOADED;

                        /*
                         * In case of UFS only increase ref cnt for async calls,
                         * sync calls from within work thread do not pass
                         * requests further to HW
                         */
                        if (!strcmp(s_type, (char *)PFK_UFS)) {
                                if (async)
                                        entry->loaded_ref_cnt++;
                        } else {
                                entry->loaded_ref_cnt++;
                        }
                }
                break;
        case (ACTIVE_ICE_PRELOAD):
        case (INACTIVE_INVALIDATING):
                ret = -EAGAIN;
                break;
        case (ACTIVE_ICE_LOADED):
                kc_update_timestamp(entry);

                if (!strcmp(s_type, (char *)PFK_UFS)) {
                        if (async)
                                entry->loaded_ref_cnt++;
                } else {
                        entry->loaded_ref_cnt++;
                }
                break;
        case (SCM_ERROR):
                ret = entry->scm_error;
                kc_clear_entry(entry);
                entry->state = FREE;
                break;
        default:
                pr_err("invalid state %d for entry with key index %d\n",
                        entry->state, entry->key_index);
                ret = -EINVAL;
        }

        *key_index = entry->key_index;
        kc_spin_unlock();

        return ret;
}
/**
 * pfk_kc_load_key_end() - finish the process of key loading that was started
 *                         by pfk_kc_load_key_start, by marking the entry as
 *                         not being in use
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 */
void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
                const unsigned char *salt, size_t salt_size)
{
        struct kc_entry *entry = NULL;
        struct task_struct *tmp_pending = NULL;
        int ref_cnt = 0;

        if (!kc_is_ready())
                return;

        if (!key || !salt)
                return;

        if (key_size != PFK_KC_KEY_SIZE)
                return;

        if (salt_size != PFK_KC_SALT_SIZE)
                return;

        kc_spin_lock();

        entry = kc_find_key(key, key_size, salt, salt_size);
        if (!entry) {
                kc_spin_unlock();
                pr_err("internal error, there should be an entry to unlock\n");
                return;
        }
        ref_cnt = --entry->loaded_ref_cnt;
        if (ref_cnt < 0)
                pr_err("internal error, ref count should never be negative\n");

        if (!ref_cnt) {
                entry->state = INACTIVE;
                /*
                 * wake-up invalidation if it's waiting
                 * for the entry to be released
                 */
                if (entry->thread_pending) {
                        tmp_pending = entry->thread_pending;
                        entry->thread_pending = NULL;

                        kc_spin_unlock();
                        wake_up_process(tmp_pending);
                        return;
                }
        }
        kc_spin_unlock();
}
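
/*
 * Illustrative pairing of pfk_kc_load_key_start()/pfk_kc_load_key_end()
 * (a minimal sketch, not taken from a real caller; the variable names are
 * made up for the example):
 *
 *      u32 hw_key_index;
 *      int err;
 *
 *      err = pfk_kc_load_key_start(key, PFK_KC_KEY_SIZE, salt,
 *                      PFK_KC_SALT_SIZE, &hw_key_index, false, data_unit);
 *      if (err)
 *              return err;
 *
 *      (submit the request to ICE using hw_key_index)
 *
 *      pfk_kc_load_key_end(key, PFK_KC_KEY_SIZE, salt, PFK_KC_SALT_SIZE);
 *
 * -EAGAIN and -EBUSY from pfk_kc_load_key_start() indicate that the caller
 * should retry the request, possibly from a different (non-atomic) context.
 */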
/**
 * pfk_kc_remove_key_with_salt() - remove the key and salt from cache and
 *                                 from ICE engine
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 *
 * Return 0 in case of success, error otherwise (also in case of a
 * non-existing key)
 */
int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
                const unsigned char *salt, size_t salt_size)
{
        struct kc_entry *entry = NULL;
        int res = 0;

        if (!kc_is_ready())
                return -ENODEV;

        if (!key)
                return -EINVAL;

        if (!salt)
                return -EINVAL;

        if (key_size != PFK_KC_KEY_SIZE)
                return -EINVAL;

        if (salt_size != PFK_KC_SALT_SIZE)
                return -EINVAL;

        kc_spin_lock();

        entry = kc_find_key(key, key_size, salt, salt_size);
        if (!entry) {
                pr_debug("%s: key does not exist\n", __func__);
                kc_spin_unlock();
                return -EINVAL;
        }

        res = kc_entry_start_invalidating(entry);
        if (res != 0) {
                kc_spin_unlock();
                return res;
        }
        kc_clear_entry(entry);

        kc_spin_unlock();

        qti_pfk_ice_invalidate_key(entry->key_index, s_type);

        kc_spin_lock();
        kc_entry_finish_invalidating(entry);
        kc_spin_unlock();

        return 0;
}
/**
 * pfk_kc_remove_key() - remove the key from cache and from ICE engine
 *                       when no salt is available. Will only search by the
 *                       key part; if there are several matching entries,
 *                       all of them will be removed.
 *
 * @key: pointer to the key
 * @key_size: the size of the key
 *
 * Return 0 in case of success, error otherwise (also for a non-existing key)
 */
int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
{
        struct kc_entry *entry = NULL;
        int index = 0;
        int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
        int temp_indexes_size = 0;
        int i = 0;
        int res = 0;

        if (!kc_is_ready())
                return -ENODEV;

        if (!key)
                return -EINVAL;

        if (key_size != PFK_KC_KEY_SIZE)
                return -EINVAL;

        memset(temp_indexes, -1, sizeof(temp_indexes));

        kc_spin_lock();

        entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
        if (!entry) {
                pr_err("%s: key does not exist\n", __func__);
                kc_spin_unlock();
                return -EINVAL;
        }

        res = kc_entry_start_invalidating(entry);
        if (res != 0) {
                kc_spin_unlock();
                return res;
        }

        temp_indexes[temp_indexes_size++] = index;
        kc_clear_entry(entry);

        /* let's clean additional entries with the same key if there are any */
        do {
                index++;
                entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
                if (!entry)
                        break;

                res = kc_entry_start_invalidating(entry);
                if (res != 0) {
                        kc_spin_unlock();
                        goto out;
                }

                temp_indexes[temp_indexes_size++] = index;
                kc_clear_entry(entry);

        } while (true);

        kc_spin_unlock();

        for (i = temp_indexes_size - 1; i >= 0; i--)
                qti_pfk_ice_invalidate_key(
                        kc_entry_at_index(temp_indexes[i])->key_index,
                        s_type);

        /* fall through */
        res = 0;

out:
        kc_spin_lock();
        for (i = temp_indexes_size - 1; i >= 0; i--)
                kc_entry_finish_invalidating(
                        kc_entry_at_index(temp_indexes[i]));
        kc_spin_unlock();

        return res;
}
/**
 * pfk_kc_clear() - clear the table and remove all keys from ICE
 *
 * Return 0 on success, error otherwise
 */
int pfk_kc_clear(void)
{
        struct kc_entry *entry = NULL;
        int i = 0;
        int res = 0;

        if (!kc_is_ready())
                return -ENODEV;

        kc_spin_lock();
        for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
                entry = kc_entry_at_index(i);
                res = kc_entry_start_invalidating(entry);
                if (res != 0) {
                        kc_spin_unlock();
                        goto out;
                }
                kc_clear_entry(entry);
        }
        kc_spin_unlock();

        for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
                qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
                                        s_type);

        /* fall through */
        res = 0;

out:
        kc_spin_lock();
        for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
                kc_entry_finish_invalidating(kc_entry_at_index(i));
        kc_spin_unlock();

        return res;
}
/**
 * pfk_kc_clear_on_reset() - clear the table on reset
 *
 * The assumption is that at this point we don't have any pending
 * transactions. Also, there is no need to clear keys from ICE.
 */
void pfk_kc_clear_on_reset(void)
{
        struct kc_entry *entry = NULL;
        int i = 0;

        if (!kc_is_ready())
                return;

        kc_spin_lock();
        for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
                entry = kc_entry_at_index(i);
                kc_clear_entry(entry);
        }
        kc_spin_unlock();
}
static int pfk_kc_find_storage_type(char **device)
{
        char boot[20] = {'\0'};
        char *match = (char *)strnstr(saved_command_line,
                                "androidboot.bootdevice=",
                                strlen(saved_command_line));

        if (match) {
                memcpy(boot, (match + strlen("androidboot.bootdevice=")),
                        sizeof(boot) - 1);
                if (strnstr(boot, PFK_UFS, strlen(boot)))
                        *device = PFK_UFS;

                return 0;
        }
        return -EINVAL;
}
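
/*
 * Example (hypothetical kernel command line, for illustration only): with
 * "... androidboot.bootdevice=1d84000.ufshc ...", the copied token contains
 * "ufs", so s_type is switched from the default "sdcc" to "ufs"; a boot
 * device name without "ufs" leaves s_type at its default of "sdcc".
 */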
static int __init pfk_kc_pre_init(void)
{
        return pfk_kc_find_storage_type(&s_type);
}

static void __exit pfk_kc_exit(void)
{
        s_type = NULL;
}

module_init(pfk_kc_pre_init);
module_exit(pfk_kc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Per-File-Key-KC driver");