ice.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768
  1. /* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/module.h>
  13. #include <linux/init.h>
  14. #include <linux/io.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/delay.h>
  17. #include <linux/of.h>
  18. #include <linux/device-mapper.h>
  19. #include <linux/clk.h>
  20. #include <linux/cdev.h>
  21. #include <linux/regulator/consumer.h>
  22. #include <linux/msm-bus.h>
  23. #include <linux/pfk.h>
  24. #include <crypto/ice.h>
  25. #include <soc/qcom/scm.h>
  26. #include <soc/qcom/qseecomi.h>
  27. #include "iceregs.h"
  28. #define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
  29. ((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
  30. #define TZ_OWNER_QSEE_OS 50
  31. #define TZ_SVC_KEYSTORE 5 /* Keystore management */
  32. #define TZ_OS_KS_RESTORE_KEY_ID \
  33. TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x06)
  34. #define TZ_SYSCALL_CREATE_PARAM_ID_0 0
  35. #define TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID \
  36. TZ_SYSCALL_CREATE_PARAM_ID_0
  37. #define TZ_OS_KS_RESTORE_KEY_CONFIG_ID \
  38. TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x06)
  39. #define TZ_OS_KS_RESTORE_KEY_CONFIG_ID_PARAM_ID \
  40. TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
  41. #define ICE_REV(x, y) (((x) & ICE_CORE_##y##_REV_MASK) >> ICE_CORE_##y##_REV)
  42. #define QCOM_UFS_ICE_DEV "iceufs"
  43. #define QCOM_SDCC_ICE_DEV "icesdcc"
  44. #define QCOM_ICE_TYPE_NAME_LEN 8
  45. #define QCOM_ICE_MAX_BIST_CHECK_COUNT 100
  46. #define QCOM_ICE_UFS 10
  47. #define QCOM_ICE_SDCC 20
/* Per-clock bookkeeping for one ICE instance, built from DT in
 * qcom_ice_parse_clock_info() and consumed by the clock init/enable helpers.
 */
struct ice_clk_info {
	struct list_head list;	/* node in ice_device::clk_list_head */
	struct clk *clk;	/* handle from devm_clk_get() */
	const char *name;	/* "clock-names" DT entry (kstrdup'd copy) */
	u32 max_freq;		/* rate from "qcom,op-freq-hz"; 0 = no rate to set */
	u32 min_freq;
	u32 curr_freq;		/* last rate programmed via clk_set_rate() */
	bool enabled;
};
/* Bus-bandwidth voting state; only used when CONFIG_MSM_BUS_SCALING is set. */
struct qcom_ice_bus_vote {
	uint32_t client_handle;	/* from msm_bus_scale_register_client() */
	uint32_t curr_vote;	/* last vote index sent to the bus driver */
	int min_bw_vote;	/* cached index of the "MIN" bus vector */
	int max_bw_vote;	/* cached index of the "MAX" bus vector */
	int saved_vote;
	bool is_max_bw_needed;	/* when set, "MAX" is used regardless of mode */
	struct device_attribute max_bus_bw;	/* sysfs attribute (registration not visible here) */
};
  66. static LIST_HEAD(ice_devices);
  67. /*
  68. * ICE HW device structure.
  69. */
/* Per-instance state for one ICE HW block (UFS or SDCC flavoured). */
struct ice_device {
	struct list_head list;			/* node in the global ice_devices list */
	struct device *pdev;			/* underlying &platform_device->dev */
	struct cdev cdev;			/* char device exposed to userspace */
	dev_t device_no;
	struct class *driver_class;
	void __iomem *mmio;			/* register base from devm_ioremap_resource() */
	struct resource *res;			/* IORESOURCE_MEM backing mmio */
	int irq;
	bool is_ice_enabled;
	bool is_ice_disable_fuse_blown;		/* when set, all crypto setup is refused */
	ice_error_cb error_cb;			/* host-controller error callback for ISR */
	void *host_controller_data; /* UFS/EMMC/other? */
	struct list_head clk_list_head;		/* list of struct ice_clk_info */
	u32 ice_hw_version;			/* raw QCOM_ICE_REGS_VERSION value */
	bool is_ice_clk_available;		/* DT "qcom,enable-ice-clk" */
	char ice_instance_type[QCOM_ICE_TYPE_NAME_LEN];	/* "ufs" or "sdcc" */
	struct regulator *reg;			/* "vdd-hba" supply, lazily acquired */
	bool is_regulator_available;
	struct qcom_ice_bus_vote bus_vote;
	ktime_t ice_reset_start_time;
	ktime_t ice_reset_complete_time;
};
  93. static int qti_ice_setting_config(struct request *req,
  94. struct platform_device *pdev,
  95. struct ice_crypto_setting *crypto_data,
  96. struct ice_data_setting *setting)
  97. {
  98. struct ice_device *ice_dev = NULL;
  99. ice_dev = platform_get_drvdata(pdev);
  100. if (!ice_dev) {
  101. pr_debug("%s no ICE device\n", __func__);
  102. /* make the caller finish peacfully */
  103. return 0;
  104. }
  105. if (ice_dev->is_ice_disable_fuse_blown) {
  106. pr_err("%s ICE disabled fuse is blown\n", __func__);
  107. return -EPERM;
  108. }
  109. if (!setting)
  110. return -EINVAL;
  111. if ((short)(crypto_data->key_index) >= 0) {
  112. memcpy(&setting->crypto_data, crypto_data,
  113. sizeof(setting->crypto_data));
  114. if (rq_data_dir(req) == WRITE)
  115. setting->encr_bypass = false;
  116. else if (rq_data_dir(req) == READ)
  117. setting->decr_bypass = false;
  118. else {
  119. /* Should I say BUG_ON */
  120. setting->encr_bypass = true;
  121. setting->decr_bypass = true;
  122. }
  123. }
  124. return 0;
  125. }
  126. static int qcom_ice_enable_clocks(struct ice_device *, bool);
  127. #ifdef CONFIG_MSM_BUS_SCALING
  128. static int qcom_ice_set_bus_vote(struct ice_device *ice_dev, int vote)
  129. {
  130. int err = 0;
  131. if (vote != ice_dev->bus_vote.curr_vote) {
  132. err = msm_bus_scale_client_update_request(
  133. ice_dev->bus_vote.client_handle, vote);
  134. if (err) {
  135. dev_err(ice_dev->pdev,
  136. "%s:failed:client_handle=0x%x, vote=%d, err=%d\n",
  137. __func__, ice_dev->bus_vote.client_handle,
  138. vote, err);
  139. goto out;
  140. }
  141. ice_dev->bus_vote.curr_vote = vote;
  142. }
  143. out:
  144. return err;
  145. }
  146. static int qcom_ice_get_bus_vote(struct ice_device *ice_dev,
  147. const char *speed_mode)
  148. {
  149. struct device *dev = ice_dev->pdev;
  150. struct device_node *np = dev->of_node;
  151. int err;
  152. const char *key = "qcom,bus-vector-names";
  153. if (!speed_mode) {
  154. err = -EINVAL;
  155. goto out;
  156. }
  157. if (ice_dev->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
  158. err = of_property_match_string(np, key, "MAX");
  159. else
  160. err = of_property_match_string(np, key, speed_mode);
  161. out:
  162. if (err < 0)
  163. dev_err(dev, "%s: Invalid %s mode %d\n",
  164. __func__, speed_mode, err);
  165. return err;
  166. }
/*
 * Register a bus-scaling client for this ICE instance and cache the vote
 * indices for the "MIN" and "MAX" bandwidth vectors.
 *
 * Returns 0 on success or a negative errno.
 */
static int qcom_ice_bus_register(struct ice_device *ice_dev)
{
	int err = 0;
	struct msm_bus_scale_pdata *bus_pdata;
	struct device *dev = ice_dev->pdev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (!bus_pdata) {
		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
		err = -ENODATA;
		goto out;
	}

	/* The DT vector-name count must match the pdata use-case count */
	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0 || err != bus_pdata->num_usecases) {
		dev_err(dev, "%s: Error = %d with qcom,bus-vector-names\n",
			__func__, err);
		goto out;
	}
	err = 0;

	ice_dev->bus_vote.client_handle =
			msm_bus_scale_register_client(bus_pdata);
	if (!ice_dev->bus_vote.client_handle) {
		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
			__func__);
		err = -EFAULT;
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	ice_dev->bus_vote.min_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
	ice_dev->bus_vote.max_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
out:
	return err;
}
#else
/* Bus scaling disabled: voting and registration become harmless no-ops. */
static int qcom_ice_set_bus_vote(struct ice_device *ice_dev, int vote)
{
	return 0;
}

static int qcom_ice_get_bus_vote(struct ice_device *ice_dev,
		const char *speed_mode)
{
	return 0;
}

static int qcom_ice_bus_register(struct ice_device *ice_dev)
{
	return 0;
}
#endif /* CONFIG_MSM_BUS_SCALING */
  216. static int qcom_ice_get_vreg(struct ice_device *ice_dev)
  217. {
  218. int ret = 0;
  219. if (!ice_dev->is_regulator_available)
  220. return 0;
  221. if (ice_dev->reg)
  222. return 0;
  223. ice_dev->reg = devm_regulator_get(ice_dev->pdev, "vdd-hba");
  224. if (IS_ERR(ice_dev->reg)) {
  225. ret = PTR_ERR(ice_dev->reg);
  226. dev_err(ice_dev->pdev, "%s: %s get failed, err=%d\n",
  227. __func__, "vdd-hba-supply", ret);
  228. }
  229. return ret;
  230. }
  231. static void qcom_ice_config_proc_ignore(struct ice_device *ice_dev)
  232. {
  233. u32 regval;
  234. if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2 &&
  235. ICE_REV(ice_dev->ice_hw_version, MINOR) == 0 &&
  236. ICE_REV(ice_dev->ice_hw_version, STEP) == 0) {
  237. regval = qcom_ice_readl(ice_dev,
  238. QCOM_ICE_REGS_ADVANCED_CONTROL);
  239. regval |= 0x800;
  240. qcom_ice_writel(ice_dev, regval,
  241. QCOM_ICE_REGS_ADVANCED_CONTROL);
  242. /* Ensure register is updated */
  243. mb();
  244. }
  245. }
  246. static void qcom_ice_low_power_mode_enable(struct ice_device *ice_dev)
  247. {
  248. u32 regval;
  249. regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
  250. /*
  251. * Enable low power mode sequence
  252. * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
  253. */
  254. regval |= 0x7000;
  255. qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
  256. /*
  257. * Ensure previous instructions was completed before issuing next
  258. * ICE initialization/optimization instruction
  259. */
  260. mb();
  261. }
  262. static void qcom_ice_enable_test_bus_config(struct ice_device *ice_dev)
  263. {
  264. /*
  265. * Configure & enable ICE_TEST_BUS_REG to reflect ICE intr lines
  266. * MAIN_TEST_BUS_SELECTOR = 0 (ICE_CONFIG)
  267. * TEST_BUS_REG_EN = 1 (ENABLE)
  268. */
  269. u32 regval;
  270. if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
  271. return;
  272. regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_CONTROL);
  273. regval &= 0x0FFFFFFF;
  274. /* TBD: replace 0x2 with define in iceregs.h */
  275. regval |= 0x2;
  276. qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_TEST_BUS_CONTROL);
  277. /*
  278. * Ensure previous instructions was completed before issuing next
  279. * ICE initialization/optimization instruction
  280. */
  281. mb();
  282. }
/*
 * Program the per-revision optimization bits in ADVANCED_CONTROL and,
 * on v1.x, the ENDIAN_SWAP register. The udelay()/mb() calls implement
 * the HPG-mandated settle times between register writes — do not reorder.
 */
static void qcom_ice_optimization_enable(struct ice_device *ice_dev)
{
	u32 regval;

	regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
		regval |= 0xD807100;
	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
		regval |= 0x3F007100;

	/* ICE Optimizations Enable Sequence */
	udelay(5);
	/* [0]-0, [1]-0, [2]-8, [3]-E, [4]-0, [5]-0, [6]-F, [7]-A */
	qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
	/*
	 * Ensure previous instructions was completed before issuing next
	 * ICE initialization/optimization instruction
	 */
	mb();

	/* ICE HPG requires sleep before writing */
	udelay(5);
	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
		regval = 0;
		regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP);
		regval |= 0xF;
		qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ENDIAN_SWAP);
		/*
		 * Ensure previous instructions were completed before issue
		 * next ICE commands
		 */
		mb();
	}
}
  314. static int qcom_ice_wait_bist_status(struct ice_device *ice_dev)
  315. {
  316. int count;
  317. u32 reg;
  318. /* Poll until all BIST bits are reset */
  319. for (count = 0; count < QCOM_ICE_MAX_BIST_CHECK_COUNT; count++) {
  320. reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BIST_STATUS);
  321. if (!(reg & ICE_BIST_STATUS_MASK))
  322. break;
  323. udelay(50);
  324. }
  325. if (reg)
  326. return -ETIMEDOUT;
  327. return 0;
  328. }
/*
 * Bring the ICE HW out of bypass. On v2.1+ this first waits for the BIST
 * to finish; on v3+ the actual enable is owned by the storage controller,
 * so this returns early. The register sequence below is order-sensitive.
 *
 * Returns 0 on success or the BIST wait error.
 */
static int qcom_ice_enable(struct ice_device *ice_dev)
{
	unsigned int reg;
	int ret = 0;

	/* v2.1 and later run a BIST after reset; wait for it to settle */
	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1)))
		ret = qcom_ice_wait_bist_status(ice_dev);
	if (ret) {
		dev_err(ice_dev->pdev, "BIST status error (%d)\n", ret);
		return ret;
	}

	/* Starting ICE v3 enabling is done at storage controller (UFS/SDCC) */
	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 3)
		return 0;

	/*
	 * To enable ICE, perform following
	 * 1. Set IGNORE_CONTROLLER_RESET to USE in ICE_RESET register
	 * 2. Disable GLOBAL_BYPASS bit in ICE_CONTROL register
	 */
	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET);
	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
		reg &= 0x0;
	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
		reg &= ~0x100;
	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_RESET);
	/*
	 * Ensure previous instructions was completed before issuing next
	 * ICE initialization/optimization instruction
	 */
	mb();
	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL);
	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
		reg &= 0xFFFE;
	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
		reg &= ~0x7;
	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_CONTROL);
	/*
	 * Ensure previous instructions was completed before issuing next
	 * ICE initialization/optimization instruction
	 */
	mb();

	/* On v2.1+, verify the bypass bit actually cleared */
	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
		reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS);
		if ((reg & 0x80000000) != 0x0) {
			pr_err("%s: Bypass failed for ice = %pK",
				__func__, (void *)ice_dev);
			WARN_ON(1);
		}
	}
	return 0;
}
  383. static int qcom_ice_verify_ice(struct ice_device *ice_dev)
  384. {
  385. unsigned int rev;
  386. unsigned int maj_rev, min_rev, step_rev;
  387. rev = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION);
  388. maj_rev = (rev & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV;
  389. min_rev = (rev & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV;
  390. step_rev = (rev & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV;
  391. if (maj_rev > ICE_CORE_CURRENT_MAJOR_VERSION) {
  392. pr_err("%s: Unknown QC ICE device at %lu, rev %d.%d.%d\n",
  393. __func__, (unsigned long)ice_dev->mmio,
  394. maj_rev, min_rev, step_rev);
  395. return -ENODEV;
  396. }
  397. ice_dev->ice_hw_version = rev;
  398. dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%pK\n",
  399. maj_rev, min_rev, step_rev,
  400. ice_dev->mmio);
  401. return 0;
  402. }
  403. static void qcom_ice_enable_intr(struct ice_device *ice_dev)
  404. {
  405. unsigned int reg;
  406. reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
  407. reg &= ~QCOM_ICE_NON_SEC_IRQ_MASK;
  408. qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
  409. /*
  410. * Ensure previous instructions was completed before issuing next
  411. * ICE initialization/optimization instruction
  412. */
  413. mb();
  414. }
  415. static void qcom_ice_disable_intr(struct ice_device *ice_dev)
  416. {
  417. unsigned int reg;
  418. reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
  419. reg |= QCOM_ICE_NON_SEC_IRQ_MASK;
  420. qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
  421. /*
  422. * Ensure previous instructions was completed before issuing next
  423. * ICE initialization/optimization instruction
  424. */
  425. mb();
  426. }
  427. static irqreturn_t qcom_ice_isr(int isr, void *data)
  428. {
  429. irqreturn_t retval = IRQ_NONE;
  430. u32 status;
  431. struct ice_device *ice_dev = data;
  432. status = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS);
  433. if (status) {
  434. ice_dev->error_cb(ice_dev->host_controller_data, status);
  435. /* Interrupt has been handled. Clear the IRQ */
  436. qcom_ice_writel(ice_dev, status, QCOM_ICE_REGS_NON_SEC_IRQ_CLR);
  437. /* Ensure instruction is completed */
  438. mb();
  439. retval = IRQ_HANDLED;
  440. }
  441. return retval;
  442. }
  443. static void qcom_ice_parse_ice_instance_type(struct platform_device *pdev,
  444. struct ice_device *ice_dev)
  445. {
  446. int ret = -1;
  447. struct device *dev = &pdev->dev;
  448. struct device_node *np = dev->of_node;
  449. const char *type;
  450. ret = of_property_read_string_index(np, "qcom,instance-type", 0, &type);
  451. if (ret) {
  452. pr_err("%s: Could not get ICE instance type\n", __func__);
  453. goto out;
  454. }
  455. strlcpy(ice_dev->ice_instance_type, type, QCOM_ICE_TYPE_NAME_LEN);
  456. out:
  457. return;
  458. }
  459. static int qcom_ice_parse_clock_info(struct platform_device *pdev,
  460. struct ice_device *ice_dev)
  461. {
  462. int ret = -1, cnt, i, len;
  463. struct device *dev = &pdev->dev;
  464. struct device_node *np = dev->of_node;
  465. char *name;
  466. struct ice_clk_info *clki;
  467. u32 *clkfreq = NULL;
  468. if (!np)
  469. goto out;
  470. cnt = of_property_count_strings(np, "clock-names");
  471. if (cnt <= 0) {
  472. dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
  473. __func__);
  474. ret = cnt;
  475. goto out;
  476. }
  477. if (!of_get_property(np, "qcom,op-freq-hz", &len)) {
  478. dev_info(dev, "qcom,op-freq-hz property not specified\n");
  479. goto out;
  480. }
  481. len = len/sizeof(*clkfreq);
  482. if (len != cnt)
  483. goto out;
  484. clkfreq = devm_kzalloc(dev, len * sizeof(*clkfreq), GFP_KERNEL);
  485. if (!clkfreq) {
  486. ret = -ENOMEM;
  487. goto out;
  488. }
  489. ret = of_property_read_u32_array(np, "qcom,op-freq-hz", clkfreq, len);
  490. INIT_LIST_HEAD(&ice_dev->clk_list_head);
  491. for (i = 0; i < cnt; i++) {
  492. ret = of_property_read_string_index(np,
  493. "clock-names", i, (const char **)&name);
  494. if (ret)
  495. goto out;
  496. clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
  497. if (!clki) {
  498. ret = -ENOMEM;
  499. goto out;
  500. }
  501. clki->max_freq = clkfreq[i];
  502. clki->name = kstrdup(name, GFP_KERNEL);
  503. list_add_tail(&clki->list, &ice_dev->clk_list_head);
  504. }
  505. out:
  506. if (clkfreq)
  507. devm_kfree(dev, (void *)clkfreq);
  508. return ret;
  509. }
/*
 * Gather all platform resources for an ICE instance from the device tree:
 * MMIO region, optional vdd-hba regulator, optional core clocks, optional
 * IRQ (v2.x only), and the instance-type string.
 *
 * Returns 0 on success or a negative errno.
 */
static int qcom_ice_get_device_tree_data(struct platform_device *pdev,
		struct ice_device *ice_dev)
{
	struct device *dev = &pdev->dev;
	int rc = -1;
	int irq;

	ice_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!ice_dev->res) {
		pr_err("%s: No memory available for IORESOURCE\n", __func__);
		return -ENOMEM;
	}

	ice_dev->mmio = devm_ioremap_resource(dev, ice_dev->res);
	if (IS_ERR(ice_dev->mmio)) {
		rc = PTR_ERR(ice_dev->mmio);
		pr_err("%s: Error = %d mapping ICE io memory\n", __func__, rc);
		goto out;
	}

	/* The regulator is optional; remember whether to acquire it later */
	if (!of_parse_phandle(pdev->dev.of_node, "vdd-hba-supply", 0)) {
		pr_err("%s: No vdd-hba-supply regulator, assuming not needed\n",
			__func__);
		ice_dev->is_regulator_available = false;
	} else {
		ice_dev->is_regulator_available = true;
	}
	ice_dev->is_ice_clk_available = of_property_read_bool(
				(&pdev->dev)->of_node,
				"qcom,enable-ice-clk");

	if (ice_dev->is_ice_clk_available) {
		rc = qcom_ice_parse_clock_info(pdev, ice_dev);
		if (rc) {
			pr_err("%s: qcom_ice_parse_clock_info failed (%d)\n",
				__func__, rc);
			goto err_dev;
		}
	}

	/* ICE interrupts is only relevant for v2.x */
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0) {
		rc = devm_request_irq(dev, irq, qcom_ice_isr, 0, dev_name(dev),
				ice_dev);
		if (rc) {
			pr_err("%s: devm_request_irq irq=%d failed (%d)\n",
				__func__, irq, rc);
			goto err_dev;
		}
		ice_dev->irq = irq;
		pr_info("ICE IRQ = %d\n", ice_dev->irq);
	} else {
		dev_dbg(dev, "IRQ resource not available\n");
	}

	qcom_ice_parse_ice_instance_type(pdev, ice_dev);

	return 0;
err_dev:
	/* release the mapping early so a failed probe leaves nothing behind */
	if (rc && ice_dev->mmio)
		devm_iounmap(dev, ice_dev->mmio);
out:
	return rc;
}
/*
 * An ICE HW instance can exist in UFS or eMMC based storage HW.
 * Userspace does not know what kind of ICE it is dealing with.
 * Though userspace can find which storage device it is booting
 * from, not all storage types supported ICE from the beginning.
 * So an ICE char device is created purely for userspace to probe
 * whether ICE exists for that kind of storage.
 */
static const struct file_operations qcom_ice_fops = {
	/* no ops on purpose: the device node only signals existence */
	.owner = THIS_MODULE,
};
  579. static int register_ice_device(struct ice_device *ice_dev)
  580. {
  581. int rc = 0;
  582. unsigned int baseminor = 0;
  583. unsigned int count = 1;
  584. struct device *class_dev;
  585. int is_sdcc_ice = !strcmp(ice_dev->ice_instance_type, "sdcc");
  586. rc = alloc_chrdev_region(&ice_dev->device_no, baseminor, count,
  587. is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
  588. if (rc < 0) {
  589. pr_err("alloc_chrdev_region failed %d for %s\n", rc,
  590. is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
  591. return rc;
  592. }
  593. ice_dev->driver_class = class_create(THIS_MODULE,
  594. is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
  595. if (IS_ERR(ice_dev->driver_class)) {
  596. rc = -ENOMEM;
  597. pr_err("class_create failed %d for %s\n", rc,
  598. is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
  599. goto exit_unreg_chrdev_region;
  600. }
  601. class_dev = device_create(ice_dev->driver_class, NULL,
  602. ice_dev->device_no, NULL,
  603. is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
  604. if (!class_dev) {
  605. pr_err("class_device_create failed %d for %s\n", rc,
  606. is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
  607. rc = -ENOMEM;
  608. goto exit_destroy_class;
  609. }
  610. cdev_init(&ice_dev->cdev, &qcom_ice_fops);
  611. ice_dev->cdev.owner = THIS_MODULE;
  612. rc = cdev_add(&ice_dev->cdev, MKDEV(MAJOR(ice_dev->device_no), 0), 1);
  613. if (rc < 0) {
  614. pr_err("cdev_add failed %d for %s\n", rc,
  615. is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
  616. goto exit_destroy_device;
  617. }
  618. return 0;
  619. exit_destroy_device:
  620. device_destroy(ice_dev->driver_class, ice_dev->device_no);
  621. exit_destroy_class:
  622. class_destroy(ice_dev->driver_class);
  623. exit_unreg_chrdev_region:
  624. unregister_chrdev_region(ice_dev->device_no, 1);
  625. return rc;
  626. }
  627. static int qcom_ice_probe(struct platform_device *pdev)
  628. {
  629. struct ice_device *ice_dev;
  630. int rc = 0;
  631. if (!pdev) {
  632. pr_err("%s: Invalid platform_device passed\n",
  633. __func__);
  634. return -EINVAL;
  635. }
  636. ice_dev = kzalloc(sizeof(struct ice_device), GFP_KERNEL);
  637. if (!ice_dev) {
  638. rc = -ENOMEM;
  639. pr_err("%s: Error %d allocating memory for ICE device:\n",
  640. __func__, rc);
  641. goto out;
  642. }
  643. ice_dev->pdev = &pdev->dev;
  644. if (!ice_dev->pdev) {
  645. rc = -EINVAL;
  646. pr_err("%s: Invalid device passed in platform_device\n",
  647. __func__);
  648. goto err_ice_dev;
  649. }
  650. if (pdev->dev.of_node)
  651. rc = qcom_ice_get_device_tree_data(pdev, ice_dev);
  652. else {
  653. rc = -EINVAL;
  654. pr_err("%s: ICE device node not found\n", __func__);
  655. }
  656. if (rc)
  657. goto err_ice_dev;
  658. pr_debug("%s: Registering ICE device\n", __func__);
  659. rc = register_ice_device(ice_dev);
  660. if (rc) {
  661. pr_err("create character device failed.\n");
  662. goto err_ice_dev;
  663. }
  664. /*
  665. * If ICE is enabled here, it would be waste of power.
  666. * We would enable ICE when first request for crypto
  667. * operation arrives.
  668. */
  669. ice_dev->is_ice_enabled = false;
  670. platform_set_drvdata(pdev, ice_dev);
  671. list_add_tail(&ice_dev->list, &ice_devices);
  672. goto out;
  673. err_ice_dev:
  674. kfree(ice_dev);
  675. out:
  676. return rc;
  677. }
  678. static int qcom_ice_remove(struct platform_device *pdev)
  679. {
  680. struct ice_device *ice_dev;
  681. ice_dev = (struct ice_device *)platform_get_drvdata(pdev);
  682. if (!ice_dev)
  683. return 0;
  684. qcom_ice_disable_intr(ice_dev);
  685. device_init_wakeup(&pdev->dev, false);
  686. if (ice_dev->mmio)
  687. iounmap(ice_dev->mmio);
  688. list_del_init(&ice_dev->list);
  689. kfree(ice_dev);
  690. return 1;
  691. }
/* Suspend hook: no state to save here, so it unconditionally succeeds. */
static int qcom_ice_suspend(struct platform_device *pdev)
{
	return 0;
}
  696. static int qcom_ice_restore_config(void)
  697. {
  698. struct scm_desc desc = {0};
  699. int ret;
  700. /*
  701. * TZ would check KEYS_RAM_RESET_COMPLETED status bit before processing
  702. * restore config command. This would prevent two calls from HLOS to TZ
  703. * One to check KEYS_RAM_RESET_COMPLETED status bit second to restore
  704. * config
  705. */
  706. desc.arginfo = TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID;
  707. ret = scm_call2(TZ_OS_KS_RESTORE_KEY_ID, &desc);
  708. if (ret)
  709. pr_err("%s: Error: 0x%x\n", __func__, ret);
  710. return ret;
  711. }
  712. static int qcom_ice_restore_key_config(struct ice_device *ice_dev)
  713. {
  714. struct scm_desc desc = {0};
  715. int ret = -1;
  716. /* For ice 3, key configuration needs to be restored in case of reset */
  717. desc.arginfo = TZ_OS_KS_RESTORE_KEY_CONFIG_ID_PARAM_ID;
  718. if (!strcmp(ice_dev->ice_instance_type, "sdcc"))
  719. desc.args[0] = QCOM_ICE_SDCC;
  720. if (!strcmp(ice_dev->ice_instance_type, "ufs"))
  721. desc.args[0] = QCOM_ICE_UFS;
  722. ret = scm_call2(TZ_OS_KS_RESTORE_KEY_CONFIG_ID, &desc);
  723. if (ret)
  724. pr_err("%s: Error: 0x%x\n", __func__, ret);
  725. return ret;
  726. }
  727. static int qcom_ice_init_clocks(struct ice_device *ice)
  728. {
  729. int ret = -EINVAL;
  730. struct ice_clk_info *clki = NULL;
  731. struct device *dev = ice->pdev;
  732. struct list_head *head = &ice->clk_list_head;
  733. if (!head || list_empty(head)) {
  734. dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
  735. goto out;
  736. }
  737. list_for_each_entry(clki, head, list) {
  738. if (!clki->name)
  739. continue;
  740. clki->clk = devm_clk_get(dev, clki->name);
  741. if (IS_ERR(clki->clk)) {
  742. ret = PTR_ERR(clki->clk);
  743. dev_err(dev, "%s: %s clk get failed, %d\n",
  744. __func__, clki->name, ret);
  745. goto out;
  746. }
  747. /* Not all clocks would have a rate to be set */
  748. ret = 0;
  749. if (clki->max_freq) {
  750. ret = clk_set_rate(clki->clk, clki->max_freq);
  751. if (ret) {
  752. dev_err(dev,
  753. "%s: %s clk set rate(%dHz) failed, %d\n",
  754. __func__, clki->name,
  755. clki->max_freq, ret);
  756. goto out;
  757. }
  758. clki->curr_freq = clki->max_freq;
  759. dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
  760. clki->name, clk_get_rate(clki->clk));
  761. }
  762. }
  763. out:
  764. return ret;
  765. }
  766. static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable)
  767. {
  768. int ret = 0;
  769. struct ice_clk_info *clki = NULL;
  770. struct device *dev = ice->pdev;
  771. struct list_head *head = &ice->clk_list_head;
  772. if (!head || list_empty(head)) {
  773. dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
  774. ret = -EINVAL;
  775. goto out;
  776. }
  777. if (!ice->is_ice_clk_available) {
  778. dev_err(dev, "%s:ICE Clock not available\n", __func__);
  779. ret = -EINVAL;
  780. goto out;
  781. }
  782. list_for_each_entry(clki, head, list) {
  783. if (!clki->name)
  784. continue;
  785. if (enable)
  786. ret = clk_prepare_enable(clki->clk);
  787. else
  788. clk_disable_unprepare(clki->clk);
  789. if (ret) {
  790. dev_err(dev, "Unable to %s ICE core clk\n",
  791. enable?"enable":"disable");
  792. goto out;
  793. }
  794. }
  795. out:
  796. return ret;
  797. }
  798. static int qcom_ice_secure_ice_init(struct ice_device *ice_dev)
  799. {
  800. /* We need to enable source for ICE secure interrupts */
  801. int ret = 0;
  802. u32 regval;
  803. regval = scm_io_read((unsigned long)ice_dev->res +
  804. QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK);
  805. regval &= ~QCOM_ICE_SEC_IRQ_MASK;
  806. ret = scm_io_write((unsigned long)ice_dev->res +
  807. QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK, regval);
  808. /*
  809. * Ensure previous instructions was completed before issuing next
  810. * ICE initialization/optimization instruction
  811. */
  812. mb();
  813. if (!ret)
  814. pr_err("%s: failed(0x%x) to init secure ICE config\n",
  815. __func__, ret);
  816. return ret;
  817. }
  818. static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
  819. {
  820. int ret = 0, scm_ret = 0;
  821. /* scm command buffer structure */
  822. struct qcom_scm_cmd_buf {
  823. unsigned int device_id;
  824. unsigned int spare;
  825. } cbuf = {0};
  826. /*
  827. * Ideally, we should check ICE version to decide whether to proceed or
  828. * or not. Since version wont be available when this function is called
  829. * we need to depend upon is_ice_clk_available to decide
  830. */
  831. if (ice_dev->is_ice_clk_available)
  832. goto out;
  833. /*
  834. * Store dev_id in ice_device structure so that emmc/ufs cases can be
  835. * handled properly
  836. */
  837. #define RESTORE_SEC_CFG_CMD 0x2
  838. #define ICE_TZ_DEV_ID 20
  839. cbuf.device_id = ICE_TZ_DEV_ID;
  840. ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
  841. if (ret || scm_ret) {
  842. pr_err("%s: failed, ret %d scm_ret %d\n",
  843. __func__, ret, scm_ret);
  844. if (!ret)
  845. ret = scm_ret;
  846. }
  847. out:
  848. return ret;
  849. }
  850. static int qcom_ice_finish_init(struct ice_device *ice_dev)
  851. {
  852. unsigned int reg;
  853. int err = 0;
  854. if (!ice_dev) {
  855. pr_err("%s: Null data received\n", __func__);
  856. err = -ENODEV;
  857. goto out;
  858. }
  859. if (ice_dev->is_ice_clk_available) {
  860. err = qcom_ice_init_clocks(ice_dev);
  861. if (err)
  862. goto out;
  863. err = qcom_ice_bus_register(ice_dev);
  864. if (err)
  865. goto out;
  866. }
  867. /*
  868. * It is possible that ICE device is not probed when host is probed
  869. * This would cause host probe to be deferred. When probe for host is
  870. * deferred, it can cause power collapse for host and that can wipe
  871. * configurations of host & ice. It is prudent to restore the config
  872. */
  873. err = qcom_ice_update_sec_cfg(ice_dev);
  874. if (err)
  875. goto out;
  876. err = qcom_ice_verify_ice(ice_dev);
  877. if (err)
  878. goto out;
  879. /* if ICE_DISABLE_FUSE is blown, return immediately
  880. * Currently, FORCE HW Keys are also disabled, since
  881. * there is no use case for their usage neither in FDE
  882. * nor in PFE
  883. */
  884. reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING);
  885. reg &= (ICE_FUSE_SETTING_MASK |
  886. ICE_FORCE_HW_KEY0_SETTING_MASK |
  887. ICE_FORCE_HW_KEY1_SETTING_MASK);
  888. if (reg) {
  889. ice_dev->is_ice_disable_fuse_blown = true;
  890. pr_err("%s: Error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n",
  891. __func__);
  892. err = -EPERM;
  893. goto out;
  894. }
  895. /* TZ side of ICE driver would handle secure init of ICE HW from v2 */
  896. if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1 &&
  897. !qcom_ice_secure_ice_init(ice_dev)) {
  898. pr_err("%s: Error: ICE_ERROR_ICE_TZ_INIT_FAILED\n", __func__);
  899. err = -EFAULT;
  900. goto out;
  901. }
  902. qcom_ice_low_power_mode_enable(ice_dev);
  903. qcom_ice_optimization_enable(ice_dev);
  904. qcom_ice_config_proc_ignore(ice_dev);
  905. qcom_ice_enable_test_bus_config(ice_dev);
  906. qcom_ice_enable(ice_dev);
  907. ice_dev->is_ice_enabled = true;
  908. qcom_ice_enable_intr(ice_dev);
  909. out:
  910. return err;
  911. }
  912. static int qcom_ice_init(struct platform_device *pdev,
  913. void *host_controller_data,
  914. ice_error_cb error_cb)
  915. {
  916. /*
  917. * A completion event for host controller would be triggered upon
  918. * initialization completion
  919. * When ICE is initialized, it would put ICE into Global Bypass mode
  920. * When any request for data transfer is received, it would enable
  921. * the ICE for that particular request
  922. */
  923. struct ice_device *ice_dev;
  924. ice_dev = platform_get_drvdata(pdev);
  925. if (!ice_dev) {
  926. pr_err("%s: invalid device\n", __func__);
  927. return -EINVAL;
  928. }
  929. ice_dev->error_cb = error_cb;
  930. ice_dev->host_controller_data = host_controller_data;
  931. return qcom_ice_finish_init(ice_dev);
  932. }
/*
 * Re-initialize ICE after the storage controller (and ICE with it) came
 * back from a power collapse/reset.
 *
 * Re-runs the low-power/optimization/enable sequence and, depending on
 * the ICE major version, restores keys (v1, via TZ) or the key
 * configuration (v3+, also clearing the PFE key cache). Records the
 * completion timestamp used by qcom_ice_debug().
 *
 * Returns 0 on success, -EPERM if the disable fuse is blown, -EFAULT if
 * the v1 key restore fails, or the v3 key-config restore error code.
 */
static int qcom_ice_finish_power_collapse(struct ice_device *ice_dev)
{
	int err = 0;

	/* Probe flagged this part as fused off; never touch the HW again */
	if (ice_dev->is_ice_disable_fuse_blown) {
		err = -EPERM;
		goto out;
	}

	if (ice_dev->is_ice_enabled) {
		/*
		 * ICE resets into global bypass mode with optimization and
		 * low power mode disabled. Hence we need to redo those seq's.
		 */
		qcom_ice_low_power_mode_enable(ice_dev);

		qcom_ice_enable_test_bus_config(ice_dev);

		qcom_ice_optimization_enable(ice_dev);
		qcom_ice_enable(ice_dev);

		if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
			/*
			 * When ICE resets, it wipes all of keys from LUTs
			 * ICE driver should call TZ to restore keys
			 */
			if (qcom_ice_restore_config()) {
				err = -EFAULT;
				goto out;
			}

		/*
		 * ICE looses its key configuration when UFS is reset,
		 * restore it
		 */
		} else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) {
			err = qcom_ice_restore_key_config(ice_dev);
			if (err)
				goto out;

			/*
			 * for PFE case, clear the cached ICE key table,
			 * this will force keys to be reconfigured
			 * per each next transaction
			 */
			pfk_clear_on_reset();
		}
	}

	ice_dev->ice_reset_complete_time = ktime_get();
out:
	return err;
}
  978. static int qcom_ice_resume(struct platform_device *pdev)
  979. {
  980. /*
  981. * ICE is power collapsed when storage controller is power collapsed
  982. * ICE resume function is responsible for:
  983. * ICE HW enabling sequence
  984. * Key restoration
  985. * A completion event should be triggered
  986. * upon resume completion
  987. * Storage driver will be fully operational only
  988. * after receiving this event
  989. */
  990. struct ice_device *ice_dev;
  991. ice_dev = platform_get_drvdata(pdev);
  992. if (!ice_dev)
  993. return -EINVAL;
  994. if (ice_dev->is_ice_clk_available) {
  995. /*
  996. * Storage is calling this function after power collapse which
  997. * would put ICE into GLOBAL_BYPASS mode. Make sure to enable
  998. * ICE
  999. */
  1000. qcom_ice_enable(ice_dev);
  1001. }
  1002. return 0;
  1003. }
  1004. static void qcom_ice_dump_test_bus(struct ice_device *ice_dev)
  1005. {
  1006. u32 reg = 0x1;
  1007. u32 val;
  1008. u8 bus_selector;
  1009. u8 stream_selector;
  1010. pr_err("ICE TEST BUS DUMP:\n");
  1011. for (bus_selector = 0; bus_selector <= 0xF; bus_selector++) {
  1012. reg = 0x1; /* enable test bus */
  1013. reg |= bus_selector << 28;
  1014. if (bus_selector == 0xD)
  1015. continue;
  1016. qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_TEST_BUS_CONTROL);
  1017. /*
  1018. * make sure test bus selector is written before reading
  1019. * the test bus register
  1020. */
  1021. mb();
  1022. val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
  1023. pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
  1024. reg, val);
  1025. }
  1026. pr_err("ICE TEST BUS DUMP (ICE_STREAM1_DATAPATH_TEST_BUS):\n");
  1027. for (stream_selector = 0; stream_selector <= 0xF; stream_selector++) {
  1028. reg = 0xD0000001; /* enable stream test bus */
  1029. reg |= stream_selector << 16;
  1030. qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_TEST_BUS_CONTROL);
  1031. /*
  1032. * make sure test bus selector is written before reading
  1033. * the test bus register
  1034. */
  1035. mb();
  1036. val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
  1037. pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
  1038. reg, val);
  1039. }
  1040. }
/*
 * ops->debug entry point: dump every readable ICE register (control,
 * version, fuse, parameters, IRQ state, error syndromes, stream counters),
 * the test bus, and the last reset timing, all via pr_err so the dump
 * survives in crash logs.
 *
 * Version-gated registers (INVALID_CCFG on v3+, BIST/BYPASS status on
 * v2.1+) are only read where they exist. No-op (with a log line) when
 * pdev is NULL, no ICE device is bound, or ICE is not enabled.
 *
 * NOTE(review): ice_reset_*_time.tv64 implies the pre-4.10 ktime
 * representation; this will not build against kernels where ktime_t is a
 * plain s64 — confirm target kernel version.
 */
static void qcom_ice_debug(struct platform_device *pdev)
{
	struct ice_device *ice_dev;

	if (!pdev) {
		pr_err("%s: Invalid params passed\n", __func__);
		goto out;
	}

	ice_dev = platform_get_drvdata(pdev);

	if (!ice_dev) {
		pr_err("%s: No ICE device available\n", __func__);
		goto out;
	}

	if (!ice_dev->is_ice_enabled) {
		pr_err("%s: ICE device is not enabled\n", __func__);
		goto out;
	}

	pr_err("%s: =========== REGISTER DUMP (%pK)===========\n",
			ice_dev->ice_instance_type, ice_dev);

	pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET));

	pr_err("%s: ICE Version: 0x%08x | ICE FUSE: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING));

	pr_err("%s: ICE Param1: 0x%08x | ICE Param2: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_1),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_2));

	pr_err("%s: ICE Param3: 0x%08x | ICE Param4: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_3),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_4));

	pr_err("%s: ICE Param5: 0x%08x | ICE IRQ STTS: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_5),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS));

	pr_err("%s: ICE IRQ MASK: 0x%08x | ICE IRQ CLR: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_CLR));

	/* INVALID_CCFG error status only exists on ICE v3+ */
	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) {
		pr_err("%s: ICE INVALID CCFG ERR STTS: 0x%08x\n",
			ice_dev->ice_instance_type,
			qcom_ice_readl(ice_dev,
				QCOM_ICE_INVALID_CCFG_ERR_STTS));
	}

	/* BIST/BYPASS status registers appeared in ICE v2.1 */
	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
		pr_err("%s: ICE BIST Sts: 0x%08x | ICE Bypass Sts: 0x%08x\n",
			ice_dev->ice_instance_type,
			qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BIST_STATUS),
			qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS));
	}

	pr_err("%s: ICE ADV CTRL: 0x%08x | ICE ENDIAN SWAP: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP));

	pr_err("%s: ICE_STM1_ERR_SYND1: 0x%08x | ICE_STM1_ERR_SYND2: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME1),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME2));

	pr_err("%s: ICE_STM2_ERR_SYND1: 0x%08x | ICE_STM2_ERR_SYND2: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME1),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME2));

	pr_err("%s: ICE_STM1_COUNTER1: 0x%08x | ICE_STM1_COUNTER2: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS1),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS2));

	pr_err("%s: ICE_STM1_COUNTER3: 0x%08x | ICE_STM1_COUNTER4: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS3),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS4));

	pr_err("%s: ICE_STM2_COUNTER1: 0x%08x | ICE_STM2_COUNTER2: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS1),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS2));

	pr_err("%s: ICE_STM2_COUNTER3: 0x%08x | ICE_STM2_COUNTER4: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS3),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS4));

	pr_err("%s: ICE_STM1_CTR5_MSB: 0x%08x | ICE_STM1_CTR5_LSB: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_MSB),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_LSB));

	pr_err("%s: ICE_STM1_CTR6_MSB: 0x%08x | ICE_STM1_CTR6_LSB: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_MSB),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_LSB));

	pr_err("%s: ICE_STM1_CTR7_MSB: 0x%08x | ICE_STM1_CTR7_LSB: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_MSB),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_LSB));

	pr_err("%s: ICE_STM1_CTR8_MSB: 0x%08x | ICE_STM1_CTR8_LSB: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_MSB),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_LSB));

	pr_err("%s: ICE_STM1_CTR9_MSB: 0x%08x | ICE_STM1_CTR9_LSB: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_MSB),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_LSB));

	pr_err("%s: ICE_STM2_CTR5_MSB: 0x%08x | ICE_STM2_CTR5_LSB: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_MSB),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_LSB));

	pr_err("%s: ICE_STM2_CTR6_MSB: 0x%08x | ICE_STM2_CTR6_LSB: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_MSB),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_LSB));

	pr_err("%s: ICE_STM2_CTR7_MSB: 0x%08x | ICE_STM2_CTR7_LSB: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_MSB),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_LSB));

	pr_err("%s: ICE_STM2_CTR8_MSB: 0x%08x | ICE_STM2_CTR8_LSB: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_MSB),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_LSB));

	pr_err("%s: ICE_STM2_CTR9_MSB: 0x%08x | ICE_STM2_CTR9_LSB: 0x%08x\n",
		ice_dev->ice_instance_type,
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_MSB),
		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_LSB));

	qcom_ice_dump_test_bus(ice_dev);
	pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n",
		ice_dev->ice_instance_type,
		(unsigned long long)ice_dev->ice_reset_start_time.tv64,
		(unsigned long long)ice_dev->ice_reset_complete_time.tv64);

	/* Only log the delta when complete > start (both were recorded) */
	if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time,
			ice_dev->ice_reset_start_time)) > 0)
		pr_err("%s: Time taken for reset: %lu\n",
			ice_dev->ice_instance_type,
			(unsigned long)ktime_to_us(ktime_sub(
				ice_dev->ice_reset_complete_time,
				ice_dev->ice_reset_start_time)));
out:
	return;
}
  1180. static int qcom_ice_reset(struct platform_device *pdev)
  1181. {
  1182. struct ice_device *ice_dev;
  1183. ice_dev = platform_get_drvdata(pdev);
  1184. if (!ice_dev) {
  1185. pr_err("%s: INVALID ice_dev\n", __func__);
  1186. return -EINVAL;
  1187. }
  1188. ice_dev->ice_reset_start_time = ktime_get();
  1189. return qcom_ice_finish_power_collapse(ice_dev);
  1190. }
  1191. static int qcom_ice_config_start(struct platform_device *pdev,
  1192. struct request *req,
  1193. struct ice_data_setting *setting, bool async)
  1194. {
  1195. struct ice_crypto_setting *crypto_data;
  1196. struct ice_crypto_setting pfk_crypto_data = {0};
  1197. union map_info *info;
  1198. int ret = 0;
  1199. bool is_pfe = false;
  1200. if (!pdev || !req) {
  1201. pr_err("%s: Invalid params passed\n", __func__);
  1202. return -EINVAL;
  1203. }
  1204. /*
  1205. * It is not an error to have a request with no bio
  1206. * Such requests must bypass ICE. So first set bypass and then
  1207. * return if bio is not available in request
  1208. */
  1209. if (setting) {
  1210. setting->encr_bypass = true;
  1211. setting->decr_bypass = true;
  1212. }
  1213. if (!req->bio) {
  1214. /* It is not an error to have a request with no bio */
  1215. return 0;
  1216. }
  1217. //pr_err("%s bio is %pK\n", __func__, req->bio);
  1218. ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
  1219. if (is_pfe) {
  1220. if (ret) {
  1221. if (ret != -EBUSY && ret != -EAGAIN)
  1222. pr_err("%s error %d while configuring ice key for PFE\n",
  1223. __func__, ret);
  1224. return ret;
  1225. }
  1226. return qti_ice_setting_config(req, pdev,
  1227. &pfk_crypto_data, setting);
  1228. }
  1229. /*
  1230. * info field in req->end_io_data could be used by mulitple dm or
  1231. * non-dm entities. To ensure that we are running operation on dm
  1232. * based request, check BIO_DONT_FREE flag
  1233. */
  1234. if (bio_flagged(req->bio, BIO_INLINECRYPT)) {
  1235. info = dm_get_rq_mapinfo(req);
  1236. if (!info) {
  1237. pr_debug("%s info not available in request\n",
  1238. __func__);
  1239. return 0;
  1240. }
  1241. crypto_data = (struct ice_crypto_setting *)info->ptr;
  1242. if (!crypto_data) {
  1243. pr_err("%s crypto_data not available in request\n",
  1244. __func__);
  1245. return -EINVAL;
  1246. }
  1247. return qti_ice_setting_config(req, pdev,
  1248. crypto_data, setting);
  1249. }
  1250. /*
  1251. * It is not an error. If target is not req-crypt based, all request
  1252. * from storage driver would come here to check if there is any ICE
  1253. * setting required
  1254. */
  1255. return 0;
  1256. }
  1257. EXPORT_SYMBOL(qcom_ice_config_start);
  1258. static int qcom_ice_config_end(struct request *req)
  1259. {
  1260. int ret = 0;
  1261. bool is_pfe = false;
  1262. if (!req) {
  1263. pr_err("%s: Invalid params passed\n", __func__);
  1264. return -EINVAL;
  1265. }
  1266. if (!req->bio) {
  1267. /* It is not an error to have a request with no bio */
  1268. return 0;
  1269. }
  1270. ret = pfk_load_key_end(req->bio, &is_pfe);
  1271. if (is_pfe) {
  1272. if (ret != 0)
  1273. pr_err("%s error %d while end configuring ice key for PFE\n",
  1274. __func__, ret);
  1275. return ret;
  1276. }
  1277. return 0;
  1278. }
  1279. EXPORT_SYMBOL(qcom_ice_config_end);
  1280. static int qcom_ice_status(struct platform_device *pdev)
  1281. {
  1282. struct ice_device *ice_dev;
  1283. unsigned int test_bus_reg_status;
  1284. if (!pdev) {
  1285. pr_err("%s: Invalid params passed\n", __func__);
  1286. return -EINVAL;
  1287. }
  1288. ice_dev = platform_get_drvdata(pdev);
  1289. if (!ice_dev)
  1290. return -ENODEV;
  1291. if (!ice_dev->is_ice_enabled)
  1292. return -ENODEV;
  1293. test_bus_reg_status = qcom_ice_readl(ice_dev,
  1294. QCOM_ICE_REGS_TEST_BUS_REG);
  1295. return !!(test_bus_reg_status & QCOM_ICE_TEST_BUS_REG_NON_SECURE_INTR);
  1296. }
/*
 * Dispatch table handed to host storage drivers via
 * qcom_ice_get_variant_ops(); all entry points take the ICE
 * platform_device except config_end, which is keyed on the request.
 */
struct qcom_ice_variant_ops qcom_ice_ops = {
	.name = "qcom",
	.init = qcom_ice_init,
	.reset = qcom_ice_reset,
	.resume = qcom_ice_resume,
	.suspend = qcom_ice_suspend,
	.config_start = qcom_ice_config_start,
	.config_end = qcom_ice_config_end,
	.status = qcom_ice_status,
	.debug = qcom_ice_debug,
};
  1308. struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
  1309. {
  1310. struct platform_device *ice_pdev = NULL;
  1311. struct ice_device *ice_dev = NULL;
  1312. if (!node) {
  1313. pr_err("%s: invalid node %pK", __func__, node);
  1314. goto out;
  1315. }
  1316. if (!of_device_is_available(node)) {
  1317. pr_err("%s: device unavailable\n", __func__);
  1318. goto out;
  1319. }
  1320. if (list_empty(&ice_devices)) {
  1321. pr_err("%s: invalid device list\n", __func__);
  1322. ice_pdev = ERR_PTR(-EPROBE_DEFER);
  1323. goto out;
  1324. }
  1325. list_for_each_entry(ice_dev, &ice_devices, list) {
  1326. if (ice_dev->pdev->of_node == node) {
  1327. pr_info("%s: found ice device %pK\n", __func__,
  1328. ice_dev);
  1329. ice_pdev = to_platform_device(ice_dev->pdev);
  1330. break;
  1331. }
  1332. }
  1333. if (ice_pdev)
  1334. pr_info("%s: matching platform device %pK\n", __func__,
  1335. ice_pdev);
  1336. out:
  1337. return ice_pdev;
  1338. }
  1339. static struct ice_device *get_ice_device_from_storage_type
  1340. (const char *storage_type)
  1341. {
  1342. struct ice_device *ice_dev = NULL;
  1343. if (list_empty(&ice_devices)) {
  1344. pr_err("%s: invalid device list\n", __func__);
  1345. ice_dev = ERR_PTR(-EPROBE_DEFER);
  1346. goto out;
  1347. }
  1348. list_for_each_entry(ice_dev, &ice_devices, list) {
  1349. if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
  1350. pr_debug("%s: ice device %pK\n", __func__, ice_dev);
  1351. return ice_dev;
  1352. }
  1353. }
  1354. out:
  1355. return NULL;
  1356. }
  1357. static int enable_ice_setup(struct ice_device *ice_dev)
  1358. {
  1359. int ret = -1, vote;
  1360. /* Setup Regulator */
  1361. if (ice_dev->is_regulator_available) {
  1362. if (qcom_ice_get_vreg(ice_dev)) {
  1363. pr_err("%s: Could not get regulator\n", __func__);
  1364. goto out;
  1365. }
  1366. ret = regulator_enable(ice_dev->reg);
  1367. if (ret) {
  1368. pr_err("%s:%pK: Could not enable regulator\n",
  1369. __func__, ice_dev);
  1370. goto out;
  1371. }
  1372. }
  1373. /* Setup Clocks */
  1374. if (qcom_ice_enable_clocks(ice_dev, true)) {
  1375. pr_err("%s:%pK:%s Could not enable clocks\n", __func__,
  1376. ice_dev, ice_dev->ice_instance_type);
  1377. goto out_reg;
  1378. }
  1379. /* Setup Bus Vote */
  1380. vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
  1381. if (vote < 0)
  1382. goto out_clocks;
  1383. ret = qcom_ice_set_bus_vote(ice_dev, vote);
  1384. if (ret) {
  1385. pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret);
  1386. goto out_clocks;
  1387. }
  1388. return ret;
  1389. out_clocks:
  1390. qcom_ice_enable_clocks(ice_dev, false);
  1391. out_reg:
  1392. if (ice_dev->is_regulator_available) {
  1393. if (qcom_ice_get_vreg(ice_dev)) {
  1394. pr_err("%s: Could not get regulator\n", __func__);
  1395. goto out;
  1396. }
  1397. ret = regulator_disable(ice_dev->reg);
  1398. if (ret) {
  1399. pr_err("%s:%pK: Could not disable regulator\n",
  1400. __func__, ice_dev);
  1401. goto out;
  1402. }
  1403. }
  1404. out:
  1405. return ret;
  1406. }
  1407. static int disable_ice_setup(struct ice_device *ice_dev)
  1408. {
  1409. int ret = -1, vote;
  1410. /* Setup Bus Vote */
  1411. vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
  1412. if (vote < 0) {
  1413. pr_err("%s:%pK: Unable to get bus vote\n", __func__, ice_dev);
  1414. goto out_disable_clocks;
  1415. }
  1416. ret = qcom_ice_set_bus_vote(ice_dev, vote);
  1417. if (ret)
  1418. pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret);
  1419. out_disable_clocks:
  1420. /* Setup Clocks */
  1421. if (qcom_ice_enable_clocks(ice_dev, false))
  1422. pr_err("%s:%pK:%s Could not disable clocks\n", __func__,
  1423. ice_dev, ice_dev->ice_instance_type);
  1424. /* Setup Regulator */
  1425. if (ice_dev->is_regulator_available) {
  1426. if (qcom_ice_get_vreg(ice_dev)) {
  1427. pr_err("%s: Could not get regulator\n", __func__);
  1428. goto out;
  1429. }
  1430. ret = regulator_disable(ice_dev->reg);
  1431. if (ret) {
  1432. pr_err("%s:%pK: Could not disable regulator\n",
  1433. __func__, ice_dev);
  1434. goto out;
  1435. }
  1436. }
  1437. out:
  1438. return ret;
  1439. }
  1440. int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
  1441. {
  1442. int ret = -1;
  1443. struct ice_device *ice_dev = NULL;
  1444. ice_dev = get_ice_device_from_storage_type(storage_type);
  1445. if (ice_dev == ERR_PTR(-EPROBE_DEFER))
  1446. return -EPROBE_DEFER;
  1447. if (!ice_dev || (ice_dev->is_ice_enabled == false))
  1448. return ret;
  1449. if (enable)
  1450. return enable_ice_setup(ice_dev);
  1451. else
  1452. return disable_ice_setup(ice_dev);
  1453. }
/*
 * Return the ICE ops table for a host storage driver. Only one variant
 * ("qcom") exists, so @node is currently unused.
 */
struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node)
{
	return &qcom_ice_ops;
}
EXPORT_SYMBOL(qcom_ice_get_variant_ops);
/* Following struct is required to match device with driver from dts file */
static const struct of_device_id qcom_ice_match[] = {
	{ .compatible = "qcom,ice" },
	{},
};
MODULE_DEVICE_TABLE(of, qcom_ice_match);

/* Platform driver registration; probe/remove are defined earlier in this file */
static struct platform_driver qcom_ice_driver = {
	.probe = qcom_ice_probe,
	.remove = qcom_ice_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "qcom_ice",
		.of_match_table = qcom_ice_match,
	},
};
module_platform_driver(qcom_ice_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Inline Crypto Engine driver");