/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/export.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h> /* ioremap_nocache(), readl_relaxed() */
#include <linux/ipc_logging.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/ramdump.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/smem.h>

#include "smem_private.h"

#define MODEM_SBL_VERSION_INDEX 7
#define SMEM_VERSION_INFO_SIZE (32 * 4)
#define SMEM_VERSION 0x000B

enum {
	MSM_SMEM_DEBUG = 1U << 0,
	MSM_SMEM_INFO = 1U << 1,
};

static int msm_smem_debug_mask = MSM_SMEM_INFO;
module_param_named(debug_mask, msm_smem_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static void *smem_ipc_log_ctx;
#define NUM_LOG_PAGES 4

#define IPC_LOG(x...) do {				\
	if (smem_ipc_log_ctx)				\
		ipc_log_string(smem_ipc_log_ctx, x);	\
} while (0)

#define LOG_ERR(x...) do {	\
	pr_err(x);		\
	IPC_LOG(x);		\
} while (0)

#define SMEM_DBG(x...) do {				\
	if (msm_smem_debug_mask & MSM_SMEM_DEBUG)	\
		IPC_LOG(x);				\
} while (0)

#define SMEM_INFO(x...) do {				\
	if (msm_smem_debug_mask & MSM_SMEM_INFO)	\
		IPC_LOG(x);				\
} while (0)

#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"

static void *smem_ram_base;
static resource_size_t smem_ram_size;
static phys_addr_t smem_ram_phys;
static remote_spinlock_t remote_spinlock;
static uint32_t num_smem_areas;
static struct smem_area *smem_areas;
static struct ramdump_segment *smem_ramdump_segments;
static int spinlocks_initialized;
static void *smem_ramdump_dev;
static DEFINE_MUTEX(spinlock_init_lock);
static DEFINE_SPINLOCK(smem_init_check_lock);
static struct device *smem_dev;
static int smem_module_inited;
static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
static DEFINE_MUTEX(smem_module_init_notifier_lock);
static bool probe_done;
uint32_t smem_max_items;

/* smem security feature components */
#define SMEM_TOC_IDENTIFIER 0x434f5424 /* "$TOC" */
#define SMEM_TOC_MAX_EXCLUSIONS 4
#define SMEM_PART_HDR_IDENTIFIER 0x54525024 /* "$PRT" */
#define SMEM_ALLOCATION_CANARY 0xa5a5

struct smem_toc_entry {
	uint32_t offset;
	uint32_t size;
	uint32_t flags;
	uint16_t host0;
	uint16_t host1;
	uint32_t size_cacheline;
	uint32_t reserved[3];
	uint32_t exclusion_sizes[SMEM_TOC_MAX_EXCLUSIONS];
};

struct smem_toc {
	/* Identifier is a constant, set to SMEM_TOC_IDENTIFIER. */
	uint32_t identifier;
	uint32_t version;
	uint32_t num_entries;
	uint32_t reserved[5];
	struct smem_toc_entry entry[];
};

struct smem_partition_header {
	/* Identifier is a constant, set to SMEM_PART_HDR_IDENTIFIER. */
	uint32_t identifier;
	uint16_t host0;
	uint16_t host1;
	uint32_t size;
	uint32_t offset_free_uncached;
	uint32_t offset_free_cached;
	uint32_t reserved[3];
};

struct smem_partition_allocation_header {
	/* Canary is a constant, set to SMEM_ALLOCATION_CANARY */
	uint16_t canary;
	uint16_t smem_type;
	uint32_t size; /* includes padding bytes */
	uint16_t padding_data;
	uint16_t padding_hdr;
	uint32_t reserved[1];
};

struct smem_partition_info {
	uint32_t partition_num;
	uint32_t offset;
	uint32_t size_cacheline;
};

static struct smem_partition_info partitions[NUM_SMEM_SUBSYSTEMS];

#define SMEM_COMM_PART_VERSION 0x000C
#define SMEM_COMM_HOST 0xFFFE
static bool use_comm_partition;
static struct smem_partition_info comm_partition;
/* end smem security feature components */

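/*
 * Layout of a secured partition, as implied by the allocation logic in
 * __smem_get_entry_secure() and alloc_item_secure() below (an illustrative
 * sketch, not a normative spec):
 *
 *	+------------------------------+ <- partition offset (from TOC entry)
 *	| smem_partition_header        |
 *	+------------------------------+
 *	| uncached items:              |
 *	| [alloc_hdr][pad][data] ...   |  grows upward; next allocation at
 *	+------------------------------+ <- offset_free_uncached
 *	|          free space          |
 *	+------------------------------+ <- offset_free_cached
 *	| cached items:                |  grows downward; each item's data
 *	| ... [data][pad][alloc_hdr]   |  sits below its allocation header
 *	+------------------------------+ <- partition offset + size
 */
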
/* Identifier for the SMEM target info struct. */
#define SMEM_TARG_INFO_IDENTIFIER 0x49494953 /* "SIII" in little-endian. */

struct smem_targ_info_type {
	/* Identifier is a constant, set to SMEM_TARG_INFO_IDENTIFIER. */
	uint32_t identifier;
	uint32_t size;
	phys_addr_t phys_base_addr;
	uint32_t max_items;
};

struct restart_notifier_block {
	unsigned int processor;
	char *name;
	struct notifier_block nb;
};

static int restart_notifier_cb(struct notifier_block *this,
			       unsigned long code, void *data);

static struct restart_notifier_block restart_notifiers[] = {
	{SMEM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMEM_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMEM_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
	{SMEM_DSPS, "slpi", .nb.notifier_call = restart_notifier_cb},
};

static int init_smem_remote_spinlock(void);

/**
 * smem_get_toc() - Get the partitions TOC
 *
 * @return - Base address of the partitions TOC
 *
 * Helper function to get the base address of the partition TOC,
 * which resides in the top 4K of the first smem region.
 */
static struct smem_toc __iomem *smem_get_toc(void)
{
	return smem_areas[0].virt_addr +
	       smem_areas[0].size - 4 * 1024;
}

/**
 * is_probe_done() - Did the probe function successfully complete
 *
 * @return - true if probe successfully completed, false if otherwise
 *
 * Helper function for EPROBE_DEFER support.  If this function returns false,
 * the calling function should immediately return -EPROBE_DEFER.
 */
static bool is_probe_done(void)
{
	return probe_done;
}

/**
 * smem_phys_to_virt() - Convert a physical base and offset to virtual address
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 * @returns: virtual SMEM address; NULL for failure
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the smem regions.  If so, returns the
 * corresponding virtual address.  Otherwise returns NULL.
 */
static void *smem_phys_to_virt(phys_addr_t base, unsigned int offset)
{
	int i;
	phys_addr_t phys_addr;
	resource_size_t size;

	if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
		return NULL;

	if (!smem_areas) {
		/*
		 * Early boot - no area configuration yet, so default
		 * to using the main memory region.
		 *
		 * To remove the MSM_SHARED_RAM_BASE and the static
		 * mapping of SMEM in the future, add dump_stack()
		 * to identify the early callers of smem_get_entry()
		 * (which calls this function) and replace those calls
		 * with a new function that knows how to lookup the
		 * SMEM base address before SMEM has been probed.
		 */
		phys_addr = smem_ram_phys;
		size = smem_ram_size;

		if (base >= phys_addr && base + offset < phys_addr + size) {
			if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
					(uintptr_t)smem_ram_base, offset)) {
				SMEM_INFO("%s: overflow %p %x\n", __func__,
					  smem_ram_base, offset);
				return NULL;
			}
			return smem_ram_base + offset;
		} else {
			return NULL;
		}
	}
	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;

		if (base < phys_addr || base + offset >= phys_addr + size)
			continue;

		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)smem_areas[i].virt_addr, offset)) {
			SMEM_INFO("%s: overflow %p %x\n", __func__,
				  smem_areas[i].virt_addr, offset);
			return NULL;
		}
		return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}

/**
 * smem_virt_to_phys() - Convert SMEM address to physical address.
 *
 * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
 * @returns: Physical address (or NULL if there is a failure)
 *
 * This function should only be used if an SMEM item needs to be handed
 * off to a DMA engine.  This function will not return a version of
 * EPROBE_DEFER if the driver is not ready, since the caller should obtain
 * @smem_address from one of the other public APIs and get EPROBE_DEFER at
 * that time, if applicable.
 */
phys_addr_t smem_virt_to_phys(void *smem_address)
{
	phys_addr_t phys_addr = 0;
	int i;
	void *vend;

	if (!smem_areas)
		return phys_addr;

	for (i = 0; i < num_smem_areas; ++i) {
		vend = (void *)(smem_areas[i].virt_addr + smem_areas[i].size);

		if (smem_address >= smem_areas[i].virt_addr &&
		    smem_address < vend) {
			phys_addr = smem_address - smem_areas[i].virt_addr;
			phys_addr += smem_areas[i].phys_addr;
			break;
		}
	}

	return phys_addr;
}
EXPORT_SYMBOL(smem_virt_to_phys);

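/*
 * Example (an illustrative sketch only): handing an SMEM item to a DMA
 * engine.  The item pointer must come from smem_alloc()/smem_get_entry();
 * how the physical address is programmed into the engine is hypothetical.
 *
 *	phys_addr_t pa = smem_virt_to_phys(item);
 *
 *	if (!pa)
 *		return -EINVAL;
 *	... program 'pa' into the DMA descriptor here ...
 */
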
/**
 * __smem_get_entry_nonsecure - Get pointer and size of existing SMEM item
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @skip_init_check: True means do not verify that SMEM has been initialized
 * @use_rspinlock: True to use the remote spinlock
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 */
static void *__smem_get_entry_nonsecure(unsigned int id, unsigned int *size,
					bool skip_init_check,
					bool use_rspinlock)
{
	struct smem_shared *shared = smem_ram_base;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized && use_rspinlock;
	void *ret = NULL;
	unsigned long flags = 0;
	uint32_t e_size;
	int rc;

	if (!skip_init_check && !smem_initialized_check())
		return ret;

	if (id >= smem_max_items)
		return ret;

	if (use_spinlocks) {
		do {
			rc = remote_spin_trylock_irqsave(&remote_spinlock,
							 flags);
		} while (!rc);
	}
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		phys_addr_t phys_base;

		e_size = toc[id].size;
		/*
		 * Bail out through the unlock path on a bogus size so the
		 * remote spinlock is never leaked.
		 */
		if (e_size > smem_ram_size)
			goto out;
		*size = e_size;
		barrier();

		phys_base = toc[id].reserved & BASE_ADDR_MASK;
		if (!phys_base)
			phys_base = smem_ram_phys;
		ret = smem_phys_to_virt(phys_base, toc[id].offset);
	} else {
		*size = 0;
	}
out:
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}

/**
 * __smem_get_entry_secure - Get pointer and size of existing SMEM item with
 *			     security support
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @skip_init_check: True means do not verify that SMEM has been initialized
 * @use_rspinlock: True to use the remote spinlock
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 */
static void *__smem_get_entry_secure(unsigned int id,
				     unsigned int *size,
				     unsigned int to_proc,
				     unsigned int flags,
				     bool skip_init_check,
				     bool use_rspinlock)
{
	struct smem_partition_allocation_header *alloc_hdr;
	struct smem_partition_header *hdr;
	uint32_t offset_free_uncached;
	struct smem_toc __iomem *toc;
	uint32_t offset_free_cached;
	unsigned long lflags = 0;
	uint32_t partition_size;
	uint32_t partition_num;
	uint32_t padding_data;
	uint32_t padding_hdr;
	uint32_t a_hdr_size;
	uint32_t item_size;
	void *item = NULL;
	int rc;

	SMEM_DBG("%s(%u, %u, %u, %d, %d)\n", __func__, id, to_proc,
		 flags, skip_init_check, use_rspinlock);

	if (!skip_init_check && !smem_initialized_check())
		return NULL;

	if (id >= smem_max_items) {
		SMEM_INFO("%s: invalid id %d\n", __func__, id);
		return NULL;
	}

	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
		SMEM_INFO("%s: id %u invalid to_proc %d\n", __func__, id,
			  to_proc);
		return NULL;
	}

	toc = smem_get_toc();

	if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset) {
		if (use_comm_partition) {
			partition_num = comm_partition.partition_num;
			partition_size =
				readl_relaxed(&toc->entry[partition_num].size);
			hdr = smem_areas[0].virt_addr + comm_partition.offset;
		} else {
			return __smem_get_entry_nonsecure(id, size,
							  skip_init_check,
							  use_rspinlock);
		}
	} else {
		partition_num = partitions[to_proc].partition_num;
		partition_size =
			readl_relaxed(&toc->entry[partition_num].size);
		hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
	}

	if (unlikely(!spinlocks_initialized)) {
		rc = init_smem_remote_spinlock();
		if (unlikely(rc)) {
			SMEM_INFO("%s: id:%u remote spinlock init failed %d\n",
				  __func__, id, rc);
			return NULL;
		}
	}
	if (use_rspinlock) {
		do {
			rc = remote_spin_trylock_irqsave(&remote_spinlock,
							 lflags);
		} while (!rc);
	}
	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR("%s: SMEM corruption detected. Partition %d to %d at %p\n",
			__func__, partition_num, to_proc, hdr);
		BUG();
	}

	/*
	 * The sanity-check failure paths below exit through the unlock path
	 * (goto out) so the remote spinlock is never leaked.
	 */
	if (flags & SMEM_ITEM_CACHED_FLAG) {
		a_hdr_size = ALIGN(sizeof(*alloc_hdr),
				   partitions[to_proc].size_cacheline);
		offset_free_cached = hdr->offset_free_cached;
		if (WARN_ON(offset_free_cached > partition_size))
			goto out;

		for (alloc_hdr = (void *)(hdr) + partition_size - a_hdr_size;
		     (void *)(alloc_hdr) > (void *)(hdr) + offset_free_cached;
		     alloc_hdr = (void *)(alloc_hdr) -
				 item_size - a_hdr_size) {
			item_size = alloc_hdr->size;
			padding_data = alloc_hdr->padding_data;
			if (WARN_ON(padding_data > item_size
				    || item_size > partition_size))
				goto out;

			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
				LOG_ERR("%s: SMEM corruption detected. Partition %d to %d at %p\n",
					__func__, partition_num, to_proc,
					alloc_hdr);
				BUG();
			}
			if (alloc_hdr->smem_type == id) {
				/* 8 byte alignment to match legacy */
				*size = ALIGN(item_size - padding_data, 8);
				item = (void *)(alloc_hdr) - item_size;
				break;
			}
		}
	} else {
		offset_free_uncached = hdr->offset_free_uncached;
		if (WARN_ON(offset_free_uncached > partition_size))
			goto out;

		for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
		     (void *)(alloc_hdr) < (void *)(hdr) +
					   offset_free_uncached;
		     alloc_hdr = (void *)(alloc_hdr) + sizeof(*alloc_hdr) +
				 padding_hdr + item_size) {
			padding_hdr = alloc_hdr->padding_hdr;
			padding_data = alloc_hdr->padding_data;
			item_size = alloc_hdr->size;
			if (WARN_ON(padding_hdr > partition_size
				    || item_size > partition_size
				    || padding_data > item_size))
				goto out;

			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
				LOG_ERR("%s: SMEM corruption detected. Partition %d to %d at %p\n",
					__func__, partition_num, to_proc,
					alloc_hdr);
				BUG();
			}
			if (alloc_hdr->smem_type == id) {
				/* 8 byte alignment to match legacy */
				*size = ALIGN(item_size - padding_data, 8);
				item = (void *)(alloc_hdr) +
				       sizeof(*alloc_hdr) + padding_hdr;
				break;
			}
		}
	}
out:
	if (use_rspinlock)
		remote_spin_unlock_irqrestore(&remote_spinlock, lflags);

	return item;
}

static void *__smem_find(unsigned int id, unsigned int size_in,
			 bool skip_init_check)
{
	unsigned int size;
	void *ptr;

	ptr = __smem_get_entry_nonsecure(id, &size, skip_init_check, true);
	if (!ptr)
		return NULL;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		SMEM_INFO("smem_find(%u, %u): wrong size %u\n",
			  id, size_in, size);
		return NULL;
	}

	return ptr;
}

/**
 * smem_find - Find existing item with security support
 *
 * @id: ID of SMEM item
 * @size_in: Size of the SMEM item
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
 *	     if the driver is not ready
 */
void *smem_find(unsigned int id, unsigned int size_in, unsigned int to_proc,
		unsigned int flags)
{
	unsigned int size;
	void *ptr;

	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
		 flags);

	/*
	 * Handle the circular dependency between SMEM and software-
	 * implemented remote spinlocks.  SMEM must initialize the remote
	 * spinlocks in probe() before probe() is done.  EPROBE_DEFER handling
	 * will not resolve this code path, so the spinlock item must be
	 * treated as a special case.
	 */
	if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
		return ERR_PTR(-EPROBE_DEFER);

	ptr = smem_get_entry(id, &size, to_proc, flags);
	if (!ptr)
		return NULL;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		SMEM_INFO("smem_find(%u, %u, %u, %u): wrong size %u\n",
			  id, size_in, to_proc, flags, size);
		return NULL;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);

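/*
 * Example use of smem_find() (an illustrative sketch; SMEM_ID_VENDOR0 stands
 * in for a real client's item ID and 1024 for its agreed-upon size):
 *
 *	void *item = smem_find(SMEM_ID_VENDOR0, 1024, SMEM_MODEM, 0);
 *
 *	if (IS_ERR(item))	// -EPROBE_DEFER: SMEM driver not probed yet
 *		return PTR_ERR(item);
 *	if (!item)		// item does not exist or has the wrong size
 *		return -ENOENT;
 */
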
/**
 * alloc_item_nonsecure - Allocate an SMEM item in the nonsecure partition
 *
 * @id: ID of SMEM item
 * @size_in: Size to allocate
 * @returns: Pointer to SMEM item or NULL for error
 *
 * Assumes the id parameter is valid and does not already exist.  Assumes
 * size_in is already adjusted for alignment, if necessary.  Requires the
 * remote spinlock to already be locked.
 */
static void *alloc_item_nonsecure(unsigned int id, unsigned int size_in)
{
	void *smem_base = smem_ram_base;
	struct smem_shared *shared = smem_base;
	struct smem_heap_entry *toc = shared->heap_toc;
	uint32_t free_offset, heap_remaining;
	void *ret = NULL;

	heap_remaining = shared->heap_info.heap_remaining;
	free_offset = shared->heap_info.free_offset;
	if (WARN_ON(heap_remaining > smem_ram_size
		    || free_offset > smem_ram_size))
		return NULL;

	if (heap_remaining >= size_in) {
		toc[id].offset = free_offset;
		toc[id].size = size_in;
		/*
		 * wmb() is necessary to ensure the allocation data is
		 * consistent before setting the allocated flag to prevent
		 * race conditions with remote processors
		 */
		wmb();
		toc[id].allocated = 1;

		shared->heap_info.free_offset += size_in;
		shared->heap_info.heap_remaining -= size_in;
		ret = smem_base + free_offset;
		/*
		 * wmb() is necessary to ensure the heap data is consistent
		 * before continuing to prevent race conditions with remote
		 * processors
		 */
		wmb();
	} else {
		SMEM_INFO("%s: id %u not enough memory %u (required %u)\n",
			  __func__, id, shared->heap_info.heap_remaining,
			  size_in);
	}

	return ret;
}

/**
 * alloc_item_secure - Allocate an SMEM item in a secure partition
 *
 * @id: ID of SMEM item
 * @size_in: Size to allocate
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item or NULL for error
 *
 * Assumes the id parameter is valid and does not already exist.  Assumes
 * size_in is the raw size requested by the client.  Assumes to_proc is a
 * valid host, and a valid partition to that host exists.  Requires the
 * remote spinlock to already be locked.
 */
static void *alloc_item_secure(unsigned int id, unsigned int size_in,
			       unsigned int to_proc, unsigned int flags)
{
	void *smem_base = smem_ram_base;
	struct smem_partition_header *hdr;
	struct smem_partition_allocation_header *alloc_hdr;
	uint32_t offset_free_uncached;
	struct smem_toc __iomem *toc;
	uint32_t offset_free_cached;
	uint32_t partition_size;
	uint32_t partition_num;
	uint32_t a_hdr_size;
	uint32_t a_data_size;
	uint32_t size_cacheline;
	uint32_t free_space;
	void *ret = NULL;

	if (to_proc == SMEM_COMM_HOST) {
		hdr = smem_base + comm_partition.offset;
		partition_num = comm_partition.partition_num;
		size_cacheline = comm_partition.size_cacheline;
	} else if (to_proc < NUM_SMEM_SUBSYSTEMS) {
		hdr = smem_base + partitions[to_proc].offset;
		partition_num = partitions[to_proc].partition_num;
		size_cacheline = partitions[to_proc].size_cacheline;
	} else {
		SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
			  to_proc, id);
		return NULL;
	}

	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR("%s: SMEM corruption detected. Partition %d to %d at %p\n",
			__func__, partition_num, to_proc, hdr);
		BUG();
	}

	toc = smem_get_toc();
	partition_size = readl_relaxed(&toc->entry[partition_num].size);

	offset_free_cached = hdr->offset_free_cached;
	offset_free_uncached = hdr->offset_free_uncached;
	if (WARN_ON(offset_free_uncached > offset_free_cached
		    || offset_free_cached > partition_size))
		return NULL;

	free_space = offset_free_cached - offset_free_uncached;

	if (flags & SMEM_ITEM_CACHED_FLAG) {
		a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
		a_data_size = ALIGN(size_in, size_cacheline);
		if (free_space < a_hdr_size + a_data_size
		    || free_space < size_in) {
			SMEM_INFO("%s: id %u not enough memory %u (required %u), (size_in %u)\n",
				  __func__, id, free_space,
				  a_hdr_size + a_data_size, size_in);
			return ret;
		}
		alloc_hdr = (void *)(hdr) + offset_free_cached - a_hdr_size;
		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
		alloc_hdr->smem_type = id;
		alloc_hdr->size = a_data_size;
		alloc_hdr->padding_data = a_data_size - size_in;
		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
		hdr->offset_free_cached = offset_free_cached -
					  a_hdr_size - a_data_size;
		ret = (void *)(alloc_hdr) - a_data_size;
		/*
		 * The SMEM protocol currently does not support cacheable
		 * areas within the smem region, but if it ever does in the
		 * future, then cache management needs to be done here.
		 * The area of memory this item is allocated from will need
		 * to be dynamically made cacheable, and a cache flush of the
		 * allocation header using __cpuc_flush_dcache_area and
		 * outer_flush_area will need to be done.
		 */
	} else {
		a_hdr_size = sizeof(*alloc_hdr);
		a_data_size = ALIGN(size_in, 8);
		if (free_space < a_hdr_size + a_data_size
		    || free_space < size_in) {
			SMEM_INFO("%s: id %u not enough memory %u (required %u) (size_in %u)\n",
				  __func__, id, free_space,
				  a_hdr_size + a_data_size, size_in);
			return ret;
		}
		alloc_hdr = (void *)(hdr) + offset_free_uncached;
		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
		alloc_hdr->smem_type = id;
		alloc_hdr->size = a_data_size;
		alloc_hdr->padding_data = a_data_size - size_in;
		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
		hdr->offset_free_uncached = offset_free_uncached +
					    a_hdr_size + a_data_size;
		ret = alloc_hdr + 1;
	}
	/*
	 * wmb() is necessary to ensure the heap and allocation data is
	 * consistent before continuing to prevent race conditions with
	 * remote processors
	 */
	wmb();

	return ret;
}

/**
 * smem_alloc - Find an existing item, otherwise allocate it with security
 *		support
 *
 * @id: ID of SMEM item
 * @size_in: Size of the SMEM item
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it couldn't be found/allocated,
 *	     or -EPROBE_DEFER if the driver is not ready
 */
void *smem_alloc(unsigned int id, unsigned int size_in, unsigned int to_proc,
		 unsigned int flags)
{
	unsigned long lflags;
	void *ret = NULL;
	int rc;
	unsigned int size_out;
	unsigned int a_size_in;

	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
		 flags);

	if (!is_probe_done())
		return ERR_PTR(-EPROBE_DEFER);

	if (!smem_initialized_check())
		return NULL;

	if (id >= smem_max_items) {
		SMEM_INFO("%s: invalid id %u\n", __func__, id);
		return NULL;
	}

	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
		SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
			  to_proc, id);
		return NULL;
	}

	if (unlikely(!spinlocks_initialized)) {
		rc = init_smem_remote_spinlock();
		if (unlikely(rc)) {
			SMEM_INFO("%s: id:%u remote spinlock init failed %d\n",
				  __func__, id, rc);
			return NULL;
		}
	}

	a_size_in = ALIGN(size_in, 8);
	do {
		rc = remote_spin_trylock_irqsave(&remote_spinlock, lflags);
	} while (!rc);

	ret = __smem_get_entry_secure(id, &size_out, to_proc, flags, true,
				      false);
	if (ret) {
		SMEM_INFO("%s: %u already allocated\n", __func__, id);
		if (a_size_in == size_out) {
			remote_spin_unlock_irqrestore(&remote_spinlock,
						      lflags);
			return ret;
		}
		remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
		SMEM_INFO("%s: id %u wrong size %u (expected %u)\n",
			  __func__, id, size_out, a_size_in);
		return NULL;
	}

	if (id > SMEM_FIXED_ITEM_LAST) {
		SMEM_INFO("%s: allocating %u size %u to_proc %u flags %u\n",
			  __func__, id, size_in, to_proc, flags);
		if (flags & SMEM_ANY_HOST_FLAG
		    || !partitions[to_proc].offset) {
			if (use_comm_partition)
				ret = alloc_item_secure(id, size_in,
							SMEM_COMM_HOST,
							flags);
			else
				ret = alloc_item_nonsecure(id, a_size_in);
		} else {
			ret = alloc_item_secure(id, size_in, to_proc, flags);
		}
	} else {
		SMEM_INFO("%s: attempted to allocate non-dynamic item %u\n",
			  __func__, id);
	}

	remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc);

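/*
 * Example use of smem_alloc() (an illustrative sketch; the item ID, size,
 * and host are hypothetical placeholders).  Note that an existing item of a
 * different size is reported as NULL:
 *
 *	void *buf = smem_alloc(SMEM_ID_VENDOR1, 512, SMEM_Q6,
 *			       SMEM_ITEM_CACHED_FLAG);
 *
 *	if (IS_ERR(buf))	// -EPROBE_DEFER: SMEM driver not probed yet
 *		return PTR_ERR(buf);
 *	if (!buf)		// could not find or allocate the item
 *		return -ENOMEM;
 */
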
/**
 * smem_get_entry - Get existing item with security support
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
 *	     if the driver isn't ready
 */
void *smem_get_entry(unsigned int id, unsigned int *size,
		     unsigned int to_proc, unsigned int flags)
{
	SMEM_DBG("%s(%u, %u, %u)\n", __func__, id, to_proc, flags);

	/*
	 * Handle the circular dependency between SMEM and software-
	 * implemented remote spinlocks.  SMEM must initialize the remote
	 * spinlocks in probe() before probe() is done.  EPROBE_DEFER handling
	 * will not resolve this code path, so the spinlock item must be
	 * treated as a special case.
	 */
	if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
		return ERR_PTR(-EPROBE_DEFER);

	return __smem_get_entry_secure(id, size, to_proc, flags, false, true);
}
EXPORT_SYMBOL(smem_get_entry);

/**
 * smem_get_entry_no_rlock - Get existing item without using remote spinlock
 *
 * @id: ID of SMEM item
 * @size_out: Pointer to size variable for storing the result
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
 *	     if the driver isn't ready
 *
 * This function does not lock the remote spinlock and should only be used in
 * failure-recovery cases such as retrieving the subsystem failure reason
 * during subsystem restart.
 */
void *smem_get_entry_no_rlock(unsigned int id, unsigned int *size_out,
			      unsigned int to_proc, unsigned int flags)
{
	if (!is_probe_done())
		return ERR_PTR(-EPROBE_DEFER);

	return __smem_get_entry_secure(id, size_out, to_proc, flags, false,
				       false);
}
EXPORT_SYMBOL(smem_get_entry_no_rlock);

/**
 * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
 *
 * @returns: pointer to SMEM remote spinlock
 */
remote_spinlock_t *smem_get_remote_spinlock(void)
{
	if (unlikely(!spinlocks_initialized))
		init_smem_remote_spinlock();
	return &remote_spinlock;
}
EXPORT_SYMBOL(smem_get_remote_spinlock);

/**
 * smem_get_free_space() - Get the available allocation free space for a
 *			   partition
 *
 * @to_proc: remote SMEM host.  Determines the applicable partition
 * @returns: size in bytes available to allocate
 *
 * Helper function for SMD so that SMD only scans the channel allocation
 * table for a partition when it is reasonably certain that a channel has
 * actually been created, because scanning can be expensive.  Creating a
 * channel will consume some of the free space in a partition, so SMD can
 * compare the last free space size against the current free space size to
 * determine if a channel may have been created.  SMD can't do this directly,
 * because the necessary partition internals are restricted to just SMEM.
 */
unsigned int smem_get_free_space(unsigned int to_proc)
{
	struct smem_partition_header *hdr;
	struct smem_shared *shared;
	uint32_t offset_free_uncached;
	struct smem_toc __iomem *toc;
	uint32_t offset_free_cached;
	uint32_t heap_remaining;
	uint32_t p_size;
	uint32_t p_num;

	if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
		pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
		return UINT_MAX;
	}

	if (partitions[to_proc].offset) {
		if (unlikely(OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)smem_areas[0].virt_addr,
				partitions[to_proc].offset))) {
			pr_err("%s: unexpected overflow detected\n",
			       __func__);
			return UINT_MAX;
		}
		hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
		offset_free_cached = hdr->offset_free_cached;
		offset_free_uncached = hdr->offset_free_uncached;
		toc = smem_get_toc();
		p_num = partitions[to_proc].partition_num;
		p_size = readl_relaxed(&toc->entry[p_num].size);
		if (WARN_ON(offset_free_uncached > offset_free_cached
			    || offset_free_cached > p_size))
			return -EINVAL;

		return offset_free_cached - offset_free_uncached;
	}
	shared = smem_ram_base;
	heap_remaining = shared->heap_info.heap_remaining;
	if (WARN_ON(heap_remaining > smem_ram_size))
		return -EINVAL;

	return heap_remaining;
}
EXPORT_SYMBOL(smem_get_free_space);

/**
 * smem_get_version() - Get the smem user version number
 *
 * @idx: SMEM user idx in SMEM_VERSION_INFO table.
 * @returns: smem version number if success otherwise zero.
 */
unsigned int smem_get_version(unsigned int idx)
{
	int *version_array;
	struct smem_shared *smem = smem_ram_base;

	if (idx >= 32) {
		pr_err("%s: invalid idx:%d\n", __func__, idx);
		return 0;
	}

	if (use_comm_partition)
		version_array = smem->version;
	else
		version_array = __smem_find(SMEM_VERSION_INFO,
					    SMEM_VERSION_INFO_SIZE, true);
	if (version_array == NULL)
		return 0;

	return version_array[idx];
}
EXPORT_SYMBOL(smem_get_version);

/**
 * init_smem_remote_spinlock - Reentrant remote spinlock initialization
 *
 * @returns: success or error code for failure
 */
static int init_smem_remote_spinlock(void)
{
	int rc = 0;

	/*
	 * Optimistic locking.  Init only needs to be done once by the first
	 * caller.  After that, serializing inits between different callers
	 * is unnecessary.  The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!spinlocks_initialized) {
		mutex_lock(&spinlock_init_lock);
		if (!spinlocks_initialized) {
			rc = remote_spin_lock_init(&remote_spinlock,
						   SMEM_SPINLOCK_SMEM_ALLOC);
			if (!rc)
				spinlocks_initialized = 1;
		}
		mutex_unlock(&spinlock_init_lock);
	}
	return rc;
}

/**
 * smem_initialized_check - Reentrant check that smem has been initialized
 *
 * @returns: true if initialized, false if not.
 */
bool smem_initialized_check(void)
{
	static int checked;
	static int is_inited;
	unsigned long flags;
	struct smem_shared *smem;
	unsigned int ver;

	if (likely(checked)) {
		if (unlikely(!is_inited))
			LOG_ERR("%s: smem not initialized\n", __func__);
		return is_inited;
	}

	spin_lock_irqsave(&smem_init_check_lock, flags);
	if (checked) {
		spin_unlock_irqrestore(&smem_init_check_lock, flags);
		if (unlikely(!is_inited))
			LOG_ERR("%s: smem not initialized\n", __func__);
		return is_inited;
	}

	smem = smem_ram_base;

	if (smem->heap_info.initialized != 1)
		goto failed;

	if (smem->heap_info.reserved != 0)
		goto failed;

	/*
	 * The Modem SBL is now the Master SBL version and is required to
	 * pre-initialize SMEM and fill in any necessary configuration
	 * structures.  Without the extra configuration data, the SMEM driver
	 * cannot be properly initialized.
	 */
	ver = smem->version[MODEM_SBL_VERSION_INDEX];
	if (ver == SMEM_COMM_PART_VERSION << 16) {
		use_comm_partition = true;
	} else if (ver != SMEM_VERSION << 16) {
		pr_err("%s: SBL version not correct 0x%x\n",
		       __func__, smem->version[MODEM_SBL_VERSION_INDEX]);
		goto failed;
	}

	is_inited = 1;
	checked = 1;
	spin_unlock_irqrestore(&smem_init_check_lock, flags);
	return is_inited;

failed:
	is_inited = 0;
	checked = 1;
	spin_unlock_irqrestore(&smem_init_check_lock, flags);
	LOG_ERR("%s: shared memory needs to be initialized by SBL before booting\n",
		__func__);
	return is_inited;
}
EXPORT_SYMBOL(smem_initialized_check);

static int restart_notifier_cb(struct notifier_block *this,
			       unsigned long code, void *data)
{
	struct restart_notifier_block *notifier;
	struct notif_data *notifdata = data;
	int ret;

	switch (code) {
	case SUBSYS_AFTER_SHUTDOWN:
		notifier = container_of(this,
					struct restart_notifier_block, nb);
		SMEM_INFO("%s: ssrestart for processor %d ('%s')\n",
			  __func__, notifier->processor,
			  notifier->name);
		remote_spin_release(&remote_spinlock, notifier->processor);
		remote_spin_release_all(notifier->processor);
		break;
	case SUBSYS_SOC_RESET:
		if (!(smem_ramdump_dev && notifdata->enable_mini_ramdumps))
			break;
		/* fall through */
	case SUBSYS_RAMDUMP_NOTIFICATION:
		if (!(smem_ramdump_dev && (notifdata->enable_mini_ramdumps
					   || notifdata->enable_ramdump)))
			break;
		SMEM_DBG("%s: saving ramdump\n", __func__);
		/*
		 * XPU protection does not currently allow the
		 * auxiliary memory regions to be dumped.  If this
		 * changes, then num_smem_areas + 1 should be passed
		 * into do_elf_ramdump() to dump all regions.
		 */
		ret = do_elf_ramdump(smem_ramdump_dev,
				     smem_ramdump_segments, 1);
		if (ret < 0)
			LOG_ERR("%s: unable to dump smem %d\n", __func__,
				ret);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static __init int modem_restart_late_init(void)
{
	int i;
	void *handle;
	struct restart_notifier_block *nb;

	if (scm_is_secure_device()) {
		if (smem_dev)
			smem_ramdump_dev = create_ramdump_device("smem",
								 smem_dev);
		if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
			LOG_ERR("%s: Unable to create smem ramdump device.\n",
				__func__);
			smem_ramdump_dev = NULL;
		}
	}
	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
		nb = &restart_notifiers[i];
		handle = subsys_notif_register_notifier(nb->name, &nb->nb);
		SMEM_DBG("%s: registering notif for '%s', handle=%p\n",
			 __func__, nb->name, handle);
	}

	return 0;
}
late_initcall(modem_restart_late_init);

int smem_module_init_notifier_register(struct notifier_block *nb)
{
	int ret;

	if (!nb)
		return -EINVAL;
	mutex_lock(&smem_module_init_notifier_lock);
	ret = raw_notifier_chain_register(&smem_module_init_notifier_list,
					  nb);
	if (smem_module_inited)
		nb->notifier_call(nb, 0, NULL);
	mutex_unlock(&smem_module_init_notifier_lock);
	return ret;
}
EXPORT_SYMBOL(smem_module_init_notifier_register);

int smem_module_init_notifier_unregister(struct notifier_block *nb)
{
	int ret;

	if (!nb)
		return -EINVAL;
	mutex_lock(&smem_module_init_notifier_lock);
	ret = raw_notifier_chain_unregister(&smem_module_init_notifier_list,
					    nb);
	mutex_unlock(&smem_module_init_notifier_lock);
	return ret;
}
EXPORT_SYMBOL(smem_module_init_notifier_unregister);

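/*
 * Example of the module-init notifier (an illustrative sketch;
 * my_smem_ready_cb and my_smem_nb are hypothetical names).  If the SMEM
 * module has already initialized, the callback runs immediately at
 * registration time:
 *
 *	static int my_smem_ready_cb(struct notifier_block *nb,
 *				    unsigned long state, void *data)
 *	{
 *		// SMEM module is initialized at this point
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_smem_nb = {
 *		.notifier_call = my_smem_ready_cb,
 *	};
 *
 *	smem_module_init_notifier_register(&my_smem_nb);
 */
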
static void smem_module_init_notify(uint32_t state, void *data)
{
	mutex_lock(&smem_module_init_notifier_lock);
	smem_module_inited = 1;
	raw_notifier_call_chain(&smem_module_init_notifier_list,
				state, data);
	mutex_unlock(&smem_module_init_notifier_lock);
}

/**
 * smem_init_security_partition - Init local structures for a secured smem
 *				  partition that has apps as one of the hosts
 *
 * @entry: Entry in the security TOC for the partition to init
 * @num: Partition ID
 *
 * Initialize local data structures to point to a secured smem partition
 * that is accessible by apps and another processor.  Assumes that one of the
 * listed hosts is apps.  Verifies that the partition is valid, otherwise it
 * will be skipped.  Checks for memory corruption and will BUG() if detected.
 * Assumes smem_areas is already initialized and that smem_areas[0]
 * corresponds to the smem region with the secured partitions.
 */
static void smem_init_security_partition(struct smem_toc_entry *entry,
					 uint32_t num)
{
	uint16_t remote_host = 0;
	struct smem_partition_header *hdr;
	bool is_comm_partition = false;

	if (!entry->offset) {
		SMEM_INFO("Skipping smem partition %d - bad offset\n", num);
		return;
	}
	if (!entry->size) {
		SMEM_INFO("Skipping smem partition %d - bad size\n", num);
		return;
	}
	if (!entry->size_cacheline) {
		SMEM_INFO("Skipping smem partition %d - bad cacheline\n",
			  num);
		return;
	}

	if (entry->host0 == SMEM_COMM_HOST && entry->host1 == SMEM_COMM_HOST)
		is_comm_partition = true;

	if (!is_comm_partition) {
		if (entry->host0 == SMEM_APPS)
			remote_host = entry->host1;
		else
			remote_host = entry->host0;

		if (remote_host >= NUM_SMEM_SUBSYSTEMS) {
			SMEM_INFO("Skipping smem partition %d - bad remote:%d\n",
				  num, remote_host);
			return;
		}
		if (partitions[remote_host].offset) {
			SMEM_INFO("Skipping smem partition %d - duplicate of %d\n",
				  num, partitions[remote_host].partition_num);
			return;
		}

		if (entry->host0 != SMEM_APPS && entry->host1 != SMEM_APPS) {
			SMEM_INFO("Non-APSS Partition %d offset:%x host0:%d host1:%d\n",
				  num, entry->offset, entry->host0,
				  entry->host1);
			return;
		}
	}

	hdr = smem_areas[0].virt_addr + entry->offset;

	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR("Smem partition %d hdr magic is bad\n", num);
		BUG();
	}
	if (hdr->size != entry->size) {
		LOG_ERR("Smem partition %d size is invalid\n", num);
		BUG();
	}
	if (hdr->offset_free_uncached > hdr->size) {
		LOG_ERR("Smem partition %d uncached heap exceeds size\n",
			num);
		BUG();
	}
	if (hdr->offset_free_cached > hdr->size) {
		LOG_ERR("Smem partition %d cached heap exceeds size\n", num);
		BUG();
	}
	if (is_comm_partition) {
		if (hdr->host0 == SMEM_COMM_HOST
		    && hdr->host1 == SMEM_COMM_HOST) {
			comm_partition.partition_num = num;
			comm_partition.offset = entry->offset;
			comm_partition.size_cacheline = entry->size_cacheline;
			SMEM_INFO("Common Partition %d offset:%x\n", num,
				  entry->offset);
		} else {
			LOG_ERR("Smem Comm partition hosts don't match TOC\n");
			WARN_ON(1);
		}
		return;
	}
	if (hdr->host0 != SMEM_APPS && hdr->host1 != SMEM_APPS) {
		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
		BUG();
	}
	if (hdr->host0 != remote_host && hdr->host1 != remote_host) {
		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
		BUG();
	}

	partitions[remote_host].partition_num = num;
	partitions[remote_host].offset = entry->offset;
	partitions[remote_host].size_cacheline = entry->size_cacheline;
	SMEM_INFO("Partition %d offset:%x remote:%d\n", num, entry->offset,
		  remote_host);
}

/**
 * smem_init_security - Init local support for secured smem
 *
 * Looks for a valid security TOC, and if one is found, parses it looking for
 * partitions that apps can access.  If any such partitions are found, do the
 * required local initialization to support them.  Assumes smem_areas is
 * inited and smem_area[0] corresponds to the smem region with the TOC.
 */
static void smem_init_security(void)
{
	struct smem_toc *toc;
	uint32_t i;

	SMEM_DBG("%s\n", __func__);

	toc = smem_areas[0].virt_addr + smem_areas[0].size - 4 * 1024;

	if (toc->identifier != SMEM_TOC_IDENTIFIER) {
		LOG_ERR("%s failed: invalid TOC magic\n", __func__);
		return;
	}

	for (i = 0; i < toc->num_entries; ++i) {
		SMEM_DBG("Partition %d host0:%d host1:%d\n", i,
			 toc->entry[i].host0, toc->entry[i].host1);
		smem_init_security_partition(&toc->entry[i], i);
	}

	SMEM_DBG("%s done\n", __func__);
}

/**
 * smem_init_target_info - Init smem target information
 *
 * @info_addr : smem target info physical address.
 * @size : size of the smem target info structure.
 *
 * This function initializes the smem_targ_info structure.  It checks for a
 * valid identifier and, if the identifier is valid, initializes the smem
 * variables from it.
 */
static int smem_init_target_info(phys_addr_t info_addr, resource_size_t size)
{
	struct smem_targ_info_type *smem_targ_info;
	void *smem_targ_info_addr;

	smem_targ_info_addr = ioremap_nocache(info_addr, size);
	if (!smem_targ_info_addr) {
		LOG_ERR("%s: failed ioremap_nocache() of addr:%pa size:%pa\n",
			__func__, &info_addr, &size);
		return -ENODEV;
	}
	smem_targ_info =
		(struct smem_targ_info_type __iomem *)smem_targ_info_addr;

	if (smem_targ_info->identifier != SMEM_TARG_INFO_IDENTIFIER) {
		LOG_ERR("%s failed: invalid TARGET INFO magic\n", __func__);
		/* Unmap before returning so the mapping is not leaked */
		iounmap(smem_targ_info_addr);
		return -ENODEV;
	}
	smem_ram_phys = smem_targ_info->phys_base_addr;
	smem_ram_size = smem_targ_info->size;
	if (smem_targ_info->max_items)
		smem_max_items = smem_targ_info->max_items;
	iounmap(smem_targ_info_addr);
	return 0;
}

static int msm_smem_probe(struct platform_device *pdev)
{
	char *key;
	struct resource *r;
	phys_addr_t aux_mem_base;
	resource_size_t aux_mem_size;
	int temp_string_size = 11; /* max 3 digit count */
	char temp_string[temp_string_size];
	int ret;
	struct ramdump_segment *ramdump_segments_tmp = NULL;
	struct smem_area *smem_areas_tmp = NULL;
	int smem_idx = 0;
	bool security_enabled;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					 "smem_targ_info_imem");
	if (r) {
		if (smem_init_target_info(r->start, resource_size(r)))
			goto smem_targ_info_legacy;
		goto smem_targ_info_done;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					 "smem_targ_info_reg");
	if (r) {
		void *reg_base_addr;
		uint64_t base_addr;

		reg_base_addr = ioremap_nocache(r->start, resource_size(r));
		base_addr = (uint32_t)readl_relaxed(reg_base_addr);
		base_addr |=
			((uint64_t)readl_relaxed(reg_base_addr + 0x4) << 32);
		iounmap(reg_base_addr);
		if ((base_addr == 0) || ((base_addr >> 32) != 0)) {
			SMEM_INFO("%s: Invalid SMEM address\n", __func__);
			goto smem_targ_info_legacy;
		}
		if (smem_init_target_info(base_addr,
				sizeof(struct smem_targ_info_type)))
			goto smem_targ_info_legacy;
		goto smem_targ_info_done;
	}

smem_targ_info_legacy:
	SMEM_INFO("%s: reading dt-specified SMEM address\n", __func__);
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smem");
	if (r) {
		smem_ram_size = resource_size(r);
		smem_ram_phys = r->start;
	}

smem_targ_info_done:
	if (!smem_ram_phys || !smem_ram_size) {
		LOG_ERR("%s: Missing SMEM TARGET INFO\n", __func__);
		return -ENODEV;
	}

	smem_ram_base = ioremap_nocache(smem_ram_phys, smem_ram_size);
	if (!smem_ram_base) {
		LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
			__func__, &smem_ram_phys, &smem_ram_size);
		return -ENODEV;
	}

	if (!smem_initialized_check())
		return -ENODEV;

	/*
	 * The software implementation requires smem_find(), which needs
	 * smem_ram_base to be initialized.  The remote spinlock item is
	 * guaranteed to be allocated by the bootloader, so this is the
	 * safest and earliest place to init the spinlock.
	 */
	ret = init_smem_remote_spinlock();
	if (ret) {
		LOG_ERR("%s: remote spinlock init failed %d\n", __func__,
			ret);
		return ret;
	}

	key = "irq-reg-base";
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!r) {
		LOG_ERR("%s: missing '%s'\n", __func__, key);
		return -ENODEV;
	}

	num_smem_areas = 1;
	while (1) {
		scnprintf(temp_string, temp_string_size, "aux-mem%d",
			  num_smem_areas);
		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						 temp_string);
		if (!r)
			break;

		++num_smem_areas;
		if (num_smem_areas > 999) {
			LOG_ERR("%s: max num aux mem regions reached\n",
				__func__);
			break;
		}
	}

	/* Initialize main SMEM region and SSR ramdump region */
	smem_areas_tmp = kmalloc_array(num_smem_areas,
				       sizeof(struct smem_area), GFP_KERNEL);
	if (!smem_areas_tmp) {
		LOG_ERR("%s: smem areas kmalloc failed\n", __func__);
		ret = -ENOMEM;
		goto free_smem_areas;
	}

	ramdump_segments_tmp = kcalloc(num_smem_areas,
				       sizeof(struct ramdump_segment),
				       GFP_KERNEL);
	if (!ramdump_segments_tmp) {
		LOG_ERR("%s: ramdump segment kmalloc failed\n", __func__);
		ret = -ENOMEM;
		goto free_smem_areas;
	}

	smem_areas_tmp[smem_idx].phys_addr = smem_ram_phys;
	smem_areas_tmp[smem_idx].size = smem_ram_size;
	smem_areas_tmp[smem_idx].virt_addr = smem_ram_base;

	ramdump_segments_tmp[smem_idx].address = smem_ram_phys;
	ramdump_segments_tmp[smem_idx].size = smem_ram_size;
	++smem_idx;

	/* Configure auxiliary SMEM regions */
	while (1) {
		scnprintf(temp_string, temp_string_size, "aux-mem%d",
			  smem_idx);
		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						 temp_string);
		if (!r)
			break;
		aux_mem_base = r->start;
		aux_mem_size = resource_size(r);

		ramdump_segments_tmp[smem_idx].address = aux_mem_base;
		ramdump_segments_tmp[smem_idx].size = aux_mem_size;

		smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
		smem_areas_tmp[smem_idx].size = aux_mem_size;
		smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
			(unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
			smem_areas_tmp[smem_idx].size);
		SMEM_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
			 &aux_mem_base, &aux_mem_size,
			 smem_areas_tmp[smem_idx].virt_addr);

		if (!smem_areas_tmp[smem_idx].virt_addr) {
			LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
				__func__,
				&smem_areas_tmp[smem_idx].phys_addr,
				&smem_areas_tmp[smem_idx].size);
			ret = -ENOMEM;
			goto free_smem_areas;
		}

		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
				smem_areas_tmp[smem_idx].size)) {
			LOG_ERR("%s: invalid virtual address block %i: %p:%pa\n",
				__func__, smem_idx,
				smem_areas_tmp[smem_idx].virt_addr,
				&smem_areas_tmp[smem_idx].size);
			++smem_idx;
			ret = -EINVAL;
			goto free_smem_areas;
		}

		++smem_idx;
		if (smem_idx > 999) {
			LOG_ERR("%s: max num aux mem regions reached\n",
				__func__);
			break;
		}
	}

	smem_areas = smem_areas_tmp;
	smem_ramdump_segments = ramdump_segments_tmp;

	key = "qcom,mpu-enabled";
	security_enabled = of_property_read_bool(pdev->dev.of_node, key);
	if (security_enabled) {
		SMEM_INFO("smem security enabled\n");
		smem_init_security();
	}
	smem_dev = &pdev->dev;
	probe_done = true;

	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (ret)
		LOG_ERR("%s: of_platform_populate failed %d\n", __func__,
			ret);

	return 0;

free_smem_areas:
	for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
		iounmap(smem_areas_tmp[smem_idx].virt_addr);

	num_smem_areas = 0;
	kfree(ramdump_segments_tmp);
	kfree(smem_areas_tmp);
	return ret;
}

static const struct of_device_id msm_smem_match_table[] = {
	{ .compatible = "qcom,smem" },
	{},
};

static struct platform_driver msm_smem_driver = {
	.probe = msm_smem_probe,
	.driver = {
		.name = "msm_smem",
		.owner = THIS_MODULE,
		.of_match_table = msm_smem_match_table,
	},
};

int __init msm_smem_init(void)
{
	static bool registered;
	int rc;

	if (registered)
		return 0;

	registered = true;
	smem_max_items = SMEM_NUM_ITEMS;

	smem_ipc_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smem", 0);
	if (!smem_ipc_log_ctx) {
		pr_err("%s: unable to create logging context\n", __func__);
		msm_smem_debug_mask = 0;
	}

	rc = platform_driver_register(&msm_smem_driver);
	if (rc) {
		LOG_ERR("%s: msm_smem_driver register failed %d\n",
			__func__, rc);
		return rc;
	}

	smem_module_init_notify(0, NULL);

	return 0;
}
arch_initcall(msm_smem_init);