subsystem_restart.c

/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "subsys-restart: %s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/of_gpio.h>
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/sysmon.h>
#include <trace/events/trace_msm_pil_event.h>

#include <asm/current.h>

#include "peripheral-loader.h"
#define DISABLE_SSR 0x9889deed
/* If set to 0x9889deed, call to subsystem_restart_dev() returns immediately */
static uint disable_restart_work;
module_param(disable_restart_work, uint, 0644);

static int enable_debug;
module_param(enable_debug, int, 0644);

/* The maximum shutdown timeout is the product of MAX_LOOPS and DELAY_MS. */
#define SHUTDOWN_ACK_MAX_LOOPS	100
#define SHUTDOWN_ACK_DELAY_MS	100
/**
 * enum p_subsys_state - state of a subsystem (private)
 * @SUBSYS_NORMAL: subsystem is operating normally
 * @SUBSYS_CRASHED: subsystem has crashed and hasn't been shut down
 * @SUBSYS_RESTARTING: subsystem has been shut down and is now restarting
 *
 * The 'private' side of the subsystem state used to determine where in the
 * restart process the subsystem is.
 */
enum p_subsys_state {
	SUBSYS_NORMAL,
	SUBSYS_CRASHED,
	SUBSYS_RESTARTING,
};

/**
 * enum subsys_state - state of a subsystem (public)
 * @SUBSYS_OFFLINING: subsystem is offlining
 * @SUBSYS_OFFLINE: subsystem is offline
 * @SUBSYS_ONLINE: subsystem is online
 *
 * The 'public' side of the subsystem state, exposed to userspace.
 */
enum subsys_state {
	SUBSYS_OFFLINING,
	SUBSYS_OFFLINE,
	SUBSYS_ONLINE,
};

static const char * const subsys_states[] = {
	[SUBSYS_OFFLINING] = "OFFLINING",
	[SUBSYS_OFFLINE] = "OFFLINE",
	[SUBSYS_ONLINE] = "ONLINE",
};

static const char * const restart_levels[] = {
	[RESET_SOC] = "SYSTEM",
	[RESET_SUBSYS_COUPLED] = "RELATED",
};
/**
 * struct subsys_tracking - track state of a subsystem or restart order
 * @p_state: private state of subsystem/order
 * @state: public state of subsystem/order
 * @s_lock: protects p_state
 * @lock: protects subsystem/order callbacks and state
 *
 * Tracks the state of a subsystem or a set of subsystems (restart order).
 * Doing this avoids the need to grab each subsystem's lock and update
 * each subsystem's state when restarting an order.
 */
struct subsys_tracking {
	enum p_subsys_state p_state;
	spinlock_t s_lock;
	enum subsys_state state;
	struct mutex lock;
};

/**
 * struct subsys_soc_restart_order - subsystem restart order
 * @device_ptrs: device_node pointers of the subsystems in this restart order
 * @count: number of subsystems in order
 * @track: state tracking and locking
 * @subsys_ptrs: pointers to subsystems in this restart order
 * @list: entry in the global list of restart orders
 */
struct subsys_soc_restart_order {
	struct device_node **device_ptrs;
	int count;

	struct subsys_tracking track;
	struct subsys_device **subsys_ptrs;

	struct list_head list;
};

struct restart_log {
	struct timeval time;
	struct subsys_device *dev;
	struct list_head list;
};
/**
 * struct subsys_device - subsystem device
 * @desc: subsystem descriptor
 * @work: context for subsystem_restart_wq_func() for this device
 * @ssr_wlock: prevents suspend during subsystem_restart()
 * @wlname: name of wakeup source
 * @device_restart_work: work struct for device restart
 * @track: state tracking and locking
 * @notify: subsys notify handle
 * @dev: device
 * @owner: module that provides @desc
 * @count: reference count of subsystem_get()/subsystem_put()
 * @id: id allocated from subsys_ida
 * @restart_level: restart level (index into restart_levels[])
 * @restart_order: order of other devices this device restarts with
 * @crash_count: number of times the device has crashed
 * @dentry: debugfs directory for this device
 * @do_ramdump_on_put: ramdump on subsystem_put() if true
 * @err_ready: completion variable to record error ready from subsystem
 * @crashed: indicates if subsystem has crashed
 * @notif_state: current state of subsystem in terms of subsys notifications
 */
struct subsys_device {
	struct subsys_desc *desc;
	struct work_struct work;
	struct wakeup_source ssr_wlock;
	char wlname[64];
	struct work_struct device_restart_work;
	struct subsys_tracking track;

	void *notify;
	struct device dev;
	struct module *owner;
	int count;
	int id;
	int restart_level;
	int crash_count;
	struct subsys_soc_restart_order *restart_order;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
#endif
	bool do_ramdump_on_put;
	struct cdev char_dev;
	dev_t dev_no;
	struct completion err_ready;
	enum crash_status crashed;
	int notif_state;
	struct list_head list;
};
static struct subsys_device *to_subsys(struct device *d)
{
	return container_of(d, struct subsys_device, dev);
}

void complete_err_ready(struct subsys_device *subsys)
{
	complete(&subsys->err_ready);
}

static struct subsys_tracking *subsys_get_track(struct subsys_device *subsys)
{
	struct subsys_soc_restart_order *order = subsys->restart_order;

	if (order)
		return &order->track;
	else
		return &subsys->track;
}

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->name);
}

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	enum subsys_state state = to_subsys(dev)->track.state;

	return snprintf(buf, PAGE_SIZE, "%s\n", subsys_states[state]);
}

static ssize_t crash_count_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", to_subsys(dev)->crash_count);
}

static ssize_t crash_reason_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n",
			to_subsys(dev)->desc->last_crash_reason);
}

static ssize_t crash_timestamp_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n",
			to_subsys(dev)->desc->last_crash_timestamp);
}

static ssize_t
restart_level_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int level = to_subsys(dev)->restart_level;

	return snprintf(buf, PAGE_SIZE, "%s\n", restart_levels[level]);
}

static ssize_t restart_level_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct subsys_device *subsys = to_subsys(dev);
	const char *p;
	int i, orig_count = count;

	p = memchr(buf, '\n', count);
	if (p)
		count = p - buf;

	for (i = 0; i < ARRAY_SIZE(restart_levels); i++)
		if (!strncasecmp(buf, restart_levels[i], count)) {
			subsys->restart_level = i;
			return orig_count;
		}
	return -EPERM;
}
static ssize_t firmware_name_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->fw_name);
}

static ssize_t firmware_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct subsys_device *subsys = to_subsys(dev);
	struct subsys_tracking *track = subsys_get_track(subsys);
	const char *p;
	int orig_count = count;

	p = memchr(buf, '\n', count);
	if (p)
		count = p - buf;

	pr_info("Changing subsys fw_name to %s\n", buf);
	mutex_lock(&track->lock);
	strlcpy(subsys->desc->fw_name, buf,
			min(count + 1, sizeof(subsys->desc->fw_name)));
	mutex_unlock(&track->lock);

	return orig_count;
}

static ssize_t system_debug_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct subsys_device *subsys = to_subsys(dev);
	char p[6] = "set";

	if (!subsys->desc->system_debug)
		strlcpy(p, "reset", sizeof(p));

	return snprintf(buf, PAGE_SIZE, "%s\n", p);
}

static ssize_t system_debug_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct subsys_device *subsys = to_subsys(dev);
	const char *p;
	int orig_count = count;

	p = memchr(buf, '\n', count);
	if (p)
		count = p - buf;

	if (!strncasecmp(buf, "set", count))
		subsys->desc->system_debug = true;
	else if (!strncasecmp(buf, "reset", count))
		subsys->desc->system_debug = false;
	else
		return -EPERM;
	return orig_count;
}
int subsys_get_restart_level(struct subsys_device *dev)
{
	return dev->restart_level;
}
EXPORT_SYMBOL(subsys_get_restart_level);

static void subsys_set_state(struct subsys_device *subsys,
			     enum subsys_state state)
{
	unsigned long flags;

	spin_lock_irqsave(&subsys->track.s_lock, flags);
	if (subsys->track.state != state) {
		subsys->track.state = state;
		spin_unlock_irqrestore(&subsys->track.s_lock, flags);
		sysfs_notify(&subsys->dev.kobj, NULL, "state");
		return;
	}
	spin_unlock_irqrestore(&subsys->track.s_lock, flags);
}
/**
 * subsys_default_online() - Mark a subsystem as online by default
 * @dev: subsystem to mark as online
 *
 * Marks a subsystem as "online" without increasing the reference count
 * on the subsystem. This is typically used by subsystems that are already
 * online when the kernel boots up.
 */
void subsys_default_online(struct subsys_device *dev)
{
	subsys_set_state(dev, SUBSYS_ONLINE);
}
EXPORT_SYMBOL(subsys_default_online);
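
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * per the comment above, a PIL driver whose firmware is already running at
 * kernel boot can register itself and mark itself online without taking a
 * reference. The function name and the way the descriptor is obtained are
 * assumptions for the example only.
 */
#if 0
static int my_preloaded_subsys_init(struct subsys_desc *desc)
{
	struct subsys_device *subsys;

	subsys = subsys_register(desc);
	if (IS_ERR(subsys))
		return PTR_ERR(subsys);

	/* Firmware was brought up by the bootloader; no PIL boot needed. */
	subsys_default_online(subsys);
	return 0;
}
#endif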
static struct device_attribute subsys_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(state),
	__ATTR_RO(crash_count),
	__ATTR_RO(crash_reason),
	__ATTR_RO(crash_timestamp),
	__ATTR(restart_level, 0644, restart_level_show, restart_level_store),
	__ATTR(firmware_name, 0644, firmware_name_show, firmware_name_store),
	__ATTR(system_debug, 0644, system_debug_show, system_debug_store),
	__ATTR_NULL,
};

static struct bus_type subsys_bus_type = {
	.name		= "msm_subsys",
	.dev_attrs	= subsys_attrs,
};

static DEFINE_IDA(subsys_ida);

static int enable_ramdumps;
module_param(enable_ramdumps, int, 0644);

static int enable_mini_ramdumps;
module_param(enable_mini_ramdumps, int, 0644);

struct workqueue_struct *ssr_wq;
static struct class *char_class;

static LIST_HEAD(restart_log_list);
static LIST_HEAD(subsys_list);
static LIST_HEAD(ssr_order_list);
static DEFINE_MUTEX(soc_order_reg_lock);
static DEFINE_MUTEX(restart_log_mutex);
static DEFINE_MUTEX(subsys_list_lock);
static DEFINE_MUTEX(char_device_lock);
static DEFINE_MUTEX(ssr_order_mutex);
static struct subsys_soc_restart_order *
update_restart_order(struct subsys_device *dev)
{
	int i;
	struct subsys_soc_restart_order *order;
	struct device_node *device = dev->desc->dev->of_node;

	mutex_lock(&soc_order_reg_lock);
	list_for_each_entry(order, &ssr_order_list, list) {
		for (i = 0; i < order->count; i++) {
			if (order->device_ptrs[i] == device) {
				order->subsys_ptrs[i] = dev;
				goto found;
			}
		}
	}
	order = NULL;
found:
	mutex_unlock(&soc_order_reg_lock);

	return order;
}

static int max_restarts;
module_param(max_restarts, int, 0644);

static long max_history_time = 3600;
module_param(max_history_time, long, 0644);
static void do_epoch_check(struct subsys_device *dev)
{
	int n = 0;
	struct timeval *time_first = NULL, *curr_time;
	struct restart_log *r_log, *temp;
	static int max_restarts_check;
	static long max_history_time_check;

	mutex_lock(&restart_log_mutex);

	max_restarts_check = max_restarts;
	max_history_time_check = max_history_time;

	/* Check if epoch checking is enabled */
	if (!max_restarts_check)
		goto out;

	r_log = kmalloc(sizeof(struct restart_log), GFP_KERNEL);
	if (!r_log)
		goto out;
	r_log->dev = dev;
	do_gettimeofday(&r_log->time);
	curr_time = &r_log->time;
	INIT_LIST_HEAD(&r_log->list);

	list_add_tail(&r_log->list, &restart_log_list);

	list_for_each_entry_safe(r_log, temp, &restart_log_list, list) {

		if ((curr_time->tv_sec - r_log->time.tv_sec) >
				max_history_time_check) {

			pr_debug("Deleted node with restart_time = %ld\n",
					r_log->time.tv_sec);
			list_del(&r_log->list);
			kfree(r_log);
			continue;
		}
		if (!n) {
			time_first = &r_log->time;
			pr_debug("Time_first: %ld\n", time_first->tv_sec);
		}
		n++;
		pr_debug("Restart_time: %ld\n", r_log->time.tv_sec);
	}

	if (time_first && n >= max_restarts_check) {
		if ((curr_time->tv_sec - time_first->tv_sec) <
				max_history_time_check)
			panic("Subsystems have crashed %d times in less than %ld seconds!",
				max_restarts_check, max_history_time_check);
	}

out:
	mutex_unlock(&restart_log_mutex);
}
static int is_ramdump_enabled(struct subsys_device *dev)
{
	if (dev->desc->ramdump_disable_gpio)
		return !dev->desc->ramdump_disable;

	return enable_ramdumps;
}

static void send_sysmon_notif(struct subsys_device *dev)
{
	struct subsys_device *subsys;

	mutex_lock(&subsys_list_lock);
	list_for_each_entry(subsys, &subsys_list, list)
		if ((subsys->notif_state > 0) && (subsys != dev))
			sysmon_send_event(dev->desc, subsys->desc,
					  subsys->notif_state);
	mutex_unlock(&subsys_list_lock);
}

static int for_each_subsys_device(struct subsys_device **list,
		unsigned int count, void *data,
		int (*fn)(struct subsys_device *, void *))
{
	int ret;

	while (count--) {
		struct subsys_device *dev = *list++;

		if (!dev)
			continue;
		ret = fn(dev, data);
		if (ret)
			return ret;
	}
	return 0;
}
static void notify_each_subsys_device(struct subsys_device **list,
		unsigned int count,
		enum subsys_notif_type notif, void *data)
{
	struct subsys_device *subsys;

	while (count--) {
		struct subsys_device *dev = *list++;
		struct notif_data notif_data;
		struct platform_device *pdev;

		if (!dev)
			continue;

		pdev = container_of(dev->desc->dev, struct platform_device,
				    dev);
		dev->notif_state = notif;

		mutex_lock(&subsys_list_lock);
		list_for_each_entry(subsys, &subsys_list, list)
			if (dev != subsys &&
			    subsys->track.state == SUBSYS_ONLINE)
				sysmon_send_event(subsys->desc, dev->desc,
						  notif);
		mutex_unlock(&subsys_list_lock);

		if (notif == SUBSYS_AFTER_POWERUP &&
		    dev->track.state == SUBSYS_ONLINE)
			send_sysmon_notif(dev);

		notif_data.crashed = subsys_get_crash_status(dev);
		notif_data.enable_ramdump = is_ramdump_enabled(dev);
		notif_data.enable_mini_ramdumps = enable_mini_ramdumps;
		notif_data.no_auth = dev->desc->no_auth;
		notif_data.pdev = pdev;

		trace_pil_notif("before_send_notif", notif, dev->desc->fw_name);
		subsys_notif_queue_notification(dev->notify, notif,
						&notif_data);
		trace_pil_notif("after_send_notif", notif, dev->desc->fw_name);
	}
}
static void enable_all_irqs(struct subsys_device *dev)
{
	if (dev->desc->err_ready_irq)
		enable_irq(dev->desc->err_ready_irq);
	if (dev->desc->wdog_bite_irq && dev->desc->wdog_bite_handler) {
		enable_irq(dev->desc->wdog_bite_irq);
		irq_set_irq_wake(dev->desc->wdog_bite_irq, 1);
	}
	if (dev->desc->err_fatal_irq && dev->desc->err_fatal_handler)
		enable_irq(dev->desc->err_fatal_irq);
	if (dev->desc->stop_ack_irq && dev->desc->stop_ack_handler)
		enable_irq(dev->desc->stop_ack_irq);
	if (dev->desc->generic_irq && dev->desc->generic_handler) {
		enable_irq(dev->desc->generic_irq);
		irq_set_irq_wake(dev->desc->generic_irq, 1);
	}
}

static void disable_all_irqs(struct subsys_device *dev)
{
	if (dev->desc->err_ready_irq)
		disable_irq(dev->desc->err_ready_irq);
	if (dev->desc->wdog_bite_irq && dev->desc->wdog_bite_handler) {
		disable_irq(dev->desc->wdog_bite_irq);
		irq_set_irq_wake(dev->desc->wdog_bite_irq, 0);
	}
	if (dev->desc->err_fatal_irq && dev->desc->err_fatal_handler)
		disable_irq(dev->desc->err_fatal_irq);
	if (dev->desc->stop_ack_irq && dev->desc->stop_ack_handler)
		disable_irq(dev->desc->stop_ack_irq);
	if (dev->desc->generic_irq && dev->desc->generic_handler) {
		disable_irq(dev->desc->generic_irq);
		irq_set_irq_wake(dev->desc->generic_irq, 0);
	}
}
static int wait_for_err_ready(struct subsys_device *subsys)
{
	int ret;

	/*
	 * Subsystems that use generic_irq have err_ready_irq set to 0, so
	 * only skip the wait when neither interrupt is available, or when
	 * waiting is explicitly disabled for debug.
	 */
	if ((subsys->desc->generic_irq <= 0 && !subsys->desc->err_ready_irq) ||
				enable_debug == 1 || is_timeout_disabled())
		return 0;

	ret = wait_for_completion_timeout(&subsys->err_ready,
					  msecs_to_jiffies(10000));
	if (!ret) {
		pr_err("[%s]: Error ready timed out\n", subsys->desc->name);
		return -ETIMEDOUT;
	}

	return 0;
}
static int subsystem_shutdown(struct subsys_device *dev, void *data)
{
	const char *name = dev->desc->name;
	char *timestamp = dev->desc->last_crash_timestamp;
	int ret;
	struct timespec ts_rtc;
	struct rtc_time tm;

	pr_info("[%s:%d]: Shutting down %s\n",
			current->comm, current->pid, name);
	ret = dev->desc->shutdown(dev->desc, true);
	if (ret < 0) {
		if (!dev->desc->ignore_ssr_failure) {
			panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
				current->comm, current->pid, name);
		} else {
			pr_err("Shutdown failure on %s\n", name);
			return ret;
		}
	}

	/* record crash time */
	getnstimeofday(&ts_rtc);
	rtc_time_to_tm(ts_rtc.tv_sec - (sys_tz.tz_minuteswest * 60), &tm);
	snprintf(timestamp, MAX_CRASH_TIMESTAMP_LEN,
			"%d-%02d-%02d_%02d-%02d-%02d",
			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
			tm.tm_hour, tm.tm_min, tm.tm_sec);

	dev->crash_count++;
	subsys_set_state(dev, SUBSYS_OFFLINE);
	disable_all_irqs(dev);

	return 0;
}
static int subsystem_ramdump(struct subsys_device *dev, void *data)
{
	const char *name = dev->desc->name;

	if (dev->desc->ramdump)
		if (dev->desc->ramdump(is_ramdump_enabled(dev), dev->desc) < 0)
			pr_warn("%s[%s:%d]: Ramdump failed.\n",
				name, current->comm, current->pid);
	dev->do_ramdump_on_put = false;
	return 0;
}

static int subsystem_free_memory(struct subsys_device *dev, void *data)
{
	if (dev->desc->free_memory)
		dev->desc->free_memory(dev->desc);
	return 0;
}
static int subsystem_powerup(struct subsys_device *dev, void *data)
{
	const char *name = dev->desc->name;
	int ret;

	pr_info("[%s:%d]: Powering up %s\n", current->comm, current->pid, name);
	init_completion(&dev->err_ready);

	ret = dev->desc->powerup(dev->desc);
	if (ret < 0) {
		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
					  NULL);
		if (system_state == SYSTEM_RESTART
			|| system_state == SYSTEM_POWER_OFF)
			WARN(1, "SSR aborted: %s, system reboot/shutdown is under way\n",
				name);
		else if (!dev->desc->ignore_ssr_failure)
			panic("[%s:%d]: Powerup error: %s!",
				current->comm, current->pid, name);
		else
			pr_err("Powerup failure on %s\n", name);
		return ret;
	}
	enable_all_irqs(dev);

	ret = wait_for_err_ready(dev);
	if (ret) {
		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
					  NULL);
		if (!dev->desc->ignore_ssr_failure)
			panic("[%s:%d]: Timed out waiting for error ready: %s!",
				current->comm, current->pid, name);
		else
			return ret;
	}
	subsys_set_state(dev, SUBSYS_ONLINE);
	subsys_set_crash_status(dev, CRASH_STATUS_NO_CRASH);

	return 0;
}
static int __find_subsys(struct device *dev, void *data)
{
	struct subsys_device *subsys = to_subsys(dev);

	return !strcmp(subsys->desc->name, data);
}

static struct subsys_device *find_subsys(const char *str)
{
	struct device *dev;

	if (!str)
		return NULL;

	dev = bus_find_device(&subsys_bus_type, NULL, (void *)str,
			__find_subsys);
	return dev ? to_subsys(dev) : NULL;
}
static int subsys_start(struct subsys_device *subsys)
{
	int ret;

	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_POWERUP,
				  NULL);

	init_completion(&subsys->err_ready);
	ret = subsys->desc->powerup(subsys->desc);
	if (ret) {
		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
					  NULL);
		return ret;
	}
	enable_all_irqs(subsys);

	if (subsys->desc->is_not_loadable) {
		subsys_set_state(subsys, SUBSYS_ONLINE);
		return 0;
	}

	ret = wait_for_err_ready(subsys);
	if (ret) {
		/*
		 * pil-boot succeeded but we need to shutdown
		 * the device because error ready timed out.
		 */
		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
					  NULL);
		subsys->desc->shutdown(subsys->desc, false);
		disable_all_irqs(subsys);
		return ret;
	}
	subsys_set_state(subsys, SUBSYS_ONLINE);

	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_POWERUP,
				  NULL);
	return ret;
}
static void subsys_stop(struct subsys_device *subsys)
{
	const char *name = subsys->desc->name;

	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_SHUTDOWN, NULL);
	if (!of_property_read_bool(subsys->desc->dev->of_node,
					"qcom,pil-force-shutdown")) {
		subsys_set_state(subsys, SUBSYS_OFFLINING);
		subsys->desc->sysmon_shutdown_ret =
				sysmon_send_shutdown(subsys->desc);
		if (subsys->desc->sysmon_shutdown_ret)
			pr_debug("Graceful shutdown failed for %s\n", name);
	}

	subsys->desc->shutdown(subsys->desc, false);
	subsys_set_state(subsys, SUBSYS_OFFLINE);
	disable_all_irqs(subsys);
	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_SHUTDOWN, NULL);
}
int subsystem_set_fwname(const char *name, const char *fw_name)
{
	struct subsys_device *subsys;

	if (!name)
		return -EINVAL;

	if (!fw_name)
		return -EINVAL;

	subsys = find_subsys(name);
	if (!subsys)
		return -EINVAL;

	pr_debug("Changing subsys [%s] fw_name to [%s]\n", name, fw_name);
	strlcpy(subsys->desc->fw_name, fw_name,
		sizeof(subsys->desc->fw_name));

	return 0;
}
EXPORT_SYMBOL(subsystem_set_fwname);
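
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a caller can point a subsystem at an alternate firmware image before
 * booting it. The subsystem name "modem" and image name "modem_test" are
 * placeholders for the example only.
 */
#if 0
static void *boot_test_modem(void)
{
	if (subsystem_set_fwname("modem", "modem_test"))
		return ERR_PTR(-EINVAL);

	/* The new fw_name is picked up on the next boot of the subsystem. */
	return subsystem_get("modem");
}
#endif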
int wait_for_shutdown_ack(struct subsys_desc *desc)
{
	int count;
	struct subsys_device *dev;

	if (!desc || !desc->shutdown_ack_gpio)
		return 0;

	dev = find_subsys(desc->name);
	if (!dev)
		return 0;

	for (count = SHUTDOWN_ACK_MAX_LOOPS; count > 0; count--) {
		if (gpio_get_value(desc->shutdown_ack_gpio))
			return count;
		else if (subsys_get_crash_status(dev))
			break;
		msleep(SHUTDOWN_ACK_DELAY_MS);
	}

	pr_err("[%s]: Timed out waiting for shutdown ack\n", desc->name);
	return -ETIMEDOUT;
}
EXPORT_SYMBOL(wait_for_shutdown_ack);
void *__subsystem_get(const char *name, const char *fw_name)
{
	struct subsys_device *subsys;
	struct subsys_device *subsys_d;
	int ret;
	void *retval;
	struct subsys_tracking *track;

	if (!name)
		return NULL;

	subsys = retval = find_subsys(name);
	if (!subsys)
		return ERR_PTR(-ENODEV);
	if (!try_module_get(subsys->owner)) {
		retval = ERR_PTR(-ENODEV);
		goto err_module;
	}

	subsys_d = subsystem_get(subsys->desc->depends_on);
	if (IS_ERR(subsys_d)) {
		retval = subsys_d;
		goto err_depends;
	}

	track = subsys_get_track(subsys);
	mutex_lock(&track->lock);
	if (!subsys->count) {
		if (fw_name) {
			pr_info("Changing subsys fw_name to %s\n", fw_name);
			strlcpy(subsys->desc->fw_name, fw_name,
				sizeof(subsys->desc->fw_name));
		}
		ret = subsys_start(subsys);
		if (ret) {
			retval = ERR_PTR(ret);
			goto err_start;
		}
	}
	subsys->count++;
	mutex_unlock(&track->lock);
	return retval;
err_start:
	mutex_unlock(&track->lock);
	subsystem_put(subsys_d);
err_depends:
	module_put(subsys->owner);
err_module:
	put_device(&subsys->dev);
	return retval;
}
/**
 * subsystem_get() - Boot a subsystem
 * @name: pointer to a string containing the name of the subsystem to boot
 *
 * This function returns a pointer if it succeeds. If an error occurs an
 * ERR_PTR is returned.
 *
 * If this feature is disabled, the value %NULL will be returned.
 */
void *subsystem_get(const char *name)
{
	return __subsystem_get(name, NULL);
}
EXPORT_SYMBOL(subsystem_get);

/**
 * subsystem_get_with_fwname() - Boot a subsystem using the firmware name passed
 * @name: pointer to a string containing the name of the subsystem to boot
 * @fw_name: pointer to a string containing the subsystem firmware image name
 *
 * This function returns a pointer if it succeeds. If an error occurs an
 * ERR_PTR is returned.
 *
 * If this feature is disabled, the value %NULL will be returned.
 */
void *subsystem_get_with_fwname(const char *name, const char *fw_name)
{
	return __subsystem_get(name, fw_name);
}
EXPORT_SYMBOL(subsystem_get_with_fwname);
/**
 * subsystem_put() - Shutdown a subsystem
 * @subsystem: pointer from a previous call to subsystem_get()
 *
 * The subsystem is not actually shut down until all callers of
 * subsystem_get() have called subsystem_put().
 */
void subsystem_put(void *subsystem)
{
	struct subsys_device *subsys_d, *subsys = subsystem;
	struct subsys_tracking *track;

	if (IS_ERR_OR_NULL(subsys))
		return;

	track = subsys_get_track(subsys);
	mutex_lock(&track->lock);
	if (WARN(!subsys->count, "%s: %s: Reference count mismatch\n",
			subsys->desc->name, __func__))
		goto err_out;
	if (!--subsys->count) {
		subsys_stop(subsys);
		if (subsys->do_ramdump_on_put)
			subsystem_ramdump(subsys, NULL);
		subsystem_free_memory(subsys, NULL);
	}
	mutex_unlock(&track->lock);

	subsys_d = find_subsys(subsys->desc->depends_on);
	if (subsys_d) {
		subsystem_put(subsys_d);
		put_device(&subsys_d->dev);
	}
	module_put(subsys->owner);
	put_device(&subsys->dev);
	return;
err_out:
	mutex_unlock(&track->lock);
}
EXPORT_SYMBOL(subsystem_put);
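
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a client keeps the handle returned by subsystem_get() and releases it with
 * subsystem_put() when the peripheral is no longer needed. The "adsp" name
 * and the my_client_* function names are placeholders.
 */
#if 0
static void *adsp_handle;

static int my_client_start(void)
{
	adsp_handle = subsystem_get("adsp");
	if (IS_ERR_OR_NULL(adsp_handle))
		return adsp_handle ? PTR_ERR(adsp_handle) : -ENODEV;
	return 0;
}

static void my_client_stop(void)
{
	/* Last put shuts the subsystem down (see subsystem_put() above). */
	subsystem_put(adsp_handle);
	adsp_handle = NULL;
}
#endif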
static void subsystem_restart_wq_func(struct work_struct *work)
{
	struct subsys_device *dev = container_of(work,
						struct subsys_device, work);
	struct subsys_device **list;
	struct subsys_desc *desc = dev->desc;
	struct subsys_soc_restart_order *order = dev->restart_order;
	struct subsys_tracking *track;
	unsigned int count;
	unsigned long flags;
	int ret;

	/*
	 * It's OK to not take the registration lock at this point.
	 * This is because the subsystem list inside the relevant
	 * restart order is not being traversed.
	 */
	if (order) {
		list = order->subsys_ptrs;
		count = order->count;
		track = &order->track;
	} else {
		list = &dev;
		count = 1;
		track = &dev->track;
	}

	/*
	 * If a system reboot/shutdown is under way, ignore subsystem errors.
	 * However, print a message so that we know that a subsystem behaved
	 * unexpectedly here.
	 */
	if (system_state == SYSTEM_RESTART
		|| system_state == SYSTEM_POWER_OFF) {
		WARN(1, "SSR aborted: %s, system reboot/shutdown is under way\n",
			desc->name);
		return;
	}

	mutex_lock(&track->lock);
	do_epoch_check(dev);

	if (dev->track.state == SUBSYS_OFFLINE) {
		mutex_unlock(&track->lock);
		WARN(1, "SSR aborted: %s subsystem not online\n", desc->name);
		return;
	}

	/*
	 * It's necessary to take the registration lock because the subsystem
	 * list in the SoC restart order will be traversed and it shouldn't be
	 * changed until _this_ restart sequence completes.
	 */
	mutex_lock(&soc_order_reg_lock);

	pr_debug("[%s:%d]: Starting restart sequence for %s\n",
			current->comm, current->pid, desc->name);
	notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
	ret = for_each_subsys_device(list, count, NULL, subsystem_shutdown);
	if (ret)
		goto err;
	notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);

	notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION,
				  NULL);

	spin_lock_irqsave(&track->s_lock, flags);
	track->p_state = SUBSYS_RESTARTING;
	spin_unlock_irqrestore(&track->s_lock, flags);

	/* Collect ram dumps for all subsystems in order here */
	for_each_subsys_device(list, count, NULL, subsystem_ramdump);

	for_each_subsys_device(list, count, NULL, subsystem_free_memory);

	notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL);
	ret = for_each_subsys_device(list, count, NULL, subsystem_powerup);
	if (ret)
		goto err;
	notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);

	pr_info("[%s:%d]: Restart sequence for %s completed.\n",
			current->comm, current->pid, desc->name);

err:
	/* Reset subsys count */
	if (ret)
		dev->count = 0;

	mutex_unlock(&soc_order_reg_lock);
	mutex_unlock(&track->lock);

	spin_lock_irqsave(&track->s_lock, flags);
	track->p_state = SUBSYS_NORMAL;
	__pm_relax(&dev->ssr_wlock);
	spin_unlock_irqrestore(&track->s_lock, flags);
}
static void __subsystem_restart_dev(struct subsys_device *dev)
{
	struct subsys_desc *desc = dev->desc;
	const char *name = dev->desc->name;
	struct subsys_tracking *track;
	unsigned long flags;

	pr_debug("Restarting %s [level=%s]!\n", desc->name,
			restart_levels[dev->restart_level]);

	track = subsys_get_track(dev);
	/*
	 * Allow drivers to call subsystem_restart{_dev}() as many times as
	 * they want up until the point where the subsystem is shutdown.
	 */
	spin_lock_irqsave(&track->s_lock, flags);
	if (track->p_state != SUBSYS_CRASHED &&
					dev->track.state == SUBSYS_ONLINE) {
		if (track->p_state != SUBSYS_RESTARTING) {
			track->p_state = SUBSYS_CRASHED;
			__pm_stay_awake(&dev->ssr_wlock);
			queue_work(ssr_wq, &dev->work);
		} else {
			panic("Subsystem %s crashed during SSR!", name);
		}
	} else
		WARN(dev->track.state == SUBSYS_OFFLINE,
			"SSR aborted: %s subsystem not online\n", name);
	spin_unlock_irqrestore(&track->s_lock, flags);
}
static void device_restart_work_hdlr(struct work_struct *work)
{
	struct subsys_device *dev = container_of(work, struct subsys_device,
							device_restart_work);

	notify_each_subsys_device(&dev, 1, SUBSYS_SOC_RESET, NULL);
	/*
	 * Temporary workaround until ramdump userspace application calls
	 * sync() and fclose() on attempting the dump.
	 */
	msleep(100);
	panic("subsys-restart: Resetting the SoC - %s crashed.",
							dev->desc->name);
}
int subsystem_restart_dev(struct subsys_device *dev)
{
	const char *name;

	if (!get_device(&dev->dev))
		return -ENODEV;

	if (!try_module_get(dev->owner)) {
		put_device(&dev->dev);
		return -ENODEV;
	}

	name = dev->desc->name;

	/*
	 * If a system reboot/shutdown is underway, ignore subsystem errors.
	 * However, print a message so that we know that a subsystem behaved
	 * unexpectedly here.
	 */
	if (system_state == SYSTEM_RESTART
		|| system_state == SYSTEM_POWER_OFF) {
		pr_err("%s crashed during a system poweroff/shutdown.\n", name);
		return -EBUSY;
	}

	pr_info("Restart sequence requested for %s, restart_level = %s.\n",
		name, restart_levels[dev->restart_level]);

	if (disable_restart_work == DISABLE_SSR) {
		pr_warn("subsys-restart: Ignoring restart request for %s\n",
			name);
		return 0;
	}

	switch (dev->restart_level) {

	case RESET_SUBSYS_COUPLED:
		__subsystem_restart_dev(dev);
		break;
	case RESET_SOC:
		__pm_stay_awake(&dev->ssr_wlock);
		schedule_work(&dev->device_restart_work);
		return 0;
	default:
		panic("subsys-restart: Unknown restart level!\n");
		break;
	}
	module_put(dev->owner);
	put_device(&dev->dev);

	return 0;
}
EXPORT_SYMBOL(subsystem_restart_dev);
int subsystem_restart(const char *name)
{
	int ret;
	struct subsys_device *dev = find_subsys(name);

	if (!dev)
		return -ENODEV;

	ret = subsystem_restart_dev(dev);
	put_device(&dev->dev);
	return ret;
}
EXPORT_SYMBOL(subsystem_restart);
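
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * code that detects a wedged peripheral can record why it went down and
 * request SSR by name. The function name is a placeholder; the
 * CRASH_STATUS_ERR_FATAL value is assumed to come from the enum crash_status
 * in soc/qcom/subsystem_restart.h.
 */
#if 0
static void my_report_fatal_error(struct subsys_device *dev)
{
	/* Record the crash reason, then kick off the restart sequence. */
	subsys_set_crash_status(dev, CRASH_STATUS_ERR_FATAL);
	subsystem_restart(dev->desc->name);
}
#endif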
int subsystem_crashed(const char *name)
{
	struct subsys_device *dev = find_subsys(name);
	struct subsys_tracking *track;

	if (!dev)
		return -ENODEV;

	if (!get_device(&dev->dev))
		return -ENODEV;

	track = subsys_get_track(dev);

	mutex_lock(&track->lock);
	dev->do_ramdump_on_put = true;
	/*
	 * TODO: Make this work with multiple consumers where one is calling
	 * subsystem_restart() and another is calling this function. To do
	 * so would require updating private state, etc.
	 */
	mutex_unlock(&track->lock);

	put_device(&dev->dev);
	return 0;
}
EXPORT_SYMBOL(subsystem_crashed);

void subsys_set_crash_status(struct subsys_device *dev,
				enum crash_status crashed)
{
	dev->crashed = crashed;
}
EXPORT_SYMBOL(subsys_set_crash_status);

enum crash_status subsys_get_crash_status(struct subsys_device *dev)
{
	return dev->crashed;
}
static struct subsys_device *desc_to_subsys(struct device *d)
{
	struct subsys_device *device, *subsys_dev = NULL;

	mutex_lock(&subsys_list_lock);
	list_for_each_entry(device, &subsys_list, list)
		if (device->desc->dev == d)
			subsys_dev = device;
	mutex_unlock(&subsys_list_lock);
	return subsys_dev;
}

void notify_proxy_vote(struct device *device)
{
	struct subsys_device *dev = desc_to_subsys(device);

	if (dev)
		notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_VOTE, NULL);
}

void notify_proxy_unvote(struct device *device)
{
	struct subsys_device *dev = desc_to_subsys(device);

	if (dev)
		notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_UNVOTE, NULL);
}
#ifdef CONFIG_DEBUG_FS
static ssize_t subsys_debugfs_read(struct file *filp, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	int r;
	char buf[40];
	struct subsys_device *subsys = filp->private_data;

	r = snprintf(buf, sizeof(buf), "%d\n", subsys->count);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t subsys_debugfs_write(struct file *filp,
		const char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct subsys_device *subsys = filp->private_data;
	char buf[10];
	char *cmp;

	cnt = min(cnt, sizeof(buf) - 1);
	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';
	cmp = strstrip(buf);

	if (!strcmp(cmp, "restart")) {
		if (subsystem_restart_dev(subsys))
			return -EIO;
	} else if (!strcmp(cmp, "get")) {
		if (subsystem_get(subsys->desc->name))
			return -EIO;
	} else if (!strcmp(cmp, "put")) {
		subsystem_put(subsys);
	} else {
		return -EINVAL;
	}

	return cnt;
}

static const struct file_operations subsys_debugfs_fops = {
	.open	= simple_open,
	.read	= subsys_debugfs_read,
	.write	= subsys_debugfs_write,
};

static struct dentry *subsys_base_dir;

static int __init subsys_debugfs_init(void)
{
	subsys_base_dir = debugfs_create_dir("msm_subsys", NULL);
	return !subsys_base_dir ? -ENOMEM : 0;
}

static void subsys_debugfs_exit(void)
{
	debugfs_remove_recursive(subsys_base_dir);
}

static int subsys_debugfs_add(struct subsys_device *subsys)
{
	if (!subsys_base_dir)
		return -ENOMEM;

	subsys->dentry = debugfs_create_file(subsys->desc->name,
					0644, subsys_base_dir,
					subsys, &subsys_debugfs_fops);
	return !subsys->dentry ? -ENOMEM : 0;
}

static void subsys_debugfs_remove(struct subsys_device *subsys)
{
	debugfs_remove(subsys->dentry);
}
#else
static int __init subsys_debugfs_init(void) { return 0; }
static void subsys_debugfs_exit(void) { }
static int subsys_debugfs_add(struct subsys_device *subsys) { return 0; }
static void subsys_debugfs_remove(struct subsys_device *subsys) { }
#endif
static int subsys_device_open(struct inode *inode, struct file *file)
{
	struct subsys_device *device, *subsys_dev = NULL;
	void *retval;

	mutex_lock(&subsys_list_lock);
	list_for_each_entry(device, &subsys_list, list)
		if (MINOR(device->dev_no) == iminor(inode))
			subsys_dev = device;
	mutex_unlock(&subsys_list_lock);

	if (!subsys_dev)
		return -EINVAL;

	retval = subsystem_get_with_fwname(subsys_dev->desc->name,
					subsys_dev->desc->fw_name);
	if (IS_ERR(retval))
		return PTR_ERR(retval);

	return 0;
}

static int subsys_device_close(struct inode *inode, struct file *file)
{
	struct subsys_device *device, *subsys_dev = NULL;

	mutex_lock(&subsys_list_lock);
	list_for_each_entry(device, &subsys_list, list)
		if (MINOR(device->dev_no) == iminor(inode))
			subsys_dev = device;
	mutex_unlock(&subsys_list_lock);

	if (!subsys_dev)
		return -EINVAL;

	subsystem_put(subsys_dev);

	return 0;
}

static const struct file_operations subsys_device_fops = {
	.owner = THIS_MODULE,
	.open = subsys_device_open,
	.release = subsys_device_close,
};
static void subsys_device_release(struct device *dev)
{
	struct subsys_device *subsys = to_subsys(dev);

	wakeup_source_trash(&subsys->ssr_wlock);
	mutex_destroy(&subsys->track.lock);
	ida_simple_remove(&subsys_ida, subsys->id);
	kfree(subsys);
}

static irqreturn_t subsys_err_ready_intr_handler(int irq, void *subsys)
{
	struct subsys_device *subsys_dev = subsys;

	dev_info(subsys_dev->desc->dev,
		"Subsystem error monitoring/handling services are up\n");

	if (subsys_dev->desc->is_not_loadable)
		return IRQ_HANDLED;

	complete(&subsys_dev->err_ready);
	return IRQ_HANDLED;
}
static int subsys_char_device_add(struct subsys_device *subsys_dev)
{
	int ret = 0;
	static int major, minor;
	dev_t dev_no;
	struct device *device;

	mutex_lock(&char_device_lock);
	if (!major) {
		ret = alloc_chrdev_region(&dev_no, 0, 4, "subsys");
		if (ret < 0) {
			pr_err("Failed to alloc subsys_dev region, err %d\n",
									ret);
			goto fail;
		}
		major = MAJOR(dev_no);
		minor = MINOR(dev_no);
	} else
		dev_no = MKDEV(major, minor);

	device = device_create(char_class, subsys_dev->desc->dev, dev_no,
			NULL, "subsys_%s", subsys_dev->desc->name);
	if (IS_ERR(device)) {
		pr_err("Failed to create subsys_%s device\n",
						subsys_dev->desc->name);
		ret = PTR_ERR(device);
		goto fail_unregister_cdev_region;
	}

	cdev_init(&subsys_dev->char_dev, &subsys_device_fops);
	subsys_dev->char_dev.owner = THIS_MODULE;
	ret = cdev_add(&subsys_dev->char_dev, dev_no, 1);
	if (ret < 0)
		goto fail_destroy_device;

	subsys_dev->dev_no = dev_no;
	minor++;
	mutex_unlock(&char_device_lock);

	return 0;

fail_destroy_device:
	device_destroy(char_class, dev_no);
fail_unregister_cdev_region:
	unregister_chrdev_region(dev_no, 1);
fail:
	mutex_unlock(&char_device_lock);
	return ret;
}

static void subsys_char_device_remove(struct subsys_device *subsys_dev)
{
	cdev_del(&subsys_dev->char_dev);
	device_destroy(char_class, subsys_dev->dev_no);
	unregister_chrdev_region(subsys_dev->dev_no, 1);
}
static void subsys_remove_restart_order(struct device_node *device)
{
	struct subsys_soc_restart_order *order;
	int i;

	mutex_lock(&ssr_order_mutex);
	list_for_each_entry(order, &ssr_order_list, list)
		for (i = 0; i < order->count; i++)
			if (order->device_ptrs[i] == device)
				order->subsys_ptrs[i] = NULL;
	mutex_unlock(&ssr_order_mutex);
}
static struct subsys_soc_restart_order *ssr_parse_restart_orders(
						struct subsys_desc *desc)
{
	int i, j, count, num = 0;
	struct subsys_soc_restart_order *order, *tmp;
	struct device *dev = desc->dev;
	struct device_node *ssr_node;
	uint32_t len;

	if (!of_get_property(dev->of_node, "qcom,restart-group", &len))
		return NULL;

	count = len/sizeof(uint32_t);

	order = devm_kzalloc(dev, sizeof(*order), GFP_KERNEL);
	if (!order)
		return ERR_PTR(-ENOMEM);

	order->subsys_ptrs = devm_kzalloc(dev,
				count * sizeof(struct subsys_device *),
				GFP_KERNEL);
	if (!order->subsys_ptrs)
		return ERR_PTR(-ENOMEM);

	order->device_ptrs = devm_kzalloc(dev,
				count * sizeof(struct device_node *),
				GFP_KERNEL);
	if (!order->device_ptrs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		ssr_node = of_parse_phandle(dev->of_node,
						"qcom,restart-group", i);
		if (!ssr_node)
			return ERR_PTR(-ENXIO);
		of_node_put(ssr_node);
		pr_info("%s device has been added to %s's restart group\n",
						ssr_node->name, desc->name);
		order->device_ptrs[i] = ssr_node;
	}

	/*
	 * Check for similar restart groups. If found, return
	 * without adding the new group to the ssr_order_list.
	 */
	mutex_lock(&ssr_order_mutex);
	list_for_each_entry(tmp, &ssr_order_list, list) {
		for (i = 0; i < count; i++) {
			for (j = 0; j < count; j++) {
				if (order->device_ptrs[j] !=
					tmp->device_ptrs[i])
					continue;
				else
					num++;
			}
		}

		if (num == count && tmp->count == count)
			goto err;
		else if (num) {
			tmp = ERR_PTR(-EINVAL);
			goto err;
		}
	}

	order->count = count;
	mutex_init(&order->track.lock);
	spin_lock_init(&order->track.s_lock);

	INIT_LIST_HEAD(&order->list);
	list_add_tail(&order->list, &ssr_order_list);
	mutex_unlock(&ssr_order_mutex);

	return order;
err:
	mutex_unlock(&ssr_order_mutex);
	return tmp;
}
static int __get_gpio(struct subsys_desc *desc, const char *prop,
		int *gpio)
{
	struct device_node *dnode = desc->dev->of_node;
	int ret = -ENOENT;

	if (of_find_property(dnode, prop, NULL)) {
		*gpio = of_get_named_gpio(dnode, prop, 0);
		ret = *gpio < 0 ? *gpio : 0;
	}

	return ret;
}

static int __get_irq(struct subsys_desc *desc, const char *prop,
		unsigned int *irq, int *gpio)
{
	int ret, gpiol, irql;

	ret = __get_gpio(desc, prop, &gpiol);
	if (ret)
		return ret;

	irql = gpio_to_irq(gpiol);

	if (irql == -ENOENT)
		irql = -ENXIO;

	if (irql < 0) {
		pr_err("[%s]: Error getting IRQ \"%s\"\n", desc->name,
				prop);
		return irql;
	}

	if (gpio)
		*gpio = gpiol;
	*irq = irql;

	return 0;
}
static int subsys_parse_devicetree(struct subsys_desc *desc)
{
	struct subsys_soc_restart_order *order;
	int ret;
	struct platform_device *pdev = container_of(desc->dev,
					struct platform_device, dev);

	ret = __get_irq(desc, "qcom,gpio-err-fatal", &desc->err_fatal_irq,
							&desc->err_fatal_gpio);
	if (ret && ret != -ENOENT)
		return ret;

	ret = __get_irq(desc, "qcom,gpio-err-ready", &desc->err_ready_irq,
									NULL);
	if (ret && ret != -ENOENT)
		return ret;

	ret = __get_irq(desc, "qcom,gpio-stop-ack", &desc->stop_ack_irq, NULL);
	if (ret && ret != -ENOENT)
		return ret;

	ret = __get_gpio(desc, "qcom,gpio-force-stop", &desc->force_stop_gpio);
	if (ret && ret != -ENOENT)
		return ret;

	ret = __get_gpio(desc, "qcom,gpio-ramdump-disable",
			&desc->ramdump_disable_gpio);
	if (ret && ret != -ENOENT)
		return ret;

	ret = __get_gpio(desc, "qcom,gpio-shutdown-ack",
			&desc->shutdown_ack_gpio);
	if (ret && ret != -ENOENT)
		return ret;

	ret = platform_get_irq(pdev, 0);
	if (ret > 0)
		desc->wdog_bite_irq = ret;

	if (of_property_read_bool(pdev->dev.of_node,
					"qcom,pil-generic-irq-handler")) {
		ret = platform_get_irq(pdev, 0);
		if (ret > 0)
			desc->generic_irq = ret;
	}

	desc->ignore_ssr_failure = of_property_read_bool(pdev->dev.of_node,
						"qcom,ignore-ssr-failure");

	order = ssr_parse_restart_orders(desc);
	if (IS_ERR(order)) {
		pr_err("Could not initialize SSR restart order, err = %ld\n",
							PTR_ERR(order));
		return PTR_ERR(order);
	}

	return 0;
}
static int subsys_setup_irqs(struct subsys_device *subsys)
{
	struct subsys_desc *desc = subsys->desc;
	int ret;

	if (desc->err_fatal_irq && desc->err_fatal_handler) {
		ret = devm_request_irq(desc->dev, desc->err_fatal_irq,
				desc->err_fatal_handler,
				IRQF_TRIGGER_RISING, desc->name, desc);
		if (ret < 0) {
			dev_err(desc->dev, "[%s]: Unable to register error fatal IRQ handler!: %d\n",
				desc->name, ret);
			return ret;
		}
		disable_irq(desc->err_fatal_irq);
	}

	if (desc->stop_ack_irq && desc->stop_ack_handler) {
		ret = devm_request_irq(desc->dev, desc->stop_ack_irq,
				desc->stop_ack_handler,
				IRQF_TRIGGER_RISING, desc->name, desc);
		if (ret < 0) {
			dev_err(desc->dev, "[%s]: Unable to register stop ack handler!: %d\n",
				desc->name, ret);
			return ret;
		}
		disable_irq(desc->stop_ack_irq);
	}

	if (desc->wdog_bite_irq && desc->wdog_bite_handler) {
		ret = devm_request_irq(desc->dev, desc->wdog_bite_irq,
				desc->wdog_bite_handler,
				IRQF_TRIGGER_RISING, desc->name, desc);
		if (ret < 0) {
			dev_err(desc->dev, "[%s]: Unable to register wdog bite handler!: %d\n",
				desc->name, ret);
			return ret;
		}
		disable_irq(desc->wdog_bite_irq);
	}

	if (desc->generic_irq && desc->generic_handler) {
		ret = devm_request_irq(desc->dev, desc->generic_irq,
				desc->generic_handler,
				IRQF_TRIGGER_HIGH, desc->name, desc);
		if (ret < 0) {
			dev_err(desc->dev, "[%s]: Unable to register generic irq handler!: %d\n",
				desc->name, ret);
			return ret;
		}
		disable_irq(desc->generic_irq);
	}

	if (desc->err_ready_irq) {
		ret = devm_request_irq(desc->dev,
					desc->err_ready_irq,
					subsys_err_ready_intr_handler,
					IRQF_TRIGGER_RISING,
					"error_ready_interrupt", subsys);
		if (ret < 0) {
			dev_err(desc->dev,
				"[%s]: Unable to register err ready handler\n",
				desc->name);
			return ret;
		}
		disable_irq(desc->err_ready_irq);
	}

	return 0;
}
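
/*
 * Release the devm-requested error-fatal, stop-ack, wdog-bite and
 * error-ready IRQs obtained in subsys_setup_irqs().
 */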
static void subsys_free_irqs(struct subsys_device *subsys)
{
	struct subsys_desc *desc = subsys->desc;

	if (desc->err_fatal_irq && desc->err_fatal_handler)
		devm_free_irq(desc->dev, desc->err_fatal_irq, desc);
	if (desc->stop_ack_irq && desc->stop_ack_handler)
		devm_free_irq(desc->dev, desc->stop_ack_irq, desc);
	if (desc->wdog_bite_irq && desc->wdog_bite_handler)
		devm_free_irq(desc->dev, desc->wdog_bite_irq, desc);
	if (desc->err_ready_irq)
		devm_free_irq(desc->dev, desc->err_ready_irq, subsys);
}
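
/**
 * subsys_register() - register a subsystem with the SSR framework
 * @desc: description of the subsystem, including its device and callbacks
 *
 * Allocates a subsys_device, creates the associated device, debugfs and
 * character device entries, parses the optional devicetree properties,
 * sets up the notification, IRQ and sysmon hooks, and adds the subsystem
 * to the global subsystem list.
 *
 * Returns a pointer to the new subsys_device on success or an ERR_PTR()
 * on failure.
 */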
struct subsys_device *subsys_register(struct subsys_desc *desc)
{
	struct subsys_device *subsys;
	struct device_node *ofnode = desc->dev->of_node;
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return ERR_PTR(-ENOMEM);

	subsys->desc = desc;
	subsys->owner = desc->owner;
	subsys->dev.parent = desc->dev;
	subsys->dev.bus = &subsys_bus_type;
	subsys->dev.release = subsys_device_release;
	subsys->notif_state = -1;
	subsys->desc->sysmon_pid = -1;
	strlcpy(subsys->desc->fw_name, desc->name,
			sizeof(subsys->desc->fw_name));

	subsys->notify = subsys_notif_add_subsys(desc->name);

	snprintf(subsys->wlname, sizeof(subsys->wlname), "ssr(%s)", desc->name);
	wakeup_source_init(&subsys->ssr_wlock, subsys->wlname);
	INIT_WORK(&subsys->work, subsystem_restart_wq_func);
	INIT_WORK(&subsys->device_restart_work, device_restart_work_hdlr);
	spin_lock_init(&subsys->track.s_lock);

	subsys->id = ida_simple_get(&subsys_ida, 0, 0, GFP_KERNEL);
	if (subsys->id < 0) {
		wakeup_source_trash(&subsys->ssr_wlock);
		ret = subsys->id;
		kfree(subsys);
		return ERR_PTR(ret);
	}

	dev_set_name(&subsys->dev, "subsys%d", subsys->id);

	mutex_init(&subsys->track.lock);

	ret = subsys_debugfs_add(subsys);
	if (ret) {
		ida_simple_remove(&subsys_ida, subsys->id);
		wakeup_source_trash(&subsys->ssr_wlock);
		kfree(subsys);
		return ERR_PTR(ret);
	}

	ret = device_register(&subsys->dev);
	if (ret) {
		subsys_debugfs_remove(subsys);
		put_device(&subsys->dev);
		return ERR_PTR(ret);
	}

	ret = subsys_char_device_add(subsys);
	if (ret)
		goto err_register;

	if (ofnode) {
		ret = subsys_parse_devicetree(desc);
		if (ret)
			goto err_register;

		subsys->restart_order = update_restart_order(subsys);

		ret = subsys_setup_irqs(subsys);
		if (ret < 0)
			goto err_setup_irqs;

		if (of_property_read_u32(ofnode, "qcom,ssctl-instance-id",
					&desc->ssctl_instance_id))
			pr_debug("Reading instance-id for %s failed\n",
					desc->name);

		if (of_property_read_u32(ofnode, "qcom,sysmon-id",
					&subsys->desc->sysmon_pid))
			pr_debug("Reading sysmon-id for %s failed\n",
					desc->name);

		subsys->desc->edge = of_get_property(ofnode, "qcom,edge",
					NULL);
		if (!subsys->desc->edge)
			pr_debug("Reading qcom,edge for %s failed\n",
					desc->name);
	}

	ret = sysmon_notifier_register(desc);
	if (ret < 0)
		goto err_sysmon_notifier;

	if (subsys->desc->edge) {
		ret = sysmon_glink_register(desc);
		if (ret < 0)
			goto err_sysmon_glink_register;
	}

	mutex_lock(&subsys_list_lock);
	INIT_LIST_HEAD(&subsys->list);
	list_add_tail(&subsys->list, &subsys_list);
	mutex_unlock(&subsys_list_lock);

	return subsys;

err_sysmon_glink_register:
	sysmon_notifier_unregister(subsys->desc);
err_sysmon_notifier:
	if (ofnode)
		subsys_free_irqs(subsys);
err_setup_irqs:
	if (ofnode)
		subsys_remove_restart_order(ofnode);
err_register:
	subsys_debugfs_remove(subsys);
	device_unregister(&subsys->dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(subsys_register);
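
/**
 * subsys_unregister() - unregister a subsystem from the SSR framework
 * @subsys: subsystem to remove, as returned by subsys_register()
 *
 * Removes the subsystem from the global list, frees its IRQs and restart
 * order, and tears down the device, debugfs, character device and sysmon
 * registrations. Warns if the subsystem is still powered up (count != 0).
 */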
void subsys_unregister(struct subsys_device *subsys)
{
	struct subsys_device *subsys_dev, *tmp;
	struct device_node *device;

	if (IS_ERR_OR_NULL(subsys))
		return;

	/* Only dereference subsys once it is known to be valid. */
	device = subsys->desc->dev->of_node;

	if (get_device(&subsys->dev)) {
		mutex_lock(&subsys_list_lock);
		list_for_each_entry_safe(subsys_dev, tmp, &subsys_list, list)
			if (subsys_dev == subsys)
				list_del(&subsys->list);
		mutex_unlock(&subsys_list_lock);

		if (device) {
			subsys_free_irqs(subsys);
			subsys_remove_restart_order(device);
		}

		mutex_lock(&subsys->track.lock);
		WARN_ON(subsys->count);
		device_unregister(&subsys->dev);
		mutex_unlock(&subsys->track.lock);

		subsys_debugfs_remove(subsys);
		subsys_char_device_remove(subsys);
		sysmon_notifier_unregister(subsys->desc);
		if (subsys->desc->edge)
			sysmon_glink_unregister(subsys->desc);
		put_device(&subsys->dev);
	}
}
EXPORT_SYMBOL(subsys_unregister);
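
/*
 * Panic path: invoke each registered subsystem's crash_shutdown() callback,
 * if one is provided, via the bus iterator in ssr_panic_handler() below.
 */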
static int subsys_panic(struct device *dev, void *data)
{
	struct subsys_device *subsys = to_subsys(dev);

	if (subsys->desc->crash_shutdown)
		subsys->desc->crash_shutdown(subsys->desc);
	return 0;
}

static int ssr_panic_handler(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	bus_for_each_dev(&subsys_bus_type, NULL, NULL, subsys_panic);
	return NOTIFY_DONE;
}

static struct notifier_block panic_nb = {
	.notifier_call = ssr_panic_handler,
};
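
/*
 * Framework initialization: create the SSR workqueue, register the subsys
 * bus type, debugfs entries and character device class, and hook into the
 * kernel panic notifier chain.
 */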
static int __init subsys_restart_init(void)
{
	int ret;

	ssr_wq = alloc_workqueue("ssr_wq", WQ_CPU_INTENSIVE, 0);
	BUG_ON(!ssr_wq);

	ret = bus_register(&subsys_bus_type);
	if (ret)
		goto err_bus;

	ret = subsys_debugfs_init();
	if (ret)
		goto err_debugfs;

	char_class = class_create(THIS_MODULE, "subsys");
	if (IS_ERR(char_class)) {
		ret = -ENOMEM;
		pr_err("Failed to create subsys_dev class\n");
		goto err_class;
	}

	ret = atomic_notifier_chain_register(&panic_notifier_list,
			&panic_nb);
	if (ret)
		goto err_soc;

	return 0;

err_soc:
	class_destroy(char_class);
err_class:
	subsys_debugfs_exit();
err_debugfs:
	bus_unregister(&subsys_bus_type);
err_bus:
	destroy_workqueue(ssr_wq);
	return ret;
}
arch_initcall(subsys_restart_init);

MODULE_DESCRIPTION("Subsystem Restart Driver");
MODULE_LICENSE("GPL v2");