watchdog_v2.c

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/memory_dump.h>
#include <soc/qcom/minidump.h>
#include <soc/qcom/watchdog.h>
#include <linux/dma-mapping.h>

#define MODULE_NAME "msm_watchdog"
#define WDT0_ACCSCSSNBARK_INT 0
#define TCSR_WDT_CFG 0x30
#define WDT0_RST 0x04
#define WDT0_EN 0x08
#define WDT0_STS 0x0C
#define WDT0_BARK_TIME 0x10
#define WDT0_BITE_TIME 0x14

#define WDOG_ABSENT 0

#define EN 0
#define UNMASKED_INT_EN 1

#define MASK_SIZE 32
#define SCM_SET_REGSAVE_CMD 0x2
#define SCM_SVC_SEC_WDOG_DIS 0x7
#define MAX_CPU_CTX_SIZE 2048
#define MAX_CPU_SCANDUMP_SIZE 0x10100

static struct msm_watchdog_data *wdog_data;

static int cpu_idle_pc_state[NR_CPUS];

/*
 * user_pet_enabled:
 * Require userspace to write to a sysfs file every pet_time milliseconds.
 * Disabled by default on boot.
 */
struct msm_watchdog_data {
        unsigned int __iomem phys_base;
        size_t size;
        void __iomem *base;
        void __iomem *wdog_absent_base;
        struct device *dev;
        unsigned int pet_time;
        unsigned int bark_time;
        unsigned int bark_irq;
        unsigned int bite_irq;
        bool do_ipi_ping;
        bool wakeup_irq_enable;
        unsigned long long last_pet;
        unsigned int min_slack_ticks;
        unsigned long long min_slack_ns;
        void *scm_regsave;
        cpumask_t alive_mask;
        struct mutex disable_lock;
        bool irq_ppi;
        struct msm_watchdog_data __percpu **wdog_cpu_dd;
        struct notifier_block panic_blk;
        bool enabled;
        bool user_pet_enabled;
        struct task_struct *watchdog_task;
        struct timer_list pet_timer;
        wait_queue_head_t pet_complete;
        bool timer_expired;
        bool user_pet_complete;
        unsigned int scandump_size;
};

/*
 * On the kernel command line specify
 * watchdog_v2.enable=1 to enable the watchdog.
 * By default the watchdog is turned on.
 */
static int enable = 1;
module_param(enable, int, 0);

/*
 * On the kernel command line specify
 * watchdog_v2.WDT_HZ=<clock val in HZ> to set the watchdog
 * tick rate. By default it is set to 32765.
 */
static long WDT_HZ = 32765;
module_param(WDT_HZ, long, 0);

/*
 * Watchdog ipi optimization:
 * Does not ping cores in low power mode at pet time to save power.
 * This feature is enabled by default.
 *
 * On the kernel command line specify
 * watchdog_v2.ipi_en=1 to disable this optimization.
 * It can also be disabled at build time by enabling
 * CONFIG_QCOM_WDOG_IPI_ENABLE.
 */
#ifdef CONFIG_QCOM_WDOG_IPI_ENABLE
#define IPI_CORES_IN_LPM 1
#else
#define IPI_CORES_IN_LPM 0
#endif

static int ipi_en = IPI_CORES_IN_LPM;
module_param(ipi_en, int, 0444);

static void dump_cpu_alive_mask(struct msm_watchdog_data *wdog_dd)
{
        static char alive_mask_buf[MASK_SIZE];

        scnprintf(alive_mask_buf, MASK_SIZE, "%*pb1", cpumask_pr_args(
                        &wdog_dd->alive_mask));
        dev_info(wdog_dd->dev, "cpu alive mask from last pet %s\n",
                        alive_mask_buf);
}

static int msm_watchdog_suspend(struct device *dev)
{
        struct msm_watchdog_data *wdog_dd =
                        (struct msm_watchdog_data *)dev_get_drvdata(dev);

        if (!enable)
                return 0;
        __raw_writel(1, wdog_dd->base + WDT0_RST);
        if (wdog_dd->wakeup_irq_enable) {
                /* Make sure register write is complete before proceeding */
                mb();
                wdog_dd->last_pet = sched_clock();
                return 0;
        }
        __raw_writel(0, wdog_dd->base + WDT0_EN);
        /* Make sure watchdog is suspended before setting enable */
        mb();
        wdog_dd->enabled = false;
        wdog_dd->last_pet = sched_clock();
        return 0;
}

static int msm_watchdog_resume(struct device *dev)
{
        struct msm_watchdog_data *wdog_dd =
                        (struct msm_watchdog_data *)dev_get_drvdata(dev);

        if (!enable)
                return 0;
        if (wdog_dd->wakeup_irq_enable) {
                __raw_writel(1, wdog_dd->base + WDT0_RST);
                /* Make sure register write is complete before proceeding */
                mb();
                wdog_dd->last_pet = sched_clock();
                return 0;
        }
        __raw_writel(1, wdog_dd->base + WDT0_EN);
        __raw_writel(1, wdog_dd->base + WDT0_RST);
        /* Make sure watchdog is reset before setting enable */
        mb();
        wdog_dd->enabled = true;
        wdog_dd->last_pet = sched_clock();
        return 0;
}
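
/*
 * Panic notifier: when panic_timeout is 0 the watchdog is switched off so the
 * panic path is not interrupted; otherwise the bark and bite times are pushed
 * out past the panic timeout (plus a 10 s margin) and the counter is reset,
 * so the watchdog still fires if the panic path itself gets stuck.
 */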
static int panic_wdog_handler(struct notifier_block *this,
                        unsigned long event, void *ptr)
{
        struct msm_watchdog_data *wdog_dd = container_of(this,
                        struct msm_watchdog_data, panic_blk);

        if (panic_timeout == 0) {
                __raw_writel(0, wdog_dd->base + WDT0_EN);
                /* Make sure watchdog is disabled before notifying the caller */
                mb();
        } else {
                __raw_writel(WDT_HZ * (panic_timeout + 10),
                                wdog_dd->base + WDT0_BARK_TIME);
                __raw_writel(WDT_HZ * (panic_timeout + 10),
                                wdog_dd->base + WDT0_BITE_TIME);
                __raw_writel(1, wdog_dd->base + WDT0_RST);
        }
        return NOTIFY_DONE;
}

static void wdog_disable(struct msm_watchdog_data *wdog_dd)
{
        __raw_writel(0, wdog_dd->base + WDT0_EN);
        /* Make sure watchdog is disabled before proceeding */
        mb();
        if (wdog_dd->irq_ppi) {
                disable_percpu_irq(wdog_dd->bark_irq);
                free_percpu_irq(wdog_dd->bark_irq, wdog_dd->wdog_cpu_dd);
        } else
                devm_free_irq(wdog_dd->dev, wdog_dd->bark_irq, wdog_dd);
        enable = 0;
        /* Ensure all cpus see update to enable */
        smp_mb();
        atomic_notifier_chain_unregister(&panic_notifier_list,
                        &wdog_dd->panic_blk);
        del_timer_sync(&wdog_dd->pet_timer);
        /* may be suspended after the first write above */
        __raw_writel(0, wdog_dd->base + WDT0_EN);
        /* Make sure watchdog is disabled before setting enable */
        mb();
        wdog_dd->enabled = false;
        pr_info("MSM Apps Watchdog deactivated.\n");
}

static ssize_t wdog_disable_get(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        int ret;
        struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);

        mutex_lock(&wdog_dd->disable_lock);
        ret = snprintf(buf, PAGE_SIZE, "%d\n", enable == 0 ? 1 : 0);
        mutex_unlock(&wdog_dd->disable_lock);
        return ret;
}
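
/*
 * Writing 1 to the "disable" attribute first asks the secure world (via an
 * SCM call to SCM_SVC_SEC_WDOG_DIS) to deactivate the secure watchdog, then
 * tears down the non-secure apps watchdog with wdog_disable(). Only "1" is
 * accepted; re-enabling from sysfs is not supported.
 */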
static ssize_t wdog_disable_set(struct device *dev,
                        struct device_attribute *attr,
                        const char *buf, size_t count)
{
        int ret;
        u8 disable;
        struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);

        ret = kstrtou8(buf, 10, &disable);
        if (ret) {
                dev_err(wdog_dd->dev, "invalid user input\n");
                return ret;
        }
        if (disable == 1) {
                mutex_lock(&wdog_dd->disable_lock);
                if (enable == 0) {
                        pr_info("MSM Apps Watchdog already disabled\n");
                        mutex_unlock(&wdog_dd->disable_lock);
                        return count;
                }
                disable = 1;
                if (!is_scm_armv8()) {
                        ret = scm_call(SCM_SVC_BOOT, SCM_SVC_SEC_WDOG_DIS,
                                        &disable, sizeof(disable), NULL, 0);
                } else {
                        struct scm_desc desc = {0};

                        desc.args[0] = 1;
                        desc.arginfo = SCM_ARGS(1);
                        ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT,
                                        SCM_SVC_SEC_WDOG_DIS), &desc);
                }
                if (ret) {
                        dev_err(wdog_dd->dev,
                                "Failed to deactivate secure wdog\n");
                        mutex_unlock(&wdog_dd->disable_lock);
                        return -EIO;
                }
                wdog_disable(wdog_dd);
                mutex_unlock(&wdog_dd->disable_lock);
        } else {
                pr_err("invalid operation, only disable = 1 supported\n");
                return -EINVAL;
        }
        return count;
}

static DEVICE_ATTR(disable, S_IWUSR | S_IRUSR, wdog_disable_get,
                        wdog_disable_set);

/*
 * Userspace Watchdog Support:
 * Write 1 to the "user_pet_enabled" file to enable hw support for a
 * userspace watchdog.
 * Userspace is required to pet the watchdog by continuing to write 1
 * to this file in the expected interval.
 * Userspace may disable this requirement by writing 0 to this same
 * file.
 */
static void __wdog_user_pet(struct msm_watchdog_data *wdog_dd)
{
        wdog_dd->user_pet_complete = true;
        wake_up(&wdog_dd->pet_complete);
}

static ssize_t wdog_user_pet_enabled_get(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        int ret;
        struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);

        ret = snprintf(buf, PAGE_SIZE, "%d\n",
                        wdog_dd->user_pet_enabled);
        return ret;
}

static ssize_t wdog_user_pet_enabled_set(struct device *dev,
                        struct device_attribute *attr,
                        const char *buf, size_t count)
{
        int ret;
        struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);

        ret = strtobool(buf, &wdog_dd->user_pet_enabled);
        if (ret) {
                dev_err(wdog_dd->dev, "invalid user input\n");
                return ret;
        }
        __wdog_user_pet(wdog_dd);
        return count;
}

static DEVICE_ATTR(user_pet_enabled, S_IWUSR | S_IRUSR,
                        wdog_user_pet_enabled_get, wdog_user_pet_enabled_set);

static ssize_t wdog_pet_time_get(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        int ret;
        struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);

        ret = snprintf(buf, PAGE_SIZE, "%d\n", wdog_dd->pet_time);
        return ret;
}

static DEVICE_ATTR(pet_time, S_IRUSR, wdog_pet_time_get, NULL);
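
/*
 * Reads the current watchdog count from WDT0_STS (bits [20:1]) until two
 * consecutive reads agree, presumably because the counter runs in the slow
 * watchdog clock domain and a single read may be sampled mid-update. The
 * remaining ticks until the bark are tracked as "slack", then the counter is
 * reset by writing WDT0_RST and the pet timestamp is recorded.
 */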
static void pet_watchdog(struct msm_watchdog_data *wdog_dd)
{
        int slack, i, count, prev_count = 0;
        unsigned long long time_ns;
        unsigned long long slack_ns;
        unsigned long long bark_time_ns = wdog_dd->bark_time * 1000000ULL;

        for (i = 0; i < 2; i++) {
                count = (__raw_readl(wdog_dd->base + WDT0_STS) >> 1) & 0xFFFFF;
                if (count != prev_count) {
                        prev_count = count;
                        i = 0;
                }
        }
        slack = ((wdog_dd->bark_time * WDT_HZ) / 1000) - count;
        if (slack < wdog_dd->min_slack_ticks)
                wdog_dd->min_slack_ticks = slack;
        __raw_writel(1, wdog_dd->base + WDT0_RST);
        time_ns = sched_clock();
        slack_ns = (wdog_dd->last_pet + bark_time_ns) - time_ns;
        if (slack_ns < wdog_dd->min_slack_ns)
                wdog_dd->min_slack_ns = slack_ns;
        wdog_dd->last_pet = time_ns;
}

static void keep_alive_response(void *info)
{
        int cpu = smp_processor_id();
        struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)info;

        cpumask_set_cpu(cpu, &wdog_dd->alive_mask);
        /* Make sure alive mask is cleared and set in order */
        smp_mb();
}

/*
 * If this function does not return, it implies one of the
 * other cpus is not responsive.
 */
static void ping_other_cpus(struct msm_watchdog_data *wdog_dd)
{
        int cpu;

        cpumask_clear(&wdog_dd->alive_mask);
        /* Make sure alive mask is cleared and set in order */
        smp_mb();
        for_each_cpu(cpu, cpu_online_mask) {
                if (!cpu_idle_pc_state[cpu] && !cpu_isolated(cpu))
                        smp_call_function_single(cpu, keep_alive_response,
                                        wdog_dd, 1);
        }
}

static void pet_task_wakeup(unsigned long data)
{
        struct msm_watchdog_data *wdog_dd =
                        (struct msm_watchdog_data *)data;

        wdog_dd->timer_expired = true;
        wake_up(&wdog_dd->pet_complete);
}
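
/*
 * Pet flow: pet_timer fires every pet_time ms and calls pet_task_wakeup(),
 * which sets timer_expired and wakes this kthread. The kthread then (if
 * qcom,ipi-ping is set) IPI-pings every online, non-idle, non-isolated CPU to
 * fill alive_mask, waits for the userspace pet when user_pet_enabled is set,
 * pets the hardware, and re-arms pet_timer.
 */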
static __ref int watchdog_kthread(void *arg)
{
        struct msm_watchdog_data *wdog_dd =
                        (struct msm_watchdog_data *)arg;
        unsigned long delay_time = 0;
        struct sched_param param = {.sched_priority = MAX_RT_PRIO-1};

        sched_setscheduler(current, SCHED_FIFO, &param);
        while (!kthread_should_stop()) {
                while (wait_event_interruptible(
                                wdog_dd->pet_complete,
                                wdog_dd->timer_expired) != 0)
                        ;
                if (wdog_dd->do_ipi_ping)
                        ping_other_cpus(wdog_dd);

                while (wait_event_interruptible(
                                wdog_dd->pet_complete,
                                wdog_dd->user_pet_complete) != 0)
                        ;

                wdog_dd->timer_expired = false;
                wdog_dd->user_pet_complete = !wdog_dd->user_pet_enabled;

                if (enable) {
                        delay_time = msecs_to_jiffies(wdog_dd->pet_time);
                        pet_watchdog(wdog_dd);
                }
                /* Check again before scheduling
                 * Could have been changed on other cpu
                 */
                mod_timer(&wdog_dd->pet_timer, jiffies + delay_time);
        }
        return 0;
}
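
/*
 * CPU PM notifier: tracks which CPUs are in idle power collapse so that
 * ping_other_cpus() can skip them instead of waking them just to answer the
 * keep-alive IPI. Only registered when ipi_en is 0.
 */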
static int wdog_cpu_pm_notify(struct notifier_block *self,
                        unsigned long action, void *v)
{
        int cpu;

        cpu = raw_smp_processor_id();

        switch (action) {
        case CPU_PM_ENTER:
                cpu_idle_pc_state[cpu] = 1;
                break;
        case CPU_PM_ENTER_FAILED:
        case CPU_PM_EXIT:
                cpu_idle_pc_state[cpu] = 0;
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block wdog_cpu_pm_nb = {
        .notifier_call = wdog_cpu_pm_notify,
};

static int msm_watchdog_remove(struct platform_device *pdev)
{
        struct msm_watchdog_data *wdog_dd =
                        (struct msm_watchdog_data *)platform_get_drvdata(pdev);

        if (!ipi_en)
                cpu_pm_unregister_notifier(&wdog_cpu_pm_nb);

        mutex_lock(&wdog_dd->disable_lock);
        if (enable)
                wdog_disable(wdog_dd);
        mutex_unlock(&wdog_dd->disable_lock);

        device_remove_file(wdog_dd->dev, &dev_attr_disable);
        if (wdog_dd->irq_ppi)
                free_percpu(wdog_dd->wdog_cpu_dd);
        dev_info(wdog_dd->dev, "MSM Watchdog Exit - Deactivated\n");
        del_timer_sync(&wdog_dd->pet_timer);
        kthread_stop(wdog_dd->watchdog_task);
        kfree(wdog_dd);
        return 0;
}
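
/*
 * Force an immediate watchdog bite: program a one-tick bite time, reset the
 * counter so it overruns that limit right away, then spin in mdelay() waiting
 * for the reset to happen. If execution ever gets past the delay, the bite
 * did not take effect, so the watchdog registers are dumped for debugging.
 */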
void msm_trigger_wdog_bite(void)
{
        if (!wdog_data)
                return;
        pr_info("Causing a watchdog bite!");
        __raw_writel(1, wdog_data->base + WDT0_BITE_TIME);
        /* Make sure bite time is written before we reset */
        mb();
        __raw_writel(1, wdog_data->base + WDT0_RST);
        /* Make sure the reset is written before we wait */
        mb();
        /* Delay to make sure bite occurs */
        mdelay(10000);
        pr_err("Wdog - STS: 0x%x, CTL: 0x%x, BARK TIME: 0x%x, BITE TIME: 0x%x",
                __raw_readl(wdog_data->base + WDT0_STS),
                __raw_readl(wdog_data->base + WDT0_EN),
                __raw_readl(wdog_data->base + WDT0_BARK_TIME),
                __raw_readl(wdog_data->base + WDT0_BITE_TIME));
}
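
/*
 * Bark IRQ handler: the bark fires when the counter passes WDT0_BARK_TIME
 * without a pet. Log the bark and last-pet timestamps (and the CPU alive mask
 * when IPI pings are enabled), then try to escalate to a watchdog bite so the
 * reset path can make use of the dump regions registered below; panic only if
 * the bite never happens.
 */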
static irqreturn_t wdog_bark_handler(int irq, void *dev_id)
{
        struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)dev_id;
        unsigned long nanosec_rem;
        unsigned long long t = sched_clock();

        nanosec_rem = do_div(t, 1000000000);
        dev_info(wdog_dd->dev, "Watchdog bark! Now = %lu.%06lu\n",
                        (unsigned long) t, nanosec_rem / 1000);

        nanosec_rem = do_div(wdog_dd->last_pet, 1000000000);
        dev_info(wdog_dd->dev, "Watchdog last pet at %lu.%06lu\n",
                        (unsigned long) wdog_dd->last_pet, nanosec_rem / 1000);
        if (wdog_dd->do_ipi_ping)
                dump_cpu_alive_mask(wdog_dd);
        msm_trigger_wdog_bite();
        panic("Failed to cause a watchdog bite! - Falling back to kernel panic!");
        return IRQ_HANDLED;
}

static irqreturn_t wdog_ppi_bark(int irq, void *dev_id)
{
        struct msm_watchdog_data *wdog_dd =
                        *(struct msm_watchdog_data **)(dev_id);

        return wdog_bark_handler(irq, wdog_dd);
}
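
/*
 * Register a per-CPU "KCPU_CTX<n>" buffer with the APPS memory-dump table so
 * CPU context can be saved there on a watchdog bite. Buffers are deliberately
 * not freed on partial failure because earlier CPUs may already have
 * registered their entries.
 */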
static void configure_bark_dump(struct msm_watchdog_data *wdog_dd)
{
        int ret;
        struct msm_dump_entry dump_entry;
        struct msm_dump_data *cpu_data;
        int cpu;
        void *cpu_buf;

        cpu_data = kzalloc(sizeof(struct msm_dump_data) *
                        num_present_cpus(), GFP_KERNEL);
        if (!cpu_data)
                goto out0;

        cpu_buf = kzalloc(MAX_CPU_CTX_SIZE * num_present_cpus(),
                        GFP_KERNEL);
        if (!cpu_buf)
                goto out1;

        for_each_cpu(cpu, cpu_present_mask) {
                cpu_data[cpu].addr = virt_to_phys(cpu_buf +
                                cpu * MAX_CPU_CTX_SIZE);
                cpu_data[cpu].len = MAX_CPU_CTX_SIZE;
                snprintf(cpu_data[cpu].name, sizeof(cpu_data[cpu].name),
                                "KCPU_CTX%d", cpu);
                dump_entry.id = MSM_DUMP_DATA_CPU_CTX + cpu;
                dump_entry.addr = virt_to_phys(&cpu_data[cpu]);
                ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
                                &dump_entry);
                /*
                 * Don't free the buffers in case of error since
                 * registration may have succeeded for some cpus.
                 */
                if (ret)
                        pr_err("cpu %d reg dump setup failed\n", cpu);
        }

        return;
out1:
        kfree(cpu_data);
out0:
        return;
}
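
/*
 * Register one unified "KSCANDUMP" region, sized by the optional
 * qcom,scandump-size DT property, with the APPS memory-dump table.
 */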
static void register_scan_dump(struct msm_watchdog_data *wdog_dd)
{
        static void *dump_addr;
        int ret;
        struct msm_dump_entry dump_entry;
        struct msm_dump_data *dump_data;

        if (!wdog_dd->scandump_size)
                return;

        dump_data = kzalloc(sizeof(struct msm_dump_data), GFP_KERNEL);
        if (!dump_data)
                return;
        dump_addr = kzalloc(wdog_dd->scandump_size, GFP_KERNEL);
        if (!dump_addr)
                goto err0;

        dump_data->addr = virt_to_phys(dump_addr);
        dump_data->len = wdog_dd->scandump_size;
        strlcpy(dump_data->name, "KSCANDUMP", sizeof(dump_data->name));
        dump_entry.id = MSM_DUMP_DATA_SCANDUMP;
        dump_entry.addr = virt_to_phys(dump_data);
        ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
        if (ret) {
                pr_err("Registering scandump region failed\n");
                goto err1;
        }
        return;
err1:
        kfree(dump_addr);
err0:
        kfree(dump_data);
}
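
/*
 * Allocate a DMA-coherent "KSCANDUMP<n>" buffer per present CPU and register
 * each one with the APPS memory-dump table; on registration failure the
 * buffer for that CPU is released again. The unified scan-dump region is
 * registered afterwards via register_scan_dump().
 */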
static void configure_scandump(struct msm_watchdog_data *wdog_dd)
{
        int ret;
        struct msm_dump_entry dump_entry;
        struct msm_dump_data *cpu_data;
        int cpu;
        static dma_addr_t dump_addr;
        static void *dump_vaddr;

        for_each_cpu(cpu, cpu_present_mask) {
                cpu_data = devm_kzalloc(wdog_dd->dev,
                                sizeof(struct msm_dump_data),
                                GFP_KERNEL);
                if (!cpu_data)
                        continue;

                dump_vaddr = (void *) dma_alloc_coherent(wdog_dd->dev,
                                MAX_CPU_SCANDUMP_SIZE,
                                &dump_addr,
                                GFP_KERNEL);
                if (!dump_vaddr) {
                        dev_err(wdog_dd->dev, "Couldn't get memory for dump\n");
                        continue;
                }
                memset(dump_vaddr, 0x0, MAX_CPU_SCANDUMP_SIZE);

                cpu_data->addr = dump_addr;
                cpu_data->len = MAX_CPU_SCANDUMP_SIZE;
                snprintf(cpu_data->name, sizeof(cpu_data->name),
                                "KSCANDUMP%d", cpu);
                dump_entry.id = MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu;
                dump_entry.addr = virt_to_phys(cpu_data);
                ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
                                &dump_entry);
                if (ret) {
                        dev_err(wdog_dd->dev, "Dump setup failed, id = %d\n",
                                MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu);
                        dma_free_coherent(wdog_dd->dev, MAX_CPU_SCANDUMP_SIZE,
                                        dump_vaddr,
                                        dump_addr);
                        devm_kfree(wdog_dd->dev, cpu_data);
                }
        }

        register_scan_dump(wdog_dd);
}

static int init_watchdog_sysfs(struct msm_watchdog_data *wdog_dd)
{
        int error = 0;

        error |= device_create_file(wdog_dd->dev, &dev_attr_disable);

        if (of_property_read_bool(wdog_dd->dev->of_node,
                        "qcom,userspace-watchdog")) {
                error |= device_create_file(wdog_dd->dev, &dev_attr_pet_time);
                error |= device_create_file(wdog_dd->dev,
                                &dev_attr_user_pet_enabled);
        }

        if (error)
                dev_err(wdog_dd->dev, "cannot create sysfs attribute\n");

        return error;
}
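
/*
 * One-time hardware and software setup: mark the cluster-1 watchdog absent,
 * request the bark interrupt (per-CPU PPI or a regular IRQ), register the
 * dump regions, program WDT0_BARK_TIME from qcom,bark-time and WDT0_BITE_TIME
 * at bark + 3 s worth of WDT_HZ ticks, start the pet timer and kthread, and
 * finally enable the counter via WDT0_EN.
 */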
static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
{
        unsigned long delay_time;
        uint32_t val;
        u64 timeout;
        int ret;

        /*
         * Disable the watchdog for cluster 1 so that cluster 0 watchdog will
         * be mapped to the entire sub-system.
         */
        if (wdog_dd->wdog_absent_base)
                __raw_writel(2, wdog_dd->wdog_absent_base + WDOG_ABSENT);

        if (wdog_dd->irq_ppi) {
                wdog_dd->wdog_cpu_dd = alloc_percpu(struct msm_watchdog_data *);
                if (!wdog_dd->wdog_cpu_dd) {
                        dev_err(wdog_dd->dev, "fail to allocate cpu data\n");
                        return;
                }
                *raw_cpu_ptr(wdog_dd->wdog_cpu_dd) = wdog_dd;
                ret = request_percpu_irq(wdog_dd->bark_irq, wdog_ppi_bark,
                                "apps_wdog_bark",
                                wdog_dd->wdog_cpu_dd);
                if (ret) {
                        dev_err(wdog_dd->dev, "failed to request bark irq\n");
                        free_percpu(wdog_dd->wdog_cpu_dd);
                        return;
                }
        } else {
                ret = devm_request_irq(wdog_dd->dev, wdog_dd->bark_irq,
                                wdog_bark_handler, IRQF_TRIGGER_RISING,
                                "apps_wdog_bark", wdog_dd);
                if (ret) {
                        dev_err(wdog_dd->dev, "failed to request bark irq\n");
                        return;
                }
        }
        delay_time = msecs_to_jiffies(wdog_dd->pet_time);
        wdog_dd->min_slack_ticks = UINT_MAX;
        wdog_dd->min_slack_ns = ULLONG_MAX;
        configure_scandump(wdog_dd);
        configure_bark_dump(wdog_dd);
        timeout = (wdog_dd->bark_time * WDT_HZ)/1000;
        __raw_writel(timeout, wdog_dd->base + WDT0_BARK_TIME);
        __raw_writel(timeout + 3*WDT_HZ, wdog_dd->base + WDT0_BITE_TIME);

        wdog_dd->panic_blk.notifier_call = panic_wdog_handler;
        atomic_notifier_chain_register(&panic_notifier_list,
                        &wdog_dd->panic_blk);
        mutex_init(&wdog_dd->disable_lock);
        init_waitqueue_head(&wdog_dd->pet_complete);
        wdog_dd->timer_expired = false;
        wdog_dd->user_pet_complete = true;
        wdog_dd->user_pet_enabled = false;
        wake_up_process(wdog_dd->watchdog_task);
        init_timer(&wdog_dd->pet_timer);
        wdog_dd->pet_timer.data = (unsigned long)wdog_dd;
        wdog_dd->pet_timer.function = pet_task_wakeup;
        wdog_dd->pet_timer.expires = jiffies + delay_time;
        add_timer(&wdog_dd->pet_timer);

        val = BIT(EN);
        if (wdog_dd->wakeup_irq_enable)
                val |= BIT(UNMASKED_INT_EN);
        __raw_writel(val, wdog_dd->base + WDT0_EN);
        __raw_writel(1, wdog_dd->base + WDT0_RST);
        wdog_dd->last_pet = sched_clock();
        wdog_dd->enabled = true;

        init_watchdog_sysfs(wdog_dd);

        if (wdog_dd->irq_ppi)
                enable_percpu_irq(wdog_dd->bark_irq, 0);
        if (!ipi_en)
                cpu_pm_register_notifier(&wdog_cpu_pm_nb);
        dev_info(wdog_dd->dev, "MSM Watchdog Initialized\n");
}

static const struct of_device_id msm_wdog_match_table[] = {
        { .compatible = "qcom,msm-watchdog" },
        {}
};

static void dump_pdata(struct msm_watchdog_data *pdata)
{
        dev_dbg(pdata->dev, "wdog bark_time %d", pdata->bark_time);
        dev_dbg(pdata->dev, "wdog pet_time %d", pdata->pet_time);
        dev_dbg(pdata->dev, "wdog perform ipi ping %d", pdata->do_ipi_ping);
        dev_dbg(pdata->dev, "wdog base address is 0x%lx\n", (unsigned long)
                        pdata->base);
}

static int msm_wdog_dt_to_pdata(struct platform_device *pdev,
                        struct msm_watchdog_data *pdata)
{
        struct device_node *node = pdev->dev.of_node;
        struct resource *res;
        int ret;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wdt-base");
        if (!res)
                return -ENODEV;
        pdata->size = resource_size(res);
        pdata->phys_base = res->start;
        if (unlikely(!(devm_request_mem_region(&pdev->dev, pdata->phys_base,
                        pdata->size, "msm-watchdog")))) {
                dev_err(&pdev->dev, "%s cannot reserve watchdog region\n",
                        __func__);
                return -ENXIO;
        }
        pdata->base = devm_ioremap(&pdev->dev, pdata->phys_base,
                        pdata->size);
        if (!pdata->base) {
                dev_err(&pdev->dev, "%s cannot map wdog register space\n",
                        __func__);
                return -ENXIO;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                        "wdt-absent-base");
        if (res) {
                pdata->wdog_absent_base = devm_ioremap(&pdev->dev, res->start,
                                resource_size(res));
                if (!pdata->wdog_absent_base) {
                        dev_err(&pdev->dev,
                                "cannot map wdog absent register space\n");
                        return -ENXIO;
                }
        } else {
                dev_info(&pdev->dev, "wdog absent resource not present\n");
        }

        pdata->bark_irq = platform_get_irq(pdev, 0);
        pdata->bite_irq = platform_get_irq(pdev, 1);
        ret = of_property_read_u32(node, "qcom,bark-time", &pdata->bark_time);
        if (ret) {
                dev_err(&pdev->dev, "reading bark time failed\n");
                return -ENXIO;
        }
        ret = of_property_read_u32(node, "qcom,pet-time", &pdata->pet_time);
        if (ret) {
                dev_err(&pdev->dev, "reading pet time failed\n");
                return -ENXIO;
        }
        pdata->do_ipi_ping = of_property_read_bool(node, "qcom,ipi-ping");
        if (!pdata->bark_time) {
                dev_err(&pdev->dev, "%s watchdog bark time not setup\n",
                        __func__);
                return -ENXIO;
        }
        if (!pdata->pet_time) {
                dev_err(&pdev->dev, "%s watchdog pet time not setup\n",
                        __func__);
                return -ENXIO;
        }
        pdata->wakeup_irq_enable = of_property_read_bool(node,
                        "qcom,wakeup-enable");

        if (of_property_read_u32(node, "qcom,scandump-size",
                        &pdata->scandump_size))
                dev_info(&pdev->dev,
                        "No need to allocate memory for scandumps\n");

        pdata->irq_ppi = irq_is_percpu(pdata->bark_irq);
        dump_pdata(pdata);
        return 0;
}
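
/*
 * Probe: parse the DT node, create the pet kthread, run the one-time setup in
 * init_watchdog_data(), and register the driver state ("KWDOGDATA") with the
 * minidump table so it is available in minidumps.
 */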
static int msm_watchdog_probe(struct platform_device *pdev)
{
        int ret;
        struct msm_watchdog_data *wdog_dd;
        struct md_region md_entry;

        if (!pdev->dev.of_node || !enable)
                return -ENODEV;
        wdog_dd = kzalloc(sizeof(struct msm_watchdog_data), GFP_KERNEL);
        if (!wdog_dd)
                return -EIO;
        ret = msm_wdog_dt_to_pdata(pdev, wdog_dd);
        if (ret)
                goto err;

        wdog_data = wdog_dd;
        wdog_dd->dev = &pdev->dev;
        platform_set_drvdata(pdev, wdog_dd);
        cpumask_clear(&wdog_dd->alive_mask);
        wdog_dd->watchdog_task = kthread_create(watchdog_kthread, wdog_dd,
                        "msm_watchdog");
        if (IS_ERR(wdog_dd->watchdog_task)) {
                ret = PTR_ERR(wdog_dd->watchdog_task);
                goto err;
        }
        init_watchdog_data(wdog_dd);

        /* Add wdog info to minidump table */
        strlcpy(md_entry.name, "KWDOGDATA", sizeof(md_entry.name));
        md_entry.virt_addr = (uintptr_t)wdog_dd;
        md_entry.phys_addr = virt_to_phys(wdog_dd);
        md_entry.size = sizeof(*wdog_dd);
        if (msm_minidump_add_region(&md_entry))
                pr_info("Failed to add Watchdog data in Minidump\n");

        return 0;
err:
        kzfree(wdog_dd);
        return ret;
}

static const struct dev_pm_ops msm_watchdog_dev_pm_ops = {
        .suspend_noirq = msm_watchdog_suspend,
        .resume_noirq = msm_watchdog_resume,
};

static struct platform_driver msm_watchdog_driver = {
        .probe = msm_watchdog_probe,
        .remove = msm_watchdog_remove,
        .driver = {
                .name = MODULE_NAME,
                .owner = THIS_MODULE,
                .pm = &msm_watchdog_dev_pm_ops,
                .of_match_table = msm_wdog_match_table,
        },
};

static int init_watchdog(void)
{
        return platform_driver_register(&msm_watchdog_driver);
}

pure_initcall(init_watchdog);
MODULE_DESCRIPTION("MSM Watchdog Driver");
MODULE_LICENSE("GPL v2");