fsl_hypervisor.c

/*
 * Freescale Hypervisor Management Driver
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
 * Author: Timur Tabi <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 * The Freescale hypervisor management driver provides several services to
 * drivers and applications related to the Freescale hypervisor:
 *
 * 1. An ioctl interface for querying and managing partitions.
 *
 * 2. A file interface for reading incoming doorbells.
 *
 * 3. An interrupt handler for shutting down the partition upon receiving
 *    the shutdown doorbell from a manager partition.
 *
 * 4. A kernel interface for receiving callbacks when a managed partition
 *    shuts down.
 */

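/*
 * As a rough illustration of service (1) above, a userspace client might
 * query a partition's status along the lines of the sketch below.  The
 * sketch is not part of the driver (hence the #if 0): "/dev/fsl-hv" assumes
 * the usual udev naming for the misc device registered at the bottom of
 * this file, and query_partition() is a made-up helper name.
 */
#if 0   /* illustrative userspace sketch, not built with the driver */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsl_hypervisor.h>

static int query_partition(uint32_t handle)
{
        struct fsl_hv_ioctl_status status = { .partition = handle };
        int fd = open("/dev/fsl-hv", O_RDWR);

        if (fd < 0)
                return -1;

        /*
         * A nonzero ioctl() return means the driver itself failed;
         * status.ret carries the hypervisor's own return code.
         */
        if (ioctl(fd, FSL_HV_IOCTL_PARTITION_GET_STATUS, &status) == 0 &&
            status.ret == 0)
                printf("partition %u status: %u\n", handle, status.status);

        close(fd);
        return 0;
}
#endif
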
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/notifier.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#include <asm/fsl_hcalls.h>
#include <linux/fsl_hypervisor.h>

static BLOCKING_NOTIFIER_HEAD(failover_subscribers);

/*
 * Ioctl interface for FSL_HV_IOCTL_PARTITION_RESTART
 *
 * Restart a running partition
 */
static long ioctl_restart(struct fsl_hv_ioctl_restart __user *p)
{
        struct fsl_hv_ioctl_restart param;

        /* Get the parameters from the user */
        if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_restart)))
                return -EFAULT;

        param.ret = fh_partition_restart(param.partition);

        if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
                return -EFAULT;

        return 0;
}

/*
 * Ioctl interface for FSL_HV_IOCTL_PARTITION_GET_STATUS
 *
 * Query the status of a partition
 */
static long ioctl_status(struct fsl_hv_ioctl_status __user *p)
{
        struct fsl_hv_ioctl_status param;
        u32 status;

        /* Get the parameters from the user */
        if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_status)))
                return -EFAULT;

        param.ret = fh_partition_get_status(param.partition, &status);
        if (!param.ret)
                param.status = status;

        if (copy_to_user(p, &param, sizeof(struct fsl_hv_ioctl_status)))
                return -EFAULT;

        return 0;
}

/*
 * Ioctl interface for FSL_HV_IOCTL_PARTITION_START
 *
 * Start a stopped partition.
 */
static long ioctl_start(struct fsl_hv_ioctl_start __user *p)
{
        struct fsl_hv_ioctl_start param;

        /* Get the parameters from the user */
        if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_start)))
                return -EFAULT;

        param.ret = fh_partition_start(param.partition, param.entry_point,
                                       param.load);

        if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
                return -EFAULT;

        return 0;
}

/*
 * Ioctl interface for FSL_HV_IOCTL_PARTITION_STOP
 *
 * Stop a running partition
 */
static long ioctl_stop(struct fsl_hv_ioctl_stop __user *p)
{
        struct fsl_hv_ioctl_stop param;

        /* Get the parameters from the user */
        if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_stop)))
                return -EFAULT;

        param.ret = fh_partition_stop(param.partition);

        if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
                return -EFAULT;

        return 0;
}

/*
 * Ioctl interface for FSL_HV_IOCTL_MEMCPY
 *
 * The FH_MEMCPY hypercall takes an array of address/address/size structures
 * to represent the data being copied.  As a convenience to the user, this
 * ioctl takes a user-provided buffer and a pointer to a guest physically
 * contiguous buffer in the remote partition, and creates the
 * address/address/size array for the hypercall.
 */
static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
{
        struct fsl_hv_ioctl_memcpy param;
        struct page **pages = NULL;
        void *sg_list_unaligned = NULL;
        struct fh_sg_list *sg_list = NULL;
        unsigned int num_pages;
        unsigned long lb_offset; /* Offset within a page of the local buffer */
        unsigned int i;
        long ret = 0;
        int num_pinned; /* return value from get_user_pages() */
        phys_addr_t remote_paddr; /* The next address in the remote buffer */
        uint32_t count; /* The number of bytes left to copy */

        /* Get the parameters from the user */
        if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_memcpy)))
                return -EFAULT;

        /*
         * One partition must be local, the other must be remote.  In other
         * words, if source and target are both -1, or are both not -1, then
         * return an error.
         */
        if ((param.source == -1) == (param.target == -1))
                return -EINVAL;

        /*
         * The array of pages returned by get_user_pages() covers only
         * page-aligned memory.  Since the user buffer is probably not
         * page-aligned, we need to handle the discrepancy.
         *
         * We calculate the offset within a page of the S/G list, and make
         * adjustments accordingly.  This will result in a page list that
         * looks like this:
         *
         *      ----    <-- first page starts before the buffer
         *     |    |
         *     |////|-> ----
         *     |////|  |    |
         *      ----   |    |
         *             |    |
         *      ----   |    |
         *     |////|  |    |
         *     |////|  |    |
         *     |////|  |    |
         *      ----   |    |
         *             |    |
         *      ----   |    |
         *     |////|  |    |
         *     |////|  |    |
         *     |////|  |    |
         *      ----   |    |
         *             |    |
         *      ----   |    |
         *     |////|  |    |
         *     |////|-> ----
         *     |    |       <-- last page ends after the buffer
         *      ----
         *
         * The distance between the start of the first page and the start of
         * the buffer is lb_offset.  The hashed (///) areas are the parts of
         * the page list that contain the actual buffer.
         *
         * The advantage of this approach is that the number of pages is
         * equal to the number of entries in the S/G list that we give to the
         * hypervisor.
         */
        lb_offset = param.local_vaddr & (PAGE_SIZE - 1);
        if (param.count == 0 ||
            param.count > U64_MAX - lb_offset - PAGE_SIZE + 1)
                return -EINVAL;
        num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* Allocate the buffers we need */

        /*
         * 'pages' is an array of struct page pointers that's initialized by
         * get_user_pages().
         */
        pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                pr_debug("fsl-hv: could not allocate page list\n");
                return -ENOMEM;
        }

        /*
         * sg_list is the list of fh_sg_list objects that we pass to the
         * hypervisor.
         */
        sg_list_unaligned = kmalloc(num_pages * sizeof(struct fh_sg_list) +
                                    sizeof(struct fh_sg_list) - 1, GFP_KERNEL);
        if (!sg_list_unaligned) {
                pr_debug("fsl-hv: could not allocate S/G list\n");
                ret = -ENOMEM;
                goto exit;
        }
        sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));

        /* Get the physical addresses of the source buffer */
        down_read(&current->mm->mmap_sem);
        num_pinned = get_user_pages(param.local_vaddr - lb_offset,
                                    num_pages,
                                    (param.source == -1) ? 0 : FOLL_WRITE,
                                    pages, NULL);
        up_read(&current->mm->mmap_sem);

        if (num_pinned != num_pages) {
                /* get_user_pages() failed */
                pr_debug("fsl-hv: could not lock source buffer\n");
                ret = (num_pinned < 0) ? num_pinned : -EFAULT;
                goto exit;
        }

        /*
         * Build the fh_sg_list[] array.  The first page is special
         * because it's misaligned.
         */
        if (param.source == -1) {
                sg_list[0].source = page_to_phys(pages[0]) + lb_offset;
                sg_list[0].target = param.remote_paddr;
        } else {
                sg_list[0].source = param.remote_paddr;
                sg_list[0].target = page_to_phys(pages[0]) + lb_offset;
        }
        sg_list[0].size = min_t(uint64_t, param.count, PAGE_SIZE - lb_offset);

        remote_paddr = param.remote_paddr + sg_list[0].size;
        count = param.count - sg_list[0].size;

        for (i = 1; i < num_pages; i++) {
                if (param.source == -1) {
                        /* local to remote */
                        sg_list[i].source = page_to_phys(pages[i]);
                        sg_list[i].target = remote_paddr;
                } else {
                        /* remote to local */
                        sg_list[i].source = remote_paddr;
                        sg_list[i].target = page_to_phys(pages[i]);
                }
                sg_list[i].size = min_t(uint64_t, count, PAGE_SIZE);

                remote_paddr += sg_list[i].size;
                count -= sg_list[i].size;
        }

        param.ret = fh_partition_memcpy(param.source, param.target,
                                        virt_to_phys(sg_list), num_pages);

exit:
        if (pages) {
                for (i = 0; i < num_pages; i++)
                        if (pages[i])
                                put_page(pages[i]);
        }

        kfree(sg_list_unaligned);
        kfree(pages);

        if (!ret)
                if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
                        return -EFAULT;

        return ret;
}

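/*
 * For illustration, a userspace caller of FSL_HV_IOCTL_MEMCPY might fill in
 * the structure as in the sketch below.  This is an assumption-laden sketch,
 * not part of the driver: copy_to_partition() is a made-up helper, and fd is
 * an already-open descriptor for the device node.  Setting .source to -1
 * marks the local partition as the source, matching the check at the top of
 * ioctl_memcpy() above.
 */
#if 0   /* illustrative userspace sketch, not built with the driver */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fsl_hypervisor.h>

static int copy_to_partition(int fd, uint32_t handle, const void *buf,
                             uint64_t remote_paddr, uint64_t len)
{
        struct fsl_hv_ioctl_memcpy cp;

        memset(&cp, 0, sizeof(cp));
        cp.source = (uint32_t)-1;       /* local partition is the source */
        cp.target = handle;             /* handle of the remote partition */
        cp.local_vaddr = (uintptr_t)buf;
        cp.remote_paddr = remote_paddr; /* guest physical address */
        cp.count = len;

        if (ioctl(fd, FSL_HV_IOCTL_MEMCPY, &cp))
                return -1;      /* driver-level failure */

        return cp.ret ? -1 : 0; /* hypervisor return code */
}
#endif
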
/*
 * Ioctl interface for FSL_HV_IOCTL_DOORBELL
 *
 * Ring a doorbell
 */
static long ioctl_doorbell(struct fsl_hv_ioctl_doorbell __user *p)
{
        struct fsl_hv_ioctl_doorbell param;

        /* Get the parameters from the user. */
        if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_doorbell)))
                return -EFAULT;

        param.ret = ev_doorbell_send(param.doorbell);

        if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
                return -EFAULT;

        return 0;
}

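/*
 * Ioctl interface for FSL_HV_IOCTL_GETPROP and FSL_HV_IOCTL_SETPROP
 *
 * Get or set a property in the device tree of the partition identified by
 * the handle; 'set' is nonzero for FSL_HV_IOCTL_SETPROP and zero for
 * FSL_HV_IOCTL_GETPROP (see the dispatch in fsl_hv_ioctl() below).
 */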
static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
{
        struct fsl_hv_ioctl_prop param;
        char __user *upath, *upropname;
        void __user *upropval;
        char *path, *propname;
        void *propval;
        int ret = 0;

        /* Get the parameters from the user. */
        if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_prop)))
                return -EFAULT;

        upath = (char __user *)(uintptr_t)param.path;
        upropname = (char __user *)(uintptr_t)param.propname;
        upropval = (void __user *)(uintptr_t)param.propval;

        path = strndup_user(upath, FH_DTPROP_MAX_PATHLEN);
        if (IS_ERR(path))
                return PTR_ERR(path);

        propname = strndup_user(upropname, FH_DTPROP_MAX_PATHLEN);
        if (IS_ERR(propname)) {
                ret = PTR_ERR(propname);
                goto err_free_path;
        }

        if (param.proplen > FH_DTPROP_MAX_PROPLEN) {
                ret = -EINVAL;
                goto err_free_propname;
        }

        propval = kmalloc(param.proplen, GFP_KERNEL);
        if (!propval) {
                ret = -ENOMEM;
                goto err_free_propname;
        }

        if (set) {
                if (copy_from_user(propval, upropval, param.proplen)) {
                        ret = -EFAULT;
                        goto err_free_propval;
                }

                param.ret = fh_partition_set_dtprop(param.handle,
                                                    virt_to_phys(path),
                                                    virt_to_phys(propname),
                                                    virt_to_phys(propval),
                                                    param.proplen);
        } else {
                param.ret = fh_partition_get_dtprop(param.handle,
                                                    virt_to_phys(path),
                                                    virt_to_phys(propname),
                                                    virt_to_phys(propval),
                                                    &param.proplen);

                if (param.ret == 0) {
                        if (copy_to_user(upropval, propval, param.proplen) ||
                            put_user(param.proplen, &p->proplen)) {
                                ret = -EFAULT;
                                goto err_free_propval;
                        }
                }
        }

        if (put_user(param.ret, &p->ret))
                ret = -EFAULT;

err_free_propval:
        kfree(propval);
err_free_propname:
        kfree(propname);
err_free_path:
        kfree(path);

        return ret;
}

/*
 * Ioctl main entry point
 */
static long fsl_hv_ioctl(struct file *file, unsigned int cmd,
                         unsigned long argaddr)
{
        void __user *arg = (void __user *)argaddr;
        long ret;

        switch (cmd) {
        case FSL_HV_IOCTL_PARTITION_RESTART:
                ret = ioctl_restart(arg);
                break;
        case FSL_HV_IOCTL_PARTITION_GET_STATUS:
                ret = ioctl_status(arg);
                break;
        case FSL_HV_IOCTL_PARTITION_START:
                ret = ioctl_start(arg);
                break;
        case FSL_HV_IOCTL_PARTITION_STOP:
                ret = ioctl_stop(arg);
                break;
        case FSL_HV_IOCTL_MEMCPY:
                ret = ioctl_memcpy(arg);
                break;
        case FSL_HV_IOCTL_DOORBELL:
                ret = ioctl_doorbell(arg);
                break;
        case FSL_HV_IOCTL_GETPROP:
                ret = ioctl_dtprop(arg, 0);
                break;
        case FSL_HV_IOCTL_SETPROP:
                ret = ioctl_dtprop(arg, 1);
                break;
        default:
                pr_debug("fsl-hv: bad ioctl dir=%u type=%u cmd=%u size=%u\n",
                         _IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd),
                         _IOC_SIZE(cmd));
                return -ENOTTY;
        }

        return ret;
}

/* Linked list of processes that have us open */
static struct list_head db_list;

/* spinlock for db_list */
static DEFINE_SPINLOCK(db_list_lock);

/* The size of the doorbell event queue.  This must be a power of two. */
#define QSIZE   16

/* Returns the next head/tail pointer, wrapping around the queue if necessary */
#define nextp(x) (((x) + 1) & (QSIZE - 1))

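/*
 * For example, with QSIZE == 16, nextp(15) == 0, so the head and tail
 * indices wrap without a modulo operation.  A queue is treated as full
 * when head == nextp(tail) (see fsl_hv_queue_doorbell() below), which
 * leaves QSIZE - 1 usable slots.
 */
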
/* Per-open data structure */
struct doorbell_queue {
        struct list_head list;
        spinlock_t lock;
        wait_queue_head_t wait;
        unsigned int head;
        unsigned int tail;
        uint32_t q[QSIZE];
};

/* Linked list of ISRs that we registered */
struct list_head isr_list;

/* Per-ISR data structure */
struct doorbell_isr {
        struct list_head list;
        unsigned int irq;
        uint32_t doorbell;      /* The doorbell handle */
        uint32_t partition;     /* The partition handle, if used */
};

/*
 * Add a doorbell to all of the doorbell queues
 */
static void fsl_hv_queue_doorbell(uint32_t doorbell)
{
        struct doorbell_queue *dbq;
        unsigned long flags;

        /* Prevent another core from modifying db_list */
        spin_lock_irqsave(&db_list_lock, flags);

        list_for_each_entry(dbq, &db_list, list) {
                if (dbq->head != nextp(dbq->tail)) {
                        dbq->q[dbq->tail] = doorbell;
                        /*
                         * This memory barrier eliminates the need to grab
                         * the spinlock for dbq.
                         */
                        smp_wmb();
                        dbq->tail = nextp(dbq->tail);
                        wake_up_interruptible(&dbq->wait);
                }
        }

        spin_unlock_irqrestore(&db_list_lock, flags);
}

/*
 * Interrupt handler for all doorbells
 *
 * We use the same interrupt handler for all doorbells.  Whenever a doorbell
 * is rung, and we receive an interrupt, we just put the handle for that
 * doorbell (passed to us as *data) into all of the queues.
 */
static irqreturn_t fsl_hv_isr(int irq, void *data)
{
        fsl_hv_queue_doorbell((uintptr_t)data);

        return IRQ_HANDLED;
}

/*
 * State change thread function
 *
 * The state change notification arrives in an interrupt, but we can't call
 * blocking_notifier_call_chain() in an interrupt handler.  We could call
 * atomic_notifier_call_chain(), but that would require the clients' call-back
 * function to run in interrupt context.  Since we don't want to impose that
 * restriction on the clients, we use a threaded IRQ to process the
 * notification in kernel context.
 */
static irqreturn_t fsl_hv_state_change_thread(int irq, void *data)
{
        struct doorbell_isr *dbisr = data;

        blocking_notifier_call_chain(&failover_subscribers, dbisr->partition,
                                     NULL);

        return IRQ_HANDLED;
}

/*
 * Interrupt handler for state-change doorbells
 */
static irqreturn_t fsl_hv_state_change_isr(int irq, void *data)
{
        unsigned int status;
        struct doorbell_isr *dbisr = data;
        int ret;

        /* It's still a doorbell, so add it to all the queues. */
        fsl_hv_queue_doorbell(dbisr->doorbell);

        /* Determine the new state, and if it's stopped, notify the clients. */
        ret = fh_partition_get_status(dbisr->partition, &status);
        if (!ret && (status == FH_PARTITION_STOPPED))
                return IRQ_WAKE_THREAD;

        return IRQ_HANDLED;
}

/*
 * Returns a bitmask indicating whether a read will block
 */
static unsigned int fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
{
        struct doorbell_queue *dbq = filp->private_data;
        unsigned long flags;
        unsigned int mask;

        spin_lock_irqsave(&dbq->lock, flags);

        poll_wait(filp, &dbq->wait, p);
        mask = (dbq->head == dbq->tail) ? 0 : (POLLIN | POLLRDNORM);

        spin_unlock_irqrestore(&dbq->lock, flags);

        return mask;
}

/*
 * Return the handles for any incoming doorbells
 *
 * If there are doorbell handles in the queue for this open instance, then
 * return them to the caller as an array of 32-bit integers.  Otherwise,
 * block until there is at least one handle to return.
 */
static ssize_t fsl_hv_read(struct file *filp, char __user *buf, size_t len,
                           loff_t *off)
{
        struct doorbell_queue *dbq = filp->private_data;
        uint32_t __user *p = (uint32_t __user *)buf; /* for put_user() */
        unsigned long flags;
        ssize_t count = 0;

        /* Make sure we stop when the user buffer is full. */
        while (len >= sizeof(uint32_t)) {
                uint32_t dbell; /* Local copy of doorbell queue data */

                spin_lock_irqsave(&dbq->lock, flags);

                /*
                 * If the queue is empty, then either we're done or we need
                 * to block.  If the application specified O_NONBLOCK, then
                 * we return the appropriate error code.
                 */
                if (dbq->head == dbq->tail) {
                        spin_unlock_irqrestore(&dbq->lock, flags);
                        if (count)
                                break;
                        if (filp->f_flags & O_NONBLOCK)
                                return -EAGAIN;
                        if (wait_event_interruptible(dbq->wait,
                                                     dbq->head != dbq->tail))
                                return -ERESTARTSYS;
                        continue;
                }

                /*
                 * Even though we have an smp_wmb() in the ISR, the core
                 * might speculatively execute the "dbell = ..." below while
                 * it's evaluating the if-statement above.  In that case, the
                 * value put into dbell could be stale if the core accepts the
                 * speculation.  To prevent that, we need a read memory barrier
                 * here as well.
                 */
                smp_rmb();

                /*
                 * Copy the data to a temporary local buffer, because
                 * we can't call copy_to_user() from inside a spinlock.
                 */
                dbell = dbq->q[dbq->head];
                dbq->head = nextp(dbq->head);

                spin_unlock_irqrestore(&dbq->lock, flags);

                if (put_user(dbell, p))
                        return -EFAULT;
                p++;
                count += sizeof(uint32_t);
                len -= sizeof(uint32_t);
        }

        return count;
}

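/*
 * A minimal sketch of how an application might consume this queue: block in
 * read() until at least one doorbell handle arrives, then print each 32-bit
 * handle.  The sketch assumes the usual "/dev/fsl-hv" device node and is not
 * part of the driver.  poll()/select() can be used first to avoid blocking,
 * as supported by fsl_hv_poll() above.
 */
#if 0   /* illustrative userspace sketch, not built with the driver */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

static void wait_for_doorbells(void)
{
        uint32_t handles[16];
        ssize_t n, i;
        int fd = open("/dev/fsl-hv", O_RDONLY);

        if (fd < 0)
                return;

        /* read() returns a whole number of uint32_t doorbell handles */
        n = read(fd, handles, sizeof(handles));
        for (i = 0; i < n / (ssize_t)sizeof(uint32_t); i++)
                printf("doorbell %u rang\n", handles[i]);

        close(fd);
}
#endif
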
/*
 * Open the driver and prepare for reading doorbells.
 *
 * Every time an application opens the driver, we create a doorbell queue
 * for that file handle.  This queue is used for any incoming doorbells.
 */
static int fsl_hv_open(struct inode *inode, struct file *filp)
{
        struct doorbell_queue *dbq;
        unsigned long flags;
        int ret = 0;

        dbq = kzalloc(sizeof(struct doorbell_queue), GFP_KERNEL);
        if (!dbq) {
                pr_err("fsl-hv: out of memory\n");
                return -ENOMEM;
        }

        spin_lock_init(&dbq->lock);
        init_waitqueue_head(&dbq->wait);

        spin_lock_irqsave(&db_list_lock, flags);
        list_add(&dbq->list, &db_list);
        spin_unlock_irqrestore(&db_list_lock, flags);

        filp->private_data = dbq;

        return ret;
}

/*
 * Close the driver
 */
static int fsl_hv_close(struct inode *inode, struct file *filp)
{
        struct doorbell_queue *dbq = filp->private_data;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&db_list_lock, flags);
        list_del(&dbq->list);
        spin_unlock_irqrestore(&db_list_lock, flags);

        kfree(dbq);

        return ret;
}

static const struct file_operations fsl_hv_fops = {
        .owner = THIS_MODULE,
        .open = fsl_hv_open,
        .release = fsl_hv_close,
        .poll = fsl_hv_poll,
        .read = fsl_hv_read,
        .unlocked_ioctl = fsl_hv_ioctl,
        .compat_ioctl = fsl_hv_ioctl,
};

static struct miscdevice fsl_hv_misc_dev = {
        MISC_DYNAMIC_MINOR,
        "fsl-hv",
        &fsl_hv_fops
};

static irqreturn_t fsl_hv_shutdown_isr(int irq, void *data)
{
        orderly_poweroff(false);

        return IRQ_HANDLED;
}

/*
 * Returns the handle of the parent of the given node
 *
 * The handle is the value of the 'hv-handle' property
 */
static int get_parent_handle(struct device_node *np)
{
        struct device_node *parent;
        const uint32_t *prop;
        uint32_t handle;
        int len;

        parent = of_get_parent(np);
        if (!parent)
                /* It's not really possible for this to fail */
                return -ENODEV;

        /*
         * The proper name for the handle property is "hv-handle", but some
         * older versions of the hypervisor used "reg".
         */
        prop = of_get_property(parent, "hv-handle", &len);
        if (!prop)
                prop = of_get_property(parent, "reg", &len);

        if (!prop || (len != sizeof(uint32_t))) {
                /* This can happen only if the node is malformed */
                of_node_put(parent);
                return -ENODEV;
        }

        handle = be32_to_cpup(prop);
        of_node_put(parent);

        return handle;
}

/*
 * Register a callback for failover events
 *
 * This function is called by device drivers to register their callback
 * functions for fail-over events.
 */
int fsl_hv_failover_register(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&failover_subscribers, nb);
}
EXPORT_SYMBOL(fsl_hv_failover_register);

/*
 * Unregister a callback for failover events
 */
int fsl_hv_failover_unregister(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&failover_subscribers, nb);
}
EXPORT_SYMBOL(fsl_hv_failover_unregister);

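/*
 * A sketch of how a client driver might use the failover notifier above.
 * The example_* names are hypothetical; the callback's 'action' argument is
 * the handle of the partition that stopped, as passed by
 * fsl_hv_state_change_thread(), and 'data' is NULL.  The register/unregister
 * prototypes are expected to come from <linux/fsl_hypervisor.h>.
 */
#if 0   /* illustrative client-driver sketch, not part of this file */
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/fsl_hypervisor.h>

static int example_failover_cb(struct notifier_block *nb,
                               unsigned long action, void *data)
{
        pr_info("example: partition %lu stopped, starting failover\n", action);
        return NOTIFY_OK;
}

static struct notifier_block example_failover_nb = {
        .notifier_call = example_failover_cb,
};

/* Typically called from the client driver's probe/remove paths */
static int example_register(void)
{
        return fsl_hv_failover_register(&example_failover_nb);
}

static void example_unregister(void)
{
        fsl_hv_failover_unregister(&example_failover_nb);
}
#endif
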
/*
 * Return TRUE if we're running under FSL hypervisor
 *
 * This function checks to see if we're running under the Freescale
 * hypervisor, and returns zero if we're not, or non-zero if we are.
 *
 * It checks for a hypervisor node in the device tree.  Currently, that
 * means there needs to be a node in the root called "hypervisor" which
 * has a property named "fsl,hv-version".
 */
static int has_fsl_hypervisor(void)
{
        struct device_node *node;
        int ret;

        node = of_find_node_by_path("/hypervisor");
        if (!node)
                return 0;

        ret = of_find_property(node, "fsl,hv-version", NULL) != NULL;

        of_node_put(node);

        return ret;
}

/*
 * Freescale hypervisor management driver init
 *
 * This function is called when this module is loaded.
 *
 * Register ourselves as a miscellaneous driver.  This will register the
 * fops structure and create the right sysfs entries for udev.
 */
static int __init fsl_hypervisor_init(void)
{
        struct device_node *np;
        struct doorbell_isr *dbisr, *n;
        int ret;

        pr_info("Freescale hypervisor management driver\n");

        if (!has_fsl_hypervisor()) {
                pr_info("fsl-hv: no hypervisor found\n");
                return -ENODEV;
        }

        ret = misc_register(&fsl_hv_misc_dev);
        if (ret) {
                pr_err("fsl-hv: cannot register device\n");
                return ret;
        }

        INIT_LIST_HEAD(&db_list);
        INIT_LIST_HEAD(&isr_list);

        for_each_compatible_node(np, NULL, "epapr,hv-receive-doorbell") {
                unsigned int irq;
                const uint32_t *handle;

                handle = of_get_property(np, "interrupts", NULL);
                irq = irq_of_parse_and_map(np, 0);
                if (!handle || (irq == NO_IRQ)) {
                        pr_err("fsl-hv: no 'interrupts' property in %s node\n",
                               np->full_name);
                        continue;
                }

                dbisr = kzalloc(sizeof(*dbisr), GFP_KERNEL);
                if (!dbisr)
                        goto out_of_memory;

                dbisr->irq = irq;
                dbisr->doorbell = be32_to_cpup(handle);

                if (of_device_is_compatible(np, "fsl,hv-shutdown-doorbell")) {
                        /* The shutdown doorbell gets its own ISR */
                        ret = request_irq(irq, fsl_hv_shutdown_isr, 0,
                                          np->name, NULL);
                } else if (of_device_is_compatible(np,
                                "fsl,hv-state-change-doorbell")) {
                        /*
                         * The state change doorbell triggers a notification if
                         * the state of the managed partition changes to
                         * "stopped".  We need a separate interrupt handler for
                         * that, and we also need to know the handle of the
                         * target partition, not just the handle of the
                         * doorbell.
                         */
                        dbisr->partition = ret = get_parent_handle(np);
                        if (ret < 0) {
                                pr_err("fsl-hv: node %s has missing or malformed parent\n",
                                       np->full_name);
                                kfree(dbisr);
                                continue;
                        }
                        ret = request_threaded_irq(irq, fsl_hv_state_change_isr,
                                                   fsl_hv_state_change_thread,
                                                   0, np->name, dbisr);
                } else
                        ret = request_irq(irq, fsl_hv_isr, 0, np->name, dbisr);

                if (ret < 0) {
                        pr_err("fsl-hv: could not request irq %u for node %s\n",
                               irq, np->full_name);
                        kfree(dbisr);
                        continue;
                }

                list_add(&dbisr->list, &isr_list);

                pr_info("fsl-hv: registered handler for doorbell %u\n",
                        dbisr->doorbell);
        }

        return 0;

out_of_memory:
        list_for_each_entry_safe(dbisr, n, &isr_list, list) {
                free_irq(dbisr->irq, dbisr);
                list_del(&dbisr->list);
                kfree(dbisr);
        }

        misc_deregister(&fsl_hv_misc_dev);

        return -ENOMEM;
}

/*
 * Freescale hypervisor management driver termination
 *
 * This function is called when this driver is unloaded.
 */
static void __exit fsl_hypervisor_exit(void)
{
        struct doorbell_isr *dbisr, *n;

        list_for_each_entry_safe(dbisr, n, &isr_list, list) {
                free_irq(dbisr->irq, dbisr);
                list_del(&dbisr->list);
                kfree(dbisr);
        }

        misc_deregister(&fsl_hv_misc_dev);
}

module_init(fsl_hypervisor_init);
module_exit(fsl_hypervisor_exit);

MODULE_AUTHOR("Timur Tabi <[email protected]>");
MODULE_DESCRIPTION("Freescale hypervisor management driver");
MODULE_LICENSE("GPL v2");