/*
 * VFIO PCI Intel Graphics support
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Register a device specific region through which to provide read-only
 * access to the Intel IGD opregion. The register defining the opregion
 * address is also virtualized to prevent user modification.
 */
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_pci_private.h"

/* ASCII signature at the start of a valid IGD OpRegion (exactly 16 bytes). */
#define OPREGION_SIGNATURE	"IntelGraphicsMem"
/* Default OpRegion size; the real size is read from the header at offset 16. */
#define OPREGION_SIZE		(8 * 1024)
/* PCI config offset of the OpRegion address register (ASLS) on IGD devices. */
#define OPREGION_PCI_ADDR	0xfc
  23. static size_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf,
  24. size_t count, loff_t *ppos, bool iswrite)
  25. {
  26. unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
  27. void *base = vdev->region[i].data;
  28. loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
  29. if (pos >= vdev->region[i].size || iswrite)
  30. return -EINVAL;
  31. count = min(count, (size_t)(vdev->region[i].size - pos));
  32. if (copy_to_user(buf, base + pos, count))
  33. return -EFAULT;
  34. *ppos += count;
  35. return count;
  36. }
  37. static void vfio_pci_igd_release(struct vfio_pci_device *vdev,
  38. struct vfio_pci_region *region)
  39. {
  40. memunmap(region->data);
  41. }
/* Ops for the memory-backed OpRegion device-specific region. */
static const struct vfio_pci_regops vfio_pci_igd_regops = {
	.rw		= vfio_pci_igd_rw,
	.release	= vfio_pci_igd_release,
};
  46. static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
  47. {
  48. __le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
  49. u32 addr, size;
  50. void *base;
  51. int ret;
  52. ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
  53. if (ret)
  54. return ret;
  55. if (!addr || !(~addr))
  56. return -ENODEV;
  57. base = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
  58. if (!base)
  59. return -ENOMEM;
  60. if (memcmp(base, OPREGION_SIGNATURE, 16)) {
  61. memunmap(base);
  62. return -EINVAL;
  63. }
  64. size = le32_to_cpu(*(__le32 *)(base + 16));
  65. if (!size) {
  66. memunmap(base);
  67. return -EINVAL;
  68. }
  69. size *= 1024; /* In KB */
  70. if (size != OPREGION_SIZE) {
  71. memunmap(base);
  72. base = memremap(addr, size, MEMREMAP_WB);
  73. if (!base)
  74. return -ENOMEM;
  75. }
  76. ret = vfio_pci_register_dev_region(vdev,
  77. PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
  78. VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
  79. &vfio_pci_igd_regops, size, VFIO_REGION_INFO_FLAG_READ, base);
  80. if (ret) {
  81. memunmap(base);
  82. return ret;
  83. }
  84. /* Fill vconfig with the hw value and virtualize register */
  85. *dwordp = cpu_to_le32(addr);
  86. memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
  87. PCI_CAP_ID_INVALID_VIRT, 4);
  88. return ret;
  89. }
  90. static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
  91. char __user *buf, size_t count, loff_t *ppos,
  92. bool iswrite)
  93. {
  94. unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
  95. struct pci_dev *pdev = vdev->region[i].data;
  96. loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
  97. size_t size;
  98. int ret;
  99. if (pos >= vdev->region[i].size || iswrite)
  100. return -EINVAL;
  101. size = count = min(count, (size_t)(vdev->region[i].size - pos));
  102. if ((pos & 1) && size) {
  103. u8 val;
  104. ret = pci_user_read_config_byte(pdev, pos, &val);
  105. if (ret)
  106. return pcibios_err_to_errno(ret);
  107. if (copy_to_user(buf + count - size, &val, 1))
  108. return -EFAULT;
  109. pos++;
  110. size--;
  111. }
  112. if ((pos & 3) && size > 2) {
  113. u16 val;
  114. ret = pci_user_read_config_word(pdev, pos, &val);
  115. if (ret)
  116. return pcibios_err_to_errno(ret);
  117. val = cpu_to_le16(val);
  118. if (copy_to_user(buf + count - size, &val, 2))
  119. return -EFAULT;
  120. pos += 2;
  121. size -= 2;
  122. }
  123. while (size > 3) {
  124. u32 val;
  125. ret = pci_user_read_config_dword(pdev, pos, &val);
  126. if (ret)
  127. return pcibios_err_to_errno(ret);
  128. val = cpu_to_le32(val);
  129. if (copy_to_user(buf + count - size, &val, 4))
  130. return -EFAULT;
  131. pos += 4;
  132. size -= 4;
  133. }
  134. while (size >= 2) {
  135. u16 val;
  136. ret = pci_user_read_config_word(pdev, pos, &val);
  137. if (ret)
  138. return pcibios_err_to_errno(ret);
  139. val = cpu_to_le16(val);
  140. if (copy_to_user(buf + count - size, &val, 2))
  141. return -EFAULT;
  142. pos += 2;
  143. size -= 2;
  144. }
  145. while (size) {
  146. u8 val;
  147. ret = pci_user_read_config_byte(pdev, pos, &val);
  148. if (ret)
  149. return pcibios_err_to_errno(ret);
  150. if (copy_to_user(buf + count - size, &val, 1))
  151. return -EFAULT;
  152. pos++;
  153. size--;
  154. }
  155. *ppos += count;
  156. return count;
  157. }
  158. static void vfio_pci_igd_cfg_release(struct vfio_pci_device *vdev,
  159. struct vfio_pci_region *region)
  160. {
  161. struct pci_dev *pdev = region->data;
  162. pci_dev_put(pdev);
  163. }
/* Ops for the host/LPC bridge config-space mirror regions. */
static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
	.rw		= vfio_pci_igd_cfg_rw,
	.release	= vfio_pci_igd_cfg_release,
};
  168. static int vfio_pci_igd_cfg_init(struct vfio_pci_device *vdev)
  169. {
  170. struct pci_dev *host_bridge, *lpc_bridge;
  171. int ret;
  172. host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
  173. if (!host_bridge)
  174. return -ENODEV;
  175. if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
  176. host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
  177. pci_dev_put(host_bridge);
  178. return -EINVAL;
  179. }
  180. ret = vfio_pci_register_dev_region(vdev,
  181. PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
  182. VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
  183. &vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
  184. VFIO_REGION_INFO_FLAG_READ, host_bridge);
  185. if (ret) {
  186. pci_dev_put(host_bridge);
  187. return ret;
  188. }
  189. lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
  190. if (!lpc_bridge)
  191. return -ENODEV;
  192. if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
  193. lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
  194. pci_dev_put(lpc_bridge);
  195. return -EINVAL;
  196. }
  197. ret = vfio_pci_register_dev_region(vdev,
  198. PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
  199. VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
  200. &vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
  201. VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
  202. if (ret) {
  203. pci_dev_put(lpc_bridge);
  204. return ret;
  205. }
  206. return 0;
  207. }
  208. int vfio_pci_igd_init(struct vfio_pci_device *vdev)
  209. {
  210. int ret;
  211. ret = vfio_pci_igd_opregion_init(vdev);
  212. if (ret)
  213. return ret;
  214. ret = vfio_pci_igd_cfg_init(vdev);
  215. if (ret)
  216. return ret;
  217. return 0;
  218. }