dma-removed.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445
  1. /*
  2. *
  3. * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  4. * Copyright (C) 2000-2004 Russell King
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #include <linux/bootmem.h>
  11. #include <linux/module.h>
  12. #include <linux/mm.h>
  13. #include <linux/gfp.h>
  14. #include <linux/errno.h>
  15. #include <linux/ioport.h>
  16. #include <linux/list.h>
  17. #include <linux/init.h>
  18. #include <linux/device.h>
  19. #include <linux/dma-mapping.h>
  20. #include <linux/dma-contiguous.h>
  21. #include <linux/highmem.h>
  22. #include <linux/memblock.h>
  23. #include <linux/slab.h>
  24. #include <linux/iommu.h>
  25. #include <linux/io.h>
  26. #include <linux/vmalloc.h>
  27. #include <linux/sizes.h>
  28. #include <linux/spinlock.h>
  29. #include <asm/dma-contiguous.h>
  30. #include <asm/tlbflush.h>
/*
 * Bookkeeping for one "removed" (carved-out) DMA region owned by a
 * single device.  Allocation state is a simple one-bit-per-page bitmap.
 */
struct removed_region {
	phys_addr_t base;	/* physical start of the carve-out */
	int nr_pages;		/* region size in pages */
	unsigned long *bitmap;	/* per-page allocation bitmap; set bit = in use */
	int fixup;		/* non-zero: on first alloc, return unused tail to the system */
	struct mutex lock;	/* protects bitmap and fixup */
};

/*
 * Dummy cookie returned instead of a CPU address when the caller asked
 * for DMA_ATTR_NO_KERNEL_MAPPING (there is no mapping to hand back).
 */
#define NO_KERNEL_MAPPING_DUMMY 0x2222
  39. static int dma_init_removed_memory(phys_addr_t phys_addr, size_t size,
  40. struct removed_region **mem)
  41. {
  42. struct removed_region *dma_mem = NULL;
  43. int pages = size >> PAGE_SHIFT;
  44. int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
  45. dma_mem = kzalloc(sizeof(struct removed_region), GFP_KERNEL);
  46. if (!dma_mem)
  47. goto out;
  48. dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
  49. if (!dma_mem->bitmap)
  50. goto free1_out;
  51. dma_mem->base = phys_addr;
  52. dma_mem->nr_pages = pages;
  53. mutex_init(&dma_mem->lock);
  54. *mem = dma_mem;
  55. return 0;
  56. free1_out:
  57. kfree(dma_mem);
  58. out:
  59. return -ENOMEM;
  60. }
  61. static int dma_assign_removed_region(struct device *dev,
  62. struct removed_region *mem)
  63. {
  64. if (dev->removed_mem)
  65. return -EBUSY;
  66. dev->removed_mem = mem;
  67. return 0;
  68. }
/*
 * After returning part of the carve-out to the system, re-shape the
 * /proc/iomem "System RAM" entries so the freed range shows up as RAM
 * again.  [base_pfn, end_pfn) is the range that remains removed.
 */
static void adapt_iomem_resource(unsigned long base_pfn, unsigned long end_pfn)
{
	struct resource *res, *conflict;
	resource_size_t cstart, cend;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return;

	res->name = "System RAM";
	res->start = __pfn_to_phys(base_pfn);
	res->end = __pfn_to_phys(end_pfn) - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	/*
	 * We expect the removed range to collide with an existing
	 * "System RAM" resource; request_resource_conflict() hands us
	 * that overlapping entry so we can trim it.
	 *
	 * NOTE(review): if there is NO conflict the resource was
	 * successfully inserted into the tree, yet it is kfree()d below
	 * — a latent use-after-free on this unexpected path; confirm.
	 */
	conflict = request_resource_conflict(&iomem_resource, res);
	if (!conflict) {
		pr_err("Removed memory: no conflict resource found\n");
		kfree(res);
		goto done;
	}

	cstart = conflict->start;
	cend = conflict->end;
	if ((cstart == res->start) && (cend == res->end)) {
		/* exact match: drop the whole conflicting entry */
		release_resource(conflict);
	} else if ((res->start >= cstart) && (res->start <= cend)) {
		/* removed range starts inside the conflicting entry */
		if (res->start == cstart) {
			/* trim from the front */
			adjust_resource(conflict, res->end + 1,
					cend - res->end);
		} else if (res->end == cend) {
			/* trim from the back */
			adjust_resource(conflict, cstart,
					res->start - cstart);
		} else {
			/*
			 * Removed range is in the middle: shrink the
			 * existing entry to the front piece and reuse
			 * res (do not free it) for the tail piece.
			 */
			adjust_resource(conflict, cstart,
					res->start - cstart);
			res->start = res->end + 1;
			res->end = cend;
			request_resource(&iomem_resource, res);
			goto done;
		}
	} else {
		/* conflict does not actually cover res->start — bail out */
		pr_err("Removed memory: incorrect resource conflict start=%llx end=%llx\n",
		       (unsigned long long) conflict->start,
		       (unsigned long long) conflict->end);
	}

	kfree(res);
done:
	return;
}
#ifdef CONFIG_FLATMEM
/*
 * Release the struct page array (memmap) backing [start_pfn, end_pfn)
 * to the page allocator.  Only whole pageblock-aligned spans are freed
 * so neighbouring memmap users keep their entries intact.
 */
static void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/* shrink to the pageblock-aligned interior of the range */
	start_pfn = ALIGN(start_pfn, pageblock_nr_pages);
	end_pfn = round_down(end_pfn, pageblock_nr_pages);

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem_late(pg, pgend - pg);
}
#else
/* Sparse/discontig memmap cannot be partially freed — no-op. */
static void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
}
#endif
/* apply_to_page_range() callback: clear one kernel PTE. Always succeeds. */
static int _clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
		      void *data)
{
	pte_clear(&init_mm, addr, pte);
	return 0;
}
/*
 * Remove the kernel linear mapping for [addr, addr + size): clear the
 * PTEs, then flush the TLB so no stale translations remain.
 */
static void clear_mapping(unsigned long addr, unsigned long size)
{
	apply_to_page_range(&init_mm, addr, size, _clear_pte, NULL);
	/* ensure ptes are updated */
	mb();
	flush_tlb_kernel_range(addr, addr + size);
}
/*
 * One-shot "no-map-fixup" trim: keep only the first @index pages of the
 * carve-out removed and hand everything past them back to the system.
 * Called once, from the first successful removed_alloc().
 */
static void removed_region_fixup(struct removed_region *dma_mem, int index)
{
	unsigned long fixup_size;
	unsigned long base_pfn;
	unsigned long flags;

	/* nothing to trim if the allocation already spans the region */
	if (index > dma_mem->nr_pages)
		return;

	/* carve-out */
	flags = memblock_region_resize_late_begin();
	/* drop the whole region from memblock.reserved ... */
	memblock_free(dma_mem->base, dma_mem->nr_pages * PAGE_SIZE);
	/* ... but remove only the retained head from memblock.memory */
	memblock_remove(dma_mem->base, index * PAGE_SIZE);
	memblock_region_resize_late_end(flags);

	/* clear page-mappings */
	base_pfn = dma_mem->base >> PAGE_SHIFT;
	if (!PageHighMem(pfn_to_page(base_pfn))) {
		/* lowmem only: highmem pages have no permanent mapping */
		clear_mapping((unsigned long) phys_to_virt(dma_mem->base),
			      index * PAGE_SIZE);
	}

	/* free page objects */
	free_memmap(base_pfn, base_pfn + index);

	/* return remaining area to system */
	fixup_size = (dma_mem->nr_pages - index) * PAGE_SIZE;
	free_bootmem_late(dma_mem->base + index * PAGE_SIZE, fixup_size);

	/*
	 * release freed resource region so as to show up under iomem resource
	 * list
	 */
	adapt_iomem_resource(base_pfn, base_pfn + index);

	/* limit the fixup region */
	dma_mem->nr_pages = index;
}
/*
 * dma_map_ops.alloc for removed pools: satisfy the request from the
 * device's private bitmap pool rather than the page allocator.
 *
 * Returns an ioremap()ed CPU address, NO_KERNEL_MAPPING_DUMMY when the
 * caller asked for DMA_ATTR_NO_KERNEL_MAPPING, or NULL on failure.
 * The bus address is returned through @handle.
 */
void *removed_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	bool no_kernel_mapping = attrs & DMA_ATTR_NO_KERNEL_MAPPING;
	bool skip_zeroing = attrs & DMA_ATTR_SKIP_ZEROING;
	int pageno;
	unsigned long order;
	void *addr = NULL;
	struct removed_region *dma_mem = dev->removed_mem;
	int nbits;
	unsigned int align;

	/* the pool mutex sleeps, so atomic contexts are not supported */
	if (!gfpflags_allow_blocking(gfp))
		return NULL;

	size = PAGE_ALIGN(size);
	nbits = size >> PAGE_SHIFT;
	order = get_order(size);

	/* cap the search alignment at 1 MiB to limit pool fragmentation */
	if (order > get_order(SZ_1M))
		order = get_order(SZ_1M);

	align = (1 << order) - 1;

	mutex_lock(&dma_mem->lock);
	pageno = bitmap_find_next_zero_area(dma_mem->bitmap, dma_mem->nr_pages,
					    0, nbits, align);

	/* bitmap_find_next_zero_area() returns >= nr_pages when no fit */
	if (pageno < dma_mem->nr_pages) {
		phys_addr_t base = dma_mem->base + pageno * PAGE_SIZE;
		*handle = base;
		bitmap_set(dma_mem->bitmap, pageno, nbits);

		/*
		 * First allocation from a no-map-fixup pool: permanently
		 * return the unused tail of the carve-out to the system.
		 */
		if (dma_mem->fixup) {
			removed_region_fixup(dma_mem, pageno + nbits);
			dma_mem->fixup = 0;
		}

		/* no mapping wanted and no zeroing: hand back the dummy */
		if (no_kernel_mapping && skip_zeroing) {
			addr = (void *)NO_KERNEL_MAPPING_DUMMY;
			goto out;
		}

		addr = ioremap(base, size);
		if (WARN_ON(!addr)) {
			/* mapping failed: roll the bitmap allocation back */
			bitmap_clear(dma_mem->bitmap, pageno, nbits);
		} else {
			if (!skip_zeroing)
				memset_io(addr, 0, size);
			/* mapping only needed for zeroing; drop it again */
			if (no_kernel_mapping) {
				iounmap(addr);
				addr = (void *)NO_KERNEL_MAPPING_DUMMY;
			}
			*handle = base;
		}
	}

out:
	mutex_unlock(&dma_mem->lock);
	return addr;
}
/* Userspace mmap of removed-pool memory is not supported. */
int removed_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return -ENXIO;
}
/*
 * dma_map_ops.free for removed pools: drop the kernel mapping created
 * by removed_alloc() (unless the no-mapping dummy cookie was handed
 * out) and release the pages back to the pool bitmap.
 */
void removed_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	bool no_kernel_mapping = attrs & DMA_ATTR_NO_KERNEL_MAPPING;
	struct removed_region *dma_mem = dev->removed_mem;

	size = PAGE_ALIGN(size);
	/* NO_KERNEL_MAPPING allocations carry a dummy cookie, not a mapping */
	if (!no_kernel_mapping)
		iounmap(cpu_addr);
	mutex_lock(&dma_mem->lock);
	bitmap_clear(dma_mem->bitmap, (handle - dma_mem->base) >> PAGE_SHIFT,
		     size >> PAGE_SHIFT);
	mutex_unlock(&dma_mem->lock);
}
/*
 * Streaming DMA is not supported on removed pools: always return the
 * all-ones error address so callers see the mapping as failed.
 */
static dma_addr_t removed_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return ~(dma_addr_t)0;
}
/* No-op: map_page never succeeds, so there is nothing to unmap. */
static void removed_unmap_page(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}
/* Scatter-gather mapping unsupported: 0 mapped entries signals failure. */
static int removed_map_sg(struct device *dev, struct scatterlist *sg,
			  int nents, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}
/* No-op: map_sg never succeeds, so there is nothing to unmap. */
static void removed_unmap_sg(struct device *dev,
			     struct scatterlist *sg, int nents,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}
/* No-op: no streaming mappings exist, so there is nothing to sync. */
static void removed_sync_single_for_cpu(struct device *dev,
					dma_addr_t dma_handle, size_t size,
					enum dma_data_direction dir)
{
}
/* No-op: no streaming mappings exist, so there is nothing to sync. */
void removed_sync_single_for_device(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir)
{
}
/* No-op: no streaming scatter-gather mappings exist to sync. */
void removed_sync_sg_for_cpu(struct device *dev,
			     struct scatterlist *sg, int nents,
			     enum dma_data_direction dir)
{
}
/* No-op: no streaming scatter-gather mappings exist to sync. */
void removed_sync_sg_for_device(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir)
{
}
/*
 * Create a fresh kernel mapping for an existing allocation (used with
 * NO_KERNEL_MAPPING buffers).  @cpu_addr and @attrs are unused; the
 * mapping is built from the bus address alone.
 */
void *removed_remap(struct device *dev, void *cpu_addr, dma_addr_t handle,
		    size_t size, unsigned long attrs)
{
	return ioremap(handle, size);
}
/* Tear down a mapping previously created by removed_remap(). */
void removed_unremap(struct device *dev, void *remapped_address, size_t size)
{
	iounmap(remapped_address);
}
/*
 * DMA operations for devices backed by a removed (carved-out) pool.
 * Only coherent alloc/free/remap are functional; streaming DMA entry
 * points are stubs that report failure or do nothing.
 */
const struct dma_map_ops removed_dma_ops = {
	.alloc			= removed_alloc,
	.free			= removed_free,
	.mmap			= removed_mmap,
	.map_page		= removed_map_page,
	.unmap_page		= removed_unmap_page,
	.map_sg			= removed_map_sg,
	.unmap_sg		= removed_unmap_sg,
	.sync_single_for_cpu	= removed_sync_single_for_cpu,
	.sync_single_for_device	= removed_sync_single_for_device,
	.sync_sg_for_cpu	= removed_sync_sg_for_cpu,
	.sync_sg_for_device	= removed_sync_sg_for_device,
	.remap			= removed_remap,
	.unremap		= removed_unremap,
};
EXPORT_SYMBOL(removed_dma_ops);
  327. #ifdef CONFIG_OF_RESERVED_MEM
  328. #include <linux/of.h>
  329. #include <linux/of_fdt.h>
  330. #include <linux/of_reserved_mem.h>
  331. static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
  332. {
  333. struct removed_region *mem = rmem->priv;
  334. if (!mem && dma_init_removed_memory(rmem->base, rmem->size, &mem)) {
  335. pr_info("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
  336. &rmem->base, (unsigned long)rmem->size / SZ_1M);
  337. return -EINVAL;
  338. }
  339. mem->fixup = rmem->fixup;
  340. set_dma_ops(dev, &removed_dma_ops);
  341. rmem->priv = mem;
  342. dma_assign_removed_region(dev, mem);
  343. return 0;
  344. }
  345. static void rmem_dma_device_release(struct reserved_mem *rmem,
  346. struct device *dev)
  347. {
  348. dev->dma_mem = NULL;
  349. }
/* Lifecycle hooks invoked by the reserved-memory core per device. */
static const struct reserved_mem_ops removed_mem_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};
  354. static int __init removed_dma_setup(struct reserved_mem *rmem)
  355. {
  356. unsigned long node = rmem->fdt_node;
  357. int nomap, fixup;
  358. nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
  359. fixup = of_get_flat_dt_prop(node, "no-map-fixup", NULL) != NULL;
  360. if (nomap && fixup) {
  361. pr_err("Removed memory: nomap & nomap-fixup can't co-exist\n");
  362. return -EINVAL;
  363. }
  364. rmem->fixup = fixup;
  365. if (rmem->fixup) {
  366. /* Architecture specific contiguous memory fixup only for
  367. * no-map-fixup to split mappings
  368. */
  369. dma_contiguous_early_fixup(rmem->base, rmem->size);
  370. }
  371. rmem->ops = &removed_mem_ops;
  372. pr_info("Removed memory: created DMA memory pool at %pa, size %ld MiB\n",
  373. &rmem->base, (unsigned long)rmem->size / SZ_1M);
  374. return 0;
  375. }
  376. RESERVEDMEM_OF_DECLARE(dma, "removed-dma-pool", removed_dma_setup);
  377. #endif