pci-dma.c

/*
 * DMA coherent memory allocation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/platform.h>
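
/*
 * Apply a cache maintenance callback to the physical range
 * [paddr, paddr + size). Low memory is reached through its permanent
 * kernel mapping; highmem pages are temporarily mapped one page at a
 * time with kmap_atomic().
 */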
static void do_cache_op(phys_addr_t paddr, size_t size,
			void (*fn)(unsigned long, unsigned long))
{
	unsigned long off = paddr & (PAGE_SIZE - 1);
	unsigned long pfn = PFN_DOWN(paddr);
	struct page *page = pfn_to_page(pfn);

	if (!PageHighMem(page))
		fn((unsigned long)phys_to_virt(paddr), size);
	else
		while (size > 0) {
			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
			void *vaddr = kmap_atomic(page);

			fn((unsigned long)vaddr + off, sz);
			kunmap_atomic(vaddr);
			off = 0;
			++page;
			size -= sz;
		}
}
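
/*
 * Sync before the CPU reads a streaming DMA buffer: for DMA_FROM_DEVICE
 * (and DMA_BIDIRECTIONAL) the device may have written new data, so any
 * stale data cache lines covering the buffer are invalidated.
 */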
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
			   size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		do_cache_op(paddr, size, __invalidate_dcache_range);
		break;
	case DMA_NONE:
		BUG();
		break;
	default:
		break;
	}
}
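
/*
 * Sync before the device reads a streaming DMA buffer: for DMA_TO_DEVICE
 * (and DMA_BIDIRECTIONAL) dirty data cache lines are written back to
 * memory, which is only needed when the data cache is writeback.
 */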
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		if (XCHAL_DCACHE_IS_WRITEBACK)
			do_cache_op(paddr, size, __flush_dcache_range);
		break;
	case DMA_NONE:
		BUG();
		break;
	default:
		break;
	}
}
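
/*
 * With an MMU, KSEG provides two fixed aliases of low memory: a cached
 * mapping at XCHAL_KSEG_CACHED_VADDR and an uncached (bypass) mapping at
 * XCHAL_KSEG_BYPASS_VADDR. The helpers below test which alias an address
 * belongs to and convert between the two.
 */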
#ifdef CONFIG_MMU
bool platform_vaddr_cached(const void *p)
{
	unsigned long addr = (unsigned long)p;

	return addr >= XCHAL_KSEG_CACHED_VADDR &&
	       addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
}

bool platform_vaddr_uncached(const void *p)
{
	unsigned long addr = (unsigned long)p;

	return addr >= XCHAL_KSEG_BYPASS_VADDR &&
	       addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
}

void *platform_vaddr_to_uncached(void *p)
{
	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}

void *platform_vaddr_to_cached(void *p)
{
	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
}
#else
bool __attribute__((weak)) platform_vaddr_cached(const void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return true;
}

bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return false;
}

void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return p;
}

void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return p;
}
#endif

/*
 * Note: We assume that the full memory space is always mapped to 'kseg'.
 *	 Otherwise we have to use page attributes (not implemented).
 */
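/*
 * Coherent allocation: try CMA first when blocking is allowed, then fall
 * back to alloc_pages(). Highmem pages get an uncached remapping via
 * dma_common_contiguous_remap(); lowmem pages are returned through the
 * uncached KSEG alias after their cached alias has been invalidated.
 */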
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		     gfp_t flag, unsigned long attrs)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;

	if (gfpflags_allow_blocking(flag))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag & __GFP_NOWARN);

	if (!page)
		page = alloc_pages(flag, get_order(size));

	if (!page)
		return NULL;

	*handle = phys_to_dma(dev, page_to_phys(page));

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return page;

#ifdef CONFIG_MMU
	if (PageHighMem(page)) {
		void *p;

		p = dma_common_contiguous_remap(page, size, VM_MAP,
						pgprot_noncached(PAGE_KERNEL),
						__builtin_return_address(0));
		if (!p) {
			if (!dma_release_from_contiguous(dev, page, count))
				__free_pages(page, get_order(size));
		}
		return p;
	}
#endif
	BUG_ON(!platform_vaddr_cached(page_address(page)));
	__invalidate_dcache_range((unsigned long)page_address(page), size);
	return platform_vaddr_to_uncached(page_address(page));
}
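
/*
 * Undo arch_dma_alloc(): recover the struct page from whichever mapping
 * was handed out (no kernel mapping, uncached KSEG alias, or highmem
 * remap), drop any remapping, and return the pages to CMA or the page
 * allocator.
 */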
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		page = vaddr;
	} else if (platform_vaddr_uncached(vaddr)) {
		page = virt_to_page(platform_vaddr_to_cached(vaddr));
	} else {
#ifdef CONFIG_MMU
		dma_common_free_remap(vaddr, size, VM_MAP);
#endif
		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
	}

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}