dma-mapping.c

/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Based on DMA code from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>
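
/*
 * Cache maintenance done before the device accesses a streaming DMA buffer:
 * for DMA_FROM_DEVICE the cached lines only need to be invalidated, while
 * DMA_TO_DEVICE and DMA_BIDIRECTIONAL use flush_dcache_range(), which on
 * Nios2 both writes back and invalidates.
 */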
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/*
		 * We just need to flush the caches here, but the Nios2
		 * flush instruction does both writeback and invalidate.
		 */
	case DMA_BIDIRECTIONAL: /* flush and invalidate */
		flush_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	default:
		BUG();
	}
}
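
/*
 * Cache maintenance done after the device has finished DMA, before the CPU
 * touches the buffer again: invalidate for DMA_FROM_DEVICE and
 * DMA_BIDIRECTIONAL so the CPU does not read stale cached data; nothing is
 * needed for DMA_TO_DEVICE.
 */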
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		break;
	default:
		BUG();
	}
}
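
/*
 * Backend for dma_alloc_coherent() on this platform: allocate zeroed pages
 * (from ZONE_DMA when the device cannot address all of memory), write the
 * contents back out of the data cache, and return an uncached alias of the
 * buffer so that CPU and device see consistent data.
 */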
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* optimized page clearing */
	gfp |= __GFP_ZERO;

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		*dma_handle = virt_to_phys(ret);
		flush_dcache_range((unsigned long) ret,
			(unsigned long) ret + size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}
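
/*
 * Counterpart to arch_dma_alloc(): convert the uncached alias back to the
 * cached kernel address the pages were allocated at and release them.
 */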
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

	free_pages(addr, get_order(size));
}
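
/*
 * Illustrative driver-side sketch (not part of this file) of how the hooks
 * above are reached through the generic DMA API; buffer and length names
 * are made up for the example.
 *
 *	dma_addr_t handle;
 *	void *desc = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *		// backed by arch_dma_alloc(), released via dma_free_coherent()
 *
 *	dma_addr_t rx = dma_map_single(dev, rx_buf, len, DMA_FROM_DEVICE);
 *		// invalidates the buffer via arch_sync_dma_for_device()
 *	... device writes into the buffer ...
 *	dma_unmap_single(dev, rx, len, DMA_FROM_DEVICE);
 *		// invalidates again via arch_sync_dma_for_cpu() before the
 *		// CPU reads the received data
 */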