/* SPDX: arch/c6x/mm/dma-coherent.c */
  1. /*
  2. * Port on Texas Instruments TMS320C6x architecture
  3. *
  4. * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
  5. * Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * DMA uncached mapping support.
  12. *
  13. * Using code pulled from ARM
  14. * Copyright (C) 2000-2004 Russell King
  15. *
  16. */
  17. #include <linux/slab.h>
  18. #include <linux/bitmap.h>
  19. #include <linux/bitops.h>
  20. #include <linux/module.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/dma-noncoherent.h>
  23. #include <linux/memblock.h>
  24. #include <asm/cacheflush.h>
  25. #include <asm/page.h>
  26. #include <asm/setup.h>
  27. /*
  28. * DMA coherent memory management, can be redefined using the memdma=
  29. * kernel command line
  30. */
  31. /* none by default */
  32. static phys_addr_t dma_base;
  33. static u32 dma_size;
  34. static u32 dma_pages;
  35. static unsigned long *dma_bitmap;
  36. /* bitmap lock */
  37. static DEFINE_SPINLOCK(dma_lock);
  38. /*
  39. * Return a DMA coherent and contiguous memory chunk from the DMA memory
  40. */
  41. static inline u32 __alloc_dma_pages(int order)
  42. {
  43. unsigned long flags;
  44. u32 pos;
  45. spin_lock_irqsave(&dma_lock, flags);
  46. pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
  47. spin_unlock_irqrestore(&dma_lock, flags);
  48. return dma_base + (pos << PAGE_SHIFT);
  49. }
  50. static void __free_dma_pages(u32 addr, int order)
  51. {
  52. unsigned long flags;
  53. u32 pos = (addr - dma_base) >> PAGE_SHIFT;
  54. if (addr < dma_base || (pos + (1 << order)) >= dma_pages) {
  55. printk(KERN_ERR "%s: freeing outside range.\n", __func__);
  56. BUG();
  57. }
  58. spin_lock_irqsave(&dma_lock, flags);
  59. bitmap_release_region(dma_bitmap, pos, order);
  60. spin_unlock_irqrestore(&dma_lock, flags);
  61. }
  62. /*
  63. * Allocate DMA coherent memory space and return both the kernel
  64. * virtual and DMA address for that space.
  65. */
  66. void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
  67. gfp_t gfp, unsigned long attrs)
  68. {
  69. u32 paddr;
  70. int order;
  71. if (!dma_size || !size)
  72. return NULL;
  73. order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
  74. paddr = __alloc_dma_pages(order);
  75. if (handle)
  76. *handle = paddr;
  77. if (!paddr)
  78. return NULL;
  79. return phys_to_virt(paddr);
  80. }
  81. /*
  82. * Free DMA coherent memory as defined by the above mapping.
  83. */
  84. void arch_dma_free(struct device *dev, size_t size, void *vaddr,
  85. dma_addr_t dma_handle, unsigned long attrs)
  86. {
  87. int order;
  88. if (!dma_size || !size)
  89. return;
  90. order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
  91. __free_dma_pages(virt_to_phys(vaddr), order);
  92. }
  93. /*
  94. * Initialise the coherent DMA memory allocator using the given uncached region.
  95. */
  96. void __init coherent_mem_init(phys_addr_t start, u32 size)
  97. {
  98. phys_addr_t bitmap_phys;
  99. if (!size)
  100. return;
  101. printk(KERN_INFO
  102. "Coherent memory (DMA) region start=0x%x size=0x%x\n",
  103. start, size);
  104. dma_base = start;
  105. dma_size = size;
  106. /* allocate bitmap */
  107. dma_pages = dma_size >> PAGE_SHIFT;
  108. if (dma_size & (PAGE_SIZE - 1))
  109. ++dma_pages;
  110. bitmap_phys = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
  111. sizeof(long));
  112. dma_bitmap = phys_to_virt(bitmap_phys);
  113. memset(dma_bitmap, 0, dma_pages * PAGE_SIZE);
  114. }
  115. static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
  116. enum dma_data_direction dir)
  117. {
  118. BUG_ON(!valid_dma_direction(dir));
  119. switch (dir) {
  120. case DMA_FROM_DEVICE:
  121. L2_cache_block_invalidate(paddr, paddr + size);
  122. break;
  123. case DMA_TO_DEVICE:
  124. L2_cache_block_writeback(paddr, paddr + size);
  125. break;
  126. case DMA_BIDIRECTIONAL:
  127. L2_cache_block_writeback_invalidate(paddr, paddr + size);
  128. break;
  129. default:
  130. break;
  131. }
  132. }
  133. void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
  134. size_t size, enum dma_data_direction dir)
  135. {
  136. return c6x_dma_sync(dev, paddr, size, dir);
  137. }
  138. void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
  139. size_t size, enum dma_data_direction dir)
  140. {
  141. return c6x_dma_sync(dev, paddr, size, dir);
  142. }