ax45mp_cache.c

// SPDX-License-Identifier: GPL-2.0
/*
 * non-coherent cache functions for Andes AX45MP
 *
 * Copyright (C) 2023 Renesas Electronics Corp.
 */

#include <linux/cacheflush.h>
#include <linux/cacheinfo.h>
#include <linux/dma-direction.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>

#include <asm/dma-noncoherent.h>
/* L2 cache registers */
#define AX45MP_L2C_REG_CTL_OFFSET		0x8
#define AX45MP_L2C_REG_C0_CMD_OFFSET		0x40
#define AX45MP_L2C_REG_C0_ACC_OFFSET		0x48
#define AX45MP_L2C_REG_STATUS_OFFSET		0x80

/* D-cache operation */
#define AX45MP_CCTL_L1D_VA_INVAL		0 /* Invalidate an L1 cache entry */
#define AX45MP_CCTL_L1D_VA_WB			1 /* Write-back an L1 cache entry */

/* L2 CCTL status */
#define AX45MP_CCTL_L2_STATUS_IDLE		0

/* L2 CCTL status cores mask */
#define AX45MP_CCTL_L2_STATUS_C0_MASK		0xf

/* L2 cache operation */
#define AX45MP_CCTL_L2_PA_INVAL			0x8 /* Invalidate an L2 cache entry */
#define AX45MP_CCTL_L2_PA_WB			0x9 /* Write-back an L2 cache entry */

#define AX45MP_L2C_REG_PER_CORE_OFFSET		0x10
#define AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET	4

#define AX45MP_L2C_REG_CN_CMD_OFFSET(n)	\
	(AX45MP_L2C_REG_C0_CMD_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_L2C_REG_CN_ACC_OFFSET(n)	\
	(AX45MP_L2C_REG_C0_ACC_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_CCTL_L2_STATUS_CN_MASK(n)	\
	(AX45MP_CCTL_L2_STATUS_C0_MASK << ((n) * AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET))

#define AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM	0x80b
#define AX45MP_CCTL_REG_UCCTLCOMMAND_NUM	0x80c

#define AX45MP_CACHE_LINE_SIZE			64

struct ax45mp_priv {
	void __iomem *l2c_base;
	u32 ax45mp_cache_line_size;
};
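
/*
 * A single driver-wide instance: the AX45MP cluster exposes one shared
 * L2 cache controller.
 */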
static struct ax45mp_priv ax45mp_priv;

/* L2 Cache operations */
static inline uint32_t ax45mp_cpu_l2c_get_cctl_status(void)
{
	return readl(ax45mp_priv.l2c_base + AX45MP_L2C_REG_STATUS_OFFSET);
}
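
/*
 * Walk the range one cache line at a time. For each line the L1 D-cache is
 * maintained by virtual address through the ucctlbeginaddr/ucctlcommand CCTL
 * CSRs, and the L2 cache is maintained by physical address through this
 * hart's access/command register pair in the L2 controller, polling the
 * status register until that hart's status field reports idle again.
 */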
static void ax45mp_cpu_cache_operation(unsigned long start, unsigned long end,
				       unsigned int l1_op, unsigned int l2_op)
{
	unsigned long line_size = ax45mp_priv.ax45mp_cache_line_size;
	void __iomem *base = ax45mp_priv.l2c_base;
	int mhartid = smp_processor_id();
	unsigned long pa;

	while (end > start) {
		csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
		csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, l1_op);

		pa = virt_to_phys((void *)start);
		writel(pa, base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid));
		writel(l2_op, base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid));
		while ((ax45mp_cpu_l2c_get_cctl_status() &
			AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
			AX45MP_CCTL_L2_STATUS_IDLE)
			;

		start += line_size;
	}
}

/* Write-back L1 and L2 cache entry */
static inline void ax45mp_cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_WB,
				   AX45MP_CCTL_L2_PA_WB);
}

/* Invalidate the L1 and L2 cache entry */
static inline void ax45mp_cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_INVAL,
				   AX45MP_CCTL_L2_PA_INVAL);
}
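
/*
 * DMA cache maintenance entry points: each converts the physical range to
 * its kernel virtual alias, rounds it out to cache-line boundaries, and
 * runs the maintenance loop with local interrupts disabled.
 */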
static void ax45mp_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	if (unlikely(start == end))
		return;

	line_size = ax45mp_priv.ax45mp_cache_line_size;
	start = start & (~(line_size - 1));
	end = ((end + line_size - 1) & (~(line_size - 1)));

	local_irq_save(flags);
	ax45mp_cpu_dcache_inval_range(start, end);
	local_irq_restore(flags);
}

static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	if (unlikely(start == end))
		return;

	line_size = ax45mp_priv.ax45mp_cache_line_size;
	start = start & (~(line_size - 1));
	end = ((end + line_size - 1) & (~(line_size - 1)));

	local_irq_save(flags);
	ax45mp_cpu_dcache_wb_range(start, end);
	local_irq_restore(flags);
}
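
/*
 * For bidirectional DMA the range is first written back, so dirty lines
 * reach memory before the transfer, and then invalidated, so the CPU does
 * not read stale lines after the device has written.
 */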
static void ax45mp_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	ax45mp_dma_cache_wback(paddr, size);
	ax45mp_dma_cache_inv(paddr, size);
}

static int ax45mp_get_l2_line_size(struct device_node *np)
{
	int ret;

	ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv.ax45mp_cache_line_size);
	if (ret) {
		pr_err("Failed to get cache-line-size property\n");
		return ret;
	}

	if (ax45mp_priv.ax45mp_cache_line_size != AX45MP_CACHE_LINE_SIZE) {
		pr_err("Expected cache-line-size to be 64 bytes (found:%u)\n",
		       ax45mp_priv.ax45mp_cache_line_size);
		return -EINVAL;
	}

	return 0;
}
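
/* Non-standard CMO callbacks registered with the RISC-V noncoherent DMA core */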
static const struct riscv_nonstd_cache_ops ax45mp_cmo_ops __initdata = {
	.wback = &ax45mp_dma_cache_wback,
	.inv = &ax45mp_dma_cache_inv,
	.wback_inv = &ax45mp_dma_cache_wback_inv,
};

static const struct of_device_id ax45mp_cache_ids[] = {
	{ .compatible = "andestech,ax45mp-cache" },
	{ /* sentinel */ }
};

static int __init ax45mp_cache_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret;

	np = of_find_matching_node(NULL, ax45mp_cache_ids);
	if (!of_device_is_available(np))
		return -ENODEV;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	/*
	 * If IOCP is present on the Andes AX45MP core, riscv_cbom_block_size
	 * is guaranteed to be 0, so we can rely on it: when it is 0 there is
	 * no need to handle CMOs in software and we simply return success.
	 * Only when riscv_cbom_block_size is set do we continue further down
	 * the probe path.
	 */
	if (!riscv_cbom_block_size)
		return 0;

	ax45mp_priv.l2c_base = ioremap(res.start, resource_size(&res));
	if (!ax45mp_priv.l2c_base)
		return -ENOMEM;

	ret = ax45mp_get_l2_line_size(np);
	if (ret) {
		iounmap(ax45mp_priv.l2c_base);
		return ret;
	}

	riscv_noncoherent_register_cache_ops(&ax45mp_cmo_ops);

	return 0;
}

early_initcall(ax45mp_cache_init);
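
/*
 * Usage sketch (illustrative, not part of this file): the driver binds to an
 * "andestech,ax45mp-cache" node describing the L2 controller. A device tree
 * node might look like the following; the unit address, interrupt specifier
 * and cache geometry below are hypothetical placeholders for a real board's
 * values, while cache-line-size must be 64 to satisfy the check above.
 *
 *	cache-controller@13400000 {
 *		compatible = "andestech,ax45mp-cache", "cache";
 *		reg = <0x0 0x13400000 0x0 0x100000>;
 *		interrupts = <476 IRQ_TYPE_LEVEL_HIGH>;
 *		cache-line-size = <64>;
 *		cache-level = <2>;
 *		cache-sets = <1024>;
 *		cache-size = <0x40000>;
 *		cache-unified;
 *	};
 */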