  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * SiFive composable cache controller Driver
  4. *
  5. * Copyright (C) 2018-2022 SiFive, Inc.
  6. *
  7. */
  8. #define pr_fmt(fmt) "CCACHE: " fmt
  9. #include <linux/align.h>
  10. #include <linux/debugfs.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/of_irq.h>
  13. #include <linux/of_address.h>
  14. #include <linux/device.h>
  15. #include <linux/bitfield.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/property.h>
  18. #include <asm/cacheflush.h>
  19. #include <asm/cacheinfo.h>
  20. #include <asm/dma-noncoherent.h>
  21. #include <soc/sifive/sifive_ccache.h>
  22. #define SIFIVE_CCACHE_DIRECCFIX_LOW 0x100
  23. #define SIFIVE_CCACHE_DIRECCFIX_HIGH 0x104
  24. #define SIFIVE_CCACHE_DIRECCFIX_COUNT 0x108
  25. #define SIFIVE_CCACHE_DIRECCFAIL_LOW 0x120
  26. #define SIFIVE_CCACHE_DIRECCFAIL_HIGH 0x124
  27. #define SIFIVE_CCACHE_DIRECCFAIL_COUNT 0x128
  28. #define SIFIVE_CCACHE_DATECCFIX_LOW 0x140
  29. #define SIFIVE_CCACHE_DATECCFIX_HIGH 0x144
  30. #define SIFIVE_CCACHE_DATECCFIX_COUNT 0x148
  31. #define SIFIVE_CCACHE_DATECCFAIL_LOW 0x160
  32. #define SIFIVE_CCACHE_DATECCFAIL_HIGH 0x164
  33. #define SIFIVE_CCACHE_DATECCFAIL_COUNT 0x168
  34. #define SIFIVE_CCACHE_CONFIG 0x00
  35. #define SIFIVE_CCACHE_CONFIG_BANK_MASK GENMASK_ULL(7, 0)
  36. #define SIFIVE_CCACHE_CONFIG_WAYS_MASK GENMASK_ULL(15, 8)
  37. #define SIFIVE_CCACHE_CONFIG_SETS_MASK GENMASK_ULL(23, 16)
  38. #define SIFIVE_CCACHE_CONFIG_BLKS_MASK GENMASK_ULL(31, 24)
  39. #define SIFIVE_CCACHE_FLUSH64 0x200
  40. #define SIFIVE_CCACHE_FLUSH32 0x240
  41. #define SIFIVE_CCACHE_WAYENABLE 0x08
  42. #define SIFIVE_CCACHE_ECCINJECTERR 0x40
  43. #define SIFIVE_CCACHE_MAX_ECCINTR 4
  44. #define SIFIVE_CCACHE_LINE_SIZE 64
/* MMIO base of the cache controller; mapped once in sifive_ccache_init(). */
static void __iomem *ccache_base;
/* Linux IRQ numbers, indexed by the error-source enum below; set at probe. */
static int g_irq[SIFIVE_CCACHE_MAX_ECCINTR];
static struct riscv_cacheinfo_ops ccache_cache_ops;
/* Cache level of this controller, read from the DT "cache-level" property. */
static int level;

/*
 * Indices into g_irq[] and into the DT "interrupts" property.
 * Note the order is not monotonic by severity: DIR_UNCORR is the
 * fourth interrupt, after both DATA interrupts.
 */
enum {
	DIR_CORR = 0,
	DATA_CORR,
	DATA_UNCORR,
	DIR_UNCORR,
};

/* Per-compatible quirk flags, carried in of_device_id::data. */
enum {
	QUIRK_NONSTANDARD_CACHE_OPS = BIT(0),
	QUIRK_BROKEN_DATA_UNCORR = BIT(1),
};

#ifdef CONFIG_DEBUG_FS
/* debugfs directory for the error-injection knob below. */
static struct dentry *sifive_test;
  61. static ssize_t ccache_write(struct file *file, const char __user *data,
  62. size_t count, loff_t *ppos)
  63. {
  64. unsigned int val;
  65. if (kstrtouint_from_user(data, count, 0, &val))
  66. return -EINVAL;
  67. if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
  68. writel(val, ccache_base + SIFIVE_CCACHE_ECCINJECTERR);
  69. else
  70. return -EINVAL;
  71. return count;
  72. }
static const struct file_operations ccache_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = ccache_write
};

/*
 * Create the debugfs error-injection interface:
 * /sys/kernel/debug/sifive_ccache_cache/sifive_debug_inject_error
 * (write-only, root). Errors are ignored — debugfs is best-effort.
 */
static void setup_sifive_debug(void)
{
	sifive_test = debugfs_create_dir("sifive_ccache_cache", NULL);

	debugfs_create_file("sifive_debug_inject_error", 0200,
			    sifive_test, NULL, &ccache_fops);
}
#endif
  85. static void ccache_config_read(void)
  86. {
  87. u32 cfg;
  88. cfg = readl(ccache_base + SIFIVE_CCACHE_CONFIG);
  89. pr_info("%llu banks, %llu ways, sets/bank=%llu, bytes/block=%llu\n",
  90. FIELD_GET(SIFIVE_CCACHE_CONFIG_BANK_MASK, cfg),
  91. FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg),
  92. BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_SETS_MASK, cfg)),
  93. BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_BLKS_MASK, cfg)));
  94. cfg = readl(ccache_base + SIFIVE_CCACHE_WAYENABLE);
  95. pr_info("Index of the largest way enabled: %u\n", cfg);
  96. }
/*
 * Match table, used both by the platform driver and by sifive_ccache_init()
 * to locate the node at arch_initcall time. ::data carries the quirk
 * bitmask defined above (only the JH7100 integration needs quirks).
 */
static const struct of_device_id sifive_ccache_ids[] = {
	{ .compatible = "sifive,fu540-c000-ccache" },
	{ .compatible = "sifive,fu740-c000-ccache" },
	{ .compatible = "starfive,jh7100-ccache",
	  .data = (void *)(QUIRK_NONSTANDARD_CACHE_OPS | QUIRK_BROKEN_DATA_UNCORR) },
	{ .compatible = "sifive,ccache0" },
	{ /* end of table */ }
};
/* Notifier chain fired from the ECC interrupt handler (atomic context). */
static ATOMIC_NOTIFIER_HEAD(ccache_err_chain);

/*
 * register_sifive_ccache_error_notifier() - subscribe to ECC error events.
 * @nb: notifier block; called with SIFIVE_CCACHE_ERR_TYPE_CE/_UE and a
 *      string naming the error source. May be invoked in interrupt context.
 */
int register_sifive_ccache_error_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&ccache_err_chain, nb);
}
EXPORT_SYMBOL_GPL(register_sifive_ccache_error_notifier);

/* unregister_sifive_ccache_error_notifier() - remove a previously
 * registered notifier block from the error chain. */
int unregister_sifive_ccache_error_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&ccache_err_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_sifive_ccache_error_notifier);
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
/*
 * ccache_flush_range() - flush every cache line overlapping
 * [start, start + len) by writing each line's physical address to the
 * controller's flush register.
 *
 * The start address is aligned down to the 64-byte line size so a partial
 * first line is still covered; the loop condition (line < end) likewise
 * covers a partial last line.
 */
static void ccache_flush_range(phys_addr_t start, size_t len)
{
	phys_addr_t end = start + len;
	phys_addr_t line;

	if (!len)
		return;

	/* Order prior memory accesses before the flush writes. */
	mb();
	for (line = ALIGN_DOWN(start, SIFIVE_CCACHE_LINE_SIZE); line < end;
	     line += SIFIVE_CCACHE_LINE_SIZE) {
#ifdef CONFIG_32BIT
		/* The 32-bit flush register takes the address shifted right by 4. */
		writel(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
#else
		writeq(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
#endif
		/* Barrier after each write so flushes are issued in order. */
		mb();
	}
}

/*
 * All three maintenance hooks map to the same flush routine; presumably the
 * controller's flush is a combined writeback+invalidate and no separate
 * invalidate exists — confirm against the CCACHE register documentation.
 */
static const struct riscv_nonstd_cache_ops ccache_mgmt_ops __initconst = {
	.wback = &ccache_flush_range,
	.inv = &ccache_flush_range,
	.wback_inv = &ccache_flush_range,
};
#endif /* CONFIG_RISCV_NONSTANDARD_CACHE_OPS */
  140. static int ccache_largest_wayenabled(void)
  141. {
  142. return readl(ccache_base + SIFIVE_CCACHE_WAYENABLE) & 0xFF;
  143. }
  144. static ssize_t number_of_ways_enabled_show(struct device *dev,
  145. struct device_attribute *attr,
  146. char *buf)
  147. {
  148. return sprintf(buf, "%u\n", ccache_largest_wayenabled());
  149. }
  150. static DEVICE_ATTR_RO(number_of_ways_enabled);
/* Extra sysfs attributes attached to this cache's cacheinfo node. */
static struct attribute *priv_attrs[] = {
	&dev_attr_number_of_ways_enabled.attr,
	NULL,
};

static const struct attribute_group priv_attr_group = {
	.attrs = priv_attrs,
};
  158. static const struct attribute_group *ccache_get_priv_group(struct cacheinfo
  159. *this_leaf)
  160. {
  161. /* We want to use private group for composable cache only */
  162. if (this_leaf->level == level)
  163. return &priv_attr_group;
  164. else
  165. return NULL;
  166. }
/*
 * Shared handler for all four ECC interrupt lines. The source is identified
 * by comparing irq against the numbers saved in g_irq[]. For each source:
 * read the fault address, read the COUNT register (reads clear the
 * interrupt — the read order therefore matters), then notify subscribers.
 * Uncorrectable directory errors are fatal and panic.
 */
static irqreturn_t ccache_int_handler(int irq, void *device)
{
	unsigned int add_h, add_l;

	if (irq == g_irq[DIR_CORR]) {
		add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_HIGH);
		add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_LOW);
		pr_err("DirError @ 0x%08X.%08X\n", add_h, add_l);
		/* Reading this register clears the DirError interrupt sig */
		readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_COUNT);
		atomic_notifier_call_chain(&ccache_err_chain,
					   SIFIVE_CCACHE_ERR_TYPE_CE,
					   "DirECCFix");
	}
	if (irq == g_irq[DIR_UNCORR]) {
		add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_HIGH);
		add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_LOW);
		/* Reading this register clears the DirFail interrupt sig */
		readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_COUNT);
		/* Notify subscribers before panicking so they can react. */
		atomic_notifier_call_chain(&ccache_err_chain,
					   SIFIVE_CCACHE_ERR_TYPE_UE,
					   "DirECCFail");
		panic("CCACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l);
	}
	if (irq == g_irq[DATA_CORR]) {
		add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_HIGH);
		add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_LOW);
		pr_err("DataError @ 0x%08X.%08X\n", add_h, add_l);
		/* Reading this register clears the DataError interrupt sig */
		readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_COUNT);
		atomic_notifier_call_chain(&ccache_err_chain,
					   SIFIVE_CCACHE_ERR_TYPE_CE,
					   "DatECCFix");
	}
	if (irq == g_irq[DATA_UNCORR]) {
		add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_HIGH);
		add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_LOW);
		pr_err("DataFail @ 0x%08X.%08X\n", add_h, add_l);
		/* Reading this register clears the DataFail interrupt sig */
		readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_COUNT);
		atomic_notifier_call_chain(&ccache_err_chain,
					   SIFIVE_CCACHE_ERR_TYPE_UE,
					   "DatECCFail");
	}

	/* NOTE(review): IRQ_HANDLED is returned even if irq matched no
	 * source; a spurious interrupt is therefore never reported. */
	return IRQ_HANDLED;
}
  212. static int sifive_ccache_probe(struct platform_device *pdev)
  213. {
  214. struct device *dev = &pdev->dev;
  215. unsigned long quirks;
  216. int intr_num, rc;
  217. quirks = (unsigned long)device_get_match_data(dev);
  218. intr_num = platform_irq_count(pdev);
  219. if (!intr_num)
  220. return dev_err_probe(dev, -ENODEV, "No interrupts property\n");
  221. for (int i = 0; i < intr_num; i++) {
  222. if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
  223. continue;
  224. g_irq[i] = platform_get_irq(pdev, i);
  225. if (g_irq[i] < 0)
  226. return g_irq[i];
  227. rc = devm_request_irq(dev, g_irq[i], ccache_int_handler, 0, "ccache_ecc", NULL);
  228. if (rc)
  229. return dev_err_probe(dev, rc, "Could not request IRQ %d\n", g_irq[i]);
  230. }
  231. return 0;
  232. }
/* Platform driver; registered late in sifive_ccache_init() so that the
 * MMIO mapping and cacheinfo hooks are in place before probe runs. */
static struct platform_driver sifive_ccache_driver = {
	.probe = sifive_ccache_probe,
	.driver = {
		.name = "sifive_ccache",
		.of_match_table = sifive_ccache_ids,
	},
};
/*
 * Early (arch_initcall) setup: locate the controller node, map its
 * registers, read the cache level, optionally install nonstandard cache
 * maintenance ops (JH7100 quirk), log the configuration, hook cacheinfo,
 * create debugfs, and finally register the platform driver for IRQ setup.
 *
 * Cleanup uses the goto-ladder pattern: err_unmap undoes the ioremap,
 * err_node_put drops the DT node reference taken by
 * of_find_matching_node_and_match().
 */
static int __init sifive_ccache_init(void)
{
	struct device_node *np;
	struct resource res;
	const struct of_device_id *match;
	unsigned long quirks __maybe_unused;
	int rc;

	np = of_find_matching_node_and_match(NULL, sifive_ccache_ids, &match);
	if (!np)
		return -ENODEV;

	quirks = (uintptr_t)match->data;

	if (of_address_to_resource(np, 0, &res)) {
		rc = -ENODEV;
		goto err_node_put;
	}

	ccache_base = ioremap(res.start, resource_size(&res));
	if (!ccache_base) {
		rc = -ENOMEM;
		goto err_node_put;
	}

	/* NOTE(review): 'level' is declared int but of_property_read_u32()
	 * takes a u32 * — same representation, but worth confirming the
	 * declaration/type pairing. */
	if (of_property_read_u32(np, "cache-level", &level)) {
		rc = -ENOENT;
		goto err_unmap;
	}

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (quirks & QUIRK_NONSTANDARD_CACHE_OPS) {
		riscv_cbom_block_size = SIFIVE_CCACHE_LINE_SIZE;
		riscv_noncoherent_supported();
		riscv_noncoherent_register_cache_ops(&ccache_mgmt_ops);
	}
#endif

	ccache_config_read();

	ccache_cache_ops.get_priv_group = ccache_get_priv_group;
	riscv_set_cacheinfo_ops(&ccache_cache_ops);

#ifdef CONFIG_DEBUG_FS
	setup_sifive_debug();
#endif

	rc = platform_driver_register(&sifive_ccache_driver);
	if (rc)
		goto err_unmap;

	of_node_put(np);

	return 0;

err_unmap:
	iounmap(ccache_base);
err_node_put:
	of_node_put(np);
	return rc;
}
arch_initcall(sifive_ccache_init);