/* dca-core.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  4. */
  5. /*
  6. * This driver supports an interface for DCA clients and providers to meet.
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/notifier.h>
  10. #include <linux/device.h>
  11. #include <linux/dca.h>
  12. #include <linux/slab.h>
  13. #include <linux/module.h>
  14. #define DCA_VERSION "1.12.1"
  15. MODULE_VERSION(DCA_VERSION);
  16. MODULE_DESCRIPTION("Intel Direct Cache Access (DCA) service module");
  17. MODULE_LICENSE("GPL");
  18. MODULE_AUTHOR("Intel Corporation");
  19. static DEFINE_RAW_SPINLOCK(dca_lock);
  20. static LIST_HEAD(dca_domains);
  21. static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
  22. static int dca_providers_blocked;
  23. static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
  24. {
  25. struct pci_dev *pdev = to_pci_dev(dev);
  26. struct pci_bus *bus = pdev->bus;
  27. while (bus->parent)
  28. bus = bus->parent;
  29. return bus;
  30. }
  31. static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
  32. {
  33. struct dca_domain *domain;
  34. domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
  35. if (!domain)
  36. return NULL;
  37. INIT_LIST_HEAD(&domain->dca_providers);
  38. domain->pci_rc = rc;
  39. return domain;
  40. }
/*
 * Unlink @domain from the dca_domains list and free it.
 * All callers in this file hold dca_lock when invoking this.
 */
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}
  46. static int dca_provider_ioat_ver_3_0(struct device *dev)
  47. {
  48. struct pci_dev *pdev = to_pci_dev(dev);
  49. return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
  50. ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
  51. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
  52. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
  53. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
  54. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
  55. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
  56. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
  57. (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
  58. }
/*
 * unregister_dca_providers - tear down every registered provider
 *
 * Used when an incompatible provider mix is detected
 * (dca_providers_blocked set).  Providers are unhooked from the single
 * remaining domain under dca_lock, then their sysfs entries are removed
 * only after the lock is dropped (sysfs removal is presumably a
 * sleeping operation, so it must not run under the raw spinlock).
 */
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	/* Let clients know their providers are going away. */
	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	/* Move providers to a private list so cleanup can happen unlocked. */
	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}
  84. static struct dca_domain *dca_find_domain(struct pci_bus *rc)
  85. {
  86. struct dca_domain *domain;
  87. list_for_each_entry(domain, &dca_domains, node)
  88. if (domain->pci_rc == rc)
  89. return domain;
  90. return NULL;
  91. }
  92. static struct dca_domain *dca_get_domain(struct device *dev)
  93. {
  94. struct pci_bus *rc;
  95. struct dca_domain *domain;
  96. rc = dca_pci_rc_from_dev(dev);
  97. domain = dca_find_domain(rc);
  98. if (!domain) {
  99. if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
  100. dca_providers_blocked = 1;
  101. }
  102. return domain;
  103. }
  104. static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
  105. {
  106. struct dca_provider *dca;
  107. struct pci_bus *rc;
  108. struct dca_domain *domain;
  109. if (dev) {
  110. rc = dca_pci_rc_from_dev(dev);
  111. domain = dca_find_domain(rc);
  112. if (!domain)
  113. return NULL;
  114. } else {
  115. if (!list_empty(&dca_domains))
  116. domain = list_first_entry(&dca_domains,
  117. struct dca_domain,
  118. node);
  119. else
  120. return NULL;
  121. }
  122. list_for_each_entry(dca, &domain->dca_providers, node)
  123. if ((!dev) || (dca->ops->dev_managed(dca, dev)))
  124. return dca;
  125. return NULL;
  126. }
/**
 * dca_add_requester - add a dca client to the list
 * @dev: the device that wants dca service
 *
 * Return: 0 on success; -EFAULT if @dev is NULL; -EEXIST if the device
 * already has a provider; -ENODEV if no domain exists or no provider
 * accepts the device; otherwise the error from dca_sysfs_add_req().
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* Offer the device to each provider until one assigns a slot. */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* Sysfs work happens unlocked; roll back the slot if it fails. */
	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Re-lookup: the provider may have vanished meanwhile. */
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
  171. EXPORT_SYMBOL_GPL(dca_add_requester);
  172. /**
  173. * dca_remove_requester - remove a dca client from the list
  174. * @dev - the device that wants dca service
  175. */
  176. int dca_remove_requester(struct device *dev)
  177. {
  178. struct dca_provider *dca;
  179. int slot;
  180. unsigned long flags;
  181. if (!dev)
  182. return -EFAULT;
  183. raw_spin_lock_irqsave(&dca_lock, flags);
  184. dca = dca_find_provider_by_dev(dev);
  185. if (!dca) {
  186. raw_spin_unlock_irqrestore(&dca_lock, flags);
  187. return -ENODEV;
  188. }
  189. slot = dca->ops->remove_requester(dca, dev);
  190. raw_spin_unlock_irqrestore(&dca_lock, flags);
  191. if (slot < 0)
  192. return slot;
  193. dca_sysfs_remove_req(dca, slot);
  194. return 0;
  195. }
  196. EXPORT_SYMBOL_GPL(dca_remove_requester);
/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev: the device that wants dca service (NULL for the old api)
 * @cpu: the cpuid as returned by get_cpu()
 *
 * NOTE(review): on failure -ENODEV is funneled through the u8 return
 * type and truncates to 0xed, which a caller cannot distinguish from a
 * valid tag - confirm callers are aware of this.
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	/* The provider computes the tag while dca_lock pins it in place. */
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}
/**
 * dca3_get_tag - return the dca tag to the requester device
 * for the given cpu (new api)
 * @dev: the device that wants dca service
 * @cpu: the cpuid as returned by get_cpu()
 *
 * NOTE(review): -EFAULT is returned through a u8 and truncates to 0xf2,
 * indistinguishable from a valid tag - verify callers never pass NULL.
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
  229. EXPORT_SYMBOL_GPL(dca3_get_tag);
/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu: the cpuid as returned by get_cpu()
 *
 * Uses the first provider of the first domain (the dev == NULL path of
 * dca_find_provider_by_dev()).
 */
u8 dca_get_tag(int cpu)
{
	return dca_common_get_tag(NULL, cpu);
}
  238. EXPORT_SYMBOL_GPL(dca_get_tag);
  239. /**
  240. * alloc_dca_provider - get data struct for describing a dca provider
  241. * @ops - pointer to struct of dca operation function pointers
  242. * @priv_size - size of extra mem to be added for provider's needs
  243. */
  244. struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
  245. int priv_size)
  246. {
  247. struct dca_provider *dca;
  248. int alloc_size;
  249. alloc_size = (sizeof(*dca) + priv_size);
  250. dca = kzalloc(alloc_size, GFP_KERNEL);
  251. if (!dca)
  252. return NULL;
  253. dca->ops = ops;
  254. return dca;
  255. }
  256. EXPORT_SYMBOL_GPL(alloc_dca_provider);
/**
 * free_dca_provider - release the dca provider data struct
 * @dca: struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
  266. EXPORT_SYMBOL_GPL(free_dca_provider);
/**
 * register_dca_provider - register a dca provider
 * @dca: struct created by alloc_dca_provider()
 * @dev: device providing dca services
 *
 * Return: 0 on success; -ENODEV if providers are blocked or the domain
 * cannot be allocated; otherwise the error from dca_sysfs_add_provider().
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* Sysfs registration is done without holding the raw spinlock. */
	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		/*
		 * dca_get_domain() may have just set dca_providers_blocked
		 * (ioat ver 3.0 device alongside existing domains); tear
		 * everything down in that case.
		 */
		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		/* Allocate the new domain with the lock dropped. */
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);

		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	/* If we lost the race, newdomain is unused; kfree(NULL) is a no-op. */
	kfree(newdomain);
	return 0;
}
  317. EXPORT_SYMBOL_GPL(register_dca_provider);
  318. /**
  319. * unregister_dca_provider - remove a dca provider
  320. * @dca - struct created by alloc_dca_provider()
  321. */
  322. void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
  323. {
  324. unsigned long flags;
  325. struct pci_bus *pci_rc;
  326. struct dca_domain *domain;
  327. blocking_notifier_call_chain(&dca_provider_chain,
  328. DCA_PROVIDER_REMOVE, NULL);
  329. raw_spin_lock_irqsave(&dca_lock, flags);
  330. if (list_empty(&dca_domains)) {
  331. raw_spin_unlock_irqrestore(&dca_lock, flags);
  332. return;
  333. }
  334. list_del(&dca->node);
  335. pci_rc = dca_pci_rc_from_dev(dev);
  336. domain = dca_find_domain(pci_rc);
  337. if (list_empty(&domain->dca_providers))
  338. dca_free_domain(domain);
  339. raw_spin_unlock_irqrestore(&dca_lock, flags);
  340. dca_sysfs_remove_provider(dca);
  341. }
  342. EXPORT_SYMBOL_GPL(unregister_dca_provider);
/**
 * dca_register_notify - register a client's notifier callback
 * @nb: notifier block; invoked with DCA_PROVIDER_ADD/REMOVE events
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
  350. EXPORT_SYMBOL_GPL(dca_register_notify);
/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb: notifier block previously passed to dca_register_notify()
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
  358. EXPORT_SYMBOL_GPL(dca_unregister_notify);
/* Module init: announce the service and set up the sysfs class. */
static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}
/* Module exit: tear down the sysfs class created by dca_sysfs_init(). */
static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}
  368. arch_initcall(dca_init);
  369. module_exit(dca_exit);