pci-epc-core.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * PCI Endpoint *Controller* (EPC) library
  4. *
  5. * Copyright (C) 2017 Texas Instruments
  6. * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7. */
  8. #include <linux/device.h>
  9. #include <linux/slab.h>
  10. #include <linux/module.h>
  11. #include <linux/pci-epc.h>
  12. #include <linux/pci-epf.h>
  13. #include <linux/pci-ep-cfs.h>
/* Device class all EPC devices register under (/sys/class/pci_epc). */
static const struct class pci_epc_class = {
	.name = "pci_epc",
};
  17. static void devm_pci_epc_release(struct device *dev, void *res)
  18. {
  19. struct pci_epc *epc = *(struct pci_epc **)res;
  20. pci_epc_destroy(epc);
  21. }
  22. static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
  23. {
  24. struct pci_epc **epc = res;
  25. return *epc == match_data;
  26. }
/**
 * pci_epc_put() - release the PCI endpoint controller
 * @epc: epc returned by pci_epc_get()
 *
 * Release the references the caller obtained by invoking pci_epc_get():
 * the module reference on the EPC driver and the device reference on the
 * EPC device. No-op for NULL or ERR_PTR values.
 */
void pci_epc_put(struct pci_epc *epc)
{
	if (IS_ERR_OR_NULL(epc))
		return;

	/* Mirror of pci_epc_get(): drop the module ref, then the device ref. */
	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);
/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Walk the pci_epc class looking for a device whose name matches @epc_name
 * and return the corresponding struct pci_epc *, holding a reference on both
 * the EPC driver module and the EPC device. Returns ERR_PTR(-EINVAL) when no
 * controller with that name exists or its module is going away. The caller
 * must release the references with pci_epc_put().
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, &pci_epc_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (strcmp(epc_name, dev_name(dev)))
			continue;

		epc = to_pci_epc(dev);
		/* Pin the controller driver before handing the EPC out. */
		if (!try_module_get(epc->ops->owner)) {
			ret = -EINVAL;
			goto err;
		}

		/* Exit the iterator before taking the extra device ref. */
		class_dev_iter_exit(&iter);
		get_device(&epc->dev);
		return epc;
	}

err:
	class_dev_iter_exit(&iter);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 *
 * Convenience wrapper around pci_epc_get_next_free_bar() that starts the
 * search at BAR_0. Returns the first BAR usable by an endpoint function,
 * or NO_BAR when every BAR is reserved.
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
  85. /**
  86. * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
  87. * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
  88. * @bar: the starting BAR number from where unreserved BAR should be searched
  89. *
  90. * Invoke to get the next unreserved BAR starting from @bar that can be used
  91. * for endpoint function.
  92. */
  93. enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
  94. *epc_features, enum pci_barno bar)
  95. {
  96. int i;
  97. if (!epc_features)
  98. return BAR_0;
  99. /* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
  100. if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
  101. bar++;
  102. for (i = bar; i < PCI_STD_NUM_BARS; i++) {
  103. /* If the BAR is not reserved, return it. */
  104. if (epc_features->bar[i].type != BAR_RESERVED)
  105. return i;
  106. }
  107. return NO_BAR;
  108. }
  109. EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
  110. /**
  111. * pci_epc_get_features() - get the features supported by EPC
  112. * @epc: the features supported by *this* EPC device will be returned
  113. * @func_no: the features supported by the EPC device specific to the
  114. * endpoint function with func_no will be returned
  115. * @vfunc_no: the features supported by the EPC device specific to the
  116. * virtual endpoint function with vfunc_no will be returned
  117. *
  118. * Invoke to get the features provided by the EPC which may be
  119. * specific to an endpoint function. Returns pci_epc_features on success
  120. * and NULL for any failures.
  121. */
  122. const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
  123. u8 func_no, u8 vfunc_no)
  124. {
  125. const struct pci_epc_features *epc_features;
  126. if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
  127. return NULL;
  128. if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
  129. return NULL;
  130. if (!epc->ops->get_features)
  131. return NULL;
  132. mutex_lock(&epc->lock);
  133. epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
  134. mutex_unlock(&epc->lock);
  135. return epc_features;
  136. }
  137. EXPORT_SYMBOL_GPL(pci_epc_get_features);
  138. /**
  139. * pci_epc_stop() - stop the PCI link
  140. * @epc: the link of the EPC device that has to be stopped
  141. *
  142. * Invoke to stop the PCI link
  143. */
  144. void pci_epc_stop(struct pci_epc *epc)
  145. {
  146. if (IS_ERR(epc) || !epc->ops->stop)
  147. return;
  148. mutex_lock(&epc->lock);
  149. epc->ops->stop(epc);
  150. mutex_unlock(&epc->lock);
  151. }
  152. EXPORT_SYMBOL_GPL(pci_epc_stop);
  153. /**
  154. * pci_epc_start() - start the PCI link
  155. * @epc: the link of *this* EPC device has to be started
  156. *
  157. * Invoke to start the PCI link
  158. */
  159. int pci_epc_start(struct pci_epc *epc)
  160. {
  161. int ret;
  162. if (IS_ERR(epc))
  163. return -EINVAL;
  164. if (!epc->ops->start)
  165. return 0;
  166. mutex_lock(&epc->lock);
  167. ret = epc->ops->start(epc);
  168. mutex_unlock(&epc->lock);
  169. return ret;
  170. }
  171. EXPORT_SYMBOL_GPL(pci_epc_start);
  172. /**
  173. * pci_epc_raise_irq() - interrupt the host system
  174. * @epc: the EPC device which has to interrupt the host
  175. * @func_no: the physical endpoint function number in the EPC device
  176. * @vfunc_no: the virtual endpoint function number in the physical function
  177. * @type: specify the type of interrupt; INTX, MSI or MSI-X
  178. * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
  179. *
  180. * Invoke to raise an INTX, MSI or MSI-X interrupt
  181. */
  182. int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
  183. unsigned int type, u16 interrupt_num)
  184. {
  185. int ret;
  186. if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
  187. return -EINVAL;
  188. if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
  189. return -EINVAL;
  190. if (!epc->ops->raise_irq)
  191. return 0;
  192. mutex_lock(&epc->lock);
  193. ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
  194. mutex_unlock(&epc->lock);
  195. return ret;
  196. }
  197. EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
  198. /**
  199. * pci_epc_map_msi_irq() - Map physical address to MSI address and return
  200. * MSI data
  201. * @epc: the EPC device which has the MSI capability
  202. * @func_no: the physical endpoint function number in the EPC device
  203. * @vfunc_no: the virtual endpoint function number in the physical function
  204. * @phys_addr: the physical address of the outbound region
  205. * @interrupt_num: the MSI interrupt number with range (1-N)
  206. * @entry_size: Size of Outbound address region for each interrupt
  207. * @msi_data: the data that should be written in order to raise MSI interrupt
  208. * with interrupt number as 'interrupt num'
  209. * @msi_addr_offset: Offset of MSI address from the aligned outbound address
  210. * to which the MSI address is mapped
  211. *
  212. * Invoke to map physical address to MSI address and return MSI data. The
  213. * physical address should be an address in the outbound region. This is
  214. * required to implement doorbell functionality of NTB wherein EPC on either
  215. * side of the interface (primary and secondary) can directly write to the
  216. * physical address (in outbound region) of the other interface to ring
  217. * doorbell.
  218. */
  219. int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
  220. phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
  221. u32 *msi_data, u32 *msi_addr_offset)
  222. {
  223. int ret;
  224. if (IS_ERR_OR_NULL(epc))
  225. return -EINVAL;
  226. if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
  227. return -EINVAL;
  228. if (!epc->ops->map_msi_irq)
  229. return -EINVAL;
  230. mutex_lock(&epc->lock);
  231. ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
  232. interrupt_num, entry_size, msi_data,
  233. msi_addr_offset);
  234. mutex_unlock(&epc->lock);
  235. return ret;
  236. }
  237. EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
  238. /**
  239. * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
  240. * @epc: the EPC device to which MSI interrupts was requested
  241. * @func_no: the physical endpoint function number in the EPC device
  242. * @vfunc_no: the virtual endpoint function number in the physical function
  243. *
  244. * Invoke to get the number of MSI interrupts allocated by the RC
  245. */
  246. int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
  247. {
  248. int interrupt;
  249. if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
  250. return 0;
  251. if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
  252. return 0;
  253. if (!epc->ops->get_msi)
  254. return 0;
  255. mutex_lock(&epc->lock);
  256. interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
  257. mutex_unlock(&epc->lock);
  258. if (interrupt < 0)
  259. return 0;
  260. interrupt = 1 << interrupt;
  261. return interrupt;
  262. }
  263. EXPORT_SYMBOL_GPL(pci_epc_get_msi);
  264. /**
  265. * pci_epc_set_msi() - set the number of MSI interrupt numbers required
  266. * @epc: the EPC device on which MSI has to be configured
  267. * @func_no: the physical endpoint function number in the EPC device
  268. * @vfunc_no: the virtual endpoint function number in the physical function
  269. * @interrupts: number of MSI interrupts required by the EPF
  270. *
  271. * Invoke to set the required number of MSI interrupts.
  272. */
  273. int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
  274. {
  275. int ret;
  276. u8 encode_int;
  277. if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
  278. interrupts < 1 || interrupts > 32)
  279. return -EINVAL;
  280. if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
  281. return -EINVAL;
  282. if (!epc->ops->set_msi)
  283. return 0;
  284. encode_int = order_base_2(interrupts);
  285. mutex_lock(&epc->lock);
  286. ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
  287. mutex_unlock(&epc->lock);
  288. return ret;
  289. }
  290. EXPORT_SYMBOL_GPL(pci_epc_set_msi);
  291. /**
  292. * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
  293. * @epc: the EPC device to which MSI-X interrupts was requested
  294. * @func_no: the physical endpoint function number in the EPC device
  295. * @vfunc_no: the virtual endpoint function number in the physical function
  296. *
  297. * Invoke to get the number of MSI-X interrupts allocated by the RC
  298. */
  299. int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
  300. {
  301. int interrupt;
  302. if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
  303. return 0;
  304. if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
  305. return 0;
  306. if (!epc->ops->get_msix)
  307. return 0;
  308. mutex_lock(&epc->lock);
  309. interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
  310. mutex_unlock(&epc->lock);
  311. if (interrupt < 0)
  312. return 0;
  313. return interrupt + 1;
  314. }
  315. EXPORT_SYMBOL_GPL(pci_epc_get_msix);
  316. /**
  317. * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
  318. * @epc: the EPC device on which MSI-X has to be configured
  319. * @func_no: the physical endpoint function number in the EPC device
  320. * @vfunc_no: the virtual endpoint function number in the physical function
  321. * @interrupts: number of MSI-X interrupts required by the EPF
  322. * @bir: BAR where the MSI-X table resides
  323. * @offset: Offset pointing to the start of MSI-X table
  324. *
  325. * Invoke to set the required number of MSI-X interrupts.
  326. */
  327. int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
  328. u16 interrupts, enum pci_barno bir, u32 offset)
  329. {
  330. int ret;
  331. if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
  332. interrupts < 1 || interrupts > 2048)
  333. return -EINVAL;
  334. if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
  335. return -EINVAL;
  336. if (!epc->ops->set_msix)
  337. return 0;
  338. mutex_lock(&epc->lock);
  339. ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
  340. offset);
  341. mutex_unlock(&epc->lock);
  342. return ret;
  343. }
  344. EXPORT_SYMBOL_GPL(pci_epc_set_msix);
  345. /**
  346. * pci_epc_unmap_addr() - unmap CPU address from PCI address
  347. * @epc: the EPC device on which address is allocated
  348. * @func_no: the physical endpoint function number in the EPC device
  349. * @vfunc_no: the virtual endpoint function number in the physical function
  350. * @phys_addr: physical address of the local system
  351. *
  352. * Invoke to unmap the CPU address from PCI address.
  353. */
  354. void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
  355. phys_addr_t phys_addr)
  356. {
  357. if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
  358. return;
  359. if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
  360. return;
  361. if (!epc->ops->unmap_addr)
  362. return;
  363. mutex_lock(&epc->lock);
  364. epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
  365. mutex_unlock(&epc->lock);
  366. }
  367. EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
  368. /**
  369. * pci_epc_map_addr() - map CPU address to PCI address
  370. * @epc: the EPC device on which address is allocated
  371. * @func_no: the physical endpoint function number in the EPC device
  372. * @vfunc_no: the virtual endpoint function number in the physical function
  373. * @phys_addr: physical address of the local system
  374. * @pci_addr: PCI address to which the physical address should be mapped
  375. * @size: the size of the allocation
  376. *
  377. * Invoke to map CPU address with PCI address.
  378. */
  379. int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
  380. phys_addr_t phys_addr, u64 pci_addr, size_t size)
  381. {
  382. int ret;
  383. if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
  384. return -EINVAL;
  385. if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
  386. return -EINVAL;
  387. if (!epc->ops->map_addr)
  388. return 0;
  389. mutex_lock(&epc->lock);
  390. ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
  391. size);
  392. mutex_unlock(&epc->lock);
  393. return ret;
  394. }
  395. EXPORT_SYMBOL_GPL(pci_epc_map_addr);
/**
 * pci_epc_clear_bar() - reset the BAR
 * @epc: the EPC device for which the BAR has to be cleared
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to reset the BAR of the endpoint device.
 */
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epf_bar *epf_bar)
{
	/*
	 * Reject a 64-bit BAR_5: a 64-bit BAR consumes two registers and
	 * there is no BAR_6 to hold the upper half.
	 */
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
		return;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return;

	if (!epc->ops->clear_bar)
		return;

	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	int ret;
	int flags = epf_bar->flags;

	/*
	 * Reject:
	 *  - a 64-bit BAR_5 (needs two registers, there is no BAR_6),
	 *  - an I/O BAR with memory-BAR bits set in the low flag bits,
	 *  - a size above 4 GiB on a BAR not flagged as 64-bit.
	 */
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
/**
 * pci_epc_write_header() - write standard configuration header
 * @epc: the EPC device to which the configuration header should be written
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @header: standard configuration header fields
 *
 * Invoke to write the configuration header to the endpoint controller. Every
 * endpoint controller will have a dedicated location to which the standard
 * configuration header would be written. The callback function should write
 * the header fields to this dedicated location.
 */
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			 struct pci_epf_header *header)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	/* Only Virtual Function #1 has deviceID */
	if (vfunc_no > 1)
		return -EINVAL;

	if (!epc->ops->write_header)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_write_header);
  484. /**
  485. * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
  486. * @epc: the EPC device to which the endpoint function should be added
  487. * @epf: the endpoint function to be added
  488. * @type: Identifies if the EPC is connected to the primary or secondary
  489. * interface of EPF
  490. *
  491. * A PCI endpoint device can have one or more functions. In the case of PCIe,
  492. * the specification allows up to 8 PCIe endpoint functions. Invoke
  493. * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
  494. */
  495. int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
  496. enum pci_epc_interface_type type)
  497. {
  498. struct list_head *list;
  499. u32 func_no;
  500. int ret = 0;
  501. if (IS_ERR_OR_NULL(epc) || epf->is_vf)
  502. return -EINVAL;
  503. if (type == PRIMARY_INTERFACE && epf->epc)
  504. return -EBUSY;
  505. if (type == SECONDARY_INTERFACE && epf->sec_epc)
  506. return -EBUSY;
  507. mutex_lock(&epc->list_lock);
  508. func_no = find_first_zero_bit(&epc->function_num_map,
  509. BITS_PER_LONG);
  510. if (func_no >= BITS_PER_LONG) {
  511. ret = -EINVAL;
  512. goto ret;
  513. }
  514. if (func_no > epc->max_functions - 1) {
  515. dev_err(&epc->dev, "Exceeding max supported Function Number\n");
  516. ret = -EINVAL;
  517. goto ret;
  518. }
  519. set_bit(func_no, &epc->function_num_map);
  520. if (type == PRIMARY_INTERFACE) {
  521. epf->func_no = func_no;
  522. epf->epc = epc;
  523. list = &epf->list;
  524. } else {
  525. epf->sec_epc_func_no = func_no;
  526. epf->sec_epc = epc;
  527. list = &epf->sec_epc_list;
  528. }
  529. list_add_tail(list, &epc->pci_epf);
  530. ret:
  531. mutex_unlock(&epc->list_lock);
  532. return ret;
  533. }
  534. EXPORT_SYMBOL_GPL(pci_epc_add_epf);
  535. /**
  536. * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
  537. * @epc: the EPC device from which the endpoint function should be removed
  538. * @epf: the endpoint function to be removed
  539. * @type: identifies if the EPC is connected to the primary or secondary
  540. * interface of EPF
  541. *
  542. * Invoke to remove PCI endpoint function from the endpoint controller.
  543. */
  544. void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
  545. enum pci_epc_interface_type type)
  546. {
  547. struct list_head *list;
  548. u32 func_no = 0;
  549. if (IS_ERR_OR_NULL(epc) || !epf)
  550. return;
  551. mutex_lock(&epc->list_lock);
  552. if (type == PRIMARY_INTERFACE) {
  553. func_no = epf->func_no;
  554. list = &epf->list;
  555. epf->epc = NULL;
  556. } else {
  557. func_no = epf->sec_epc_func_no;
  558. list = &epf->sec_epc_list;
  559. epf->sec_epc = NULL;
  560. }
  561. clear_bit(func_no, &epc->function_num_map);
  562. list_del(list);
  563. mutex_unlock(&epc->list_lock);
  564. }
  565. EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 * connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to notify each EPF device bound to this EPC that the EPC device has
 * established a connection with the Root Complex. Each EPF's link_up()
 * event callback, if any, is invoked under that EPF's lock while holding
 * the EPC list lock.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
/**
 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
 * connection with the Root Complex.
 * @epc: the EPC device which has dropped the link with the host
 *
 * Invoke to notify each EPF device bound to this EPC that the EPC device has
 * dropped the connection with the Root Complex. Each EPF's link_down()
 * event callback, if any, is invoked under that EPF's lock while holding
 * the EPC list lock.
 */
void pci_epc_linkdown(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_down)
			epf->event_ops->link_down(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkdown);
/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device initialization
 * is completed.
 * @epc: the EPC device whose initialization is completed
 *
 * Invoke to notify each bound EPF device, via its epc_init() event callback,
 * that the EPC device's initialization is completed. Also records the
 * completion in epc->init_complete so EPFs that bind later can be notified
 * through pci_epc_notify_pending_init().
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_init)
			epf->event_ops->epc_init(epf);
		mutex_unlock(&epf->lock);
	}
	/* Remember completion for EPF drivers that bind after this point. */
	epc->init_complete = true;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
  636. /**
  637. * pci_epc_notify_pending_init() - Notify the pending EPC device initialization
  638. * complete to the EPF device
  639. * @epc: the EPC device whose initialization is pending to be notified
  640. * @epf: the EPF device to be notified
  641. *
  642. * Invoke to notify the pending EPC device initialization complete to the EPF
  643. * device. This is used to deliver the notification if the EPC initialization
  644. * got completed before the EPF driver bind.
  645. */
  646. void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
  647. {
  648. if (epc->init_complete) {
  649. mutex_lock(&epf->lock);
  650. if (epf->event_ops && epf->event_ops->epc_init)
  651. epf->event_ops->epc_init(epf);
  652. mutex_unlock(&epf->lock);
  653. }
  654. }
  655. EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);
/**
 * pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
 * @epc: the EPC device whose deinitialization is completed
 *
 * Invoke to notify each bound EPF device, via its epc_deinit() event
 * callback, that the EPC deinitialization is completed. Clears
 * epc->init_complete so later-binding EPFs are not told the EPC is ready.
 */
void pci_epc_deinit_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_deinit)
			epf->event_ops->epc_deinit(epf);
		mutex_unlock(&epf->lock);
	}
	/* The EPC is no longer initialized for EPFs binding after this. */
	epc->init_complete = false;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);
/**
 * pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
 * device has received the Bus Master
 * Enable event from the Root complex
 * @epc: the EPC device that received the Bus Master Enable event
 *
 * Notify each bound EPF device, via its bus_master_enable() event callback,
 * that the EPC device has generated the Bus Master Enable event due to the
 * host setting the Bus Master Enable bit in the Command register.
 */
void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->bus_master_enable)
			epf->event_ops->bus_master_enable(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);
/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device: removes the configfs group,
 * releases the PCI domain number (when generic PCI domains are in use),
 * and unregisters the device, dropping its reference.
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	pci_ep_cfs_remove_epc_group(epc->group);

#ifdef CONFIG_PCI_DOMAINS_GENERIC
	/* Give back the domain number taken in __pci_epc_create(). */
	pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr);
#endif

	device_unregister(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);
  717. /**
  718. * devm_pci_epc_destroy() - destroy the EPC device
  719. * @dev: device that wants to destroy the EPC
  720. * @epc: the EPC device that has to be destroyed
  721. *
  722. * Invoke to destroy the devres associated with this
  723. * pci_epc and destroy the EPC device.
  724. */
  725. void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
  726. {
  727. int r;
  728. r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match,
  729. epc);
  730. dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
  731. }
  732. EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
  733. static void pci_epc_release(struct device *dev)
  734. {
  735. kfree(to_pci_epc(dev));
  736. }
/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class. Returns
 * the new EPC on success, an ERR_PTR on failure.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	/* After device_initialize(), cleanup must go through put_device(). */
	device_initialize(&epc->dev);
	epc->dev.class = &pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	epc->ops = ops;

#ifdef CONFIG_PCI_DOMAINS_GENERIC
	epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
#else
	/*
	 * TODO: If the architecture doesn't support generic PCI
	 * domains, then a custom implementation has to be used.
	 */
	WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
#endif

	/* The EPC device inherits the parent device's name. */
	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	/* Drops the initial ref; pci_epc_release() frees the allocation. */
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);
  791. /**
  792. * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
  793. * @dev: device that is creating the new EPC
  794. * @ops: function pointers for performing EPC operations
  795. * @owner: the owner of the module that creates the EPC device
  796. *
  797. * Invoke to create a new EPC device and add it to pci_epc class.
  798. * While at that, it also associates the device with the pci_epc using devres.
  799. * On driver detach, release function is invoked on the devres data,
  800. * then, devres data is freed.
  801. */
  802. struct pci_epc *
  803. __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
  804. struct module *owner)
  805. {
  806. struct pci_epc **ptr, *epc;
  807. ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
  808. if (!ptr)
  809. return ERR_PTR(-ENOMEM);
  810. epc = __pci_epc_create(dev, ops, owner);
  811. if (!IS_ERR(epc)) {
  812. *ptr = epc;
  813. devres_add(dev, ptr);
  814. } else {
  815. devres_free(ptr);
  816. }
  817. return epc;
  818. }
  819. EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
/* Module init: register the pci_epc device class with the driver core. */
static int __init pci_epc_init(void)
{
	return class_register(&pci_epc_class);
}
module_init(pci_epc_init);
/* Module exit: unregister the pci_epc device class. */
static void __exit pci_epc_exit(void)
{
	class_unregister(&pci_epc_class);
}
module_exit(pci_epc_exit);
  830. MODULE_DESCRIPTION("PCI EPC Library");
  831. MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");