snet_main.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * SolidRun DPU driver for control plane
 *
 * Copyright (C) 2022-2023 SolidRun
 *
 * Author: Alvaro Karsz <alvaro.karsz@solid-run.com>
 *
 */
#include <linux/iopoll.h>

#include "snet_vdpa.h"

/* SNET DPU device ID */
#define SNET_DEVICE_ID 0x1000
/* SNET signature */
#define SNET_SIGNATURE 0xD0D06363
/* Max. config version that we can work with */
#define SNET_CFG_VERSION 0x2
/* Queue align */
#define SNET_QUEUE_ALIGNMENT PAGE_SIZE
/* Kick value to notify that new data is available */
#define SNET_KICK_VAL 0x1
#define SNET_CONFIG_OFF 0x0
/* How long we are willing to wait for a SNET device */
#define SNET_DETECT_TIMEOUT 5000000
/* How long should we wait for the DPU to read our config */
#define SNET_READ_CFG_TIMEOUT 3000000
/* Size of configs written to the DPU */
#define SNET_GENERAL_CFG_LEN 36
#define SNET_GENERAL_CFG_VQ_LEN 40
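
/*
 * These sizes match the layout written in snet_write_conf(): the general
 * part is 9 dwords (magic, config version, SNET SID, number of queues,
 * config IRQ index, 64-bit features and 8 reserved bytes = 36 bytes), and
 * each VQ entry is 10 dwords (SID + queue size, IRQ index, three 64-bit
 * area addresses, VQ state and 4 reserved bytes = 40 bytes).
 */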

static struct snet *vdpa_to_snet(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct snet, vdpa);
}

static irqreturn_t snet_cfg_irq_hndlr(int irq, void *data)
{
	struct snet *snet = data;

	/* Call callback if any */
	if (likely(snet->cb.callback))
		return snet->cb.callback(snet->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t snet_vq_irq_hndlr(int irq, void *data)
{
	struct snet_vq *vq = data;

	/* Call callback if any */
	if (likely(vq->cb.callback))
		return vq->cb.callback(vq->cb.private);

	return IRQ_HANDLED;
}

static void snet_free_irqs(struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev;
	u32 i;

	/* Which device allocated the IRQs? */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pdev = snet->pdev->physfn;
	else
		pdev = snet->pdev;

	/* Free config's IRQ */
	if (snet->cfg_irq != -1) {
		devm_free_irq(&pdev->dev, snet->cfg_irq, snet);
		snet->cfg_irq = -1;
	}
	/* Free VQ IRQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (snet->vqs[i] && snet->vqs[i]->irq != -1) {
			devm_free_irq(&pdev->dev, snet->vqs[i]->irq, snet->vqs[i]);
			snet->vqs[i]->irq = -1;
		}
	}

	/* IRQ vectors are freed when the PCI remove callback is called */
}

static int snet_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
			       u64 driver_area, u64 device_area)
{
	struct snet *snet = vdpa_to_snet(vdev);

	/* Save the received parameters in the VQ struct */
	snet->vqs[idx]->desc_area = desc_area;
	snet->vqs[idx]->driver_area = driver_area;
	snet->vqs[idx]->device_area = device_area;

	return 0;
}

static void snet_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
{
	struct snet *snet = vdpa_to_snet(vdev);

	/* save num in vqueue */
	snet->vqs[idx]->num = num;
}

static void snet_kick_vq(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);

	/* not ready - ignore */
	if (unlikely(!snet->vqs[idx]->ready))
		return;

	iowrite32(SNET_KICK_VAL, snet->vqs[idx]->kick_ptr);
}
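
/*
 * A kick with data encodes the target VQ in the lower 16 bits of @data;
 * the upper 16 bits are forwarded to the DPU in the upper half of the
 * 32-bit kick write, with SNET_KICK_VAL in the lower half.
 */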
static void snet_kick_vq_with_data(struct vdpa_device *vdev, u32 data)
{
	struct snet *snet = vdpa_to_snet(vdev);
	u16 idx = data & 0xFFFF;

	/* not ready - ignore */
	if (unlikely(!snet->vqs[idx]->ready))
		return;

	iowrite32((data & 0xFFFF0000) | SNET_KICK_VAL, snet->vqs[idx]->kick_ptr);
}

static void snet_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->vqs[idx]->cb.callback = cb->callback;
	snet->vqs[idx]->cb.private = cb->private;
}

static void snet_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->vqs[idx]->ready = ready;
}

static bool snet_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->vqs[idx]->ready;
}
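
/*
 * Config versions below 2 cannot program an arbitrary VQ state into the
 * DPU, so snet_set_vq_state() only succeeds there if the requested state
 * equals the initial (post-reset) state the DPU already uses.
 */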
static bool snet_vq_state_is_initial(struct snet *snet, const struct vdpa_vq_state *state)
{
	if (SNET_HAS_FEATURE(snet, VIRTIO_F_RING_PACKED)) {
		const struct vdpa_vq_state_packed *p = &state->packed;

		if (p->last_avail_counter == 1 && p->last_used_counter == 1 &&
		    p->last_avail_idx == 0 && p->last_used_idx == 0)
			return true;
	} else {
		const struct vdpa_vq_state_split *s = &state->split;

		if (s->avail_index == 0)
			return true;
	}

	return false;
}

static int snet_set_vq_state(struct vdpa_device *vdev, u16 idx, const struct vdpa_vq_state *state)
{
	struct snet *snet = vdpa_to_snet(vdev);

	/* We can set any state for config version 2+ */
	if (SNET_CFG_VER(snet, 2)) {
		memcpy(&snet->vqs[idx]->vq_state, state, sizeof(*state));
		return 0;
	}

	/* Older config - we can't set the VQ state.
	 * Return 0 only if this is the initial state we use in the DPU.
	 */
	if (snet_vq_state_is_initial(snet, state))
		return 0;

	return -EOPNOTSUPP;
}

static int snet_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet_read_vq_state(snet, idx, state);
}

static int snet_get_vq_irq(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->vqs[idx]->irq;
}

static u32 snet_get_vq_align(struct vdpa_device *vdev)
{
	return (u32)SNET_QUEUE_ALIGNMENT;
}

static int snet_reset_dev(struct snet *snet)
{
	struct pci_dev *pdev = snet->pdev;
	int ret = 0;
	u32 i;

	/* If status is 0, nothing to do */
	if (!snet->status)
		return 0;

	/* If DPU started, destroy it */
	if (snet->status & VIRTIO_CONFIG_S_DRIVER_OK)
		ret = snet_destroy_dev(snet);

	/* Clear VQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (!snet->vqs[i])
			continue;
		snet->vqs[i]->cb.callback = NULL;
		snet->vqs[i]->cb.private = NULL;
		snet->vqs[i]->desc_area = 0;
		snet->vqs[i]->device_area = 0;
		snet->vqs[i]->driver_area = 0;
		snet->vqs[i]->ready = false;
	}

	/* Clear config callback */
	snet->cb.callback = NULL;
	snet->cb.private = NULL;
	/* Free IRQs */
	snet_free_irqs(snet);
	/* Reset status */
	snet->status = 0;
	snet->dpu_ready = false;

	if (ret)
		SNET_WARN(pdev, "Incomplete reset to SNET[%u] device, err: %d\n", snet->sid, ret);
	else
		SNET_DBG(pdev, "Reset SNET[%u] device\n", snet->sid);

	return 0;
}

static int snet_reset(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet_reset_dev(snet);
}

static size_t snet_get_config_size(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return (size_t)snet->cfg->cfg_size;
}

static u64 snet_get_features(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->cfg->features;
}

static int snet_set_drv_features(struct vdpa_device *vdev, u64 features)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->negotiated_features = snet->cfg->features & features;
	return 0;
}

static u64 snet_get_drv_features(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->negotiated_features;
}

static u16 snet_get_vq_num_max(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return (u16)snet->cfg->vq_size;
}

static void snet_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->cb.callback = cb->callback;
	snet->cb.private = cb->private;
}

static u32 snet_get_device_id(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->cfg->virtio_id;
}

static u32 snet_get_vendor_id(struct vdpa_device *vdev)
{
	return (u32)PCI_VENDOR_ID_SOLIDRUN;
}

static u8 snet_get_status(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->status;
}
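
/*
 * Write the host config into the DPU's BAR. The signature (magic number)
 * is written last, as it signals the DPU that the data is ready; the DPU
 * then ACKs by clearing the signature, which we poll for below.
 */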
static bool snet_write_conf(struct snet *snet)
{
	u32 off, i, tmp;
	int ret;

	/* No need to write the config twice */
	if (snet->dpu_ready)
		return true;

	/* SNET data:
	 *
	 * General data: SNET_GENERAL_CFG_LEN bytes long
	 * 0              0x4       0x8        0xC                0x10      0x14       0x1C   0x24
	 * | MAGIC NUMBER | CFG VER | SNET SID | NUMBER OF QUEUES | IRQ IDX | FEATURES | RSVD |
	 *
	 * For every VQ: SNET_GENERAL_CFG_VQ_LEN bytes long
	 * 0                       0x4         0x8
	 * | VQ SID AND QUEUE SIZE | IRQ Index |
	 * |             DESC AREA             |
	 * |            DEVICE AREA            |
	 * |            DRIVER AREA            |
	 * |  VQ STATE (CFG 2+)    |   RSVD    |
	 *
	 * The magic number should be written last; this is the DPU's indication
	 * that the data is ready.
	 */

	/* Init offset */
	off = snet->psnet->cfg.host_cfg_off;

	/* Ignore magic number for now */
	off += 4;
	snet_write32(snet, off, snet->psnet->negotiated_cfg_ver);
	off += 4;
	snet_write32(snet, off, snet->sid);
	off += 4;
	snet_write32(snet, off, snet->cfg->vq_num);
	off += 4;
	snet_write32(snet, off, snet->cfg_irq_idx);
	off += 4;
	snet_write64(snet, off, snet->negotiated_features);
	off += 8;
	/* Ignore reserved */
	off += 8;

	/* Write VQs */
	for (i = 0 ; i < snet->cfg->vq_num ; i++) {
		tmp = (i << 16) | (snet->vqs[i]->num & 0xFFFF);
		snet_write32(snet, off, tmp);
		off += 4;
		snet_write32(snet, off, snet->vqs[i]->irq_idx);
		off += 4;
		snet_write64(snet, off, snet->vqs[i]->desc_area);
		off += 8;
		snet_write64(snet, off, snet->vqs[i]->device_area);
		off += 8;
		snet_write64(snet, off, snet->vqs[i]->driver_area);
		off += 8;
		/* Write VQ state if config version is 2+ */
		if (SNET_CFG_VER(snet, 2))
			snet_write32(snet, off, *(u32 *)&snet->vqs[i]->vq_state);
		off += 4;
		/* Ignore reserved */
		off += 4;
	}

	/* Write magic number - data is ready */
	snet_write32(snet, snet->psnet->cfg.host_cfg_off, SNET_SIGNATURE);

	/* The DPU will ACK the config by clearing the signature */
	ret = readx_poll_timeout(ioread32, snet->bar + snet->psnet->cfg.host_cfg_off,
				 tmp, !tmp, 10, SNET_READ_CFG_TIMEOUT);
	if (ret) {
		SNET_ERR(snet->pdev, "Timeout waiting for the DPU to read the config\n");
		return false;
	}

	/* Set DPU flag */
	snet->dpu_ready = true;

	return true;
}

static int snet_request_irqs(struct pci_dev *pdev, struct snet *snet)
{
	int ret, i, irq;

	/* Request config IRQ */
	irq = pci_irq_vector(pdev, snet->cfg_irq_idx);
	ret = devm_request_irq(&pdev->dev, irq, snet_cfg_irq_hndlr, 0,
			       snet->cfg_irq_name, snet);
	if (ret) {
		SNET_ERR(pdev, "Failed to request IRQ\n");
		return ret;
	}
	snet->cfg_irq = irq;

	/* Request IRQ for every VQ */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		irq = pci_irq_vector(pdev, snet->vqs[i]->irq_idx);
		ret = devm_request_irq(&pdev->dev, irq, snet_vq_irq_hndlr, 0,
				       snet->vqs[i]->irq_name, snet->vqs[i]);
		if (ret) {
			SNET_ERR(pdev, "Failed to request IRQ\n");
			return ret;
		}
		snet->vqs[i]->irq = irq;
	}

	return 0;
}
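
/*
 * IRQs are requested and the config is pushed to the DPU only on the
 * transition that sets VIRTIO_CONFIG_S_DRIVER_OK; a failure on that path
 * marks the device with VIRTIO_CONFIG_S_FAILED.
 */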
static void snet_set_status(struct vdpa_device *vdev, u8 status)
{
	struct snet *snet = vdpa_to_snet(vdev);
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev = snet->pdev;
	int ret;
	bool pf_irqs;

	if (status == snet->status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(snet->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		/* Request IRQs */
		pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);
		ret = snet_request_irqs(pf_irqs ? pdev->physfn : pdev, snet);
		if (ret)
			goto set_err;

		/* Write config to the DPU */
		if (snet_write_conf(snet)) {
			SNET_INFO(pdev, "Create SNET[%u] device\n", snet->sid);
		} else {
			snet_free_irqs(snet);
			goto set_err;
		}
	}

	/* Save the new status */
	snet->status = status;
	return;

set_err:
	snet->status |= VIRTIO_CONFIG_S_FAILED;
}

static void snet_get_config(struct vdpa_device *vdev, unsigned int offset,
			    void *buf, unsigned int len)
{
	struct snet *snet = vdpa_to_snet(vdev);
	void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
	u8 *buf_ptr = buf;
	u32 i;

	/* check for offset error */
	if (offset + len > snet->cfg->cfg_size)
		return;

	/* Write into buffer */
	for (i = 0; i < len; i++)
		*buf_ptr++ = ioread8(cfg_ptr + i);
}

static void snet_set_config(struct vdpa_device *vdev, unsigned int offset,
			    const void *buf, unsigned int len)
{
	struct snet *snet = vdpa_to_snet(vdev);
	void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
	const u8 *buf_ptr = buf;
	u32 i;

	/* check for offset error */
	if (offset + len > snet->cfg->cfg_size)
		return;

	/* Write into PCI BAR */
	for (i = 0; i < len; i++)
		iowrite8(*buf_ptr++, cfg_ptr + i);
}

static int snet_suspend(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);
	int ret;

	ret = snet_suspend_dev(snet);
	if (ret)
		SNET_ERR(snet->pdev, "SNET[%u] suspend failed, err: %d\n", snet->sid, ret);
	else
		SNET_DBG(snet->pdev, "Suspend SNET[%u] device\n", snet->sid);

	return ret;
}

static int snet_resume(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);
	int ret;

	ret = snet_resume_dev(snet);
	if (ret)
		SNET_ERR(snet->pdev, "SNET[%u] resume failed, err: %d\n", snet->sid, ret);
	else
		SNET_DBG(snet->pdev, "Resume SNET[%u] device\n", snet->sid);

	return ret;
}
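
/* The ring size is fixed by the DPU config, so cfg->vq_size is reported as
 * both the minimum and the maximum VQ size.
 */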
static const struct vdpa_config_ops snet_config_ops = {
	.set_vq_address		= snet_set_vq_address,
	.set_vq_num		= snet_set_vq_num,
	.kick_vq		= snet_kick_vq,
	.kick_vq_with_data	= snet_kick_vq_with_data,
	.set_vq_cb		= snet_set_vq_cb,
	.set_vq_ready		= snet_set_vq_ready,
	.get_vq_ready		= snet_get_vq_ready,
	.set_vq_state		= snet_set_vq_state,
	.get_vq_state		= snet_get_vq_state,
	.get_vq_irq		= snet_get_vq_irq,
	.get_vq_align		= snet_get_vq_align,
	.reset			= snet_reset,
	.get_config_size	= snet_get_config_size,
	.get_device_features	= snet_get_features,
	.set_driver_features	= snet_set_drv_features,
	.get_driver_features	= snet_get_drv_features,
	.get_vq_num_min		= snet_get_vq_num_max,
	.get_vq_num_max		= snet_get_vq_num_max,
	.set_config_cb		= snet_set_config_cb,
	.get_device_id		= snet_get_device_id,
	.get_vendor_id		= snet_get_vendor_id,
	.get_status		= snet_get_status,
	.set_status		= snet_set_status,
	.get_config		= snet_get_config,
	.set_config		= snet_set_config,
	.suspend		= snet_suspend,
	.resume			= snet_resume,
};

static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
{
	char *name;
	int ret, i, mask = 0;

	/* We don't know which BAR will be used to communicate.
	 * We will map every BAR with len > 0.
	 *
	 * Later, we will discover the BAR and unmap all other BARs.
	 */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i))
			mask |= (1 << i);
	}

	/* No BAR can be used */
	if (!mask) {
		SNET_ERR(pdev, "Failed to find a PCI BAR\n");
		return -ENODEV;
	}

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "psnet[%s]-bars", pci_name(pdev));
	if (!name)
		return -ENOMEM;

	ret = pcim_iomap_regions(pdev, mask, name);
	if (ret) {
		SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
		return ret;
	}

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (mask & (1 << i))
			psnet->bars[i] = pcim_iomap_table(pdev)[i];
	}

	return 0;
}

static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet)
{
	char *name;
	int ret;

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "snet[%s]-bars", pci_name(pdev));
	if (!name)
		return -ENOMEM;

	/* Request and map BAR */
	ret = pcim_iomap_regions(pdev, BIT(snet->psnet->cfg.vf_bar), name);
	if (ret) {
		SNET_ERR(pdev, "Failed to request and map PCI BAR for a VF\n");
		return ret;
	}

	snet->bar = pcim_iomap_table(pdev)[snet->psnet->cfg.vf_bar];

	return 0;
}

static void snet_free_cfg(struct snet_cfg *cfg)
{
	u32 i;

	if (!cfg->devs)
		return;

	/* Free devices */
	for (i = 0; i < cfg->devices_num; i++) {
		if (!cfg->devs[i])
			break;
		kfree(cfg->devs[i]);
	}
	/* Free pointers to devices */
	kfree(cfg->devs);
}

/* Detect which BAR is used for communication with the device. */
static int psnet_detect_bar(struct psnet *psnet, u32 off)
{
	unsigned long exit_time;
	int i;

	exit_time = jiffies + usecs_to_jiffies(SNET_DETECT_TIMEOUT);

	/* SNET DPU will write SNET's signature when the config is ready. */
	while (time_before(jiffies, exit_time)) {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			/* Is this BAR mapped? */
			if (!psnet->bars[i])
				continue;

			if (ioread32(psnet->bars[i] + off) == SNET_SIGNATURE)
				return i;
		}
		usleep_range(1000, 10000);
	}

	return -ENODEV;
}

static void psnet_unmap_unused_bars(struct pci_dev *pdev, struct psnet *psnet)
{
	int i, mask = 0;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (psnet->bars[i] && i != psnet->barno)
			mask |= (1 << i);
	}

	if (mask)
		pcim_iounmap_regions(pdev, mask);
}
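
/* Config layout in the BAR, as parsed below: 32-bit key, config size,
 * config version, number of VFs, VF BAR index, host config offset, max
 * host config size, virtio config offset, kick offset, hwmon offset,
 * control offset, flags, a reserved block and the number of devices,
 * followed by one entry per device (virtio ID, VQ number, VQ size, VF ID,
 * 64-bit features, reserved block, config size).
 */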
/* Read SNET config from PCI BAR */
static int psnet_read_cfg(struct pci_dev *pdev, struct psnet *psnet)
{
	struct snet_cfg *cfg = &psnet->cfg;
	u32 i, off;
	int barno;

	/* Move to where the config starts */
	off = SNET_CONFIG_OFF;

	/* Find BAR used for communication */
	barno = psnet_detect_bar(psnet, off);
	if (barno < 0) {
		SNET_ERR(pdev, "SNET config is not ready.\n");
		return barno;
	}

	/* Save used BAR number and unmap all other BARs */
	psnet->barno = barno;
	SNET_DBG(pdev, "Using BAR number %d\n", barno);

	psnet_unmap_unused_bars(pdev, psnet);

	/* load config from BAR */
	cfg->key = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_size = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_ver = psnet_read32(psnet, off);
	off += 4;
	/* The negotiated config version is the lower one between this driver's config
	 * and the DPU's.
	 */
	psnet->negotiated_cfg_ver = min_t(u32, cfg->cfg_ver, SNET_CFG_VERSION);
	SNET_DBG(pdev, "SNET config version %u\n", psnet->negotiated_cfg_ver);

	cfg->vf_num = psnet_read32(psnet, off);
	off += 4;
	cfg->vf_bar = psnet_read32(psnet, off);
	off += 4;
	cfg->host_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->max_size_host_cfg = psnet_read32(psnet, off);
	off += 4;
	cfg->virtio_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->kick_off = psnet_read32(psnet, off);
	off += 4;
	cfg->hwmon_off = psnet_read32(psnet, off);
	off += 4;
	cfg->ctrl_off = psnet_read32(psnet, off);
	off += 4;
	cfg->flags = psnet_read32(psnet, off);
	off += 4;
	/* Ignore Reserved */
	off += sizeof(cfg->rsvd);

	cfg->devices_num = psnet_read32(psnet, off);
	off += 4;
	/* Allocate memory to hold pointers to the devices */
	cfg->devs = kcalloc(cfg->devices_num, sizeof(void *), GFP_KERNEL);
	if (!cfg->devs)
		return -ENOMEM;

	/* Load device configuration from BAR */
	for (i = 0; i < cfg->devices_num; i++) {
		cfg->devs[i] = kzalloc(sizeof(*cfg->devs[i]), GFP_KERNEL);
		if (!cfg->devs[i]) {
			snet_free_cfg(cfg);
			return -ENOMEM;
		}
		/* Read device config */
		cfg->devs[i]->virtio_id = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_num = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_size = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vfid = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->features = psnet_read64(psnet, off);
		off += 8;
		/* Ignore Reserved */
		off += sizeof(cfg->devs[i]->rsvd);

		cfg->devs[i]->cfg_size = psnet_read32(psnet, off);
		off += 4;

		/* Is the config written to the DPU going to be too big? */
		if (SNET_GENERAL_CFG_LEN + SNET_GENERAL_CFG_VQ_LEN * cfg->devs[i]->vq_num >
		    cfg->max_size_host_cfg) {
			SNET_ERR(pdev, "Failed to read SNET config, the config is too big\n");
			snet_free_cfg(cfg);
			return -EINVAL;
		}
	}

	return 0;
}

static int psnet_alloc_irq_vector(struct pci_dev *pdev, struct psnet *psnet)
{
	int ret = 0;
	u32 i, irq_num = 0;

	/* Let's count how many IRQs we need, 1 for every VQ + 1 for config change */
	for (i = 0; i < psnet->cfg.devices_num; i++)
		irq_num += psnet->cfg.devs[i]->vq_num + 1;

	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
	if (ret != irq_num) {
		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
		return ret;
	}
	SNET_DBG(pdev, "Allocated %u IRQ vectors from physical function\n", irq_num);

	return 0;
}

static int snet_alloc_irq_vector(struct pci_dev *pdev, struct snet_dev_cfg *snet_cfg)
{
	int ret = 0;
	u32 irq_num;

	/* We want 1 IRQ for every VQ + 1 for config change events */
	irq_num = snet_cfg->vq_num + 1;

	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
	if (ret <= 0) {
		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
		return ret;
	}

	return 0;
}

static void snet_free_vqs(struct snet *snet)
{
	u32 i;

	if (!snet->vqs)
		return;

	for (i = 0 ; i < snet->cfg->vq_num ; i++) {
		if (!snet->vqs[i])
			break;

		kfree(snet->vqs[i]);
	}
	kfree(snet->vqs);
}

static int snet_build_vqs(struct snet *snet)
{
	u32 i;

	/* Allocate the VQ pointers array */
	snet->vqs = kcalloc(snet->cfg->vq_num, sizeof(void *), GFP_KERNEL);
	if (!snet->vqs)
		return -ENOMEM;

	/* Allocate the VQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		snet->vqs[i] = kzalloc(sizeof(*snet->vqs[i]), GFP_KERNEL);
		if (!snet->vqs[i]) {
			snet_free_vqs(snet);
			return -ENOMEM;
		}
		/* Reset IRQ num */
		snet->vqs[i]->irq = -1;
		/* VQ serial ID */
		snet->vqs[i]->sid = i;
		/* Kick address - every VQ gets 4B */
		snet->vqs[i]->kick_ptr = snet->bar + snet->psnet->cfg.kick_off +
					 snet->vqs[i]->sid * 4;
		/* Clear kick address for this VQ */
		iowrite32(0, snet->vqs[i]->kick_ptr);
	}

	return 0;
}

static int psnet_get_next_irq_num(struct psnet *psnet)
{
	int irq;

	spin_lock(&psnet->lock);
	irq = psnet->next_irq++;
	spin_unlock(&psnet->lock);

	return irq;
}

static void snet_reserve_irq_idx(struct pci_dev *pdev, struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	int i;

	/* one IRQ for every VQ, and one for config changes */
	snet->cfg_irq_idx = psnet_get_next_irq_num(psnet);
	snprintf(snet->cfg_irq_name, SNET_NAME_SIZE, "snet[%s]-cfg[%d]",
		 pci_name(pdev), snet->cfg_irq_idx);

	for (i = 0; i < snet->cfg->vq_num; i++) {
		/* Get next free IRQ ID */
		snet->vqs[i]->irq_idx = psnet_get_next_irq_num(psnet);
		/* Write IRQ name */
		snprintf(snet->vqs[i]->irq_name, SNET_NAME_SIZE, "snet[%s]-vq[%d]",
			 pci_name(pdev), snet->vqs[i]->irq_idx);
	}
}

/* Find a device config based on virtual function id */
static struct snet_dev_cfg *snet_find_dev_cfg(struct snet_cfg *cfg, u32 vfid)
{
	u32 i;

	for (i = 0; i < cfg->devices_num; i++) {
		if (cfg->devs[i]->vfid == vfid)
			return cfg->devs[i];
	}
	/* Oops, no config found */
	return NULL;
}

/* Probe function for a physical PCI function */
static int snet_vdpa_probe_pf(struct pci_dev *pdev)
{
	struct psnet *psnet;
	int ret = 0;
	bool pf_irqs = false;

	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI device\n");
		return ret;
	}

	/* Allocate a PCI physical function device */
	psnet = kzalloc(sizeof(*psnet), GFP_KERNEL);
	if (!psnet)
		return -ENOMEM;

	/* Init PSNET spinlock */
	spin_lock_init(&psnet->lock);

	pci_set_master(pdev);
	pci_set_drvdata(pdev, psnet);

	/* Open SNET MAIN BAR */
	ret = psnet_open_pf_bar(pdev, psnet);
	if (ret)
		goto free_psnet;

	/* Try to read SNET's config from PCI BAR */
	ret = psnet_read_cfg(pdev, psnet);
	if (ret)
		goto free_psnet;

	/* If the SNET_CFG_FLAG_IRQ_PF flag is set, we should use
	 * PF MSI-X vectors
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);

	if (pf_irqs) {
		ret = psnet_alloc_irq_vector(pdev, psnet);
		if (ret)
			goto free_cfg;
	}

	SNET_DBG(pdev, "Enable %u virtual functions\n", psnet->cfg.vf_num);
	ret = pci_enable_sriov(pdev, psnet->cfg.vf_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable SR-IOV\n");
		goto free_irq;
	}

	/* Create HW monitor device */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_HWMON)) {
#if IS_ENABLED(CONFIG_HWMON)
		psnet_create_hwmon(pdev);
#else
		SNET_WARN(pdev, "Can't start HWMON, CONFIG_HWMON is not enabled\n");
#endif
	}

	return 0;

free_irq:
	if (pf_irqs)
		pci_free_irq_vectors(pdev);
free_cfg:
	snet_free_cfg(&psnet->cfg);
free_psnet:
	kfree(psnet);
	return ret;
}

/* Probe function for a virtual PCI function */
static int snet_vdpa_probe_vf(struct pci_dev *pdev)
{
	struct pci_dev *pdev_pf = pdev->physfn;
	struct psnet *psnet = pci_get_drvdata(pdev_pf);
	struct snet_dev_cfg *dev_cfg;
	struct snet *snet;
	u32 vfid;
	int ret;
	bool pf_irqs = false;

	/* Get virtual function id.
	 * (the DPU counts the VFs from 1)
	 */
	ret = pci_iov_vf_id(pdev);
	if (ret < 0) {
		SNET_ERR(pdev, "Failed to find a VF id\n");
		return ret;
	}
	vfid = ret + 1;

	/* Find the snet_dev_cfg based on vfid */
	dev_cfg = snet_find_dev_cfg(&psnet->cfg, vfid);
	if (!dev_cfg) {
		SNET_WARN(pdev, "Failed to find a VF config\n");
		return -ENODEV;
	}

	/* Which PCI device should allocate the IRQs?
	 * If the SNET_CFG_FLAG_IRQ_PF flag is set, the PF device allocates the IRQs
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);

	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI VF device\n");
		return ret;
	}

	/* Request MSI-X IRQs */
	if (!pf_irqs) {
		ret = snet_alloc_irq_vector(pdev, dev_cfg);
		if (ret)
			return ret;
	}

	/* Allocate vdpa device */
	snet = vdpa_alloc_device(struct snet, vdpa, &pdev->dev, &snet_config_ops, 1, 1, NULL,
				 false);
	if (!snet) {
		SNET_ERR(pdev, "Failed to allocate a vdpa device\n");
		ret = -ENOMEM;
		goto free_irqs;
	}

	/* Init control mutex and spinlock */
	mutex_init(&snet->ctrl_lock);
	spin_lock_init(&snet->ctrl_spinlock);

	/* Save PCI device pointer */
	snet->pdev = pdev;
	snet->psnet = psnet;
	snet->cfg = dev_cfg;
	snet->dpu_ready = false;
	snet->sid = vfid;
	/* Reset IRQ value */
	snet->cfg_irq = -1;

	ret = snet_open_vf_bar(pdev, snet);
	if (ret)
		goto put_device;

	/* Create a VirtIO config pointer */
	snet->cfg->virtio_cfg = snet->bar + snet->psnet->cfg.virtio_cfg_off;

	/* Clear control registers */
	snet_ctrl_clear(snet);

	pci_set_master(pdev);
	pci_set_drvdata(pdev, snet);

	ret = snet_build_vqs(snet);
	if (ret)
		goto put_device;

	/* Reserve IRQ indexes. The IRQs may be requested and freed multiple
	 * times, but the indexes won't change.
	 */
	snet_reserve_irq_idx(pf_irqs ? pdev_pf : pdev, snet);

	/* Set DMA device */
	snet->vdpa.dma_dev = &pdev->dev;

	/* Register vDPA device */
	ret = vdpa_register_device(&snet->vdpa, snet->cfg->vq_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to register vdpa device\n");
		goto free_vqs;
	}

	return 0;

free_vqs:
	snet_free_vqs(snet);
put_device:
	put_device(&snet->vdpa.dev);
free_irqs:
	if (!pf_irqs)
		pci_free_irq_vectors(pdev);
	return ret;
}

static int snet_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (pdev->is_virtfn)
		return snet_vdpa_probe_vf(pdev);
	else
		return snet_vdpa_probe_pf(pdev);
}

static void snet_vdpa_remove_pf(struct pci_dev *pdev)
{
	struct psnet *psnet = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);
	/* If IRQs are allocated from the PF, we should free the IRQs */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pci_free_irq_vectors(pdev);

	snet_free_cfg(&psnet->cfg);
	kfree(psnet);
}

static void snet_vdpa_remove_vf(struct pci_dev *pdev)
{
	struct snet *snet = pci_get_drvdata(pdev);
	struct psnet *psnet = snet->psnet;

	vdpa_unregister_device(&snet->vdpa);
	snet_free_vqs(snet);

	/* If IRQs are allocated from the VF, we should free the IRQs */
	if (!PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pci_free_irq_vectors(pdev);
}

static void snet_vdpa_remove(struct pci_dev *pdev)
{
	if (pdev->is_virtfn)
		snet_vdpa_remove_vf(pdev);
	else
		snet_vdpa_remove_pf(pdev);
}

static const struct pci_device_id snet_driver_pci_ids[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID,
			 PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID) },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, snet_driver_pci_ids);

static struct pci_driver snet_vdpa_driver = {
	.name		= "snet-vdpa-driver",
	.id_table	= snet_driver_pci_ids,
	.probe		= snet_vdpa_probe,
	.remove		= snet_vdpa_remove,
};

module_pci_driver(snet_vdpa_driver);

MODULE_AUTHOR("Alvaro Karsz <alvaro.karsz@solid-run.com>");
MODULE_DESCRIPTION("SolidRun vDPA driver");
MODULE_LICENSE("GPL v2");