/* drivers/vdpa/alibaba/eni_vdpa.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * vDPA bridge driver for Alibaba ENI(Elastic Network Interface)
  4. *
  5. * Copyright (c) 2021, Alibaba Inc. All rights reserved.
  6. * Author: Wu Zongyong <wuzongyong@linux.alibaba.com>
  7. *
  8. */
#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_legacy.h>
#include <uapi/linux/virtio_net.h>
/* Size of the buffers holding MSI-X interrupt names (per-vq and config). */
#define ENI_MSIX_NAME_SIZE 256
/* Logging helpers: prefix every message with "eni_vdpa: ". */
#define ENI_ERR(pdev, fmt, ...) \
	dev_err(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_DBG(pdev, fmt, ...) \
	dev_dbg(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_INFO(pdev, fmt, ...) \
	dev_info(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
/* Per-virtqueue state kept by the bridge. */
struct eni_vring {
	void __iomem *notify;			/* doorbell: iowrite16(qid) kicks the vq */
	char msix_name[ENI_MSIX_NAME_SIZE];	/* irq name shown in /proc/interrupts */
	struct vdpa_callback cb;		/* upper-layer vq interrupt callback */
	int irq;				/* Linux irq number, or VIRTIO_MSI_NO_VECTOR */
};
/* Device instance: vdpa device on top of a legacy virtio-pci transport. */
struct eni_vdpa {
	struct vdpa_device vdpa;		/* must be first for container_of() */
	struct virtio_pci_legacy_device ldev;	/* legacy virtio-pci transport */
	struct eni_vring *vring;		/* array of 'queues' entries */
	struct vdpa_callback config_cb;		/* config-change callback */
	char msix_name[ENI_MSIX_NAME_SIZE];	/* name of the config irq */
	int config_irq;				/* config irq, or VIRTIO_MSI_NO_VECTOR */
	int queues;				/* number of virtqueues */
	int vectors;				/* allocated MSI-X vectors (0 = none) */
};
  43. static struct eni_vdpa *vdpa_to_eni(struct vdpa_device *vdpa)
  44. {
  45. return container_of(vdpa, struct eni_vdpa, vdpa);
  46. }
  47. static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa)
  48. {
  49. struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
  50. return &eni_vdpa->ldev;
  51. }
  52. static u64 eni_vdpa_get_device_features(struct vdpa_device *vdpa)
  53. {
  54. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  55. u64 features = vp_legacy_get_features(ldev);
  56. features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
  57. features |= BIT_ULL(VIRTIO_F_ORDER_PLATFORM);
  58. return features;
  59. }
  60. static int eni_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
  61. {
  62. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  63. if (!(features & BIT_ULL(VIRTIO_NET_F_MRG_RXBUF)) && features) {
  64. ENI_ERR(ldev->pci_dev,
  65. "VIRTIO_NET_F_MRG_RXBUF is not negotiated\n");
  66. return -EINVAL;
  67. }
  68. vp_legacy_set_features(ldev, (u32)features);
  69. return 0;
  70. }
  71. static u64 eni_vdpa_get_driver_features(struct vdpa_device *vdpa)
  72. {
  73. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  74. return vp_legacy_get_driver_features(ldev);
  75. }
  76. static u8 eni_vdpa_get_status(struct vdpa_device *vdpa)
  77. {
  78. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  79. return vp_legacy_get_status(ldev);
  80. }
  81. static int eni_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
  82. {
  83. struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
  84. int irq = eni_vdpa->vring[idx].irq;
  85. if (irq == VIRTIO_MSI_NO_VECTOR)
  86. return -EINVAL;
  87. return irq;
  88. }
/* Tear down all MSI-X interrupts: detach each vq vector from the device,
 * free the corresponding irq, then do the same for the config vector, and
 * finally release the MSI-X vectors themselves. Safe to call when some or
 * all irqs were never requested (each step is guarded by a sentinel check),
 * which is what the error path of eni_vdpa_request_irq() relies on.
 */
static void eni_vdpa_free_irq(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	struct pci_dev *pdev = ldev->pci_dev;
	int i;

	for (i = 0; i < eni_vdpa->queues; i++) {
		if (eni_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			/* Detach the vector before freeing the irq so the
			 * device stops signalling it.
			 */
			vp_legacy_queue_vector(ldev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, eni_vdpa->vring[i].irq,
				      &eni_vdpa->vring[i]);
			eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (eni_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_legacy_config_vector(ldev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, eni_vdpa->config_irq, eni_vdpa);
		eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	/* Release the MSI-X vectors last, after no irq references them. */
	if (eni_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		eni_vdpa->vectors = 0;
	}
}
  112. static irqreturn_t eni_vdpa_vq_handler(int irq, void *arg)
  113. {
  114. struct eni_vring *vring = arg;
  115. if (vring->cb.callback)
  116. return vring->cb.callback(vring->cb.private);
  117. return IRQ_HANDLED;
  118. }
  119. static irqreturn_t eni_vdpa_config_handler(int irq, void *arg)
  120. {
  121. struct eni_vdpa *eni_vdpa = arg;
  122. if (eni_vdpa->config_cb.callback)
  123. return eni_vdpa->config_cb.callback(eni_vdpa->config_cb.private);
  124. return IRQ_HANDLED;
  125. }
  126. static int eni_vdpa_request_irq(struct eni_vdpa *eni_vdpa)
  127. {
  128. struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
  129. struct pci_dev *pdev = ldev->pci_dev;
  130. int i, ret, irq;
  131. int queues = eni_vdpa->queues;
  132. int vectors = queues + 1;
  133. ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
  134. if (ret != vectors) {
  135. ENI_ERR(pdev,
  136. "failed to allocate irq vectors want %d but %d\n",
  137. vectors, ret);
  138. return ret;
  139. }
  140. eni_vdpa->vectors = vectors;
  141. for (i = 0; i < queues; i++) {
  142. snprintf(eni_vdpa->vring[i].msix_name, ENI_MSIX_NAME_SIZE,
  143. "eni-vdpa[%s]-%d\n", pci_name(pdev), i);
  144. irq = pci_irq_vector(pdev, i);
  145. ret = devm_request_irq(&pdev->dev, irq,
  146. eni_vdpa_vq_handler,
  147. 0, eni_vdpa->vring[i].msix_name,
  148. &eni_vdpa->vring[i]);
  149. if (ret) {
  150. ENI_ERR(pdev, "failed to request irq for vq %d\n", i);
  151. goto err;
  152. }
  153. vp_legacy_queue_vector(ldev, i, i);
  154. eni_vdpa->vring[i].irq = irq;
  155. }
  156. snprintf(eni_vdpa->msix_name, ENI_MSIX_NAME_SIZE, "eni-vdpa[%s]-config\n",
  157. pci_name(pdev));
  158. irq = pci_irq_vector(pdev, queues);
  159. ret = devm_request_irq(&pdev->dev, irq, eni_vdpa_config_handler, 0,
  160. eni_vdpa->msix_name, eni_vdpa);
  161. if (ret) {
  162. ENI_ERR(pdev, "failed to request irq for config vq %d\n", i);
  163. goto err;
  164. }
  165. vp_legacy_config_vector(ldev, queues);
  166. eni_vdpa->config_irq = irq;
  167. return 0;
  168. err:
  169. eni_vdpa_free_irq(eni_vdpa);
  170. return ret;
  171. }
/* Write the device status byte, requesting irqs on the transition into
 * DRIVER_OK and releasing them on the transition out of it.
 */
static void eni_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u8 s = eni_vdpa_get_status(vdpa);

	/* 0 -> DRIVER_OK: set up irqs before the device may start.
	 * NOTE(review): the return value of eni_vdpa_request_irq() is
	 * ignored, so DRIVER_OK is written even if irq setup failed —
	 * confirm this is intentional.
	 */
	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		eni_vdpa_request_irq(eni_vdpa);
	}

	vp_legacy_set_status(ldev, status);

	/* DRIVER_OK -> 0: tear down irqs after the device is stopped. */
	if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    (s & VIRTIO_CONFIG_S_DRIVER_OK))
		eni_vdpa_free_irq(eni_vdpa);
}
  186. static int eni_vdpa_reset(struct vdpa_device *vdpa)
  187. {
  188. struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
  189. struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
  190. u8 s = eni_vdpa_get_status(vdpa);
  191. vp_legacy_set_status(ldev, 0);
  192. if (s & VIRTIO_CONFIG_S_DRIVER_OK)
  193. eni_vdpa_free_irq(eni_vdpa);
  194. return 0;
  195. }
  196. static u16 eni_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
  197. {
  198. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  199. return vp_legacy_get_queue_size(ldev, 0);
  200. }
  201. static u16 eni_vdpa_get_vq_num_min(struct vdpa_device *vdpa)
  202. {
  203. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  204. return vp_legacy_get_queue_size(ldev, 0);
  205. }
  206. static u16 eni_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
  207. {
  208. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  209. return vp_legacy_get_queue_size(ldev, qid);
  210. }
/* A legacy virtio-pci transport has no way to read virtqueue state. */
static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				 struct vdpa_vq_state *state)
{
	return -EOPNOTSUPP;
}
  216. static int eni_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
  217. const struct vdpa_vq_state *state)
  218. {
  219. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  220. const struct vdpa_vq_state_split *split = &state->split;
  221. /* ENI is build upon virtio-pci specfication which not support
  222. * to set state of virtqueue. But if the state is equal to the
  223. * device initial state by chance, we can let it go.
  224. */
  225. if (!vp_legacy_get_queue_enable(ldev, qid)
  226. && split->avail_index == 0)
  227. return 0;
  228. return -EOPNOTSUPP;
  229. }
  230. static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
  231. struct vdpa_callback *cb)
  232. {
  233. struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
  234. eni_vdpa->vring[qid].cb = *cb;
  235. }
  236. static void eni_vdpa_set_vq_ready(struct vdpa_device *vdpa, u16 qid,
  237. bool ready)
  238. {
  239. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  240. /* ENI is a legacy virtio-pci device. This is not supported
  241. * by specification. But we can disable virtqueue by setting
  242. * address to 0.
  243. */
  244. if (!ready)
  245. vp_legacy_set_queue_address(ldev, qid, 0);
  246. }
  247. static bool eni_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
  248. {
  249. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  250. return vp_legacy_get_queue_enable(ldev, qid);
  251. }
  252. static void eni_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
  253. u32 num)
  254. {
  255. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  256. struct pci_dev *pdev = ldev->pci_dev;
  257. u16 n = vp_legacy_get_queue_size(ldev, qid);
  258. /* ENI is a legacy virtio-pci device which not allow to change
  259. * virtqueue size. Just report a error if someone tries to
  260. * change it.
  261. */
  262. if (num != n)
  263. ENI_ERR(pdev,
  264. "not support to set vq %u fixed num %u to %u\n",
  265. qid, n, num);
  266. }
  267. static int eni_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
  268. u64 desc_area, u64 driver_area,
  269. u64 device_area)
  270. {
  271. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  272. u32 pfn = desc_area >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
  273. vp_legacy_set_queue_address(ldev, qid, pfn);
  274. return 0;
  275. }
  276. static void eni_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
  277. {
  278. struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
  279. iowrite16(qid, eni_vdpa->vring[qid].notify);
  280. }
  281. static u32 eni_vdpa_get_device_id(struct vdpa_device *vdpa)
  282. {
  283. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  284. return ldev->id.device;
  285. }
  286. static u32 eni_vdpa_get_vendor_id(struct vdpa_device *vdpa)
  287. {
  288. struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
  289. return ldev->id.vendor;
  290. }
  291. static u32 eni_vdpa_get_vq_align(struct vdpa_device *vdpa)
  292. {
  293. return VIRTIO_PCI_VRING_ALIGN;
  294. }
  295. static size_t eni_vdpa_get_config_size(struct vdpa_device *vdpa)
  296. {
  297. return sizeof(struct virtio_net_config);
  298. }
  299. static void eni_vdpa_get_config(struct vdpa_device *vdpa,
  300. unsigned int offset,
  301. void *buf, unsigned int len)
  302. {
  303. struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
  304. struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
  305. void __iomem *ioaddr = ldev->ioaddr +
  306. VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
  307. offset;
  308. u8 *p = buf;
  309. int i;
  310. for (i = 0; i < len; i++)
  311. *p++ = ioread8(ioaddr + i);
  312. }
  313. static void eni_vdpa_set_config(struct vdpa_device *vdpa,
  314. unsigned int offset, const void *buf,
  315. unsigned int len)
  316. {
  317. struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
  318. struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
  319. void __iomem *ioaddr = ldev->ioaddr +
  320. VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
  321. offset;
  322. const u8 *p = buf;
  323. int i;
  324. for (i = 0; i < len; i++)
  325. iowrite8(*p++, ioaddr + i);
  326. }
  327. static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa,
  328. struct vdpa_callback *cb)
  329. {
  330. struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
  331. eni_vdpa->config_cb = *cb;
  332. }
/* vDPA bus operations backed by the legacy virtio-pci transport. */
static const struct vdpa_config_ops eni_vdpa_ops = {
	.get_device_features = eni_vdpa_get_device_features,
	.set_driver_features = eni_vdpa_set_driver_features,
	.get_driver_features = eni_vdpa_get_driver_features,
	.get_status	= eni_vdpa_get_status,
	.set_status	= eni_vdpa_set_status,
	.reset		= eni_vdpa_reset,
	.get_vq_num_max	= eni_vdpa_get_vq_num_max,
	.get_vq_num_min	= eni_vdpa_get_vq_num_min,
	.get_vq_size	= eni_vdpa_get_vq_size,
	.get_vq_state	= eni_vdpa_get_vq_state,
	.set_vq_state	= eni_vdpa_set_vq_state,
	.set_vq_cb	= eni_vdpa_set_vq_cb,
	.set_vq_ready	= eni_vdpa_set_vq_ready,
	.get_vq_ready	= eni_vdpa_get_vq_ready,
	.set_vq_num	= eni_vdpa_set_vq_num,
	.set_vq_address	= eni_vdpa_set_vq_address,
	.kick_vq	= eni_vdpa_kick_vq,
	.get_device_id	= eni_vdpa_get_device_id,
	.get_vendor_id	= eni_vdpa_get_vendor_id,
	.get_vq_align	= eni_vdpa_get_vq_align,
	.get_config_size = eni_vdpa_get_config_size,
	.get_config	= eni_vdpa_get_config,
	.set_config	= eni_vdpa_set_config,
	.set_config_cb  = eni_vdpa_set_config_cb,
	.get_vq_irq	= eni_vdpa_get_vq_irq,
};
/* Derive the number of virtqueues from the device feature bits:
 * 2 RX/TX queues by default, 2 * max_virtqueue_pairs if VIRTIO_NET_F_MQ
 * is offered (read from config space), plus one control queue if
 * VIRTIO_NET_F_CTRL_VQ is offered. Called from probe before MSI-X is
 * enabled, so eni_vdpa_get_config() sees vectors == 0 and uses the
 * non-MSI-X config offset.
 */
static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u32 features = vp_legacy_get_features(ldev);
	u16 num = 2;

	if (features & BIT_ULL(VIRTIO_NET_F_MQ)) {
		/* Legacy devices use legacy (guest-native) endianness. */
		__virtio16 max_virtqueue_pairs;

		eni_vdpa_get_config(&eni_vdpa->vdpa,
			offsetof(struct virtio_net_config, max_virtqueue_pairs),
			&max_virtqueue_pairs,
			sizeof(max_virtqueue_pairs));
		num = 2 * __virtio16_to_cpu(virtio_legacy_is_little_endian(),
				max_virtqueue_pairs);
	}

	if (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		num += 1;

	return num;
}
/* PCI probe: enable the device, allocate the vdpa structure, bring up
 * the legacy virtio-pci transport, size and initialize the vring array,
 * and register with the vdpa bus. On error the transport is removed and
 * the vdpa device reference dropped (which frees the allocation).
 */
static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct eni_vdpa *eni_vdpa;
	struct virtio_pci_legacy_device *ldev;
	int ret, i;

	/* Managed enable: undone automatically on driver detach. */
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
				     dev, &eni_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(eni_vdpa)) {
		ENI_ERR(pdev, "failed to allocate vDPA structure\n");
		return PTR_ERR(eni_vdpa);
	}

	ldev = &eni_vdpa->ldev;
	ldev->pci_dev = pdev;

	ret = vp_legacy_probe(ldev);
	if (ret) {
		ENI_ERR(pdev, "failed to probe legacy PCI device\n");
		goto err;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, eni_vdpa);

	eni_vdpa->vdpa.dma_dev = &pdev->dev;
	/* Needs the transport up: reads features and config space. */
	eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);

	eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
				       sizeof(*eni_vdpa->vring),
				       GFP_KERNEL);
	if (!eni_vdpa->vring) {
		ret = -ENOMEM;
		ENI_ERR(pdev, "failed to allocate virtqueues\n");
		goto err_remove_vp_legacy;
	}

	/* All queues share the single legacy notify register. */
	for (i = 0; i < eni_vdpa->queues; i++) {
		eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		eni_vdpa->vring[i].notify = ldev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
	}
	eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues);
	if (ret) {
		ENI_ERR(pdev, "failed to register to vdpa bus\n");
		goto err_remove_vp_legacy;
	}

	return 0;

err_remove_vp_legacy:
	vp_legacy_remove(&eni_vdpa->ldev);
err:
	put_device(&eni_vdpa->vdpa.dev);
	return ret;
}
  429. static void eni_vdpa_remove(struct pci_dev *pdev)
  430. {
  431. struct eni_vdpa *eni_vdpa = pci_get_drvdata(pdev);
  432. vdpa_unregister_device(&eni_vdpa->vdpa);
  433. vp_legacy_remove(&eni_vdpa->ldev);
  434. }
  435. static struct pci_device_id eni_pci_ids[] = {
  436. { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
  437. VIRTIO_TRANS_ID_NET,
  438. PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
  439. VIRTIO_ID_NET) },
  440. { 0 },
  441. };
/* PCI driver glue; module init/exit generated by module_pci_driver(). */
static struct pci_driver eni_vdpa_driver = {
	.name		= "alibaba-eni-vdpa",
	.id_table	= eni_pci_ids,
	.probe		= eni_vdpa_probe,
	.remove		= eni_vdpa_remove,
};

module_pci_driver(eni_vdpa_driver);

MODULE_AUTHOR("Wu Zongyong <wuzongyong@linux.alibaba.com>");
MODULE_DESCRIPTION("Alibaba ENI vDPA driver");
MODULE_LICENSE("GPL v2");