vop_main.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766
  1. /*
  2. * Intel MIC Platform Software Stack (MPSS)
  3. *
  4. * Copyright(c) 2016 Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License, version 2, as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * The full GNU General Public License is included in this distribution in
  16. * the file called "COPYING".
  17. *
  18. * Adapted from:
  19. *
  20. * virtio for kvm on s390
  21. *
  22. * Copyright IBM Corp. 2008
  23. *
  24. * This program is free software; you can redistribute it and/or modify
  25. * it under the terms of the GNU General Public License (version 2 only)
  26. * as published by the Free Software Foundation.
  27. *
  28. * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  29. *
  30. * Intel Virtio Over PCIe (VOP) driver.
  31. *
  32. */
  33. #include <linux/delay.h>
  34. #include <linux/module.h>
  35. #include <linux/sched.h>
  36. #include <linux/dma-mapping.h>
  37. #include "vop_main.h"
  38. #define VOP_MAX_VRINGS 4
  39. /*
  40. * _vop_vdev - Allocated per virtio device instance injected by the peer.
  41. *
  42. * @vdev: Virtio device
  43. * @desc: Virtio device page descriptor
  44. * @dc: Virtio device control
  45. * @vpdev: VOP device which is the parent for this virtio device
  46. * @vr: Buffer for accessing the VRING
  47. * @used: Buffer for used
  48. * @used_size: Size of the used buffer
  49. * @reset_done: Track whether VOP reset is complete
  50. * @virtio_cookie: Cookie returned upon requesting a interrupt
  51. * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
  52. * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
  53. * @dnode: The destination node
  54. */
/* Per-device state; see the kernel-doc block above for field details. */
struct _vop_vdev {
	struct virtio_device vdev;
	struct mic_device_desc __iomem *desc;
	struct mic_device_ctrl __iomem *dc;
	struct vop_device *vpdev;
	void __iomem *vr[VOP_MAX_VRINGS];	/* ioremapped vring areas */
	dma_addr_t used[VOP_MAX_VRINGS];	/* DMA addrs of used rings */
	int used_size[VOP_MAX_VRINGS];		/* bytes mapped per used ring */
	struct completion reset_done;
	struct mic_irq *virtio_cookie;
	int c2h_vdev_db;	/* card -> host doorbell */
	int h2c_vdev_db;	/* host -> card doorbell */
	int dnode;
};

/* Convert an embedded struct virtio_device pointer back to its _vop_vdev. */
#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)

/* Descriptor size rounded up to an 8-byte boundary. */
#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)
  71. /* Helper API to obtain the parent of the virtio device */
  72. static inline struct device *_vop_dev(struct _vop_vdev *vdev)
  73. {
  74. return vdev->vdev.dev.parent;
  75. }
/*
 * Size of one device descriptor: fixed header, one mic_vqconfig per
 * virtqueue, two feature bitmaps (offered + accepted, feature_len bytes
 * each), then the config space.
 */
static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
{
	return sizeof(*desc)
		+ ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
		+ ioread8(&desc->feature_len) * 2
		+ ioread8(&desc->config_len);
}
  83. static inline struct mic_vqconfig __iomem *
  84. _vop_vq_config(struct mic_device_desc __iomem *desc)
  85. {
  86. return (struct mic_vqconfig __iomem *)(desc + 1);
  87. }
  88. static inline u8 __iomem *
  89. _vop_vq_features(struct mic_device_desc __iomem *desc)
  90. {
  91. return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
  92. }
  93. static inline u8 __iomem *
  94. _vop_vq_configspace(struct mic_device_desc __iomem *desc)
  95. {
  96. return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
  97. }
  98. static inline unsigned
  99. _vop_total_desc_size(struct mic_device_desc __iomem *desc)
  100. {
  101. return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
  102. }
  103. /* This gets the device's feature bits. */
  104. static u64 vop_get_features(struct virtio_device *vdev)
  105. {
  106. unsigned int i, bits;
  107. u32 features = 0;
  108. struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
  109. u8 __iomem *in_features = _vop_vq_features(desc);
  110. int feature_len = ioread8(&desc->feature_len);
  111. bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
  112. for (i = 0; i < bits; i++)
  113. if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
  114. features |= BIT(i);
  115. return features;
  116. }
/*
 * Publish the feature bits accepted by the guest driver back to the
 * host, into the second bitmap of the descriptor's feature area.
 */
static int vop_finalize_features(struct virtio_device *vdev)
{
	unsigned int i, bits;
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
	u8 feature_len = ioread8(&desc->feature_len);
	/* Second half of bitmap is features we accept. */
	u8 __iomem *out_features =
		_vop_vq_features(desc) + feature_len;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Clear the accepted bitmap, then set each bit the driver took. */
	memset_io(out_features, 0, feature_len);
	bits = min_t(unsigned, feature_len,
		     sizeof(vdev->features)) * 8;
	for (i = 0; i < bits; i++) {
		if (__virtio_test_bit(vdev, i))
			iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
				 &out_features[i / 8]);
	}
	return 0;
}
  137. /*
  138. * Reading and writing elements in config space
  139. */
/*
 * Read @len bytes at @offset of the device config space into @buf.
 * Out-of-range requests are silently ignored — the virtio config get
 * op has no error return.
 */
static void vop_get(struct virtio_device *vdev, unsigned int offset,
		    void *buf, unsigned len)
{
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

	if (offset + len > ioread8(&desc->config_len))
		return;
	memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
}
/*
 * Write @len bytes from @buf at @offset of the device config space.
 * Out-of-range requests are silently ignored, mirroring vop_get().
 */
static void vop_set(struct virtio_device *vdev, unsigned int offset,
		    const void *buf, unsigned len)
{
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

	if (offset + len > ioread8(&desc->config_len))
		return;
	memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
}
  156. /*
  157. * The operations to get and set the status word just access the status
  158. * field of the device descriptor. set_status also interrupts the host
  159. * to tell about status changes.
  160. */
  161. static u8 vop_get_status(struct virtio_device *vdev)
  162. {
  163. return ioread8(&to_vopvdev(vdev)->desc->status);
  164. }
/*
 * Write the virtio status byte and ring the card->host doorbell so the
 * host notices the change. A zero status (reset) is ignored here; reset
 * is driven through vop_reset() instead.
 */
static void vop_set_status(struct virtio_device *dev, u8 status)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;

	if (!status)
		return;
	iowrite8(status, &vdev->desc->status);
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
}
/* Inform host on a virtio device reset and wait for ack from host */
static void vop_reset_inform_host(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct mic_device_ctrl __iomem *dc = vdev->dc;
	struct vop_device *vpdev = vdev->vpdev;
	int retry;

	/* Clear the previous ack, raise the reset request, then doorbell. */
	iowrite8(0, &dc->host_ack);
	iowrite8(1, &dc->vdev_reset);
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);

	/* Wait till host completes all card accesses and acks the reset */
	/* Polls up to ~10s (100 x 100ms); falls through on timeout. */
	for (retry = 100; retry--;) {
		if (ioread8(&dc->host_ack))
			break;
		msleep(100);
	};

	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);

	/* Reset status to 0 in case we timed out */
	iowrite8(0, &vdev->desc->status);
}
/*
 * virtio config op: reset the device. Performs the host handshake and
 * then wakes anyone blocked on reset_done (see the wait_for_completion()
 * in _vop_remove_device()).
 */
static void vop_reset(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);

	dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
		__func__, dev->id.device);

	vop_reset_inform_host(dev);
	complete_all(&vdev->reset_done);
}
  202. /*
  203. * The virtio_ring code calls this API when it wants to notify the Host.
  204. */
  205. static bool vop_notify(struct virtqueue *vq)
  206. {
  207. struct _vop_vdev *vdev = vq->priv;
  208. struct vop_device *vpdev = vdev->vpdev;
  209. vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
  210. return true;
  211. }
/*
 * Tear down one virtqueue: unmap and free the locally-allocated used
 * ring, delete the vring, and unmap the host vring memory.
 */
static void vop_del_vq(struct virtqueue *vq, int n)
{
	struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
	/*
	 * Relies on struct vring living directly after the virtqueue,
	 * the same private-layout assumption documented in vop_find_vq().
	 */
	struct vring *vr = (struct vring *)(vq + 1);
	struct vop_device *vpdev = vdev->vpdev;

	dma_unmap_single(&vpdev->dev, vdev->used[n],
			 vdev->used_size[n], DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
	vring_del_virtqueue(vq);
	vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
	vdev->vr[n] = NULL;
}
/* Delete all virtqueues of the device; _safe since each vq is unlinked. */
static void vop_del_vqs(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct virtqueue *vq, *n;
	int idx = 0;

	dev_dbg(_vop_dev(vdev), "%s\n", __func__);

	list_for_each_entry_safe(vq, n, &dev->vqs, list)
		vop_del_vq(vq, idx++);
}
/*
 * This routine will assign vring's allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were local
 * memory without io accessors.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	struct vring *vr;
	u8 type = ioread8(&vdev->desc->type);

	if (index >= ioread8(&vdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vring's allocated in host memory */
	vqconfig = _vop_vq_config(vdev->desc) + index;
	memcpy_fromio(&config, vqconfig, sizeof(config));
	/* Ring area plus the trailing _mic_vring_info, page aligned. */
	_vr_size = round_up(vring_size(le16_to_cpu(config.num),
				       MIC_VIRTIO_RING_ALIGN), 4);
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
				    vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	vdev->vr[index] = va;
	memset_io(va, 0x0, _vr_size);

	vq = vring_new_virtqueue(
				index,
				le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
				dev,
				false,
				ctx,
				(void __force *)va, vop_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}
	/* Sanity-check the info block the host placed after the ring. */
	info = va + _vr_size;
	magic = ioread32(&info->magic);

	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto unmap;
	}

	/* Allocate and reassign used ring now */
	vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					    sizeof(struct vring_used_elem) *
					    le16_to_cpu(config.num));
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(vdev->used_size[index]));
	if (!used) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	vdev->used[index] = dma_map_single(&vpdev->dev, used,
					   vdev->used_size[index],
					   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto free_used;
	}
	/* Tell the host where the card-local used ring lives. */
	writeq(vdev->used[index], &vqconfig->used_address);
	/*
	 * To reassign the used ring here we are directly accessing
	 * struct vring_virtqueue which is a private data structure
	 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
	 * vring_new_virtqueue() would ensure that
	 * (&vq->vring == (struct vring *) (&vq->vq + 1));
	 */
	vr = (struct vring *)(vq + 1);
	vr->used = used;

	vq->priv = vdev;
	return vq;
free_used:
	free_pages((unsigned long)used,
		   get_order(vdev->used_size[index]));
del_vq:
	vring_del_virtqueue(vq);
unmap:
	vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
	return ERR_PTR(err);
}
/*
 * virtio config op: create nvqs virtqueues, then hand the new used-ring
 * addresses to the host and wait for it to acknowledge the update.
 */
static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
			struct virtqueue *vqs[],
			vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_device_ctrl __iomem *dc = vdev->dc;
	int i, err, retry;

	/* We must have this many virtqueues. */
	if (nvqs > ioread8(&vdev->desc->num_vq))
		return -ENOENT;

	for (i = 0; i < nvqs; ++i) {
		dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
			__func__, i, names[i]);
		vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	iowrite8(1, &dc->used_address_updated);
	/*
	 * Send an interrupt to the host to inform it that used
	 * rings have been re-assigned.
	 */
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
	/* Host clears the flag once it picked up the addresses; ~10s max. */
	for (retry = 100; --retry;) {
		if (!ioread8(&dc->used_address_updated))
			break;
		msleep(100);
	};

	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
	if (!retry) {
		err = -ENODEV;
		goto error;
	}

	return 0;
error:
	vop_del_vqs(dev);
	return err;
}
/*
 * The config ops structure as defined by virtio config
 */
static struct virtio_config_ops vop_vq_config_ops = {
	.get_features = vop_get_features,
	.finalize_features = vop_finalize_features,
	.get = vop_get,
	.set = vop_set,
	.get_status = vop_get_status,
	.set_status = vop_set_status,
	.reset = vop_reset,
	.find_vqs = vop_find_vqs,
	.del_vqs = vop_del_vqs,
};
/*
 * Handler for the per-device host->card doorbell. The doorbell does not
 * identify which ring made progress, so every virtqueue of the device
 * is kicked via vring_interrupt().
 */
static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
{
	struct _vop_vdev *vdev = data;
	struct vop_device *vpdev = vdev->vpdev;
	struct virtqueue *vq;

	vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
	list_for_each_entry(vq, &vdev->vdev.vqs, list)
		vring_interrupt(0, vq);

	return IRQ_HANDLED;
}
  396. static void vop_virtio_release_dev(struct device *_d)
  397. {
  398. struct virtio_device *vdev =
  399. container_of(_d, struct virtio_device, dev);
  400. struct _vop_vdev *vop_vdev =
  401. container_of(vdev, struct _vop_vdev, vdev);
  402. kfree(vop_vdev);
  403. }
/*
 * adds a new device and register it with virtio
 * appropriate drivers are loaded by the device model
 */
static int _vop_add_device(struct mic_device_desc __iomem *d,
			   unsigned int offset, struct vop_device *vpdev,
			   int dnode)
{
	struct _vop_vdev *vdev, *reg_dev = NULL;
	int ret;
	u8 type = ioread8(&d->type);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vdev->vpdev = vpdev;
	vdev->vdev.dev.parent = &vpdev->dev;
	vdev->vdev.dev.release = vop_virtio_release_dev;
	vdev->vdev.id.device = type;
	vdev->vdev.config = &vop_vq_config_ops;
	vdev->desc = d;
	/* The device control block follows the aligned descriptor. */
	vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
	vdev->dnode = dnode;
	vdev->vdev.priv = (void *)(u64)dnode;
	init_completion(&vdev->reset_done);

	/* Claim a host->card doorbell and install its interrupt handler. */
	vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
	vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
			vop_virtio_intr_handler, "virtio intr",
			vdev, vdev->h2c_vdev_db);
	if (IS_ERR(vdev->virtio_cookie)) {
		ret = PTR_ERR(vdev->virtio_cookie);
		goto kfree;
	}
	iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
	vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);

	/*
	 * After register_virtio_device() the device model owns the
	 * refcount, so failure paths must use put_device() (which runs
	 * vop_virtio_release_dev) instead of kfree() — tracked via
	 * reg_dev at the kfree label below.
	 */
	ret = register_virtio_device(&vdev->vdev);
	reg_dev = vdev;
	if (ret) {
		dev_err(_vop_dev(vdev),
			"Failed to register vop device %u type %u\n",
			offset, type);
		goto free_irq;
	}
	/* Publish our vdev pointer so the host-side events can find it. */
	writeq((u64)vdev, &vdev->dc->vdev);
	dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
		__func__, offset, type, vdev);

	return 0;

free_irq:
	vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
kfree:
	if (reg_dev)
		put_device(&vdev->vdev.dev);
	else
		kfree(vdev);
	return ret;
}
  459. /*
  460. * match for a vop device with a specific desc pointer
  461. */
  462. static int vop_match_desc(struct device *dev, void *data)
  463. {
  464. struct virtio_device *_dev = dev_to_virtio(dev);
  465. struct _vop_vdev *vdev = to_vopvdev(_dev);
  466. return vdev->desc == (void __iomem *)data;
  467. }
/*
 * Propagate a host-initiated config change to the virtio core and ack
 * it back to the host via guest_ack. No-op unless the control block
 * flags MIC_VIRTIO_PARAM_CONFIG_CHANGED.
 */
static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
				      unsigned int offset,
				      struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	/* vdev pointer was published by _vop_add_device() via writeq. */
	struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);

	if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
		return;

	dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
	virtio_config_changed(&vdev->vdev);
	iowrite8(1, &dc->guest_ack);
}
/*
 * removes a virtio device if a hot remove event has been
 * requested by the host.
 */
static int _vop_remove_device(struct mic_device_desc __iomem *d,
			      unsigned int offset, struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
	u8 status;
	int ret = -1;

	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
		/* Hold a reference while tearing the device down. */
		struct device *dev = get_device(&vdev->vdev.dev);

		dev_dbg(&vpdev->dev,
			"%s %d config_change %d type %d vdev %p\n",
			__func__, __LINE__,
			ioread8(&dc->config_change), ioread8(&d->type), vdev);
		status = ioread8(&d->status);
		reinit_completion(&vdev->reset_done);
		unregister_virtio_device(&vdev->vdev);
		vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
		iowrite8(-1, &dc->h2c_vdev_db);
		/*
		 * If a driver was running, wait for vop_reset() to signal
		 * reset_done before acking the removal to the host.
		 */
		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
			wait_for_completion(&vdev->reset_done);
		put_device(dev);
		iowrite8(1, &dc->guest_ack);
		dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
			__func__, __LINE__, ioread8(&dc->guest_ack));
		/* Mark the descriptor slot as free (-1). */
		iowrite8(-1, &d->type);
		ret = 0;
	}
	return ret;
}
#define REMOVE_DEVICES true

/*
 * Walk the device page, handling config changes and removals for known
 * descriptors and adding devices for new ones. With @remove set, every
 * existing device is flagged for removal instead.
 */
static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
			      bool remove, int dnode)
{
	s8 type;
	unsigned int i;
	struct mic_device_desc __iomem *d;
	struct mic_device_ctrl __iomem *dc;
	struct device *dev;
	int ret;

	for (i = sizeof(struct mic_bootparam);
	     i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
		d = dp + i;
		dc = (void __iomem *)d + _vop_aligned_desc_size(d);
		/*
		 * This read barrier is paired with the corresponding write
		 * barrier on the host which is inserted before adding or
		 * removing a virtio device descriptor, by updating the type.
		 */
		rmb();
		type = ioread8(&d->type);

		/* end of list */
		if (type == 0)
			break;

		/* -1 marks a descriptor slot that has been freed. */
		if (type == -1)
			continue;

		/* device already exists */
		dev = device_find_child(&vpdev->dev, (void __force *)d,
					vop_match_desc);
		if (dev) {
			if (remove)
				iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
					 &dc->config_change);
			put_device(dev);
			_vop_handle_config_change(d, i, vpdev);
			ret = _vop_remove_device(d, i, vpdev);
			if (remove) {
				iowrite8(0, &dc->config_change);
				iowrite8(0, &dc->guest_ack);
			}
			continue;
		}

		/* new device */
		dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
			__func__, __LINE__, d);
		if (!remove)
			_vop_add_device(d, i, vpdev, dnode);
	}
}
  564. static void vop_scan_devices(struct vop_info *vi,
  565. struct vop_device *vpdev, bool remove)
  566. {
  567. void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);
  568. if (!dp)
  569. return;
  570. mutex_lock(&vi->vop_mutex);
  571. _vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
  572. mutex_unlock(&vi->vop_mutex);
  573. }
  574. /*
  575. * vop_hotplug_device tries to find changes in the device page.
  576. */
  577. static void vop_hotplug_devices(struct work_struct *work)
  578. {
  579. struct vop_info *vi = container_of(work, struct vop_info,
  580. hotplug_work);
  581. vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
  582. }
/*
 * Interrupt handler for hot plug/config changes etc.
 * Acks the config doorbell and defers the (sleeping) device-page scan
 * to the hotplug workqueue.
 */
static irqreturn_t vop_extint_handler(int irq, void *data)
{
	struct vop_info *vi = data;
	struct mic_bootparam __iomem *bp;
	struct vop_device *vpdev = vi->vpdev;

	bp = vpdev->hw_ops->get_remote_dp(vpdev);
	dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
		__func__, __LINE__);
	vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
	schedule_work(&vi->hotplug_work);
	return IRQ_HANDLED;
}
/*
 * Probe: allocate driver state, then either initialize the host side
 * (vpdev->dnode set) or, on the card side, scan for virtio devices and
 * install the config-change doorbell handler.
 */
static int vop_driver_probe(struct vop_device *vpdev)
{
	struct vop_info *vi;
	int rc;

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi) {
		rc = -ENOMEM;
		goto exit;
	}
	dev_set_drvdata(&vpdev->dev, vi);
	vi->vpdev = vpdev;
	mutex_init(&vi->vop_mutex);
	INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
	if (vpdev->dnode) {
		rc = vop_host_init(vi);
		if (rc < 0)
			goto free;
	} else {
		struct mic_bootparam __iomem *bootparam;

		/* Pick up any devices already present in the device page. */
		vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);

		vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
		vi->cookie = vpdev->hw_ops->request_irq(vpdev,
							vop_extint_handler,
							"virtio_config_intr",
							vi, vi->h2c_config_db);
		if (IS_ERR(vi->cookie)) {
			rc = PTR_ERR(vi->cookie);
			goto free;
		}
		/* Advertise our config doorbell to the host. */
		bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
		iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
	}
	vop_init_debugfs(vi);
	return 0;
free:
	kfree(vi);
exit:
	return rc;
}
/*
 * Remove: mirror of vop_driver_probe(). On the card side, invalidate
 * the config doorbell, tear down the irq and pending hotplug work, then
 * scan once more with REMOVE_DEVICES to unregister every virtio device.
 */
static void vop_driver_remove(struct vop_device *vpdev)
{
	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);

	if (vpdev->dnode) {
		vop_host_uninit(vi);
	} else {
		struct mic_bootparam __iomem *bootparam =
			vpdev->hw_ops->get_remote_dp(vpdev);
		if (bootparam)
			iowrite8(-1, &bootparam->h2c_config_db);
		vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
		/* No new work can be queued after free_irq; drain the rest. */
		flush_work(&vi->hotplug_work);
		vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
	}
	vop_exit_debugfs(vi);
	kfree(vi);
}
/* Bind to the VOP transport device on the mbus; zero entry terminates. */
static struct vop_device_id id_table[] = {
	{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
	{ 0 },
};

static struct vop_driver vop_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = vop_driver_probe,
	.remove = vop_driver_remove,
};
  665. module_vop_driver(vop_driver);
  666. MODULE_DEVICE_TABLE(mbus, id_table);
  667. MODULE_AUTHOR("Intel Corporation");
  668. MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
  669. MODULE_LICENSE("GPL v2");