mtty.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516
  1. /*
  2. * Mediated virtual PCI serial host device driver
  3. *
  4. * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
  5. * Author: Neo Jia <cjia@nvidia.com>
  6. * Kirti Wankhede <kwankhede@nvidia.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. *
  12. * Sample driver that creates mdev device that simulates serial port over PCI
  13. * card.
  14. *
  15. */
  16. #include <linux/init.h>
  17. #include <linux/module.h>
  18. #include <linux/device.h>
  19. #include <linux/kernel.h>
  20. #include <linux/fs.h>
  21. #include <linux/poll.h>
  22. #include <linux/slab.h>
  23. #include <linux/cdev.h>
  24. #include <linux/sched.h>
  25. #include <linux/wait.h>
  26. #include <linux/uuid.h>
  27. #include <linux/vfio.h>
  28. #include <linux/iommu.h>
  29. #include <linux/sysfs.h>
  30. #include <linux/ctype.h>
  31. #include <linux/file.h>
  32. #include <linux/mdev.h>
  33. #include <linux/pci.h>
  34. #include <linux/serial.h>
  35. #include <uapi/linux/serial_reg.h>
  36. #include <linux/eventfd.h>
  37. /*
  38. * #defines
  39. */
  40. #define VERSION_STRING "0.1"
  41. #define DRIVER_AUTHOR "NVIDIA Corporation"
  42. #define MTTY_CLASS_NAME "mtty"
  43. #define MTTY_NAME "mtty"
  44. #define MTTY_STRING_LEN 16
  45. #define MTTY_CONFIG_SPACE_SIZE 0xff
  46. #define MTTY_IO_BAR_SIZE 0x8
  47. #define MTTY_MMIO_BAR_SIZE 0x100000
  48. #define STORE_LE16(addr, val) (*(u16 *)addr = val)
  49. #define STORE_LE32(addr, val) (*(u32 *)addr = val)
  50. #define MAX_FIFO_SIZE 16
  51. #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
  52. #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
  53. #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
  54. #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
  55. ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
  56. #define MTTY_VFIO_PCI_OFFSET_MASK \
  57. (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
  58. #define MAX_MTTYS 24
  59. /*
  60. * Global Structures
  61. */
/*
 * Host-side driver state: char device, sysfs class and the parent
 * device mdev instances hang off.  The trailing declarator defines
 * the single global instance, mtty_dev.
 */
struct mtty_dev {
	dev_t		vd_devt;	/* allocated char-device region */
	struct class	*vd_class;	/* "mtty" class in sysfs */
	struct cdev	vd_cdev;	/* char device backing vd_fops */
	struct idr	vd_idr;		/* id allocator for created mdevs */
	struct device	dev;		/* parent device for mdev core */
} mtty_dev;
/* Placement of one VFIO region (BAR) of a mediated device. */
struct mdev_region_info {
	u64 start;		/* guest base address programmed into the BAR */
	u64 phys_start;		/* host physical base; not used in the code shown */
	u32 size;		/* region size in bytes */
	u64 vfio_offset;	/* offset of this region within the mdev file */
};
#if defined(DEBUG_REGS)
/*
 * UART register mnemonics for the DEBUG_REGS trace in mdev_access(),
 * indexed by register offset 0-7.  The same offset has different
 * meanings on write (wr_reg) and read (rd_reg), per the 16550 layout.
 */
const char *wr_reg[] = {
	"TX",
	"IER",
	"FCR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};

const char *rd_reg[] = {
	"RX",
	"IER",
	"IIR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};
#endif
/* loop back buffer: bytes written to TX are later read back from RX */
struct rxtx {
	u8 fifo[MAX_FIFO_SIZE];	/* circular buffer (power-of-2 sized) */
	u8 head, tail;		/* head: next write slot; tail: next read slot */
	u8 count;		/* bytes currently queued */
};
/* Emulated 16550-compatible UART state for one serial port. */
struct serial_port {
	u8 uart_reg[8];		/* 8 registers */
	struct rxtx rxtx;	/* loop back buffer */
	bool dlab;		/* divisor-latch access bit (from LCR) */
	bool overrun;		/* FIFO overrun pending (reported via LSR/IIR) */
	u16 divisor;		/* baud divisor, latched while DLAB is set */
	u8 fcr;			/* FIFO control register */
	u8 max_fifo_size;	/* MAX_FIFO_SIZE with FIFOs enabled, else 1 */
	u8 intr_trigger_level;	/* interrupt trigger level */
};
/* State of each mdev device */
struct mdev_state {
	int irq_fd;			/* userspace fd behind the active eventfd */
	struct eventfd_ctx *intx_evtfd;	/* signalled to deliver INTx */
	struct eventfd_ctx *msi_evtfd;	/* signalled to deliver MSI */
	int irq_index;			/* active VFIO_PCI_*_IRQ_INDEX; -1 = none */
	u8 *vconfig;			/* virtual PCI config space (MTTY_CONFIG_SPACE_SIZE) */
	struct mutex ops_lock;		/* serializes region access / IRQ setup */
	struct mdev_device *mdev;	/* back-pointer to the mdev core device */
	struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
	u32 bar_mask[VFIO_PCI_NUM_REGIONS]; /* size masks for BAR probing */
	struct list_head next;		/* link in mdev_devices_list */
	struct serial_port s[2];	/* one emulated UART per port */
	struct mutex rxtx_lock;		/* protects the loopback FIFOs in s[] */
	struct vfio_device_info dev_info;
	int nr_ports;			/* 1 or 2, derived from the mdev type name */
};
/* Protects mdev_devices_list. */
struct mutex mdev_list_lock;
/* All mdev_state instances created by this driver (linked via ->next). */
struct list_head mdev_devices_list;

/* Host char-device fops; the sample implements no operations. */
static const struct file_operations vd_fops = {
	.owner          = THIS_MODULE,
};
  135. /* function prototypes */
  136. static int mtty_trigger_interrupt(uuid_le uuid);
  137. /* Helper functions */
  138. static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
  139. {
  140. struct mdev_state *mds;
  141. list_for_each_entry(mds, &mdev_devices_list, next) {
  142. if (uuid_le_cmp(mdev_uuid(mds->mdev), uuid) == 0)
  143. return mds;
  144. }
  145. return NULL;
  146. }
  147. void dump_buffer(u8 *buf, uint32_t count)
  148. {
  149. #if defined(DEBUG)
  150. int i;
  151. pr_info("Buffer:\n");
  152. for (i = 0; i < count; i++) {
  153. pr_info("%2x ", *(buf + i));
  154. if ((i + 1) % 16 == 0)
  155. pr_info("\n");
  156. }
  157. #endif
  158. }
/*
 * Build the initial virtual PCI configuration space for an mdev:
 * IDs, class code (16550-compatible serial), one or two I/O BARs,
 * and vendor-specific bytes.
 */
static void mtty_create_config_space(struct mdev_state *mdev_state)
{
	/* PCI dev ID: the little-endian bytes are ASCII "HCS2" */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

	/* Control: I/O+, Mem-, BusMaster- */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

	/* Status: capabilities list absent */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

	/* Rev ID */
	mdev_state->vconfig[0x8] = 0x10;

	/* programming interface class : 16550-compatible serial controller */
	mdev_state->vconfig[0x9] = 0x02;

	/* Sub class : 00 */
	mdev_state->vconfig[0xa] = 0x00;

	/* Base class : Simple Communication controllers */
	mdev_state->vconfig[0xb] = 0x07;

	/* base address registers */
	/* BAR0: IO space; bar_mask holds the size mask returned on probe */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
	mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

	if (mdev_state->nr_ports == 2) {
		/* BAR1: IO space — only present on two-port devices */
		STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
		mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
	}

	/* Subsystem ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

	mdev_state->vconfig[0x34] = 0x00;   /* Cap Ptr */
	mdev_state->vconfig[0x3d] = 0x01;   /* interrupt pin (INTA#) */

	/* Vendor specific data */
	mdev_state->vconfig[0x40] = 0x23;
	mdev_state->vconfig[0x43] = 0x80;
	mdev_state->vconfig[0x44] = 0x23;
	mdev_state->vconfig[0x48] = 0x23;
	mdev_state->vconfig[0x4c] = 0x23;

	/* bytes 0x60-0x6e spell ASCII "PCI Serial/UART" */
	mdev_state->vconfig[0x60] = 0x50;
	mdev_state->vconfig[0x61] = 0x43;
	mdev_state->vconfig[0x62] = 0x49;
	mdev_state->vconfig[0x63] = 0x20;
	mdev_state->vconfig[0x64] = 0x53;
	mdev_state->vconfig[0x65] = 0x65;
	mdev_state->vconfig[0x66] = 0x72;
	mdev_state->vconfig[0x67] = 0x69;
	mdev_state->vconfig[0x68] = 0x61;
	mdev_state->vconfig[0x69] = 0x6c;
	mdev_state->vconfig[0x6a] = 0x2f;
	mdev_state->vconfig[0x6b] = 0x55;
	mdev_state->vconfig[0x6c] = 0x41;
	mdev_state->vconfig[0x6d] = 0x52;
	mdev_state->vconfig[0x6e] = 0x54;
}
/*
 * Emulate a guest write to PCI configuration space at @offset.
 * Read-only fields are silently discarded.  BAR writes implement the
 * standard sizing protocol: the guest writes 0xffffffff and reads
 * back the size mask stored in bar_mask[].
 */
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 u8 *buf, u32 count)
{
	u32 cfg_addr, bar_mask, bar_index = 0;

	switch (offset) {
	case 0x04: /* device control */
	case 0x06: /* device status */
		/* do nothing */
		break;
	case 0x3c: /* interrupt line */
		mdev_state->vconfig[0x3c] = buf[0];
		break;
	case 0x3d:
		/*
		 * Interrupt Pin is hardwired to INTA.
		 * This field is write protected by hardware
		 */
		break;
	case 0x10: /* BAR0 */
	case 0x14: /* BAR1 */
		if (offset == 0x10)
			bar_index = 0;
		else if (offset == 0x14)
			bar_index = 1;

		/* single-port devices have no BAR1: force it to zero */
		if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
			STORE_LE32(&mdev_state->vconfig[offset], 0);
			break;
		}

		cfg_addr = *(u32 *)buf;
		pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);

		/* size probe: all-ones write reads back the size mask */
		if (cfg_addr == 0xffffffff) {
			bar_mask = mdev_state->bar_mask[bar_index];
			cfg_addr = (cfg_addr & bar_mask);
		}

		/* preserve the read-only low flag bits (I/O space bit) */
		cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	case 0x18: /* BAR2 */
	case 0x1c: /* BAR3 */
	case 0x20: /* BAR4 */
		/* unimplemented BARs are hardwired to zero */
		STORE_LE32(&mdev_state->vconfig[offset], 0);
		break;
	default:
		pr_info("PCI config write @0x%x of %d bytes not handled\n",
			offset, count);
		break;
	}
}
  258. static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
  259. u16 offset, u8 *buf, u32 count)
  260. {
  261. u8 data = *buf;
  262. /* Handle data written by guest */
  263. switch (offset) {
  264. case UART_TX:
  265. /* if DLAB set, data is LSB of divisor */
  266. if (mdev_state->s[index].dlab) {
  267. mdev_state->s[index].divisor |= data;
  268. break;
  269. }
  270. mutex_lock(&mdev_state->rxtx_lock);
  271. /* save in TX buffer */
  272. if (mdev_state->s[index].rxtx.count <
  273. mdev_state->s[index].max_fifo_size) {
  274. mdev_state->s[index].rxtx.fifo[
  275. mdev_state->s[index].rxtx.head] = data;
  276. mdev_state->s[index].rxtx.count++;
  277. CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
  278. mdev_state->s[index].overrun = false;
  279. /*
  280. * Trigger interrupt if receive data interrupt is
  281. * enabled and fifo reached trigger level
  282. */
  283. if ((mdev_state->s[index].uart_reg[UART_IER] &
  284. UART_IER_RDI) &&
  285. (mdev_state->s[index].rxtx.count ==
  286. mdev_state->s[index].intr_trigger_level)) {
  287. /* trigger interrupt */
  288. #if defined(DEBUG_INTR)
  289. pr_err("Serial port %d: Fifo level trigger\n",
  290. index);
  291. #endif
  292. mtty_trigger_interrupt(
  293. mdev_uuid(mdev_state->mdev));
  294. }
  295. } else {
  296. #if defined(DEBUG_INTR)
  297. pr_err("Serial port %d: Buffer Overflow\n", index);
  298. #endif
  299. mdev_state->s[index].overrun = true;
  300. /*
  301. * Trigger interrupt if receiver line status interrupt
  302. * is enabled
  303. */
  304. if (mdev_state->s[index].uart_reg[UART_IER] &
  305. UART_IER_RLSI)
  306. mtty_trigger_interrupt(
  307. mdev_uuid(mdev_state->mdev));
  308. }
  309. mutex_unlock(&mdev_state->rxtx_lock);
  310. break;
  311. case UART_IER:
  312. /* if DLAB set, data is MSB of divisor */
  313. if (mdev_state->s[index].dlab)
  314. mdev_state->s[index].divisor |= (u16)data << 8;
  315. else {
  316. mdev_state->s[index].uart_reg[offset] = data;
  317. mutex_lock(&mdev_state->rxtx_lock);
  318. if ((data & UART_IER_THRI) &&
  319. (mdev_state->s[index].rxtx.head ==
  320. mdev_state->s[index].rxtx.tail)) {
  321. #if defined(DEBUG_INTR)
  322. pr_err("Serial port %d: IER_THRI write\n",
  323. index);
  324. #endif
  325. mtty_trigger_interrupt(
  326. mdev_uuid(mdev_state->mdev));
  327. }
  328. mutex_unlock(&mdev_state->rxtx_lock);
  329. }
  330. break;
  331. case UART_FCR:
  332. mdev_state->s[index].fcr = data;
  333. mutex_lock(&mdev_state->rxtx_lock);
  334. if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
  335. /* clear loop back FIFO */
  336. mdev_state->s[index].rxtx.count = 0;
  337. mdev_state->s[index].rxtx.head = 0;
  338. mdev_state->s[index].rxtx.tail = 0;
  339. }
  340. mutex_unlock(&mdev_state->rxtx_lock);
  341. switch (data & UART_FCR_TRIGGER_MASK) {
  342. case UART_FCR_TRIGGER_1:
  343. mdev_state->s[index].intr_trigger_level = 1;
  344. break;
  345. case UART_FCR_TRIGGER_4:
  346. mdev_state->s[index].intr_trigger_level = 4;
  347. break;
  348. case UART_FCR_TRIGGER_8:
  349. mdev_state->s[index].intr_trigger_level = 8;
  350. break;
  351. case UART_FCR_TRIGGER_14:
  352. mdev_state->s[index].intr_trigger_level = 14;
  353. break;
  354. }
  355. /*
  356. * Set trigger level to 1 otherwise or implement timer with
  357. * timeout of 4 characters and on expiring that timer set
  358. * Recevice data timeout in IIR register
  359. */
  360. mdev_state->s[index].intr_trigger_level = 1;
  361. if (data & UART_FCR_ENABLE_FIFO)
  362. mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
  363. else {
  364. mdev_state->s[index].max_fifo_size = 1;
  365. mdev_state->s[index].intr_trigger_level = 1;
  366. }
  367. break;
  368. case UART_LCR:
  369. if (data & UART_LCR_DLAB) {
  370. mdev_state->s[index].dlab = true;
  371. mdev_state->s[index].divisor = 0;
  372. } else
  373. mdev_state->s[index].dlab = false;
  374. mdev_state->s[index].uart_reg[offset] = data;
  375. break;
  376. case UART_MCR:
  377. mdev_state->s[index].uart_reg[offset] = data;
  378. if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
  379. (data & UART_MCR_OUT2)) {
  380. #if defined(DEBUG_INTR)
  381. pr_err("Serial port %d: MCR_OUT2 write\n", index);
  382. #endif
  383. mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
  384. }
  385. if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
  386. (data & (UART_MCR_RTS | UART_MCR_DTR))) {
  387. #if defined(DEBUG_INTR)
  388. pr_err("Serial port %d: MCR RTS/DTR write\n", index);
  389. #endif
  390. mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
  391. }
  392. break;
  393. case UART_LSR:
  394. case UART_MSR:
  395. /* do nothing */
  396. break;
  397. case UART_SCR:
  398. mdev_state->s[index].uart_reg[offset] = data;
  399. break;
  400. default:
  401. break;
  402. }
  403. }
/*
 * Emulate a guest read of one UART register of serial port @index
 * (@offset is the register number; the result is returned in buf[0]).
 * Reads from RX pop bytes that earlier UART_TX writes queued in the
 * loopback FIFO.
 */
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
			    u16 offset, u8 *buf, u32 count)
{
	/* Handle read requests by guest */
	switch (offset) {
	case UART_RX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)mdev_state->s[index].divisor;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);
		/* return data in tx buffer */
		if (mdev_state->s[index].rxtx.head !=
				mdev_state->s[index].rxtx.tail) {
			*buf = mdev_state->s[index].rxtx.fifo[
						mdev_state->s[index].rxtx.tail];
			mdev_state->s[index].rxtx.count--;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
		}

		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail) {
			/*
			 * Trigger interrupt if tx buffer empty interrupt is
			 * enabled and fifo is empty
			 */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Empty\n", index);
#endif
			if (mdev_state->s[index].uart_reg[UART_IER] &
							UART_IER_THRI)
				mtty_trigger_interrupt(
					mdev_uuid(mdev_state->mdev));
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)(mdev_state->s[index].divisor >> 8);
			break;
		}
		/* only the low 4 interrupt-enable bits are implemented */
		*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
		break;

	case UART_IIR:
	{
		u8 ier = mdev_state->s[index].uart_reg[UART_IER];
		*buf = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* Interrupt priority 1: Parity, overrun, framing or break */
		if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
			*buf |= UART_IIR_RLSI;

		/* Interrupt priority 2: Fifo trigger level reached */
		if ((ier & UART_IER_RDI) &&
		    (mdev_state->s[index].rxtx.count >=
		      mdev_state->s[index].intr_trigger_level))
			*buf |= UART_IIR_RDI;

		/* Interrupt priority 3: transmitter holding register empty */
		if ((ier & UART_IER_THRI) &&
		    (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail))
			*buf |= UART_IIR_THRI;

		/* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
		if ((ier & UART_IER_MSI) &&
		    (mdev_state->s[index].uart_reg[UART_MCR] &
				(UART_MCR_RTS | UART_MCR_DTR)))
			*buf |= UART_IIR_MSI;

		/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
		if (*buf == 0)
			*buf = UART_IIR_NO_INT;

		/* set bit 6 & 7 to be 16550 compatible */
		*buf |= 0xC0;
		mutex_unlock(&mdev_state->rxtx_lock);
	}
	break;

	case UART_LCR:
	case UART_MCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	case UART_LSR:
	{
		u8 lsr = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* at least one char in FIFO */
		if (mdev_state->s[index].rxtx.head !=
				mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_DR;

		/* if FIFO overrun */
		if (mdev_state->s[index].overrun)
			lsr |= UART_LSR_OE;

		/* transmit FIFO empty and transmitter empty */
		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_TEMT | UART_LSR_THRE;

		mutex_unlock(&mdev_state->rxtx_lock);
		*buf = lsr;
		break;
	}
	case UART_MSR:
		*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

		mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and FIFO have space, set CTS bit */
		if (mdev_state->s[index].uart_reg[UART_MCR] &
						UART_MCR_AFE) {
			if (mdev_state->s[index].rxtx.count <
					mdev_state->s[index].max_fifo_size)
				*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		} else
			*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_SCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	default:
		break;
	}
}
/*
 * Re-read the guest-programmed BAR bases out of the virtual config
 * space and cache them in region_info[].start.
 *
 * NOTE(review): the address is masked with the memory-BAR masks
 * (PCI_BASE_ADDRESS_MEM_*) even though this device exposes I/O BARs,
 * whose flag field is only 2 bits wide — confirm this is intended.
 */
static void mdev_read_base(struct mdev_state *mdev_state)
{
	int index, pos;
	u32 start_lo, start_hi;
	u32 mem_type;

	pos = PCI_BASE_ADDRESS_0;

	for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {
		/* skip BARs this device does not implement */
		if (!mdev_state->region_info[index].size)
			continue;

		start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
		mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

		switch (mem_type) {
		case PCI_BASE_ADDRESS_MEM_TYPE_64:
			/* 64-bit BAR: the high dword follows, so this BAR
			 * consumes two config-space slots */
			start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
			pos += 4;
			break;
		case PCI_BASE_ADDRESS_MEM_TYPE_32:
		case PCI_BASE_ADDRESS_MEM_TYPE_1M:
			/* 1M mem BAR treated as 32-bit BAR */
		default:
			/* mem unknown type treated as 32-bit BAR */
			start_hi = 0;
			break;
		}
		pos += 4;
		mdev_state->region_info[index].start = ((u64)start_hi << 32) |
							start_lo;
	}
}
  552. static ssize_t mdev_access(struct mdev_device *mdev, u8 *buf, size_t count,
  553. loff_t pos, bool is_write)
  554. {
  555. struct mdev_state *mdev_state;
  556. unsigned int index;
  557. loff_t offset;
  558. int ret = 0;
  559. if (!mdev || !buf)
  560. return -EINVAL;
  561. mdev_state = mdev_get_drvdata(mdev);
  562. if (!mdev_state) {
  563. pr_err("%s mdev_state not found\n", __func__);
  564. return -EINVAL;
  565. }
  566. mutex_lock(&mdev_state->ops_lock);
  567. index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
  568. offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
  569. switch (index) {
  570. case VFIO_PCI_CONFIG_REGION_INDEX:
  571. #if defined(DEBUG)
  572. pr_info("%s: PCI config space %s at offset 0x%llx\n",
  573. __func__, is_write ? "write" : "read", offset);
  574. #endif
  575. if (is_write) {
  576. dump_buffer(buf, count);
  577. handle_pci_cfg_write(mdev_state, offset, buf, count);
  578. } else {
  579. memcpy(buf, (mdev_state->vconfig + offset), count);
  580. dump_buffer(buf, count);
  581. }
  582. break;
  583. case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
  584. if (!mdev_state->region_info[index].start)
  585. mdev_read_base(mdev_state);
  586. if (is_write) {
  587. dump_buffer(buf, count);
  588. #if defined(DEBUG_REGS)
  589. pr_info("%s: BAR%d WR @0x%llx %s val:0x%02x dlab:%d\n",
  590. __func__, index, offset, wr_reg[offset],
  591. *buf, mdev_state->s[index].dlab);
  592. #endif
  593. handle_bar_write(index, mdev_state, offset, buf, count);
  594. } else {
  595. handle_bar_read(index, mdev_state, offset, buf, count);
  596. dump_buffer(buf, count);
  597. #if defined(DEBUG_REGS)
  598. pr_info("%s: BAR%d RD @0x%llx %s val:0x%02x dlab:%d\n",
  599. __func__, index, offset, rd_reg[offset],
  600. *buf, mdev_state->s[index].dlab);
  601. #endif
  602. }
  603. break;
  604. default:
  605. ret = -1;
  606. goto accessfailed;
  607. }
  608. ret = count;
  609. accessfailed:
  610. mutex_unlock(&mdev_state->ops_lock);
  611. return ret;
  612. }
  613. int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
  614. {
  615. struct mdev_state *mdev_state;
  616. char name[MTTY_STRING_LEN];
  617. int nr_ports = 0, i;
  618. if (!mdev)
  619. return -EINVAL;
  620. for (i = 0; i < 2; i++) {
  621. snprintf(name, MTTY_STRING_LEN, "%s-%d",
  622. dev_driver_string(mdev_parent_dev(mdev)), i + 1);
  623. if (!strcmp(kobj->name, name)) {
  624. nr_ports = i + 1;
  625. break;
  626. }
  627. }
  628. if (!nr_ports)
  629. return -EINVAL;
  630. mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
  631. if (mdev_state == NULL)
  632. return -ENOMEM;
  633. mdev_state->nr_ports = nr_ports;
  634. mdev_state->irq_index = -1;
  635. mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
  636. mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
  637. mutex_init(&mdev_state->rxtx_lock);
  638. mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);
  639. if (mdev_state->vconfig == NULL) {
  640. kfree(mdev_state);
  641. return -ENOMEM;
  642. }
  643. mutex_init(&mdev_state->ops_lock);
  644. mdev_state->mdev = mdev;
  645. mdev_set_drvdata(mdev, mdev_state);
  646. mtty_create_config_space(mdev_state);
  647. mutex_lock(&mdev_list_lock);
  648. list_add(&mdev_state->next, &mdev_devices_list);
  649. mutex_unlock(&mdev_list_lock);
  650. return 0;
  651. }
  652. int mtty_remove(struct mdev_device *mdev)
  653. {
  654. struct mdev_state *mds, *tmp_mds;
  655. struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
  656. int ret = -EINVAL;
  657. mutex_lock(&mdev_list_lock);
  658. list_for_each_entry_safe(mds, tmp_mds, &mdev_devices_list, next) {
  659. if (mdev_state == mds) {
  660. list_del(&mdev_state->next);
  661. mdev_set_drvdata(mdev, NULL);
  662. kfree(mdev_state->vconfig);
  663. kfree(mdev_state);
  664. ret = 0;
  665. break;
  666. }
  667. }
  668. mutex_unlock(&mdev_list_lock);
  669. return ret;
  670. }
  671. int mtty_reset(struct mdev_device *mdev)
  672. {
  673. struct mdev_state *mdev_state;
  674. if (!mdev)
  675. return -EINVAL;
  676. mdev_state = mdev_get_drvdata(mdev);
  677. if (!mdev_state)
  678. return -EINVAL;
  679. pr_info("%s: called\n", __func__);
  680. return 0;
  681. }
  682. ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
  683. loff_t *ppos)
  684. {
  685. unsigned int done = 0;
  686. int ret;
  687. while (count) {
  688. size_t filled;
  689. if (count >= 4 && !(*ppos % 4)) {
  690. u32 val;
  691. ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
  692. *ppos, false);
  693. if (ret <= 0)
  694. goto read_err;
  695. if (copy_to_user(buf, &val, sizeof(val)))
  696. goto read_err;
  697. filled = 4;
  698. } else if (count >= 2 && !(*ppos % 2)) {
  699. u16 val;
  700. ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
  701. *ppos, false);
  702. if (ret <= 0)
  703. goto read_err;
  704. if (copy_to_user(buf, &val, sizeof(val)))
  705. goto read_err;
  706. filled = 2;
  707. } else {
  708. u8 val;
  709. ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
  710. *ppos, false);
  711. if (ret <= 0)
  712. goto read_err;
  713. if (copy_to_user(buf, &val, sizeof(val)))
  714. goto read_err;
  715. filled = 1;
  716. }
  717. count -= filled;
  718. done += filled;
  719. *ppos += filled;
  720. buf += filled;
  721. }
  722. return done;
  723. read_err:
  724. return -EFAULT;
  725. }
  726. ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
  727. size_t count, loff_t *ppos)
  728. {
  729. unsigned int done = 0;
  730. int ret;
  731. while (count) {
  732. size_t filled;
  733. if (count >= 4 && !(*ppos % 4)) {
  734. u32 val;
  735. if (copy_from_user(&val, buf, sizeof(val)))
  736. goto write_err;
  737. ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
  738. *ppos, true);
  739. if (ret <= 0)
  740. goto write_err;
  741. filled = 4;
  742. } else if (count >= 2 && !(*ppos % 2)) {
  743. u16 val;
  744. if (copy_from_user(&val, buf, sizeof(val)))
  745. goto write_err;
  746. ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
  747. *ppos, true);
  748. if (ret <= 0)
  749. goto write_err;
  750. filled = 2;
  751. } else {
  752. u8 val;
  753. if (copy_from_user(&val, buf, sizeof(val)))
  754. goto write_err;
  755. ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
  756. *ppos, true);
  757. if (ret <= 0)
  758. goto write_err;
  759. filled = 1;
  760. }
  761. count -= filled;
  762. done += filled;
  763. *ppos += filled;
  764. buf += filled;
  765. }
  766. return done;
  767. write_err:
  768. return -EFAULT;
  769. }
/*
 * VFIO_DEVICE_SET_IRQS backend: wire up or tear down the eventfds
 * through which the device delivers interrupts to userspace.
 *
 * @flags: VFIO_IRQ_SET_* action and data-type flags
 * @index: VFIO_PCI_*_IRQ_INDEX being configured
 * @start, @count: sub-index range (not used by this sample)
 * @data:  with VFIO_IRQ_SET_DATA_EVENTFD, points to the eventfd fd
 *
 * Returns 0 on success or a negative errno.  Serialized by ops_lock.
 */
static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
			 unsigned int index, unsigned int start,
			 unsigned int count, void *data)
{
	int ret = 0;
	struct mdev_state *mdev_state;

	if (!mdev)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);
	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* mask/unmask not emulated */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
		{
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				pr_info("%s: disable INTx\n", __func__);
				/* NOTE(review): intx_evtfd is put but not
				 * set to NULL — a later trigger could use a
				 * stale context; verify against upstream. */
				if (mdev_state->intx_evtfd)
					eventfd_ctx_put(mdev_state->intx_evtfd);
				break;
			}

			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;

				if (fd > 0) {
					struct eventfd_ctx *evt;

					evt = eventfd_ctx_fdget(fd);
					if (IS_ERR(evt)) {
						ret = PTR_ERR(evt);
						break;
					}
					mdev_state->intx_evtfd = evt;
					mdev_state->irq_fd = fd;
					mdev_state->irq_index = index;
					break;
				}
			}
			break;
		}
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* mask/unmask not emulated */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				if (mdev_state->msi_evtfd)
					eventfd_ctx_put(mdev_state->msi_evtfd);
				pr_info("%s: disable MSI\n", __func__);
				/* fall back to INTx when MSI is disabled */
				mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
				break;
			}
			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;
				struct eventfd_ctx *evt;

				if (fd <= 0)
					break;

				/* refuse to replace an installed eventfd */
				if (mdev_state->msi_evtfd)
					break;

				evt = eventfd_ctx_fdget(fd);
				if (IS_ERR(evt)) {
					ret = PTR_ERR(evt);
					break;
				}
				mdev_state->msi_evtfd = evt;
				mdev_state->irq_fd = fd;
				mdev_state->irq_index = index;
			}
			break;
		}
		break;
	case VFIO_PCI_MSIX_IRQ_INDEX:
		/* not implemented: log and report success */
		pr_info("%s: MSIX_IRQ\n", __func__);
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		pr_info("%s: ERR_IRQ\n", __func__);
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		pr_info("%s: REQ_IRQ\n", __func__);
		break;
	}

	mutex_unlock(&mdev_state->ops_lock);
	return ret;
}
  860. static int mtty_trigger_interrupt(uuid_le uuid)
  861. {
  862. int ret = -1;
  863. struct mdev_state *mdev_state;
  864. mdev_state = find_mdev_state_by_uuid(uuid);
  865. if (!mdev_state) {
  866. pr_info("%s: mdev not found\n", __func__);
  867. return -EINVAL;
  868. }
  869. if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
  870. (!mdev_state->msi_evtfd))
  871. return -EINVAL;
  872. else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
  873. (!mdev_state->intx_evtfd)) {
  874. pr_info("%s: Intr eventfd not found\n", __func__);
  875. return -EINVAL;
  876. }
  877. if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
  878. ret = eventfd_signal(mdev_state->msi_evtfd, 1);
  879. else
  880. ret = eventfd_signal(mdev_state->intx_evtfd, 1);
  881. #if defined(DEBUG_INTR)
  882. pr_info("Intx triggered\n");
  883. #endif
  884. if (ret != 1)
  885. pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);
  886. return ret;
  887. }
  888. int mtty_get_region_info(struct mdev_device *mdev,
  889. struct vfio_region_info *region_info,
  890. u16 *cap_type_id, void **cap_type)
  891. {
  892. unsigned int size = 0;
  893. struct mdev_state *mdev_state;
  894. u32 bar_index;
  895. if (!mdev)
  896. return -EINVAL;
  897. mdev_state = mdev_get_drvdata(mdev);
  898. if (!mdev_state)
  899. return -EINVAL;
  900. bar_index = region_info->index;
  901. if (bar_index >= VFIO_PCI_NUM_REGIONS)
  902. return -EINVAL;
  903. mutex_lock(&mdev_state->ops_lock);
  904. switch (bar_index) {
  905. case VFIO_PCI_CONFIG_REGION_INDEX:
  906. size = MTTY_CONFIG_SPACE_SIZE;
  907. break;
  908. case VFIO_PCI_BAR0_REGION_INDEX:
  909. size = MTTY_IO_BAR_SIZE;
  910. break;
  911. case VFIO_PCI_BAR1_REGION_INDEX:
  912. if (mdev_state->nr_ports == 2)
  913. size = MTTY_IO_BAR_SIZE;
  914. break;
  915. default:
  916. size = 0;
  917. break;
  918. }
  919. mdev_state->region_info[bar_index].size = size;
  920. mdev_state->region_info[bar_index].vfio_offset =
  921. MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
  922. region_info->size = size;
  923. region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
  924. region_info->flags = VFIO_REGION_INFO_FLAG_READ |
  925. VFIO_REGION_INFO_FLAG_WRITE;
  926. mutex_unlock(&mdev_state->ops_lock);
  927. return 0;
  928. }
  929. int mtty_get_irq_info(struct mdev_device *mdev, struct vfio_irq_info *irq_info)
  930. {
  931. switch (irq_info->index) {
  932. case VFIO_PCI_INTX_IRQ_INDEX:
  933. case VFIO_PCI_MSI_IRQ_INDEX:
  934. case VFIO_PCI_REQ_IRQ_INDEX:
  935. break;
  936. default:
  937. return -EINVAL;
  938. }
  939. irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
  940. irq_info->count = 1;
  941. if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
  942. irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
  943. VFIO_IRQ_INFO_AUTOMASKED);
  944. else
  945. irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
  946. return 0;
  947. }
  948. int mtty_get_device_info(struct mdev_device *mdev,
  949. struct vfio_device_info *dev_info)
  950. {
  951. dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
  952. dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
  953. dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
  954. return 0;
  955. }
/*
 * VFIO mdev ioctl dispatcher for the emulated serial device.
 *
 * For each command: copy the fixed-size header from user space, validate
 * argsz, delegate to the matching helper, and copy results back.
 * Returns 0 on success, a negative errno on failure, or -ENOTTY for
 * unrecognized commands.
 */
static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
			unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state;

	if (!mdev)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -ENODEV;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		/* Only the fields up to num_irqs are defined for this ioctl. */
		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_device_info(mdev, &info);
		if (ret)
			return ret;

		/*
		 * Cache the reported info; dev_info.num_irqs bounds the
		 * index checks in GET_IRQ_INFO and SET_IRQS below.
		 * NOTE(review): written without ops_lock held — presumably
		 * concurrent ioctls are not a concern for this sample;
		 * confirm before reusing this pattern.
		 */
		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;	/* accepted but unused by the helper */
		void *cap_type = NULL;	/* accepted but unused by the helper */

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_region_info(mdev, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		/* Index must stay within what GET_INFO advertised. */
		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mtty_get_irq_info(mdev, &info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL, *ptr = NULL;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		/* Validates flags/index/start/count; computes payload size. */
		ret = vfio_set_irqs_validate_and_prepare(&hdr,
						mdev_state->dev_info.num_irqs,
						VFIO_PCI_NUM_IRQS,
						&data_size);
		if (ret)
			return ret;

		if (data_size) {
			/* Variable payload (eventfds/bitmaps) follows hdr. */
			ptr = data = memdup_user((void __user *)(arg + minsz),
						 data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
				    hdr.count, data);

		kfree(ptr);
		return ret;
	}
	case VFIO_DEVICE_RESET:
		return mtty_reset(mdev);
	}
	return -ENOTTY;
}
  1048. int mtty_open(struct mdev_device *mdev)
  1049. {
  1050. pr_info("%s\n", __func__);
  1051. return 0;
  1052. }
  1053. void mtty_close(struct mdev_device *mdev)
  1054. {
  1055. pr_info("%s\n", __func__);
  1056. }
  1057. static ssize_t
  1058. sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
  1059. char *buf)
  1060. {
  1061. return sprintf(buf, "This is phy device\n");
  1062. }
/* Read-only sysfs attribute exposed on the parent (physical) device. */
static DEVICE_ATTR_RO(sample_mtty_dev);

/* NULL-terminated attribute list for the "mtty_dev" group below. */
static struct attribute *mtty_dev_attrs[] = {
	&dev_attr_sample_mtty_dev.attr,
	NULL,
};

static const struct attribute_group mtty_dev_group = {
	.name = "mtty_dev",
	.attrs = mtty_dev_attrs,
};

/* Handed to mdev core via mdev_parent_ops.dev_attr_groups. */
const struct attribute_group *mtty_dev_groups[] = {
	&mtty_dev_group,
	NULL,
};
  1076. static ssize_t
  1077. sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
  1078. char *buf)
  1079. {
  1080. if (mdev_from_dev(dev))
  1081. return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
  1082. return sprintf(buf, "\n");
  1083. }
/* Read-only sysfs attribute exposed on every mdev child device. */
static DEVICE_ATTR_RO(sample_mdev_dev);

/* NULL-terminated attribute list for the "vendor" group below. */
static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_sample_mdev_dev.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name = "vendor",
	.attrs = mdev_dev_attrs,
};

/* Handed to mdev core via mdev_parent_ops.mdev_attr_groups. */
const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};
  1097. static ssize_t
  1098. name_show(struct kobject *kobj, struct device *dev, char *buf)
  1099. {
  1100. char name[MTTY_STRING_LEN];
  1101. int i;
  1102. const char *name_str[2] = {"Single port serial", "Dual port serial"};
  1103. for (i = 0; i < 2; i++) {
  1104. snprintf(name, MTTY_STRING_LEN, "%s-%d",
  1105. dev_driver_string(dev), i + 1);
  1106. if (!strcmp(kobj->name, name))
  1107. return sprintf(buf, "%s\n", name_str[i]);
  1108. }
  1109. return -EINVAL;
  1110. }
  1111. MDEV_TYPE_ATTR_RO(name);
  1112. static ssize_t
  1113. available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
  1114. {
  1115. char name[MTTY_STRING_LEN];
  1116. int i;
  1117. struct mdev_state *mds;
  1118. int ports = 0, used = 0;
  1119. for (i = 0; i < 2; i++) {
  1120. snprintf(name, MTTY_STRING_LEN, "%s-%d",
  1121. dev_driver_string(dev), i + 1);
  1122. if (!strcmp(kobj->name, name)) {
  1123. ports = i + 1;
  1124. break;
  1125. }
  1126. }
  1127. if (!ports)
  1128. return -EINVAL;
  1129. list_for_each_entry(mds, &mdev_devices_list, next)
  1130. used += mds->nr_ports;
  1131. return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
  1132. }
  1133. MDEV_TYPE_ATTR_RO(available_instances);
  1134. static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
  1135. char *buf)
  1136. {
  1137. return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
  1138. }
  1139. MDEV_TYPE_ATTR_RO(device_api);
/* Per-type attributes; both supported types share the same set. */
static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

/* Type "1": single-port serial device. */
static struct attribute_group mdev_type_group1 = {
	.name = "1",
	.attrs = mdev_types_attrs,
};

/* Type "2": dual-port serial device. */
static struct attribute_group mdev_type_group2 = {
	.name = "2",
	.attrs = mdev_types_attrs,
};

/* NULL-terminated supported-type list handed to mdev core. */
struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	NULL,
};
/* Parent-device callbacks registered with mdev core. */
static const struct mdev_parent_ops mdev_fops = {
	.owner                  = THIS_MODULE,
	.dev_attr_groups        = mtty_dev_groups,	/* on the parent dev */
	.mdev_attr_groups       = mdev_dev_groups,	/* on each mdev */
	.supported_type_groups  = mdev_type_groups,
	.create                 = mtty_create,
	.remove			= mtty_remove,
	.open                   = mtty_open,
	.release                = mtty_close,
	.read                   = mtty_read,
	.write                  = mtty_write,
	.ioctl		        = mtty_ioctl,
};
  1172. static void mtty_device_release(struct device *dev)
  1173. {
  1174. dev_dbg(dev, "mtty: released\n");
  1175. }
  1176. static int __init mtty_dev_init(void)
  1177. {
  1178. int ret = 0;
  1179. pr_info("mtty_dev: %s\n", __func__);
  1180. memset(&mtty_dev, 0, sizeof(mtty_dev));
  1181. idr_init(&mtty_dev.vd_idr);
  1182. ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK, MTTY_NAME);
  1183. if (ret < 0) {
  1184. pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
  1185. return ret;
  1186. }
  1187. cdev_init(&mtty_dev.vd_cdev, &vd_fops);
  1188. cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK);
  1189. pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
  1190. mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
  1191. if (IS_ERR(mtty_dev.vd_class)) {
  1192. pr_err("Error: failed to register mtty_dev class\n");
  1193. ret = PTR_ERR(mtty_dev.vd_class);
  1194. goto failed1;
  1195. }
  1196. mtty_dev.dev.class = mtty_dev.vd_class;
  1197. mtty_dev.dev.release = mtty_device_release;
  1198. dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
  1199. ret = device_register(&mtty_dev.dev);
  1200. if (ret)
  1201. goto failed2;
  1202. ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
  1203. if (ret)
  1204. goto failed3;
  1205. mutex_init(&mdev_list_lock);
  1206. INIT_LIST_HEAD(&mdev_devices_list);
  1207. goto all_done;
  1208. failed3:
  1209. device_unregister(&mtty_dev.dev);
  1210. failed2:
  1211. class_destroy(mtty_dev.vd_class);
  1212. failed1:
  1213. cdev_del(&mtty_dev.vd_cdev);
  1214. unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
  1215. all_done:
  1216. return ret;
  1217. }
/*
 * Module exit: unwind everything mtty_dev_init() set up, in reverse
 * order of creation.
 */
static void __exit mtty_dev_exit(void)
{
	/*
	 * NOTE(review): clearing dev.bus before unregistering looks like a
	 * workaround to keep device teardown off any bus-specific paths —
	 * confirm against the mdev core behavior before relying on it.
	 */
	mtty_dev.dev.bus = NULL;
	mdev_unregister_device(&mtty_dev.dev);

	device_unregister(&mtty_dev.dev);
	idr_destroy(&mtty_dev.vd_idr);
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
	class_destroy(mtty_dev.vd_class);
	mtty_dev.vd_class = NULL;
	pr_info("mtty_dev: Unloaded!\n");
}
/* Module entry/exit points and modinfo metadata. */
module_init(mtty_dev_init)
module_exit(mtty_dev_exit)

MODULE_LICENSE("GPL v2");
MODULE_INFO(supported, "Test driver that simulate serial port over PCI");
MODULE_VERSION(VERSION_STRING);
MODULE_AUTHOR(DRIVER_AUTHOR);