aia_imsic.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *      Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/kvm_host.h>
#include <linux/math.h>
#include <linux/spinlock.h>
#include <linux/swab.h>
#include <kvm/iodev.h>
#include <asm/csr.h>

#define IMSIC_MAX_EIX   (IMSIC_MAX_ID / BITS_PER_TYPE(u64))
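
/*
 * A Memory Resident Interrupt File (MRIF) is an in-memory image of an
 * IMSIC register file. Each imsic_mrif_eix entry shadows one 64-bit
 * pair of external interrupt pending (eip) and enable (eie) registers,
 * so together with eidelivery and eithreshold a MRIF can hold the
 * complete state of up to IMSIC_MAX_ID interrupt identities.
 */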
struct imsic_mrif_eix {
        unsigned long eip[BITS_PER_TYPE(u64) / BITS_PER_LONG];
        unsigned long eie[BITS_PER_TYPE(u64) / BITS_PER_LONG];
};

struct imsic_mrif {
        struct imsic_mrif_eix eix[IMSIC_MAX_EIX];
        unsigned long eithreshold;
        unsigned long eidelivery;
};

struct imsic {
        struct kvm_io_device iodev;

        u32 nr_msis;
        u32 nr_eix;
        u32 nr_hw_eix;

        /*
         * At any point in time, the register state is in
         * one of the following places:
         *
         * 1) Hardware: IMSIC VS-file (vsfile_cpu >= 0)
         * 2) Software: IMSIC SW-file (vsfile_cpu < 0)
         */

        /* IMSIC VS-file */
        rwlock_t vsfile_lock;
        int vsfile_cpu;
        int vsfile_hgei;
        void __iomem *vsfile_va;
        phys_addr_t vsfile_pa;

        /* IMSIC SW-file */
        struct imsic_mrif *swfile;
        phys_addr_t swfile_pa;
        raw_spinlock_t swfile_extirq_lock;
};

#define imsic_vs_csr_read(__c)                          \
({                                                      \
        unsigned long __r;                              \
        csr_write(CSR_VSISELECT, __c);                  \
        __r = csr_read(CSR_VSIREG);                     \
        __r;                                            \
})

#define imsic_read_switchcase(__ireg)                   \
        case __ireg:                                    \
                return imsic_vs_csr_read(__ireg);
#define imsic_read_switchcase_2(__ireg)                 \
        imsic_read_switchcase(__ireg + 0)               \
        imsic_read_switchcase(__ireg + 1)
#define imsic_read_switchcase_4(__ireg)                 \
        imsic_read_switchcase_2(__ireg + 0)             \
        imsic_read_switchcase_2(__ireg + 2)
#define imsic_read_switchcase_8(__ireg)                 \
        imsic_read_switchcase_4(__ireg + 0)             \
        imsic_read_switchcase_4(__ireg + 4)
#define imsic_read_switchcase_16(__ireg)                \
        imsic_read_switchcase_8(__ireg + 0)             \
        imsic_read_switchcase_8(__ireg + 8)
#define imsic_read_switchcase_32(__ireg)                \
        imsic_read_switchcase_16(__ireg + 0)            \
        imsic_read_switchcase_16(__ireg + 16)
#define imsic_read_switchcase_64(__ireg)                \
        imsic_read_switchcase_32(__ireg + 0)            \
        imsic_read_switchcase_32(__ireg + 32)
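
/*
 * The *_switchcase_64() macros above expand, inside a switch statement,
 * into 64 consecutive case labels. For example,
 * imsic_read_switchcase_64(IMSIC_EIP0) becomes:
 *
 *      case IMSIC_EIP0 + 0: return imsic_vs_csr_read(IMSIC_EIP0 + 0);
 *      case IMSIC_EIP0 + 1: return imsic_vs_csr_read(IMSIC_EIP0 + 1);
 *      ...
 *      case IMSIC_EIP0 + 63: return imsic_vs_csr_read(IMSIC_EIP0 + 63);
 *
 * so every indirect CSR access is performed with a compile-time
 * constant iselect value. The swap, write, and set macro families
 * below follow the same pattern.
 */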
static unsigned long imsic_eix_read(int ireg)
{
        switch (ireg) {
        imsic_read_switchcase_64(IMSIC_EIP0)
        imsic_read_switchcase_64(IMSIC_EIE0)
        }

        return 0;
}

#define imsic_vs_csr_swap(__c, __v)                     \
({                                                      \
        unsigned long __r;                              \
        csr_write(CSR_VSISELECT, __c);                  \
        __r = csr_swap(CSR_VSIREG, __v);                \
        __r;                                            \
})

#define imsic_swap_switchcase(__ireg, __v)              \
        case __ireg:                                    \
                return imsic_vs_csr_swap(__ireg, __v);
#define imsic_swap_switchcase_2(__ireg, __v)            \
        imsic_swap_switchcase(__ireg + 0, __v)          \
        imsic_swap_switchcase(__ireg + 1, __v)
#define imsic_swap_switchcase_4(__ireg, __v)            \
        imsic_swap_switchcase_2(__ireg + 0, __v)        \
        imsic_swap_switchcase_2(__ireg + 2, __v)
#define imsic_swap_switchcase_8(__ireg, __v)            \
        imsic_swap_switchcase_4(__ireg + 0, __v)        \
        imsic_swap_switchcase_4(__ireg + 4, __v)
#define imsic_swap_switchcase_16(__ireg, __v)           \
        imsic_swap_switchcase_8(__ireg + 0, __v)        \
        imsic_swap_switchcase_8(__ireg + 8, __v)
#define imsic_swap_switchcase_32(__ireg, __v)           \
        imsic_swap_switchcase_16(__ireg + 0, __v)       \
        imsic_swap_switchcase_16(__ireg + 16, __v)
#define imsic_swap_switchcase_64(__ireg, __v)           \
        imsic_swap_switchcase_32(__ireg + 0, __v)       \
        imsic_swap_switchcase_32(__ireg + 32, __v)

static unsigned long imsic_eix_swap(int ireg, unsigned long val)
{
        switch (ireg) {
        imsic_swap_switchcase_64(IMSIC_EIP0, val)
        imsic_swap_switchcase_64(IMSIC_EIE0, val)
        }

        return 0;
}

#define imsic_vs_csr_write(__c, __v)                    \
do {                                                    \
        csr_write(CSR_VSISELECT, __c);                  \
        csr_write(CSR_VSIREG, __v);                     \
} while (0)

#define imsic_write_switchcase(__ireg, __v)             \
        case __ireg:                                    \
                imsic_vs_csr_write(__ireg, __v);        \
                break;
#define imsic_write_switchcase_2(__ireg, __v)           \
        imsic_write_switchcase(__ireg + 0, __v)         \
        imsic_write_switchcase(__ireg + 1, __v)
#define imsic_write_switchcase_4(__ireg, __v)           \
        imsic_write_switchcase_2(__ireg + 0, __v)       \
        imsic_write_switchcase_2(__ireg + 2, __v)
#define imsic_write_switchcase_8(__ireg, __v)           \
        imsic_write_switchcase_4(__ireg + 0, __v)       \
        imsic_write_switchcase_4(__ireg + 4, __v)
#define imsic_write_switchcase_16(__ireg, __v)          \
        imsic_write_switchcase_8(__ireg + 0, __v)       \
        imsic_write_switchcase_8(__ireg + 8, __v)
#define imsic_write_switchcase_32(__ireg, __v)          \
        imsic_write_switchcase_16(__ireg + 0, __v)      \
        imsic_write_switchcase_16(__ireg + 16, __v)
#define imsic_write_switchcase_64(__ireg, __v)          \
        imsic_write_switchcase_32(__ireg + 0, __v)      \
        imsic_write_switchcase_32(__ireg + 32, __v)

static void imsic_eix_write(int ireg, unsigned long val)
{
        switch (ireg) {
        imsic_write_switchcase_64(IMSIC_EIP0, val)
        imsic_write_switchcase_64(IMSIC_EIE0, val)
        }
}

#define imsic_vs_csr_set(__c, __v)                      \
do {                                                    \
        csr_write(CSR_VSISELECT, __c);                  \
        csr_set(CSR_VSIREG, __v);                       \
} while (0)

#define imsic_set_switchcase(__ireg, __v)               \
        case __ireg:                                    \
                imsic_vs_csr_set(__ireg, __v);          \
                break;
#define imsic_set_switchcase_2(__ireg, __v)             \
        imsic_set_switchcase(__ireg + 0, __v)           \
        imsic_set_switchcase(__ireg + 1, __v)
#define imsic_set_switchcase_4(__ireg, __v)             \
        imsic_set_switchcase_2(__ireg + 0, __v)         \
        imsic_set_switchcase_2(__ireg + 2, __v)
#define imsic_set_switchcase_8(__ireg, __v)             \
        imsic_set_switchcase_4(__ireg + 0, __v)         \
        imsic_set_switchcase_4(__ireg + 4, __v)
#define imsic_set_switchcase_16(__ireg, __v)            \
        imsic_set_switchcase_8(__ireg + 0, __v)         \
        imsic_set_switchcase_8(__ireg + 8, __v)
#define imsic_set_switchcase_32(__ireg, __v)            \
        imsic_set_switchcase_16(__ireg + 0, __v)        \
        imsic_set_switchcase_16(__ireg + 16, __v)
#define imsic_set_switchcase_64(__ireg, __v)            \
        imsic_set_switchcase_32(__ireg + 0, __v)        \
        imsic_set_switchcase_32(__ireg + 32, __v)

static void imsic_eix_set(int ireg, unsigned long val)
{
        switch (ireg) {
        imsic_set_switchcase_64(IMSIC_EIP0, val)
        imsic_set_switchcase_64(IMSIC_EIE0, val)
        }
}
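
/*
 * Atomically read-modify-write one word of a MRIF:
 *
 *      old = *ptr;
 *      *ptr = (old & ~wr_mask) | (new_val & wr_mask);
 *      return old;
 *
 * implemented as an LR/SC retry loop so that SW-file words can be
 * updated safely by concurrent readers and writers.
 */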
static unsigned long imsic_mrif_atomic_rmw(struct imsic_mrif *mrif,
                                           unsigned long *ptr,
                                           unsigned long new_val,
                                           unsigned long wr_mask)
{
        unsigned long old_val = 0, tmp = 0;

        __asm__ __volatile__ (
                "0:     lr.w.aq   %1, %0\n"
                "       and       %2, %1, %3\n"
                "       or        %2, %2, %4\n"
                "       sc.w.rl   %2, %2, %0\n"
                "       bnez      %2, 0b"
                : "+A" (*ptr), "+r" (old_val), "+r" (tmp)
                : "r" (~wr_mask), "r" (new_val & wr_mask)
                : "memory");

        return old_val;
}

static unsigned long imsic_mrif_atomic_or(struct imsic_mrif *mrif,
                                          unsigned long *ptr,
                                          unsigned long val)
{
        return atomic_long_fetch_or(val, (atomic_long_t *)ptr);
}

#define imsic_mrif_atomic_write(__mrif, __ptr, __new_val)       \
        imsic_mrif_atomic_rmw(__mrif, __ptr, __new_val, -1UL)
#define imsic_mrif_atomic_read(__mrif, __ptr)                   \
        imsic_mrif_atomic_or(__mrif, __ptr, 0)
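
/*
 * Find the highest-priority (i.e. lowest-numbered) interrupt identity
 * that is both pending and enabled. Identities greater than or equal to
 * a non-zero eithreshold are ignored. Returns the *topei CSR encoding
 * (identity << TOPEI_ID_SHIFT) | identity, or zero when nothing is
 * pending. For example, with eithreshold == 0 and identity 5 being the
 * lowest identity with both its eip and eie bits set, the result is
 * (5 << TOPEI_ID_SHIFT) | 5.
 */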
static u32 imsic_mrif_topei(struct imsic_mrif *mrif, u32 nr_eix, u32 nr_msis)
{
        struct imsic_mrif_eix *eix;
        u32 i, imin, imax, ei, max_msi;
        unsigned long eipend[BITS_PER_TYPE(u64) / BITS_PER_LONG];
        unsigned long eithreshold = imsic_mrif_atomic_read(mrif,
                                                           &mrif->eithreshold);

        max_msi = (eithreshold && (eithreshold <= nr_msis)) ?
                   eithreshold : nr_msis;
        for (ei = 0; ei < nr_eix; ei++) {
                eix = &mrif->eix[ei];
                eipend[0] = imsic_mrif_atomic_read(mrif, &eix->eie[0]) &
                            imsic_mrif_atomic_read(mrif, &eix->eip[0]);
#ifdef CONFIG_32BIT
                eipend[1] = imsic_mrif_atomic_read(mrif, &eix->eie[1]) &
                            imsic_mrif_atomic_read(mrif, &eix->eip[1]);
                if (!eipend[0] && !eipend[1])
#else
                if (!eipend[0])
#endif
                        continue;

                imin = ei * BITS_PER_TYPE(u64);
                imax = ((imin + BITS_PER_TYPE(u64)) < max_msi) ?
                        imin + BITS_PER_TYPE(u64) : max_msi;
                for (i = (!imin) ? 1 : imin; i < imax; i++) {
                        if (test_bit(i - imin, eipend))
                                return (i << TOPEI_ID_SHIFT) | i;
                }
        }

        return 0;
}
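
/*
 * Validate an indirectly-selected (iselect) IMSIC register number.
 * EIP/EIE registers come in pairs covering 64 identities each, so
 * (num / 2) must fall within the implemented eix entries; on 64-bit
 * hosts the odd-numbered halves are invalid because a single access
 * already covers the full 64-bit register.
 */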
static int imsic_mrif_isel_check(u32 nr_eix, unsigned long isel)
{
        u32 num = 0;

        switch (isel) {
        case IMSIC_EIDELIVERY:
        case IMSIC_EITHRESHOLD:
                break;
        case IMSIC_EIP0 ... IMSIC_EIP63:
                num = isel - IMSIC_EIP0;
                break;
        case IMSIC_EIE0 ... IMSIC_EIE63:
                num = isel - IMSIC_EIE0;
                break;
        default:
                return -ENOENT;
        }
#ifndef CONFIG_32BIT
        if (num & 0x1)
                return -EINVAL;
#endif
        if ((num / 2) >= nr_eix)
                return -EINVAL;

        return 0;
}
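
/*
 * Emulate a read-modify-write of one iselect-addressed register in a
 * MRIF: *val receives the old register value, and the bits selected by
 * wr_mask are replaced with the corresponding bits of new_val. Reserved
 * bits of eidelivery/eithreshold are masked off, and bit 0 of EIP0/EIE0
 * is kept read-only because interrupt identity 0 is not valid.
 */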
static int imsic_mrif_rmw(struct imsic_mrif *mrif, u32 nr_eix,
                          unsigned long isel, unsigned long *val,
                          unsigned long new_val, unsigned long wr_mask)
{
        bool pend;
        struct imsic_mrif_eix *eix;
        unsigned long *ei, num, old_val = 0;

        switch (isel) {
        case IMSIC_EIDELIVERY:
                old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eidelivery,
                                                new_val, wr_mask & 0x1);
                break;
        case IMSIC_EITHRESHOLD:
                old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eithreshold,
                                new_val, wr_mask & (IMSIC_MAX_ID - 1));
                break;
        case IMSIC_EIP0 ... IMSIC_EIP63:
        case IMSIC_EIE0 ... IMSIC_EIE63:
                if (isel >= IMSIC_EIP0 && isel <= IMSIC_EIP63) {
                        pend = true;
                        num = isel - IMSIC_EIP0;
                } else {
                        pend = false;
                        num = isel - IMSIC_EIE0;
                }

                if ((num / 2) >= nr_eix)
                        return -EINVAL;
                eix = &mrif->eix[num / 2];

#ifndef CONFIG_32BIT
                if (num & 0x1)
                        return -EINVAL;
                ei = (pend) ? &eix->eip[0] : &eix->eie[0];
#else
                ei = (pend) ? &eix->eip[num & 0x1] : &eix->eie[num & 0x1];
#endif

                /* Bit0 of EIP0 or EIE0 is read-only */
                if (!num)
                        wr_mask &= ~BIT(0);

                old_val = imsic_mrif_atomic_rmw(mrif, ei, new_val, wr_mask);
                break;
        default:
                return -ENOENT;
        }

        if (val)
                *val = old_val;

        return 0;
}
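
/*
 * A VS-file can only be accessed through the guest interrupt file
 * selected by HSTATUS.VGEIN on the CPU it lives on, so reads, writes,
 * and clears of a remote VS-file are funnelled through
 * on_each_cpu_mask() to run on that CPU. The imsic_vsfile_local_*()
 * helpers below temporarily program HSTATUS.VGEIN to the VS-file's
 * HGEI number, access the file via the VS-level indirect CSRs, and
 * then restore HSTATUS and VSISELECT.
 */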
struct imsic_vsfile_read_data {
        int hgei;
        u32 nr_eix;
        bool clear;
        struct imsic_mrif *mrif;
};

static void imsic_vsfile_local_read(void *data)
{
        u32 i;
        struct imsic_mrif_eix *eix;
        struct imsic_vsfile_read_data *idata = data;
        struct imsic_mrif *mrif = idata->mrif;
        unsigned long new_hstatus, old_hstatus, old_vsiselect;

        old_vsiselect = csr_read(CSR_VSISELECT);
        old_hstatus = csr_read(CSR_HSTATUS);
        new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
        new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
        csr_write(CSR_HSTATUS, new_hstatus);

        /*
         * We don't use the imsic_mrif_atomic_xyz() functions to store
         * values in the MRIF here because imsic_vsfile_read() is always
         * called with a pointer to a temporary MRIF on the stack.
         */

        if (idata->clear) {
                mrif->eidelivery = imsic_vs_csr_swap(IMSIC_EIDELIVERY, 0);
                mrif->eithreshold = imsic_vs_csr_swap(IMSIC_EITHRESHOLD, 0);
                for (i = 0; i < idata->nr_eix; i++) {
                        eix = &mrif->eix[i];
                        eix->eip[0] = imsic_eix_swap(IMSIC_EIP0 + i * 2, 0);
                        eix->eie[0] = imsic_eix_swap(IMSIC_EIE0 + i * 2, 0);
#ifdef CONFIG_32BIT
                        eix->eip[1] = imsic_eix_swap(IMSIC_EIP0 + i * 2 + 1, 0);
                        eix->eie[1] = imsic_eix_swap(IMSIC_EIE0 + i * 2 + 1, 0);
#endif
                }
        } else {
                mrif->eidelivery = imsic_vs_csr_read(IMSIC_EIDELIVERY);
                mrif->eithreshold = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
                for (i = 0; i < idata->nr_eix; i++) {
                        eix = &mrif->eix[i];
                        eix->eip[0] = imsic_eix_read(IMSIC_EIP0 + i * 2);
                        eix->eie[0] = imsic_eix_read(IMSIC_EIE0 + i * 2);
#ifdef CONFIG_32BIT
                        eix->eip[1] = imsic_eix_read(IMSIC_EIP0 + i * 2 + 1);
                        eix->eie[1] = imsic_eix_read(IMSIC_EIE0 + i * 2 + 1);
#endif
                }
        }

        csr_write(CSR_HSTATUS, old_hstatus);
        csr_write(CSR_VSISELECT, old_vsiselect);
}

static void imsic_vsfile_read(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
                              bool clear, struct imsic_mrif *mrif)
{
        struct imsic_vsfile_read_data idata;

        /* We can only read (and clear) if we have an IMSIC VS-file */
        if (vsfile_cpu < 0 || vsfile_hgei <= 0)
                return;

        /* We can only read (and clear) on the local CPU */
        idata.hgei = vsfile_hgei;
        idata.nr_eix = nr_eix;
        idata.clear = clear;
        idata.mrif = mrif;
        on_each_cpu_mask(cpumask_of(vsfile_cpu),
                         imsic_vsfile_local_read, &idata, 1);
}

struct imsic_vsfile_rw_data {
        int hgei;
        int isel;
        bool write;
        unsigned long val;
};

static void imsic_vsfile_local_rw(void *data)
{
        struct imsic_vsfile_rw_data *idata = data;
        unsigned long new_hstatus, old_hstatus, old_vsiselect;

        old_vsiselect = csr_read(CSR_VSISELECT);
        old_hstatus = csr_read(CSR_HSTATUS);
        new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
        new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
        csr_write(CSR_HSTATUS, new_hstatus);

        switch (idata->isel) {
        case IMSIC_EIDELIVERY:
                if (idata->write)
                        imsic_vs_csr_write(IMSIC_EIDELIVERY, idata->val);
                else
                        idata->val = imsic_vs_csr_read(IMSIC_EIDELIVERY);
                break;
        case IMSIC_EITHRESHOLD:
                if (idata->write)
                        imsic_vs_csr_write(IMSIC_EITHRESHOLD, idata->val);
                else
                        idata->val = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
                break;
        case IMSIC_EIP0 ... IMSIC_EIP63:
        case IMSIC_EIE0 ... IMSIC_EIE63:
#ifndef CONFIG_32BIT
                if (idata->isel & 0x1)
                        break;
#endif
                if (idata->write)
                        imsic_eix_write(idata->isel, idata->val);
                else
                        idata->val = imsic_eix_read(idata->isel);
                break;
        default:
                break;
        }

        csr_write(CSR_HSTATUS, old_hstatus);
        csr_write(CSR_VSISELECT, old_vsiselect);
}

static int imsic_vsfile_rw(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
                           unsigned long isel, bool write,
                           unsigned long *val)
{
        int rc;
        struct imsic_vsfile_rw_data rdata;

        /* We can only access the register if we have an IMSIC VS-file */
        if (vsfile_cpu < 0 || vsfile_hgei <= 0)
                return -EINVAL;

        /* Check IMSIC register iselect */
        rc = imsic_mrif_isel_check(nr_eix, isel);
        if (rc)
                return rc;

        /* We can only access the register on the local CPU */
        rdata.hgei = vsfile_hgei;
        rdata.isel = isel;
        rdata.write = write;
        rdata.val = (write) ? *val : 0;
        on_each_cpu_mask(cpumask_of(vsfile_cpu),
                         imsic_vsfile_local_rw, &rdata, 1);

        if (!write)
                *val = rdata.val;

        return 0;
}

static void imsic_vsfile_local_clear(int vsfile_hgei, u32 nr_eix)
{
        u32 i;
        unsigned long new_hstatus, old_hstatus, old_vsiselect;

        /* We can only zero-out if we have an IMSIC VS-file */
        if (vsfile_hgei <= 0)
                return;

        old_vsiselect = csr_read(CSR_VSISELECT);
        old_hstatus = csr_read(CSR_HSTATUS);
        new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
        new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
        csr_write(CSR_HSTATUS, new_hstatus);

        imsic_vs_csr_write(IMSIC_EIDELIVERY, 0);
        imsic_vs_csr_write(IMSIC_EITHRESHOLD, 0);
        for (i = 0; i < nr_eix; i++) {
                imsic_eix_write(IMSIC_EIP0 + i * 2, 0);
                imsic_eix_write(IMSIC_EIE0 + i * 2, 0);
#ifdef CONFIG_32BIT
                imsic_eix_write(IMSIC_EIP0 + i * 2 + 1, 0);
                imsic_eix_write(IMSIC_EIE0 + i * 2 + 1, 0);
#endif
        }

        csr_write(CSR_HSTATUS, old_hstatus);
        csr_write(CSR_VSISELECT, old_vsiselect);
}

static void imsic_vsfile_local_update(int vsfile_hgei, u32 nr_eix,
                                      struct imsic_mrif *mrif)
{
        u32 i;
        struct imsic_mrif_eix *eix;
        unsigned long new_hstatus, old_hstatus, old_vsiselect;

        /* We can only update if we have a HW IMSIC context */
        if (vsfile_hgei <= 0)
                return;

        /*
         * We don't use the imsic_mrif_atomic_xyz() functions to read
         * values from the MRIF in this function because it is always
         * called with a pointer to a temporary MRIF on the stack.
         */

        old_vsiselect = csr_read(CSR_VSISELECT);
        old_hstatus = csr_read(CSR_HSTATUS);
        new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
        new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
        csr_write(CSR_HSTATUS, new_hstatus);

        for (i = 0; i < nr_eix; i++) {
                eix = &mrif->eix[i];
                imsic_eix_set(IMSIC_EIP0 + i * 2, eix->eip[0]);
                imsic_eix_set(IMSIC_EIE0 + i * 2, eix->eie[0]);
#ifdef CONFIG_32BIT
                imsic_eix_set(IMSIC_EIP0 + i * 2 + 1, eix->eip[1]);
                imsic_eix_set(IMSIC_EIE0 + i * 2 + 1, eix->eie[1]);
#endif
        }
        imsic_vs_csr_write(IMSIC_EITHRESHOLD, mrif->eithreshold);
        imsic_vs_csr_write(IMSIC_EIDELIVERY, mrif->eidelivery);

        csr_write(CSR_HSTATUS, old_hstatus);
        csr_write(CSR_VSISELECT, old_vsiselect);
}

static void imsic_vsfile_cleanup(struct imsic *imsic)
{
        int old_vsfile_hgei, old_vsfile_cpu;
        unsigned long flags;

        /*
         * We don't use the imsic_mrif_atomic_xyz() functions to clear
         * the SW-file in this function because it is always called
         * when the VCPU is being destroyed.
         */

        write_lock_irqsave(&imsic->vsfile_lock, flags);
        old_vsfile_hgei = imsic->vsfile_hgei;
        old_vsfile_cpu = imsic->vsfile_cpu;
        imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
        imsic->vsfile_va = NULL;
        imsic->vsfile_pa = 0;
        write_unlock_irqrestore(&imsic->vsfile_lock, flags);

        memset(imsic->swfile, 0, sizeof(*imsic->swfile));

        if (old_vsfile_cpu >= 0)
                kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
}

static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
{
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
        struct imsic_mrif *mrif = imsic->swfile;
        unsigned long flags;

        /*
         * The critical section is necessary during external interrupt
         * updates to avoid the risk of losing interrupts due to potential
         * interruptions between reading topei and updating pending status.
         */
        raw_spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);

        if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
            imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
                kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
        else
                kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);

        raw_spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);
}

static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
                              struct imsic_mrif *mrif)
{
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        /*
         * We don't use the imsic_mrif_atomic_xyz() functions to read and
         * write the SW-file and MRIF in this function because it is always
         * called when the VCPU is not using the SW-file and the MRIF points
         * to a temporary MRIF on the stack.
         */

        memcpy(mrif, imsic->swfile, sizeof(*mrif));
        if (clear) {
                memset(imsic->swfile, 0, sizeof(*imsic->swfile));
                kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
        }
}
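
/*
 * Merge a temporary MRIF into the SW-file. The eip/eie bits are OR-ed
 * into the SW-file with the atomic helpers instead of being overwritten,
 * so interrupts injected into the SW-file concurrently (for example via
 * kvm_riscv_vcpu_aia_imsic_inject()) are not lost.
 */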
static void imsic_swfile_update(struct kvm_vcpu *vcpu,
                                struct imsic_mrif *mrif)
{
        u32 i;
        struct imsic_mrif_eix *seix, *eix;
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
        struct imsic_mrif *smrif = imsic->swfile;

        imsic_mrif_atomic_write(smrif, &smrif->eidelivery, mrif->eidelivery);
        imsic_mrif_atomic_write(smrif, &smrif->eithreshold, mrif->eithreshold);
        for (i = 0; i < imsic->nr_eix; i++) {
                seix = &smrif->eix[i];
                eix = &mrif->eix[i];
                imsic_mrif_atomic_or(smrif, &seix->eip[0], eix->eip[0]);
                imsic_mrif_atomic_or(smrif, &seix->eie[0], eix->eie[0]);
#ifdef CONFIG_32BIT
                imsic_mrif_atomic_or(smrif, &seix->eip[1], eix->eip[1]);
                imsic_mrif_atomic_or(smrif, &seix->eie[1], eix->eie[1]);
#endif
        }

        imsic_swfile_extirq_update(vcpu);
}

void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        struct imsic_mrif tmrif;
        int old_vsfile_hgei, old_vsfile_cpu;
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        /* Read and clear IMSIC VS-file details */
        write_lock_irqsave(&imsic->vsfile_lock, flags);
        old_vsfile_hgei = imsic->vsfile_hgei;
        old_vsfile_cpu = imsic->vsfile_cpu;
        imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
        imsic->vsfile_va = NULL;
        imsic->vsfile_pa = 0;
        write_unlock_irqrestore(&imsic->vsfile_lock, flags);

        /* Do nothing if there is no IMSIC VS-file to release */
        if (old_vsfile_cpu < 0)
                return;

        /*
         * At this point, all interrupt producers are still using
         * the old IMSIC VS-file, so we first re-direct all interrupt
         * producers.
         */

        /* Purge the G-stage mapping */
        kvm_riscv_gstage_iounmap(vcpu->kvm,
                                 vcpu->arch.aia_context.imsic_addr,
                                 IMSIC_MMIO_PAGE_SZ);

        /* TODO: Purge the IOMMU mapping ??? */

        /*
         * At this point, all interrupt producers have been re-directed
         * elsewhere, so we move the register state from the old IMSIC
         * VS-file to the IMSIC SW-file.
         */

        /* Read and clear register state from the old IMSIC VS-file */
        memset(&tmrif, 0, sizeof(tmrif));
        imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu, imsic->nr_hw_eix,
                          true, &tmrif);

        /* Update register state in the IMSIC SW-file */
        imsic_swfile_update(vcpu, &tmrif);

        /* Free up the old IMSIC VS-file */
        kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
}
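
/*
 * Called from the VCPU run-loop whenever the VCPU may have migrated:
 * allocate a VS-file on the current CPU, point the guest's IMSIC MMIO
 * page at it, migrate register state from the old VS-file (or SW-file)
 * into it, and finally retarget HSTATUS.VGEIN. Returns 1 to continue
 * the run-loop, 0 to exit to user space, or a negative error code.
 */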
int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        phys_addr_t new_vsfile_pa;
        struct imsic_mrif tmrif;
        void __iomem *new_vsfile_va;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_run *run = vcpu->run;
        struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;
        struct imsic *imsic = vaia->imsic_state;
        int ret = 0, new_vsfile_hgei = -1, old_vsfile_hgei, old_vsfile_cpu;

        /* Do nothing for emulation mode */
        if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_EMUL)
                return 1;

        /* Read old IMSIC VS-file details */
        read_lock_irqsave(&imsic->vsfile_lock, flags);
        old_vsfile_hgei = imsic->vsfile_hgei;
        old_vsfile_cpu = imsic->vsfile_cpu;
        read_unlock_irqrestore(&imsic->vsfile_lock, flags);

        /* Do nothing if we are continuing on the same CPU */
        if (old_vsfile_cpu == vcpu->cpu)
                return 1;

        /* Allocate a new IMSIC VS-file */
        ret = kvm_riscv_aia_alloc_hgei(vcpu->cpu, vcpu,
                                       &new_vsfile_va, &new_vsfile_pa);
        if (ret <= 0) {
                /* For HW acceleration mode, we can't continue */
                if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_HWACCEL) {
                        run->fail_entry.hardware_entry_failure_reason =
                                                                CSR_HSTATUS;
                        run->fail_entry.cpu = vcpu->cpu;
                        run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                        return 0;
                }

                /* Release the old IMSIC VS-file */
                if (old_vsfile_cpu >= 0)
                        kvm_riscv_vcpu_aia_imsic_release(vcpu);

                /* For automatic mode, we continue */
                goto done;
        }
        new_vsfile_hgei = ret;

        /*
         * At this point, all interrupt producers are still using
         * the old IMSIC VS-file, so we first move all interrupt
         * producers to the new IMSIC VS-file.
         */

        /* Zero-out the new IMSIC VS-file */
        imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);

        /* Update the G-stage mapping for the new IMSIC VS-file */
        ret = kvm_riscv_gstage_ioremap(kvm, vcpu->arch.aia_context.imsic_addr,
                                       new_vsfile_pa, IMSIC_MMIO_PAGE_SZ,
                                       true, true);
        if (ret)
                goto fail_free_vsfile_hgei;

        /* TODO: Update the IOMMU mapping ??? */

        /* Update the new IMSIC VS-file details in the IMSIC context */
        write_lock_irqsave(&imsic->vsfile_lock, flags);
        imsic->vsfile_hgei = new_vsfile_hgei;
        imsic->vsfile_cpu = vcpu->cpu;
        imsic->vsfile_va = new_vsfile_va;
        imsic->vsfile_pa = new_vsfile_pa;
        write_unlock_irqrestore(&imsic->vsfile_lock, flags);

        /*
         * At this point, all interrupt producers have been moved
         * to the new IMSIC VS-file, so we move the register state
         * from the old IMSIC VS/SW-file to the new IMSIC VS-file.
         */
        memset(&tmrif, 0, sizeof(tmrif));
        if (old_vsfile_cpu >= 0) {
                /* Read and clear register state from the old IMSIC VS-file */
                imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu,
                                  imsic->nr_hw_eix, true, &tmrif);

                /* Free up the old IMSIC VS-file */
                kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
        } else {
                /* Read and clear register state from the IMSIC SW-file */
                imsic_swfile_read(vcpu, true, &tmrif);
        }

        /* Restore register state in the new IMSIC VS-file */
        imsic_vsfile_local_update(new_vsfile_hgei, imsic->nr_hw_eix, &tmrif);

done:
        /* Set VCPU HSTATUS.VGEIN to the new IMSIC VS-file */
        vcpu->arch.guest_context.hstatus &= ~HSTATUS_VGEIN;
        if (new_vsfile_hgei > 0)
                vcpu->arch.guest_context.hstatus |=
                        ((unsigned long)new_vsfile_hgei) << HSTATUS_VGEIN_SHIFT;

        /* Continue the run-loop */
        return 1;

fail_free_vsfile_hgei:
        kvm_riscv_aia_free_hgei(vcpu->cpu, new_vsfile_hgei);
        return ret;
}
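
/*
 * Emulate a guest CSR access backed by the SW-file. For the special
 * KVM_RISCV_AIA_IMSIC_TOPEI selector, reads return the current topei
 * value and any write claims (clears) the reported top pending bit;
 * the written value itself is ignored, matching the *topei CSR
 * behaviour.
 */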
int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
                                 unsigned long *val, unsigned long new_val,
                                 unsigned long wr_mask)
{
        u32 topei;
        struct imsic_mrif_eix *eix;
        int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC;
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) {
                /* Read the pending and enabled interrupt with the highest priority */
                topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
                                         imsic->nr_msis);
                if (val)
                        *val = topei;

                /* Writes ignore the value and clear the top pending interrupt */
                if (topei && wr_mask) {
                        topei >>= TOPEI_ID_SHIFT;
                        if (topei) {
                                eix = &imsic->swfile->eix[topei /
                                                          BITS_PER_TYPE(u64)];
                                clear_bit(topei & (BITS_PER_TYPE(u64) - 1),
                                          eix->eip);
                        }
                }
        } else {
                r = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix, isel,
                                   val, new_val, wr_mask);
                /* Forward unknown IMSIC registers to user space */
                if (r)
                        rc = (r == -ENOENT) ? 0 : KVM_INSN_ILLEGAL_TRAP;
        }

        if (wr_mask)
                imsic_swfile_extirq_update(vcpu);

        return rc;
}
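
/*
 * Device attribute accessor used by the in-kernel AIA device from user
 * space. When the register state lives in the SW-file, the access is
 * emulated in place; when a VS-file is resident, the access is
 * forwarded to the CPU currently hosting that VS-file.
 */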
int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
                                bool write, unsigned long *val)
{
        u32 isel, vcpu_id;
        unsigned long flags;
        struct imsic *imsic;
        struct kvm_vcpu *vcpu;
        int rc, vsfile_hgei, vsfile_cpu;

        if (!kvm_riscv_aia_initialized(kvm))
                return -ENODEV;

        vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
        vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
        if (!vcpu)
                return -ENODEV;

        isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
        imsic = vcpu->arch.aia_context.imsic_state;

        read_lock_irqsave(&imsic->vsfile_lock, flags);

        rc = 0;
        vsfile_hgei = imsic->vsfile_hgei;
        vsfile_cpu = imsic->vsfile_cpu;
        if (vsfile_cpu < 0) {
                if (write) {
                        rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
                                            isel, NULL, *val, -1UL);
                        imsic_swfile_extirq_update(vcpu);
                } else {
                        rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
                                            isel, val, 0, 0);
                }
        }

        read_unlock_irqrestore(&imsic->vsfile_lock, flags);

        if (!rc && vsfile_cpu >= 0)
                rc = imsic_vsfile_rw(vsfile_hgei, vsfile_cpu, imsic->nr_eix,
                                     isel, write, val);

        return rc;
}

int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type)
{
        u32 isel, vcpu_id;
        struct imsic *imsic;
        struct kvm_vcpu *vcpu;

        if (!kvm_riscv_aia_initialized(kvm))
                return -ENODEV;

        vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
        vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
        if (!vcpu)
                return -ENODEV;

        isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
        imsic = vcpu->arch.aia_context.imsic_state;

        return imsic_mrif_isel_check(imsic->nr_eix, isel);
}

void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu)
{
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        if (!imsic)
                return;

        kvm_riscv_vcpu_aia_imsic_release(vcpu);

        memset(imsic->swfile, 0, sizeof(*imsic->swfile));
}
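
/*
 * Inject an MSI into the VCPU's IMSIC. When a VS-file is resident, the
 * interrupt identity is written straight to the VS-file's setipnum_le
 * register and the VCPU is kicked; otherwise the pending bit is set in
 * the SW-file and the VS-level external interrupt is re-evaluated.
 */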
int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
                                    u32 guest_index, u32 offset, u32 iid)
{
        unsigned long flags;
        struct imsic_mrif_eix *eix;
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        /* We only emulate one IMSIC MMIO page for each Guest VCPU */
        if (!imsic || !iid || guest_index ||
            (offset != IMSIC_MMIO_SETIPNUM_LE &&
             offset != IMSIC_MMIO_SETIPNUM_BE))
                return -ENODEV;

        iid = (offset == IMSIC_MMIO_SETIPNUM_BE) ? __swab32(iid) : iid;
        if (imsic->nr_msis <= iid)
                return -EINVAL;

        read_lock_irqsave(&imsic->vsfile_lock, flags);

        if (imsic->vsfile_cpu >= 0) {
                writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
                kvm_vcpu_kick(vcpu);
        } else {
                eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
                set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
                imsic_swfile_extirq_update(vcpu);
        }

        read_unlock_irqrestore(&imsic->vsfile_lock, flags);

        return 0;
}
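
/*
 * MMIO callbacks for the emulated IMSIC page. Guest loads of the page
 * always return zero, while aligned 32-bit stores are converted into
 * struct kvm_msi injections so they take the same path as MSIs coming
 * from user space.
 */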
static int imsic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                           gpa_t addr, int len, void *val)
{
        if (len != 4 || (addr & 0x3) != 0)
                return -EOPNOTSUPP;

        *((u32 *)val) = 0;

        return 0;
}

static int imsic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                            gpa_t addr, int len, const void *val)
{
        struct kvm_msi msi = { 0 };

        if (len != 4 || (addr & 0x3) != 0)
                return -EOPNOTSUPP;

        msi.address_hi = addr >> 32;
        msi.address_lo = (u32)addr;
        msi.data = *((const u32 *)val);
        kvm_riscv_aia_inject_msi(vcpu->kvm, &msi);

        return 0;
}

static struct kvm_io_device_ops imsic_iodoev_ops = {
        .read = imsic_mmio_read,
        .write = imsic_mmio_write,
};

int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
{
        int ret = 0;
        struct imsic *imsic;
        struct page *swfile_page;
        struct kvm *kvm = vcpu->kvm;

        /* Fail if we have zero IDs */
        if (!kvm->arch.aia.nr_ids)
                return -EINVAL;

        /* Allocate IMSIC context */
        imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
        if (!imsic)
                return -ENOMEM;
        vcpu->arch.aia_context.imsic_state = imsic;

        /* Setup IMSIC context */
        imsic->nr_msis = kvm->arch.aia.nr_ids + 1;
        rwlock_init(&imsic->vsfile_lock);
        imsic->nr_eix = BITS_TO_U64(imsic->nr_msis);
        imsic->nr_hw_eix = BITS_TO_U64(kvm_riscv_aia_max_ids);
        imsic->vsfile_hgei = imsic->vsfile_cpu = -1;

        /* Setup IMSIC SW-file */
        swfile_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
                                  get_order(sizeof(*imsic->swfile)));
        if (!swfile_page) {
                ret = -ENOMEM;
                goto fail_free_imsic;
        }
        imsic->swfile = page_to_virt(swfile_page);
        imsic->swfile_pa = page_to_phys(swfile_page);
        raw_spin_lock_init(&imsic->swfile_extirq_lock);

        /* Setup IO device */
        kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
                                      vcpu->arch.aia_context.imsic_addr,
                                      KVM_DEV_RISCV_IMSIC_SIZE,
                                      &imsic->iodev);
        mutex_unlock(&kvm->slots_lock);
        if (ret)
                goto fail_free_swfile;

        return 0;

fail_free_swfile:
        free_pages((unsigned long)imsic->swfile,
                   get_order(sizeof(*imsic->swfile)));
fail_free_imsic:
        vcpu->arch.aia_context.imsic_state = NULL;
        kfree(imsic);
        return ret;
}

void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        if (!imsic)
                return;

        imsic_vsfile_cleanup(imsic);

        mutex_lock(&kvm->slots_lock);
        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &imsic->iodev);
        mutex_unlock(&kvm->slots_lock);

        free_pages((unsigned long)imsic->swfile,
                   get_order(sizeof(*imsic->swfile)));

        vcpu->arch.aia_context.imsic_state = NULL;
        kfree(imsic);
}