  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * handling kvm guest interrupts
  4. *
  5. * Copyright IBM Corp. 2008, 2020
  6. *
  7. * Author(s): Carsten Otte <cotte@de.ibm.com>
  8. */
  9. #define KMSG_COMPONENT "kvm-s390"
  10. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  11. #include <linux/interrupt.h>
  12. #include <linux/kvm_host.h>
  13. #include <linux/hrtimer.h>
  14. #include <linux/mmu_context.h>
  15. #include <linux/nospec.h>
  16. #include <linux/signal.h>
  17. #include <linux/slab.h>
  18. #include <linux/bitmap.h>
  19. #include <linux/vmalloc.h>
  20. #include <asm/access-regs.h>
  21. #include <asm/asm-offsets.h>
  22. #include <asm/dis.h>
  23. #include <linux/uaccess.h>
  24. #include <asm/sclp.h>
  25. #include <asm/isc.h>
  26. #include <asm/gmap.h>
  27. #include <asm/nmi.h>
  28. #include <asm/airq.h>
  29. #include <asm/tpi.h>
  30. #include "kvm-s390.h"
  31. #include "gaccess.h"
  32. #include "trace-s390.h"
  33. #include "pci.h"
  34. #define PFAULT_INIT 0x0600
  35. #define PFAULT_DONE 0x0680
  36. #define VIRTIO_PARAM 0x0d00
  37. static struct kvm_s390_gib *gib;
/* handle external calls via sigp interpretation facility */
/*
 * Check whether a SIGP external call is pending for @vcpu.
 *
 * Returns the external-call-pending bit (c) from the vcpu's SCA entry;
 * if @src_id is non-NULL it also receives the source cpu number (scn).
 * The entry layout differs between the basic (bsca) and extended (esca)
 * SCA formats, hence the two branches.
 */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	/* fast path: no ecall was registered for this vcpu */
	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
		return 0;
	BUG_ON(!kvm_s390_use_sca_entries());
	/* sca_lock keeps the SCA format and contents stable while we read */
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}
/*
 * Try to register an external call from @src_id in the vcpu's SCA entry.
 *
 * The sigp control field is updated with a cmpxchg that only succeeds
 * when no external call was pending in the value we read (c bit forced
 * to 0 in the expected old value).
 *
 * Returns: 0 on success, -EBUSY if another external call is already pending.
 */
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	BUG_ON(!kvm_s390_use_sca_entries());
	/* sca_lock keeps the SCA format stable while we update the entry */
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val;

		old_val = READ_ONCE(*sigp_ctrl);
		new_val.scn = src_id;
		new_val.c = 1;
		/* only succeed if no call was pending when we looked */
		old_val.c = 0;
		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val;

		old_val = READ_ONCE(*sigp_ctrl);
		new_val.scn = src_id;
		new_val.c = 1;
		/* only succeed if no call was pending when we looked */
		old_val.c = 0;
		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	return 0;
}
/*
 * Remove any registered external call from the vcpu's SCA entry and
 * clear the CPUSTAT_ECALL_PEND flag.
 *
 * The sigp control field is zeroed with a cmpxchg against the value we
 * just read; a failure is unexpected (WARN) since the clear is
 * unconditional.
 */
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	int rc, expect;

	if (!kvm_s390_use_sca_entries())
		return;
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	/* sca_lock keeps the SCA format stable while we update the entry */
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old;

		old = READ_ONCE(*sigp_ctrl);
		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old;

		old = READ_ONCE(*sigp_ctrl);
		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect);		/* cannot clear? */
}
  127. int psw_extint_disabled(struct kvm_vcpu *vcpu)
  128. {
  129. return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
  130. }
  131. static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
  132. {
  133. return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
  134. }
  135. static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
  136. {
  137. return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
  138. }
  139. static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
  140. {
  141. return psw_extint_disabled(vcpu) &&
  142. psw_ioint_disabled(vcpu) &&
  143. psw_mchk_disabled(vcpu);
  144. }
  145. static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
  146. {
  147. if (psw_extint_disabled(vcpu) ||
  148. !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
  149. return 0;
  150. if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
  151. /* No timer interrupts when single stepping */
  152. return 0;
  153. return 1;
  154. }
  155. static int ckc_irq_pending(struct kvm_vcpu *vcpu)
  156. {
  157. const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
  158. const u64 ckc = vcpu->arch.sie_block->ckc;
  159. if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
  160. if ((s64)ckc >= (s64)now)
  161. return 0;
  162. } else if (ckc >= now) {
  163. return 0;
  164. }
  165. return ckc_interrupts_enabled(vcpu);
  166. }
  167. static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
  168. {
  169. return !psw_extint_disabled(vcpu) &&
  170. (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
  171. }
  172. static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
  173. {
  174. if (!cpu_timer_interrupts_enabled(vcpu))
  175. return 0;
  176. return kvm_s390_get_cpu_timer(vcpu) >> 63;
  177. }
  178. static uint64_t isc_to_isc_bits(int isc)
  179. {
  180. return (0x80 >> isc) << 24;
  181. }
  182. static inline u32 isc_to_int_word(u8 isc)
  183. {
  184. return ((u32)isc << 27) | 0x80000000;
  185. }
  186. static inline u8 int_word_to_isc(u32 int_word)
  187. {
  188. return (int_word & 0x38000000) >> 27;
  189. }
  190. /*
  191. * To use atomic bitmap functions, we have to provide a bitmap address
  192. * that is u64 aligned. However, the ipm might be u32 aligned.
  193. * Therefore, we logically start the bitmap at the very beginning of the
  194. * struct and fixup the bit number.
  195. */
  196. #define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
  197. /**
  198. * gisa_set_iam - change the GISA interruption alert mask
  199. *
  200. * @gisa: gisa to operate on
  201. * @iam: new IAM value to use
  202. *
  203. * Change the IAM atomically with the next alert address and the IPM
  204. * of the GISA if the GISA is not part of the GIB alert list. All three
  205. * fields are located in the first long word of the GISA.
  206. *
  207. * Returns: 0 on success
  208. * -EBUSY in case the gisa is part of the alert list
  209. */
static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
{
	u64 word, _word;

	do {
		word = READ_ONCE(gisa->u64.word[0]);
		/*
		 * The upper 32 bits hold the next alert address; it equals
		 * the gisa's own address only while the gisa is not on the
		 * alert list (see the kernel-doc above).
		 */
		if ((u64)gisa != word >> 32)
			return -EBUSY;
		/* replace the low byte (IAM), keep next alert and IPM */
		_word = (word & ~0xffUL) | iam;
	} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);

	return 0;
}
  221. /**
  222. * gisa_clear_ipm - clear the GISA interruption pending mask
  223. *
  224. * @gisa: gisa to operate on
  225. *
  226. * Clear the IPM atomically with the next alert address and the IAM
  227. * of the GISA unconditionally. All three fields are located in the
  228. * first long word of the GISA.
  229. */
static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
{
	u64 word, _word;

	do {
		word = READ_ONCE(gisa->u64.word[0]);
		/* clear bits 24..31 (the IPM), keep next alert and IAM */
		_word = word & ~(0xffUL << 24);
	} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
}
  238. /**
  239. * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
  240. *
  241. * @gi: gisa interrupt struct to work on
  242. *
  243. * Atomically restores the interruption alert mask if none of the
  244. * relevant ISCs are pending and return the IPM.
  245. *
  246. * Returns: the relevant pending ISCs
  247. */
static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
{
	u8 pending_mask, alert_mask;
	u64 word, _word;

	do {
		word = READ_ONCE(gi->origin->u64.word[0]);
		alert_mask = READ_ONCE(gi->alert.mask);
		/* the IPM lives in bits 24..31 of the first word */
		pending_mask = (u8)(word >> 24) & alert_mask;
		if (pending_mask)
			return pending_mask;
		/* no relevant ISC pending: re-arm the IAM (low byte) */
		_word = (word & ~0xffUL) | alert_mask;
	} while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);

	return 0;
}
/* atomically set the IPM bit for guest interruption subclass @gisc */
static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	/* bitmap starts at the gisa base; IPM_BIT_OFFSET fixes up the bit */
	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
/* snapshot the interruption pending mask (IPM) of @gisa */
static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
	return READ_ONCE(gisa->ipm);
}
/* atomically test-and-clear the IPM bit for @gisc; returns the old bit */
static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
  274. static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
  275. {
  276. unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs |
  277. vcpu->arch.local_int.pending_irqs;
  278. pending &= ~vcpu->kvm->arch.float_int.masked_irqs;
  279. return pending;
  280. }
  281. static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
  282. {
  283. struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
  284. unsigned long pending_mask;
  285. pending_mask = pending_irqs_no_gisa(vcpu);
  286. if (gi->origin)
  287. pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
  288. return pending_mask;
  289. }
/* map an I/O interruption subclass (0..7) to its IRQ_PEND_IO_ISC_* type */
static inline int isc_to_irq_type(unsigned long isc)
{
	return IRQ_PEND_IO_ISC_0 - isc;
}
/* inverse of isc_to_irq_type(): map an IRQ_PEND_IO_ISC_* type to its ISC */
static inline int irq_type_to_isc(unsigned long irq_type)
{
	return IRQ_PEND_IO_ISC_0 - irq_type;
}
  298. static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
  299. unsigned long active_mask)
  300. {
  301. int i;
  302. for (i = 0; i <= MAX_ISC; i++)
  303. if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
  304. active_mask &= ~(1UL << (isc_to_irq_type(i)));
  305. return active_mask;
  306. }
/*
 * Compute the subset of pending interrupts that may currently be
 * delivered to @vcpu, honouring the PSW system mask bits and the
 * subclass masks in the guest control registers.
 */
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	/* PSW bits gate whole interrupt classes ... */
	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	/* ... while CR0 subclass bits gate individual external irqs */
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) {
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
		__clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask);
	}
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	/* PV guest cpus can have a single interruption injected at a time. */
	if (kvm_s390_pv_cpu_get_handle(vcpu) &&
	    vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
		active_mask &= ~(IRQ_PEND_EXT_II_MASK |
				 IRQ_PEND_IO_MASK |
				 IRQ_PEND_MCHK_MASK);
	/*
	 * Check both floating and local interrupt's cr14 because
	 * bit IRQ_PEND_MCHK_REP could be set in both cases.
	 */
	if (!(vcpu->arch.sie_block->gcr[14] &
	      (vcpu->kvm->arch.float_int.mchk.cr14 |
	       vcpu->arch.local_int.irq.mchk.cr14)))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}
/* mark @vcpu as waiting: set CPUSTAT_WAIT and the kvm-wide idle bit */
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}
/* undo __set_cpu_idle(): clear CPUSTAT_WAIT and the kvm-wide idle bit */
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}
/*
 * Drop all previously requested interception indicators (cpu flags,
 * control-register load controls, instruction intercepts). Guest
 * debugging re-adds the intercepts it depends on.
 */
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
				CPUSTAT_STOP_INT);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}
  376. static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
  377. {
  378. if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
  379. return;
  380. if (psw_ioint_disabled(vcpu))
  381. kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
  382. else
  383. vcpu->arch.sie_block->lctl |= LCTL_CR6;
  384. }
  385. static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
  386. {
  387. if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
  388. return;
  389. if (psw_extint_disabled(vcpu))
  390. kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
  391. else
  392. vcpu->arch.sie_block->lctl |= LCTL_CR0;
  393. }
  394. static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
  395. {
  396. if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
  397. return;
  398. if (psw_mchk_disabled(vcpu))
  399. vcpu->arch.sie_block->ictl |= ICTL_LPSW;
  400. else
  401. vcpu->arch.sie_block->lctl |= LCTL_CR14;
  402. }
/* request a stop intercept if a SIGP stop irq is pending */
static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}
/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}
/*
 * Deliver a pending cpu-timer external interrupt to the guest.
 *
 * For protected (PV) guests the interrupt is injected via the SIE
 * interruption injection controls; otherwise the external interrupt
 * is written to the guest lowcore by hand (interrupt code, cpu
 * address, old/new PSW swap).
 *
 * Returns: 0 on success, -EFAULT if a lowcore access failed.
 */
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;

	vcpu->stat.deliver_cputm++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER;
	} else {
		rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	}
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
/*
 * Deliver a pending clock comparator external interrupt to the guest.
 *
 * For protected (PV) guests the interrupt is injected via the SIE
 * interruption injection controls; otherwise the external interrupt
 * is written to the guest lowcore by hand.
 *
 * Returns: 0 on success, -EFAULT if a lowcore access failed.
 */
static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;

	vcpu->stat.deliver_ckc++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP;
	} else {
		rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
				  (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	}
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
/*
 * Deliver a pfault init interrupt: a CP service-signal external
 * interrupt with the PFAULT_INIT cpu address and the fault token in
 * external interrupt parameters 2.
 *
 * Returns: 0 on success, -EFAULT if a lowcore access failed.
 */
static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	/* take a private copy and clear the pending irq under the lock */
	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
  484. static int __write_machine_check(struct kvm_vcpu *vcpu,
  485. struct kvm_s390_mchk_info *mchk)
  486. {
  487. unsigned long ext_sa_addr;
  488. unsigned long lc;
  489. freg_t fprs[NUM_FPRS];
  490. union mci mci;
  491. int rc;
  492. /*
  493. * All other possible payload for a machine check (e.g. the register
  494. * contents in the save area) will be handled by the ultravisor, as
* the hypervisor does not have the needed information for
  496. * protected guests.
  497. */
  498. if (kvm_s390_pv_cpu_is_protected(vcpu)) {
  499. vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK;
  500. vcpu->arch.sie_block->mcic = mchk->mcic;
  501. vcpu->arch.sie_block->faddr = mchk->failing_storage_address;
  502. vcpu->arch.sie_block->edc = mchk->ext_damage_code;
  503. return 0;
  504. }
  505. mci.val = mchk->mcic;
  506. /* take care of lazy register loading */
  507. kvm_s390_fpu_store(vcpu->run);
  508. save_access_regs(vcpu->run->s.regs.acrs);
  509. if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
  510. save_gs_cb(current->thread.gs_cb);
  511. /* Extended save area */
  512. rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
  513. sizeof(unsigned long));
  514. /* Only bits 0 through 63-LC are used for address formation */
  515. lc = ext_sa_addr & MCESA_LC_MASK;
  516. if (test_kvm_facility(vcpu->kvm, 133)) {
  517. switch (lc) {
  518. case 0:
  519. case 10:
  520. ext_sa_addr &= ~0x3ffUL;
  521. break;
  522. case 11:
  523. ext_sa_addr &= ~0x7ffUL;
  524. break;
  525. case 12:
  526. ext_sa_addr &= ~0xfffUL;
  527. break;
  528. default:
  529. ext_sa_addr = 0;
  530. break;
  531. }
  532. } else {
  533. ext_sa_addr &= ~0x3ffUL;
  534. }
  535. if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
  536. if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
  537. 512))
  538. mci.vr = 0;
  539. } else {
  540. mci.vr = 0;
  541. }
  542. if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
  543. && (lc == 11 || lc == 12)) {
  544. if (write_guest_abs(vcpu, ext_sa_addr + 1024,
  545. &vcpu->run->s.regs.gscb, 32))
  546. mci.gs = 0;
  547. } else {
  548. mci.gs = 0;
  549. }
  550. /* General interruption information */
  551. rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
  552. rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
  553. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  554. rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
  555. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  556. rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
  557. /* Register-save areas */
  558. if (cpu_has_vx()) {
  559. convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
  560. rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
  561. } else {
  562. rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
  563. vcpu->run->s.regs.fprs, 128);
  564. }
  565. rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
  566. vcpu->run->s.regs.gprs, 128);
  567. rc |= put_guest_lc(vcpu, vcpu->run->s.regs.fpc,
  568. (u32 __user *) __LC_FP_CREG_SAVE_AREA);
  569. rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
  570. (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
  571. rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
  572. (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
  573. rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
  574. (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
  575. rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
  576. &vcpu->run->s.regs.acrs, 64);
  577. rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
  578. &vcpu->arch.sie_block->gcr, 128);
  579. /* Extended interruption information */
  580. rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
  581. (u32 __user *) __LC_EXT_DAMAGE_CODE);
  582. rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
  583. (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
  584. rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
  585. sizeof(mchk->fixed_logout));
  586. return rc ? -EFAULT : 0;
  587. }
/*
 * Deliver a pending machine-check interrupt to the vcpu.
 *
 * Combines the vcpu-local machine-check payload (exigent/repressible)
 * with floating repressible conditions before writing the merged
 * information to the guest via __write_machine_check().
 */
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	/* lock order: floating interrupt lock before local interrupt lock */
	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		vcpu->stat.deliver_machine_check++;
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}
  636. static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
  637. {
  638. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  639. int rc = 0;
  640. VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
  641. vcpu->stat.deliver_restart_signal++;
  642. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
  643. if (kvm_s390_pv_cpu_is_protected(vcpu)) {
  644. vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART;
  645. } else {
  646. rc = write_guest_lc(vcpu,
  647. offsetof(struct lowcore, restart_old_psw),
  648. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  649. rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
  650. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  651. }
  652. clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
  653. return rc ? -EFAULT : 0;
  654. }
  655. static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
  656. {
  657. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  658. struct kvm_s390_prefix_info prefix;
  659. spin_lock(&li->lock);
  660. prefix = li->irq.prefix;
  661. li->irq.prefix.address = 0;
  662. clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
  663. spin_unlock(&li->lock);
  664. vcpu->stat.deliver_prefix_signal++;
  665. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
  666. KVM_S390_SIGP_SET_PREFIX,
  667. prefix.address, 0);
  668. kvm_s390_set_prefix(vcpu, prefix.address);
  669. return 0;
  670. }
/*
 * Deliver one pending SIGP emergency signal.
 *
 * The source cpu address is taken from the sigp_emerg_pending bitmap;
 * the IRQ_PEND_EXT_EMERGENCY summary bit is only cleared once the
 * bitmap is empty, so multiple pending senders are delivered one by
 * one.  Returns 0 on success, -EFAULT if a guest access failed.
 */
static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;
	int cpu_addr;

	spin_lock(&li->lock);
	/* pick (and consume) the lowest pending source cpu address */
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	/* protected guests: delegate the delivery to the ultravisor */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG;
		vcpu->arch.sie_block->extcpuaddr = cpu_addr;
		return 0;
	}

	/* external interrupt: store code/cpu address and swap PSWs */
	rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
  701. static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
  702. {
  703. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  704. struct kvm_s390_extcall_info extcall;
  705. int rc;
  706. spin_lock(&li->lock);
  707. extcall = li->irq.extcall;
  708. li->irq.extcall.code = 0;
  709. clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
  710. spin_unlock(&li->lock);
  711. VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
  712. vcpu->stat.deliver_external_call++;
  713. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
  714. KVM_S390_INT_EXTERNAL_CALL,
  715. extcall.code, 0);
  716. if (kvm_s390_pv_cpu_is_protected(vcpu)) {
  717. vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
  718. vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL;
  719. vcpu->arch.sie_block->extcpuaddr = extcall.code;
  720. return 0;
  721. }
  722. rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
  723. (u16 *)__LC_EXT_INT_CODE);
  724. rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
  725. rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
  726. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  727. rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
  728. sizeof(psw_t));
  729. return rc ? -EFAULT : 0;
  730. }
  731. static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code)
  732. {
  733. switch (code) {
  734. case PGM_SPECIFICATION:
  735. vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION;
  736. break;
  737. case PGM_OPERAND:
  738. vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND;
  739. break;
  740. default:
  741. return -EINVAL;
  742. }
  743. return 0;
  744. }
/*
 * Deliver a pending program interrupt to the vcpu.
 *
 * The pending program-interrupt information is snapshotted and cleared
 * under the local interrupt lock, then the per-code payload (translation
 * exception code, access ids, monitor data, ...) is written to the guest
 * lowcore, the PSW is rewound for nullifying conditions, and the program
 * old/new PSW swap is performed.  Returns 0 or -EFAULT.
 */
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	/* instruction length for PSW rewind and the ILC field */
	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	/* PER is handled by the ultravisor */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER);

	/* store per-code additional interruption information */
	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		fallthrough;
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	/* PER payload can accompany any program interruption */
	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	/* nullifying conditions point back at the failing instruction */
	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_PGM_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
/* mask for the SCCB address in the sclp parameter (8-byte aligned, <2G) */
#define SCCB_MASK 0xFFFFFFF8
/* sclp parameter bits indicating a pending event — see __deliver_service_ev() */
#define SCCB_EVENT_PENDING 0x3
  858. static int write_sclp(struct kvm_vcpu *vcpu, u32 parm)
  859. {
  860. int rc;
  861. if (kvm_s390_pv_cpu_get_handle(vcpu)) {
  862. vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
  863. vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG;
  864. vcpu->arch.sie_block->eiparams = parm;
  865. return 0;
  866. }
  867. rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
  868. rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
  869. rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
  870. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  871. rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
  872. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  873. rc |= put_guest_lc(vcpu, parm,
  874. (u32 *)__LC_EXT_PARAMS);
  875. return rc ? -EFAULT : 0;
  876. }
/*
 * Deliver a pending floating service (sclp) interrupt.
 *
 * Nothing is delivered while the service interrupt is masked (which
 * happens for protected guests until userspace handles it) or not
 * pending.  Delivering the full service signal also consumes any
 * pending event-only indication.
 */
static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;

	spin_lock(&fi->lock);
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) ||
	    !(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
	/*
	 * For protected guests mask further service interrupts until
	 * this one has been processed.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	return write_sclp(vcpu, ext.ext_params);
}
/*
 * Deliver a pending service-event-only interrupt.
 *
 * Unlike __deliver_service() this keeps the SCCB address part of the
 * parameter for a later full delivery and passes only the event-pending
 * bits to the guest.
 */
static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	/* only clear the event bits */
	fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING;
	clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event");
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	/* deliver only the event indication, not the SCCB address */
	return write_sclp(vcpu, ext.ext_params & SCCB_EVENT_PENDING);
}
/*
 * Deliver one pending pfault-done (completion) interrupt.
 *
 * Takes the oldest entry off the floating pfault list; the summary bit
 * is only cleared once the list is empty.  The interrupt info is freed
 * after delivery.  Returns 0 or -EFAULT.
 */
static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		/* CP-service external interrupt with the pfault token */
		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}
/*
 * Deliver one pending floating virtio interrupt.
 *
 * Mirrors __deliver_pfault_done(): pop the oldest entry from the
 * virtio list, clear the summary bit once the list is empty, write
 * the CP-service external interrupt payload and free the entry.
 */
static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}
  1002. static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
  1003. {
  1004. int rc;
  1005. if (kvm_s390_pv_cpu_is_protected(vcpu)) {
  1006. vcpu->arch.sie_block->iictl = IICTL_CODE_IO;
  1007. vcpu->arch.sie_block->subchannel_id = io->subchannel_id;
  1008. vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr;
  1009. vcpu->arch.sie_block->io_int_parm = io->io_int_parm;
  1010. vcpu->arch.sie_block->io_int_word = io->io_int_word;
  1011. return 0;
  1012. }
  1013. rc = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
  1014. rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
  1015. rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
  1016. rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
  1017. rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
  1018. &vcpu->arch.sie_block->gpsw,
  1019. sizeof(psw_t));
  1020. rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
  1021. &vcpu->arch.sie_block->gpsw,
  1022. sizeof(psw_t));
  1023. return rc ? -EFAULT : 0;
  1024. }
/*
 * Deliver one pending I/O interrupt for the given irq type (one per
 * interruption subclass).
 *
 * Classic floating interrupts from the per-ISC list are preferred;
 * if none is queued but the GISA has the ISC's IPM bit set, a
 * synthesized adapter interrupt is delivered instead.
 */
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
	struct kvm_s390_interrupt_info *inti = NULL;
	struct kvm_s390_io_info io;
	u32 isc;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc = irq_type_to_isc(irq_type);
	isc_list = &fi->lists[isc];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 inti->type,
						 ((__u32)inti->io.subchannel_id << 16) |
						 inti->io.subchannel_nr,
						 ((__u64)inti->io.io_int_parm << 32) |
						 inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	/* clear the summary bit once the per-ISC list is drained */
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = __do_deliver_io(vcpu, &(inti->io));
		kfree(inti);
		goto out;
	}

	if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
		/*
		 * in case an adapter interrupt was not delivered
		 * in SIE context KVM will handle the delivery
		 */
		VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
		memset(&io, 0, sizeof(io));
		io.io_int_word = isc_to_int_word(isc);
		vcpu->stat.deliver_io++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_IO(1, 0, 0, 0),
						 ((__u32)io.subchannel_id << 16) |
						 io.subchannel_nr,
						 ((__u64)io.io_int_parm << 32) |
						 io.io_int_word);
		rc = __do_deliver_io(vcpu, &io);
	}
out:
	return rc;
}
  1088. /* Check whether an external call is pending (deliverable or not) */
  1089. int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
  1090. {
  1091. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  1092. if (!sclp.has_sigpif)
  1093. return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
  1094. return sca_ext_call_pending(vcpu, NULL);
  1095. }
  1096. int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
  1097. {
  1098. if (deliverable_irqs(vcpu))
  1099. return 1;
  1100. if (kvm_cpu_has_pending_timer(vcpu))
  1101. return 1;
  1102. /* external call pending and deliverable */
  1103. if (kvm_s390_ext_call_pending(vcpu) &&
  1104. !psw_extint_disabled(vcpu) &&
  1105. (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
  1106. return 1;
  1107. if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
  1108. return 1;
  1109. return 0;
  1110. }
  1111. int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
  1112. {
  1113. return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
  1114. }
/*
 * Compute how long (in ns) the vcpu may sleep before a clock-comparator
 * or cpu-timer interrupt would fire.  Returns 0 if an interrupt is
 * already due (or no timer interrupt source is enabled and reachable).
 */
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;
	u64 cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
			/* signed comparison when the sign control is set */
			if ((s64)now < (s64)ckc)
				sltime = tod_to_ns((s64)ckc - (s64)now);
		} else if (now < ckc) {
			sltime = tod_to_ns(ckc - now);
		}
		/* already expired */
		if (!sltime)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			/* sleep until whichever timer fires first */
			return min_t(u64, sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}
/*
 * Handle a guest enabled-wait state: block the vcpu until an interrupt
 * becomes pending, arming the clock-comparator hrtimer if a timer
 * interrupt could wake it up.
 *
 * Returns 0 when the vcpu can continue running and -EOPNOTSUPP for a
 * disabled wait (all interrupts masked), which userspace must handle.
 */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
	u64 sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	/* a pending GISA adapter interrupt also makes the vcpu runnable */
	if (gi->origin &&
	    (gisa_get_ipm_or_restore_iam(gi) &
	     vcpu->arch.sie_block->gcr[6] >> 24))
		return 0;

	if (!ckc_interrupts_enabled(vcpu) &&
	    !cpu_timer_interrupts_enabled(vcpu)) {
		/* no timer can wake us up: sleep without an hrtimer */
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	sltime = __calculate_sltime(vcpu);
	if (!sltime)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
	/* drop the srcu lock while blocked, as required by kvm_vcpu_halt() */
	kvm_vcpu_srcu_read_unlock(vcpu);
	kvm_vcpu_halt(vcpu);
	vcpu->valid_wakeup = false;
	__unset_cpu_idle(vcpu);
	kvm_vcpu_srcu_read_lock(vcpu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
/*
 * Wake a (possibly) sleeping vcpu.  valid_wakeup is set before the
 * wakeup so the woken vcpu observes it.
 */
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = true;
	kvm_vcpu_wake_up(vcpu);

	/*
	 * The VCPU might not be sleeping but rather executing VSIE. Let's
	 * kick it, so it leaves the SIE to process the request.
	 */
	kvm_s390_vsie_kick(vcpu);
}
/*
 * hrtimer callback for the clock-comparator timer armed in
 * kvm_s390_handle_wait(): wake the vcpu, or re-arm the timer if it
 * fired too early relative to the guest TOD clock.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}
  1207. void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
  1208. {
  1209. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  1210. spin_lock(&li->lock);
  1211. li->pending_irqs = 0;
  1212. bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
  1213. memset(&li->irq, 0, sizeof(li->irq));
  1214. spin_unlock(&li->lock);
  1215. sca_clear_ext_call(vcpu);
  1216. }
/*
 * Deliver all currently deliverable interrupts to the vcpu, highest
 * priority first, until none remain or a delivery fails.
 *
 * Stale ckc/cpu-timer pending bits are revalidated first.  If guest
 * single-stepping is enabled and at least one interrupt was delivered
 * (which changed the PSW), a singlestep exit is forced.  Returns the
 * last delivery result (0 or a negative error).
 */
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	bool delivered = false;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the reverse order of interrupt priority */
		irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
		switch (irq_type) {
		case IRQ_PEND_IO_ISC_0:
		case IRQ_PEND_IO_ISC_1:
		case IRQ_PEND_IO_ISC_2:
		case IRQ_PEND_IO_ISC_3:
		case IRQ_PEND_IO_ISC_4:
		case IRQ_PEND_IO_ISC_5:
		case IRQ_PEND_IO_ISC_6:
		case IRQ_PEND_IO_ISC_7:
			rc = __deliver_io(vcpu, irq_type);
			break;
		case IRQ_PEND_MCHK_EX:
		case IRQ_PEND_MCHK_REP:
			rc = __deliver_machine_check(vcpu);
			break;
		case IRQ_PEND_PROG:
			rc = __deliver_prog(vcpu);
			break;
		case IRQ_PEND_EXT_EMERGENCY:
			rc = __deliver_emergency_signal(vcpu);
			break;
		case IRQ_PEND_EXT_EXTERNAL:
			rc = __deliver_external_call(vcpu);
			break;
		case IRQ_PEND_EXT_CLOCK_COMP:
			rc = __deliver_ckc(vcpu);
			break;
		case IRQ_PEND_EXT_CPU_TIMER:
			rc = __deliver_cpu_timer(vcpu);
			break;
		case IRQ_PEND_RESTART:
			rc = __deliver_restart(vcpu);
			break;
		case IRQ_PEND_SET_PREFIX:
			rc = __deliver_set_prefix(vcpu);
			break;
		case IRQ_PEND_PFAULT_INIT:
			rc = __deliver_pfault_init(vcpu);
			break;
		case IRQ_PEND_EXT_SERVICE:
			rc = __deliver_service(vcpu);
			break;
		case IRQ_PEND_EXT_SERVICE_EV:
			rc = __deliver_service_ev(vcpu);
			break;
		case IRQ_PEND_PFAULT_DONE:
			rc = __deliver_pfault_done(vcpu);
			break;
		case IRQ_PEND_VIRTIO:
			rc = __deliver_virtio(vcpu);
			break;
		default:
			/* drop unknown pending bits so we cannot loop forever */
			WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
			clear_bit(irq_type, &li->pending_irqs);
		}
		delivered |= !rc;
	}

	/*
	 * We delivered at least one interrupt and modified the PC. Force a
	 * singlestep event now.
	 */
	if (delivered && guestdbg_sstep_enabled(vcpu)) {
		struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;

		debug_exit->addr = vcpu->arch.sie_block->gpsw.addr;
		debug_exit->type = KVM_SINGLESTEP;
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
	}

	set_intercept_indicators(vcpu);
	return rc;
}
/*
 * Inject a program interrupt into the vcpu's local pending state.
 *
 * A pending program irq may already exist; the PER and non-PER parts
 * of the payload are merged so that neither overwrites the other:
 * a pure PER injection only updates PER fields, a pure non-PER
 * injection only updates non-PER fields, and a combined injection
 * replaces everything.  Always returns 0.
 */
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_program++;
	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		/* keep a possibly pending PER indication */
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		/* combined PER + other condition: take everything */
		li->irq.pgm = irq->u.pgm;
	}
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}
/*
 * Queue a pfault-init external interrupt on @vcpu.
 *
 * Caller must hold li->lock.  Raises CPUSTAT_EXT_INT so the SIE notices
 * the pending external interrupt.  Always returns 0.
 */
static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_pfault_init++;
	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
/*
 * Queue an external-call interrupt from source CPU @irq->u.extcall.code.
 *
 * Caller must hold li->lock.  If SIGP interpretation facility is usable
 * (and the vcpu is not protected), the call is injected via the SCA so
 * the hardware delivers it; otherwise it is queued in software.
 *
 * Returns 0 on success, -EINVAL if the source cpu does not exist,
 * -EBUSY if an external call is already pending.
 */
static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	vcpu->stat.inject_external_call++;
	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu))
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
/*
 * Queue a SIGP set-prefix request for @vcpu.
 *
 * Caller must hold li->lock.  Per the architecture, set prefix is only
 * accepted while the target CPU is stopped; otherwise return -EBUSY.
 */
static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	vcpu->stat.inject_set_prefix++;
	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}
/* Flags userspace may pass with a SIGP stop request. */
#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)

/*
 * Queue a SIGP stop request for @vcpu.
 *
 * Caller must hold li->lock.  If the vcpu is already stopped, there is
 * nothing to queue; only a requested store-status is performed directly.
 * Returns 0 on success, -EINVAL for unsupported flags, -EBUSY if a stop
 * is already pending.
 */
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	vcpu->stat.inject_stop_signal++;
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	return 0;
}
/*
 * Queue a SIGP restart interrupt for @vcpu.
 *
 * Caller must hold li->lock.  Always returns 0.
 */
static int __inject_sigp_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_restart++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}
/*
 * Queue an emergency-signal external interrupt from the cpu given in
 * @irq->u.emerg.code.
 *
 * Caller must hold li->lock.  Multiple senders are tracked in the
 * sigp_emerg_pending bitmap (one bit per source cpu id).
 * Returns 0 on success, -EINVAL if the sending cpu does not exist.
 */
static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_emergency_signal++;
	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
/*
 * Queue a machine-check interrupt on @vcpu, merging it with any machine
 * check already pending.
 *
 * Caller must hold li->lock.  Always returns 0.
 */
static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	vcpu->stat.inject_mchk++;
	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));

	/* exigent conditions take priority over repressible ones */
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}
/*
 * Queue a clock-comparator external interrupt on @vcpu.
 *
 * Caller must hold li->lock.  Always returns 0.
 */
static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_ckc++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
/*
 * Queue a CPU-timer external interrupt on @vcpu.
 *
 * Caller must hold li->lock.  Always returns 0.
 */
static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_cputm++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
/*
 * Dequeue the first pending classic I/O interrupt on @isc, optionally
 * limited to subchannel @schid (0 means "any subchannel").
 *
 * On a match, the entry is unlinked from the per-ISC list, the float
 * counter is decremented and the ISC's pending bit is cleared when its
 * list becomes empty.  Takes and releases fi->lock on every path.
 *
 * Returns the dequeued entry (caller owns it) or NULL.
 */
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;	/* subchannel id */
	u16 nr = schid & 0x0000ffffU;		/* subchannel nr */

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}
  1517. static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
  1518. u64 isc_mask, u32 schid)
  1519. {
  1520. struct kvm_s390_interrupt_info *inti = NULL;
  1521. int isc;
  1522. for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
  1523. if (isc_mask & isc_to_isc_bits(isc))
  1524. inti = get_io_int(kvm, isc, schid);
  1525. }
  1526. return inti;
  1527. }
/*
 * Find and clear the highest-priority pending adapter interrupt in the
 * GISA whose ISC is enabled in @isc_mask.
 *
 * GISA interrupts carry no subchannel, so any non-zero @schid request
 * cannot be satisfied here.  Returns the ISC number, or -EINVAL if no
 * matching adapter interrupt is pending.
 */
static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	unsigned long active_mask;
	int isc;

	if (schid)
		goto out;
	if (!gi->origin)
		goto out;

	/*
	 * Note: << binds tighter than &, so the 8-bit IPM is first shifted
	 * to line up with the ISC bits of isc_mask, then masked, then moved
	 * into the top byte so MSB-0 (inverted) bit numbering equals the
	 * ISC number.
	 */
	active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
	while (active_mask) {
		isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
		/* test-and-clear may race with the guest; retry on failure */
		if (gisa_tac_ipm_gisc(gi->origin, isc))
			return isc;
		clear_bit_inv(isc, &active_mask);
	}
out:
	return -EINVAL;
}
/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 * Take into account the interrupts pending in the interrupt list and in GISA.
 *
 * Note that for a guest that does not enable I/O interrupts
 * but relies on TPI, a flood of classic interrupts may starve
 * out adapter interrupts on the same isc. Linux does not do
 * that, and it is possible to work around the issue by configuring
 * different iscs for classic and adapter interrupts in the guest,
 * but we may want to revisit this in the future.
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_s390_interrupt_info *inti, *tmp_inti;
	int isc;

	/* both sources are probed; the losing one must be re-queued below */
	inti = get_top_io_int(kvm, isc_mask, schid);

	isc = get_top_gisa_isc(kvm, isc_mask, schid);
	if (isc < 0)
		/* no AI in GISA */
		goto out;

	if (!inti)
		/* AI in GISA but no classical IO int */
		goto gisa_out;

	/* both types of interrupts present */
	if (int_word_to_isc(inti->io.io_int_word) <= isc) {
		/* classical IO int with higher priority */
		gisa_set_ipm_gisc(gi->origin, isc);
		goto out;
	}
gisa_out:
	/* build a synthetic adapter interrupt for the GISA ISC */
	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
	if (tmp_inti) {
		tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
		tmp_inti->io.io_int_word = isc_to_int_word(isc);
		if (inti)
			/* put the dequeued classic int back on its list */
			kvm_s390_reinject_io_int(kvm, inti);
		inti = tmp_inti;
	} else
		/* allocation failed: restore the IPM bit instead */
		gisa_set_ipm_gisc(gi->origin, isc);
out:
	return inti;
}
/*
 * Queue a floating service (SCLP) interrupt.
 *
 * Takes ownership of @inti and frees it (service signals are collapsed
 * into fi->srv_signal rather than queued individually).  Event-pending
 * indications are always accepted; a new SCCB address is silently
 * dropped while one is still pending.  Always returns 0.
 */
static int __inject_service(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_service_signal++;
	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;

	/* We always allow events, track them separately from the sccb ints */
	if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING)
		set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);

	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}
/*
 * Queue a floating virtio interrupt.
 *
 * On success the list takes ownership of @inti; on -EBUSY (queue full)
 * the caller keeps ownership and must free it.
 */
static int __inject_virtio(struct kvm *kvm,
			   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_virtio++;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
/*
 * Queue a floating pfault-done interrupt.
 *
 * On success the list takes ownership of @inti; on -EBUSY (queue full)
 * the caller keeps ownership and must free it.
 */
static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_pfault_done++;
	spin_lock(&fi->lock);
	/* bounded by the maximum number of in-flight async page faults */
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
/* Bit position of the channel-report-pending subclass in CR14. */
#define CR_PENDING_SUBCLASS 28

/*
 * Queue a floating (repressible) machine check, merging cr14 and mcic
 * into the single pending slot.  Takes ownership of @inti and frees it.
 * Always returns 0.
 */
static int __inject_float_mchk(struct kvm *kvm,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_float_mchk++;
	spin_lock(&fi->lock);
	/* only the channel-report-pending subclass is honoured from cr14 */
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}
/*
 * Queue a floating I/O interrupt.
 *
 * Adapter interrupts go straight into the GISA IPM when a GISA exists
 * (ownership of @inti ends here, it is freed).  Classic interrupts are
 * queued on the per-ISC list; on success the list owns @inti, on -EBUSY
 * the caller keeps ownership.
 */
static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	kvm->stat.inject_io++;
	isc = int_word_to_isc(inti->io.io_int_word);

	/*
	 * We do not use the lock checking variant as this is just a
	 * performance optimization and we do not hold the lock here.
	 * This is ok as the code will pick interrupts from both "lists"
	 * for delivery.
	 */
	if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
		gisa_set_ipm_gisc(gi->origin, isc);
		kfree(inti);
		return 0;
	}

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;
	if (inti->type & KVM_S390_INT_IO_AI_MASK)
		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
	else
		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
			inti->io.subchannel_id >> 8,
			inti->io.subchannel_id >> 1 & 0x3,
			inti->io.subchannel_nr);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
/*
 * Find a destination VCPU for a floating irq and kick it.
 *
 * Prefers an idle vcpu; otherwise picks the next one round-robin,
 * skipping stopped vcpus.  Gives up (no kick) when there are no online
 * vcpus or all of them are stopped.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = kvm->arch.float_int.next_rr_cpu++;
			kvm->arch.float_int.next_rr_cpu %= online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	switch (type) {
	case KVM_S390_MCHK:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		/*
		 * GISA-delivered adapter interrupts do not need the IO
		 * intercept request (unless the vcpu is protected).
		 */
		if (!(type & KVM_S390_INT_IO_AI_MASK &&
		      kvm->arch.gisa_int.origin) ||
		      kvm_s390_pv_cpu_get_handle(dst_vcpu))
			kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
		break;
	default:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
		break;
	}
	kvm_s390_vcpu_wakeup(dst_vcpu);
}
  1745. static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
  1746. {
  1747. u64 type = READ_ONCE(inti->type);
  1748. int rc;
  1749. switch (type) {
  1750. case KVM_S390_MCHK:
  1751. rc = __inject_float_mchk(kvm, inti);
  1752. break;
  1753. case KVM_S390_INT_VIRTIO:
  1754. rc = __inject_virtio(kvm, inti);
  1755. break;
  1756. case KVM_S390_INT_SERVICE:
  1757. rc = __inject_service(kvm, inti);
  1758. break;
  1759. case KVM_S390_INT_PFAULT_DONE:
  1760. rc = __inject_pfault_done(kvm, inti);
  1761. break;
  1762. case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
  1763. rc = __inject_io(kvm, inti);
  1764. break;
  1765. default:
  1766. rc = -EINVAL;
  1767. }
  1768. if (rc)
  1769. return rc;
  1770. __floating_irq_kick(kvm, type);
  1771. return 0;
  1772. }
/*
 * Inject a floating interrupt described by the legacy
 * struct kvm_s390_interrupt userspace format.
 *
 * Translates @s390int into an internal kvm_s390_interrupt_info and
 * hands it to __inject_vm().  Returns 0 on success, -ENOMEM on
 * allocation failure, -EINVAL for unsupported types, or the injection
 * helper's error; the allocation is freed on every failure path.
 */
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		/* parm packs subchannel id/nr, parm64 packs parm/word */
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);
	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}
/*
 * Re-queue a previously dequeued I/O interrupt (e.g. after it lost
 * priority arbitration against a GISA adapter interrupt).
 */
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}
  1824. int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
  1825. struct kvm_s390_irq *irq)
  1826. {
  1827. irq->type = s390int->type;
  1828. switch (irq->type) {
  1829. case KVM_S390_PROGRAM_INT:
  1830. if (s390int->parm & 0xffff0000)
  1831. return -EINVAL;
  1832. irq->u.pgm.code = s390int->parm;
  1833. break;
  1834. case KVM_S390_SIGP_SET_PREFIX:
  1835. irq->u.prefix.address = s390int->parm;
  1836. break;
  1837. case KVM_S390_SIGP_STOP:
  1838. irq->u.stop.flags = s390int->parm;
  1839. break;
  1840. case KVM_S390_INT_EXTERNAL_CALL:
  1841. if (s390int->parm & 0xffff0000)
  1842. return -EINVAL;
  1843. irq->u.extcall.code = s390int->parm;
  1844. break;
  1845. case KVM_S390_INT_EMERGENCY:
  1846. if (s390int->parm & 0xffff0000)
  1847. return -EINVAL;
  1848. irq->u.emerg.code = s390int->parm;
  1849. break;
  1850. case KVM_S390_MCHK:
  1851. irq->u.mchk.mcic = s390int->parm64;
  1852. break;
  1853. case KVM_S390_INT_PFAULT_INIT:
  1854. irq->u.ext.ext_params = s390int->parm;
  1855. irq->u.ext.ext_params2 = s390int->parm64;
  1856. break;
  1857. case KVM_S390_RESTART:
  1858. case KVM_S390_INT_CLOCK_COMP:
  1859. case KVM_S390_INT_CPU_TIMER:
  1860. break;
  1861. default:
  1862. return -EINVAL;
  1863. }
  1864. return 0;
  1865. }
  1866. int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
  1867. {
  1868. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  1869. return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
  1870. }
  1871. int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
  1872. {
  1873. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  1874. return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
  1875. }
/*
 * Drop any pending SIGP stop request on @vcpu, clearing both the
 * pending bit and the stored stop flags under li->lock.
 */
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}
  1884. static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
  1885. {
  1886. int rc;
  1887. switch (irq->type) {
  1888. case KVM_S390_PROGRAM_INT:
  1889. rc = __inject_prog(vcpu, irq);
  1890. break;
  1891. case KVM_S390_SIGP_SET_PREFIX:
  1892. rc = __inject_set_prefix(vcpu, irq);
  1893. break;
  1894. case KVM_S390_SIGP_STOP:
  1895. rc = __inject_sigp_stop(vcpu, irq);
  1896. break;
  1897. case KVM_S390_RESTART:
  1898. rc = __inject_sigp_restart(vcpu);
  1899. break;
  1900. case KVM_S390_INT_CLOCK_COMP:
  1901. rc = __inject_ckc(vcpu);
  1902. break;
  1903. case KVM_S390_INT_CPU_TIMER:
  1904. rc = __inject_cpu_timer(vcpu);
  1905. break;
  1906. case KVM_S390_INT_EXTERNAL_CALL:
  1907. rc = __inject_extcall(vcpu, irq);
  1908. break;
  1909. case KVM_S390_INT_EMERGENCY:
  1910. rc = __inject_sigp_emergency(vcpu, irq);
  1911. break;
  1912. case KVM_S390_MCHK:
  1913. rc = __inject_mchk(vcpu, irq);
  1914. break;
  1915. case KVM_S390_INT_PFAULT_INIT:
  1916. rc = __inject_pfault_init(vcpu, irq);
  1917. break;
  1918. case KVM_S390_INT_VIRTIO:
  1919. case KVM_S390_INT_SERVICE:
  1920. case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
  1921. default:
  1922. rc = -EINVAL;
  1923. }
  1924. return rc;
  1925. }
/*
 * Inject a local interrupt on @vcpu under li->lock and wake the vcpu on
 * success so it can deliver the interrupt.  Returns the dispatch
 * helper's result.
 */
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}
/*
 * Unlink and free every interrupt-info entry on @_list.  The _safe
 * iterator is required because each entry is freed while walking.
 */
static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}
/*
 * Translate an internal interrupt-info entry into the userspace
 * struct kvm_s390_irq format.  Only the payload union member matching
 * the type is copied; other types carry just the type field.
 */
static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}
  1960. void kvm_s390_clear_float_irqs(struct kvm *kvm)
  1961. {
  1962. struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
  1963. int i;
  1964. mutex_lock(&kvm->lock);
  1965. if (!kvm_s390_pv_is_protected(kvm))
  1966. fi->masked_irqs = 0;
  1967. mutex_unlock(&kvm->lock);
  1968. spin_lock(&fi->lock);
  1969. fi->pending_irqs = 0;
  1970. memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
  1971. memset(&fi->mchk, 0, sizeof(fi->mchk));
  1972. for (i = 0; i < FIRQ_LIST_COUNT; i++)
  1973. clear_irq_list(&fi->lists[i]);
  1974. for (i = 0; i < FIRQ_MAX_COUNT; i++)
  1975. fi->counters[i] = 0;
  1976. spin_unlock(&fi->lock);
  1977. kvm_s390_gisa_clear(kvm);
  1978. };
/*
 * Copy all pending floating interrupts to the userspace buffer @usrbuf
 * of @len bytes (KVM_DEV_FLIC_GET_ALL_IRQS).
 *
 * GISA adapter interrupts are drained first (test-and-clear, outside
 * fi->lock), then list entries, the service signal and the pending
 * machine check are snapshotted under fi->lock.
 *
 * Returns the number of interrupts written, -EINVAL for a bad length,
 * -ENOBUFS on allocation failure, -ENOMEM when the buffer is too small
 * (userspace should retry with a bigger one), or -EFAULT.
 */
static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	if (gi->origin && gisa_get_ipm(gi->origin)) {
		for (i = 0; i <= MAX_ISC; i++) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out_nolock;
			}
			if (gisa_tac_ipm_gisc(gi->origin, i)) {
				irq = (struct kvm_s390_irq *) &buf[n];
				irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
				irq->u.io.io_int_word = isc_to_int_word(i);
				n++;
			}
		}
	}
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) ||
	    test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
out_nolock:
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}
/*
 * Copy the adapter-interruption-suppression mode masks (simm/nimm) to
 * userspace (KVM_DEV_FLIC_AISM_ALL).
 *
 * Returns 0 on success, -EINVAL if the userspace buffer (attr->attr)
 * is too small, -EOPNOTSUPP without facility 72, or -EFAULT.
 */
static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (attr->attr < sizeof(ais))
		return -EINVAL;

	if (!test_kvm_facility(kvm, 72))
		return -EOPNOTSUPP;

	/* snapshot both masks atomically w.r.t. AIS mode changes */
	mutex_lock(&fi->ais_lock);
	ais.simm = fi->simm;
	ais.nimm = fi->nimm;
	mutex_unlock(&fi->ais_lock);

	if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
		return -EFAULT;

	return 0;
}
  2078. static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
  2079. {
  2080. int r;
  2081. switch (attr->group) {
  2082. case KVM_DEV_FLIC_GET_ALL_IRQS:
  2083. r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
  2084. attr->attr);
  2085. break;
  2086. case KVM_DEV_FLIC_AISM_ALL:
  2087. r = flic_ais_mode_get_all(dev->kvm, attr);
  2088. break;
  2089. default:
  2090. r = -EINVAL;
  2091. }
  2092. return r;
  2093. }
/*
 * Read one struct kvm_s390_irq from userspace at @addr into the
 * internal @inti representation.
 *
 * The type field is read first (it shares the leading u64 of the
 * userspace layout), then only the union member matching that type is
 * copied.  Returns 0, -EFAULT, or -EINVAL for unsupported types.
 */
static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}
/*
 * KVM_DEV_FLIC_ENQUEUE: inject an array of struct kvm_s390_irq entries
 * from userspace as floating interrupts.
 *
 * attr->attr is the byte length; it must be a non-negative multiple of
 * the struct size and at most KVM_S390_FLIC_MAX_BUFFER.  On success
 * ownership of each allocation passes to __inject_vm(); on any failure
 * the current entry is freed and injection stops (earlier entries stay
 * injected).  Returns 0 or a negative error.
 */
static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}
/*
 * Look up a registered I/O adapter by @id; NULL if out of range or not
 * registered.  array_index_nospec() clamps the index after the bounds
 * check to prevent Spectre-v1 speculation past it.
 */
static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
	return kvm->arch.adapters[id];
}
/*
 * KVM_DEV_FLIC_ADAPTER_REGISTER: create an I/O adapter from the
 * userspace description at attr->addr.
 *
 * Returns 0 on success, -EFAULT on copy failure, -EINVAL for an
 * out-of-range or already-used id, -ENOMEM on allocation failure.
 */
static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
		return -EINVAL;

	/* Spectre-v1 clamp; mirrors get_io_adapter() */
	adapter_info.id = array_index_nospec(adapter_info.id,
					     MAX_S390_IO_ADAPTERS);

	if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL_ACCOUNT);
	if (!adapter)
		return -ENOMEM;

	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	adapter->suppressible = (adapter_info.flags) &
				KVM_S390_ADAPTER_SUPPRESSIBLE;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}
/*
 * Set the mask state of adapter @id to @masked.
 *
 * Returns the previous mask state (0/1) on success, or -EINVAL if the
 * adapter does not exist or is not maskable.
 */
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}
  2202. void kvm_s390_destroy_adapters(struct kvm *kvm)
  2203. {
  2204. int i;
  2205. for (i = 0; i < MAX_S390_IO_ADAPTERS; i++)
  2206. kfree(kvm->arch.adapters[i]);
  2207. }
/*
 * KVM_DEV_FLIC_ADAPTER_MODIFY: apply a mask/map/unmap request from
 * userspace to an adapter.
 *
 * Returns 0 on success, -EFAULT on copy failure, -EINVAL for an
 * unknown adapter or request type.
 */
static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		/* a positive return is the previous mask state, not an error */
		if (ret > 0)
			ret = 0;
		break;
	/*
	 * The following operations are no longer needed and therefore no-ops.
	 * The gpa to hva translation is done when an IRQ route is set up. The
	 * set_irq code uses get_user_pages_remote() to do the actual write.
	 */
	case KVM_S390_IO_ADAPTER_MAP:
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
  2239. static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
  2240. {
  2241. const u64 isc_mask = 0xffUL << 24; /* all iscs set */
  2242. u32 schid;
  2243. if (attr->flags)
  2244. return -EINVAL;
  2245. if (attr->attr != sizeof(schid))
  2246. return -EINVAL;
  2247. if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
  2248. return -EFAULT;
  2249. if (!schid)
  2250. return -EINVAL;
  2251. kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
  2252. /*
  2253. * If userspace is conforming to the architecture, we can have at most
  2254. * one pending I/O interrupt per subchannel, so this is effectively a
  2255. * clear all.
  2256. */
  2257. return 0;
  2258. }
/*
 * Set the adapter-interruption-suppression (AIS) mode for one guest ISC.
 * Requires facility 72; returns 0 on success, -EOPNOTSUPP without the
 * facility, -EFAULT on a bad user buffer, -EINVAL for a bad ISC or mode.
 */
static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_req req;
	int ret = 0;

	if (!test_kvm_facility(kvm, 72))
		return -EOPNOTSUPP;
	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;
	if (req.isc > MAX_ISC)
		return -EINVAL;

	/*
	 * Trace the old mode for this ISC: simm clear -> ALL mode;
	 * simm set, nimm clear -> SINGLE mode; both set -> 2
	 * (NO-interruptions, which has no KVM_S390_AIS_MODE_* name).
	 */
	trace_kvm_s390_modify_ais_mode(req.isc,
				       (fi->simm & AIS_MODE_MASK(req.isc)) ?
				       (fi->nimm & AIS_MODE_MASK(req.isc)) ?
				       2 : KVM_S390_AIS_MODE_SINGLE :
				       KVM_S390_AIS_MODE_ALL, req.mode);

	mutex_lock(&fi->ais_lock);
	switch (req.mode) {
	case KVM_S390_AIS_MODE_ALL:
		/* ALL mode: no suppression for this ISC. */
		fi->simm &= ~AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	case KVM_S390_AIS_MODE_SINGLE:
		/* SINGLE mode: arm suppression, nothing suppressed yet. */
		fi->simm |= AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&fi->ais_lock);

	return ret;
}
/*
 * Inject a floating I/O interrupt for @adapter, honoring adapter-
 * interruption suppression (AIS) when facility 72 is available and the
 * adapter is suppressible.  Returns 0 on success (including the
 * suppressed case) or a negative error from kvm_s390_inject_vm().
 */
static int kvm_s390_inject_airq(struct kvm *kvm,
				struct s390_io_adapter *adapter)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_IO(1, 0, 0, 0),
		.parm = 0,
		.parm64 = isc_to_int_word(adapter->isc),
	};
	int ret = 0;

	/* Without AIS, always deliver the interrupt. */
	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
		return kvm_s390_inject_vm(kvm, &s390int);

	mutex_lock(&fi->ais_lock);
	/* NO-interruptions mode for this ISC: swallow the airq. */
	if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
		trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
		goto out;
	}

	ret = kvm_s390_inject_vm(kvm, &s390int);
	/* SINGLE mode: after one delivery, switch to NO-interruptions. */
	if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
		fi->nimm |= AIS_MODE_MASK(adapter->isc);
		trace_kvm_s390_modify_ais_mode(adapter->isc,
					       KVM_S390_AIS_MODE_SINGLE, 2);
	}
out:
	mutex_unlock(&fi->ais_lock);
	return ret;
}
  2318. static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
  2319. {
  2320. unsigned int id = attr->attr;
  2321. struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
  2322. if (!adapter)
  2323. return -EINVAL;
  2324. return kvm_s390_inject_airq(kvm, adapter);
  2325. }
  2326. static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
  2327. {
  2328. struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
  2329. struct kvm_s390_ais_all ais;
  2330. if (!test_kvm_facility(kvm, 72))
  2331. return -EOPNOTSUPP;
  2332. if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
  2333. return -EFAULT;
  2334. mutex_lock(&fi->ais_lock);
  2335. fi->simm = ais.simm;
  2336. fi->nimm = ais.nimm;
  2337. mutex_unlock(&fi->ais_lock);
  2338. return 0;
  2339. }
  2340. static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
  2341. {
  2342. int r = 0;
  2343. unsigned long i;
  2344. struct kvm_vcpu *vcpu;
  2345. switch (attr->group) {
  2346. case KVM_DEV_FLIC_ENQUEUE:
  2347. r = enqueue_floating_irq(dev, attr);
  2348. break;
  2349. case KVM_DEV_FLIC_CLEAR_IRQS:
  2350. kvm_s390_clear_float_irqs(dev->kvm);
  2351. break;
  2352. case KVM_DEV_FLIC_APF_ENABLE:
  2353. dev->kvm->arch.gmap->pfault_enabled = 1;
  2354. break;
  2355. case KVM_DEV_FLIC_APF_DISABLE_WAIT:
  2356. dev->kvm->arch.gmap->pfault_enabled = 0;
  2357. /*
  2358. * Make sure no async faults are in transition when
  2359. * clearing the queues. So we don't need to worry
  2360. * about late coming workers.
  2361. */
  2362. synchronize_srcu(&dev->kvm->srcu);
  2363. kvm_for_each_vcpu(i, vcpu, dev->kvm)
  2364. kvm_clear_async_pf_completion_queue(vcpu);
  2365. break;
  2366. case KVM_DEV_FLIC_ADAPTER_REGISTER:
  2367. r = register_io_adapter(dev, attr);
  2368. break;
  2369. case KVM_DEV_FLIC_ADAPTER_MODIFY:
  2370. r = modify_io_adapter(dev, attr);
  2371. break;
  2372. case KVM_DEV_FLIC_CLEAR_IO_IRQ:
  2373. r = clear_io_irq(dev->kvm, attr);
  2374. break;
  2375. case KVM_DEV_FLIC_AISM:
  2376. r = modify_ais_mode(dev->kvm, attr);
  2377. break;
  2378. case KVM_DEV_FLIC_AIRQ_INJECT:
  2379. r = flic_inject_airq(dev->kvm, attr);
  2380. break;
  2381. case KVM_DEV_FLIC_AISM_ALL:
  2382. r = flic_ais_mode_set_all(dev->kvm, attr);
  2383. break;
  2384. default:
  2385. r = -EINVAL;
  2386. }
  2387. return r;
  2388. }
  2389. static int flic_has_attr(struct kvm_device *dev,
  2390. struct kvm_device_attr *attr)
  2391. {
  2392. switch (attr->group) {
  2393. case KVM_DEV_FLIC_GET_ALL_IRQS:
  2394. case KVM_DEV_FLIC_ENQUEUE:
  2395. case KVM_DEV_FLIC_CLEAR_IRQS:
  2396. case KVM_DEV_FLIC_APF_ENABLE:
  2397. case KVM_DEV_FLIC_APF_DISABLE_WAIT:
  2398. case KVM_DEV_FLIC_ADAPTER_REGISTER:
  2399. case KVM_DEV_FLIC_ADAPTER_MODIFY:
  2400. case KVM_DEV_FLIC_CLEAR_IO_IRQ:
  2401. case KVM_DEV_FLIC_AISM:
  2402. case KVM_DEV_FLIC_AIRQ_INJECT:
  2403. case KVM_DEV_FLIC_AISM_ALL:
  2404. return 0;
  2405. }
  2406. return -ENXIO;
  2407. }
  2408. static int flic_create(struct kvm_device *dev, u32 type)
  2409. {
  2410. if (!dev)
  2411. return -EINVAL;
  2412. if (dev->kvm->arch.flic)
  2413. return -EINVAL;
  2414. dev->kvm->arch.flic = dev;
  2415. return 0;
  2416. }
  2417. static void flic_destroy(struct kvm_device *dev)
  2418. {
  2419. dev->kvm->arch.flic = NULL;
  2420. kfree(dev);
  2421. }
/* s390 floating irq controller (flic) — KVM device ops table. */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.has_attr = flic_has_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
  2431. static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
  2432. {
  2433. unsigned long bit;
  2434. bit = bit_nr + (addr % PAGE_SIZE) * 8;
  2435. return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
  2436. }
  2437. static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
  2438. {
  2439. struct page *page = NULL;
  2440. mmap_read_lock(kvm->mm);
  2441. get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
  2442. &page, NULL);
  2443. mmap_read_unlock(kvm->mm);
  2444. return page;
  2445. }
/*
 * Set the adapter and summary indicator bits for one adapter interrupt.
 * Both indicator locations are userspace addresses (translated when the
 * IRQ route was set up); each backing page is pinned, the bit set, and
 * the page marked dirty.
 *
 * Returns 1 if the summary bit was newly set (interrupt must be
 * injected), 0 if it was already set (interrupt coalesced), -1 if a
 * page could not be pinned.
 *
 * NOTE(review): ind_addr/summary_addr hold userspace addresses (see
 * kvm_set_routing_entry()), yet ">> PAGE_SHIFT" of them is passed to
 * mark_page_dirty() as a frame number — confirm this is the intended
 * argument and not a gpa/hva mixup.
 */
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct page *ind_page, *summary_page;
	void *map;

	ind_page = get_map_page(kvm, adapter_int->ind_addr);
	if (!ind_page)
		return -1;
	summary_page = get_map_page(kvm, adapter_int->summary_addr);
	if (!summary_page) {
		put_page(ind_page);
		return -1;
	}

	idx = srcu_read_lock(&kvm->srcu);
	/* Set the device-specific indicator bit. */
	map = page_address(ind_page);
	bit = get_ind_bit(adapter_int->ind_addr,
			  adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT);
	set_page_dirty_lock(ind_page);
	/* Set the summary bit, remembering whether it was already set. */
	map = page_address(summary_page);
	bit = get_ind_bit(adapter_int->summary_addr,
			  adapter_int->summary_offset, adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT);
	set_page_dirty_lock(summary_page);
	srcu_read_unlock(&kvm->srcu, idx);

	put_page(ind_page);
	put_page(summary_page);
	return summary_set ? 0 : 1;
}
  2480. /*
  2481. * < 0 - not injected due to error
  2482. * = 0 - coalesced, summary indicator already active
  2483. * > 0 - injected interrupt
  2484. */
  2485. static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
  2486. struct kvm *kvm, int irq_source_id, int level,
  2487. bool line_status)
  2488. {
  2489. int ret;
  2490. struct s390_io_adapter *adapter;
  2491. /* We're only interested in the 0->1 transition. */
  2492. if (!level)
  2493. return 0;
  2494. adapter = get_io_adapter(kvm, e->adapter.adapter_id);
  2495. if (!adapter)
  2496. return -1;
  2497. ret = adapter_indicators_set(kvm, adapter, &e->adapter);
  2498. if ((ret > 0) && !adapter->masked) {
  2499. ret = kvm_s390_inject_airq(kvm, adapter);
  2500. if (ret == 0)
  2501. ret = 1;
  2502. }
  2503. return ret;
  2504. }
/*
 * Inject the machine check to the guest.
 *
 * Builds CR14 subclass bits from the machine-check interruption code
 * (MCIC) and injects either a floating machine check (channel report
 * pending) or a vcpu-local one.
 */
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info)
{
	struct kvm_s390_interrupt_info inti;
	struct kvm_s390_irq irq;
	struct kvm_s390_mchk_info *mchk;
	union mci mci;
	__u64 cr14 = 0; /* upper bits are not used */
	int rc;

	mci.val = mcck_info->mcic;
	if (mci.sr)
		cr14 |= CR14_RECOVERY_SUBMASK;
	if (mci.dg)
		cr14 |= CR14_DEGRADATION_SUBMASK;
	if (mci.w)
		cr14 |= CR14_WARNING_SUBMASK;

	/*
	 * mchk aliases into whichever container will actually be
	 * injected below; only that container's mchk field is filled in.
	 */
	mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
	mchk->cr14 = cr14;
	mchk->mcic = mcck_info->mcic;
	mchk->ext_damage_code = mcck_info->ext_damage_code;
	mchk->failing_storage_address = mcck_info->failing_storage_address;

	if (mci.ck) {
		/* Inject the floating machine check */
		inti.type = KVM_S390_MCHK;
		rc = __inject_vm(vcpu->kvm, &inti);
	} else {
		/* Inject the machine check to specified vcpu */
		irq.type = KVM_S390_MCHK;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
	}
	WARN_ON_ONCE(rc);
}
/*
 * Translate a userspace IRQ routing entry into the kernel form.
 * Only adapter routing is supported on s390.  Returns 0 on success,
 * -EFAULT if a guest address cannot be translated, -EINVAL for an
 * unsupported routing type.
 */
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	u64 uaddr;

	switch (ue->type) {
	/* we store the userspace addresses instead of the guest addresses */
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		/* Translate both indicator locations up front. */
		uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
		if (uaddr == -EFAULT)
			return -EFAULT;
		e->adapter.summary_addr = uaddr;
		uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
		if (uaddr == -EFAULT)
			return -EFAULT;
		e->adapter.ind_addr = uaddr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		return 0;
	default:
		return -EINVAL;
	}
}
/*
 * MSI routing entries are not supported on s390; interrupts are routed
 * via adapter entries instead (see kvm_set_routing_entry()).
 */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}
/*
 * Restore the local interrupt state of @vcpu from a userspace buffer of
 * struct kvm_s390_irq entries (@len bytes).
 *
 * NOTE(review): @len is used unchecked for vmalloc() and the loop bound;
 * presumably the ioctl caller validates it — confirm.
 *
 * Returns 0 on success, -ENOMEM/-EFAULT on buffer errors, -EBUSY if
 * interrupts are already pending, or an error from do_inject_vcpu().
 */
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	/* Inject each entry; stop at the first failure. */
	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);
	return r;
}
/*
 * Translate one pending local interrupt (identified by the @irq_type
 * pending bit) into the userspace-visible struct kvm_s390_irq form.
 * Types that carry a payload copy it from @li; payload-less types
 * (clock comparator, CPU timer, restart) only set ->type.  Types not
 * handled here leave @irq untouched — callers pre-zero it.
 */
static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		/* Both machine check flavors share one payload slot. */
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}
/*
 * Serialize the vcpu's pending local interrupt state into userspace
 * buffer @buf (array of struct kvm_s390_irq, @len bytes total).
 *
 * The pending bitmap and the SIGP emergency map are snapshotted under
 * the lock and serialized outside it.  SIGP emergencies are expanded
 * to one entry per source CPU; a pending SCA external call (which is
 * not tracked in pending_irqs) is appended last.
 *
 * Returns the number of bytes written, -ENOBUFS if @len is too small,
 * or -EFAULT on copy failure.
 */
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		/* Emergencies are expanded per source CPU below. */
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}
/*
 * Wake at most one idle vcpu that can take delivery of any ISC in
 * @deliverable_mask.  A vcpu already set in gi->kicked_mask has been
 * kicked but has not run yet; in that case the scan stops without
 * another wakeup.
 */
static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
{
	int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_vcpu *vcpu;
	u8 vcpu_isc_mask;

	for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (psw_ioint_disabled(vcpu))
			continue;
		/* The vcpu's enabled I/O ISC mask, taken from CR6. */
		vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
		if (deliverable_mask & vcpu_isc_mask) {
			/* lately kicked but not yet running */
			if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
				return;
			kvm_s390_vcpu_wakeup(vcpu);
			return;
		}
	}
}
  2714. static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
  2715. {
  2716. struct kvm_s390_gisa_interrupt *gi =
  2717. container_of(timer, struct kvm_s390_gisa_interrupt, timer);
  2718. struct kvm *kvm =
  2719. container_of(gi->origin, struct sie_page2, gisa)->kvm;
  2720. u8 pending_mask;
  2721. pending_mask = gisa_get_ipm_or_restore_iam(gi);
  2722. if (pending_mask) {
  2723. __airqs_kick_single_vcpu(kvm, pending_mask);
  2724. hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
  2725. return HRTIMER_RESTART;
  2726. }
  2727. return HRTIMER_NORESTART;
  2728. }
#define NULL_GISA_ADDR 0x00000000UL	/* empty list; GAL irqs enabled */
#define NONE_GISA_ADDR 0x00000001UL	/* list being processed; GAL off */
#define GISA_ADDR_MASK 0xfffff000UL

/*
 * Drain the GIB alert list, arming each listed guest's GISA timer so
 * idle vcpus get kicked to consume pending interruptions.  Processing
 * runs in rounds: each round atomically detaches the current list head
 * and parks NONE_GISA_ADDR (or NULL_GISA_ADDR in the final round) in
 * the origin.
 */
static void process_gib_alert_list(void)
{
	struct kvm_s390_gisa_interrupt *gi;
	u32 final, gisa_phys, origin = 0UL;
	struct kvm_s390_gisa *gisa;
	struct kvm *kvm;

	do {
		/*
		 * If the NONE_GISA_ADDR is still stored in the alert list
		 * origin, we will leave the outer loop. No further GISA has
		 * been added to the alert list by millicode while processing
		 * the current alert list.
		 */
		final = (origin & NONE_GISA_ADDR);
		/*
		 * Cut off the alert list and store the NONE_GISA_ADDR in the
		 * alert list origin to avoid further GAL interruptions.
		 * A new alert list can be build up by millicode in parallel
		 * for guests not in the yet cut-off alert list. When in the
		 * final loop, store the NULL_GISA_ADDR instead. This will re-
		 * enable GAL interruptions on the host again.
		 */
		origin = xchg(&gib->alert_list_origin,
			      (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
		/*
		 * Loop through the just cut-off alert list and start the
		 * gisa timers to kick idle vcpus to consume the pending
		 * interruptions asap.
		 */
		while (origin & GISA_ADDR_MASK) {
			gisa_phys = origin;
			gisa = phys_to_virt(gisa_phys);
			origin = gisa->next_alert;
			/* next_alert == own address marks "not listed". */
			gisa->next_alert = gisa_phys;
			kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
			gi = &kvm->arch.gisa_int;
			if (hrtimer_active(&gi->timer))
				hrtimer_cancel(&gi->timer);
			hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
		}
	} while (!final);
}
  2774. void kvm_s390_gisa_clear(struct kvm *kvm)
  2775. {
  2776. struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
  2777. if (!gi->origin)
  2778. return;
  2779. gisa_clear_ipm(gi->origin);
  2780. VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
  2781. }
/*
 * Set up the GISA for @kvm when the AIV facility is available; no-op
 * otherwise.  The GISA itself lives in the VM's sie_page2.
 */
void kvm_s390_gisa_init(struct kvm *kvm)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

	if (!css_general_characteristics.aiv)
		return;
	gi->origin = &kvm->arch.sie_page2->gisa;
	gi->alert.mask = 0;
	spin_lock_init(&gi->alert.ref_lock);
	gi->expires = 50 * 1000; /* 50 usec */
	hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	gi->timer.function = gisa_vcpu_kicker;
	/* Zero the GISA before publishing next_alert (order matters). */
	memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
	/* next_alert pointing to itself marks "not on the alert list". */
	gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
	VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
}
  2797. void kvm_s390_gisa_enable(struct kvm *kvm)
  2798. {
  2799. struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
  2800. struct kvm_vcpu *vcpu;
  2801. unsigned long i;
  2802. u32 gisa_desc;
  2803. if (gi->origin)
  2804. return;
  2805. kvm_s390_gisa_init(kvm);
  2806. gisa_desc = kvm_s390_get_gisa_desc(kvm);
  2807. if (!gisa_desc)
  2808. return;
  2809. kvm_for_each_vcpu(i, vcpu, kvm) {
  2810. mutex_lock(&vcpu->mutex);
  2811. vcpu->arch.sie_block->gd = gisa_desc;
  2812. vcpu->arch.sie_block->eca |= ECA_AIV;
  2813. VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
  2814. vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
  2815. mutex_unlock(&vcpu->mutex);
  2816. }
  2817. }
/*
 * Detach the GISA from @kvm.  The alert mask is expected to be empty
 * (all ISCs unregistered) and is force-cleared otherwise.  If
 * gisa_set_iam() fails — presumably because the GISA is still on the
 * GIB alert list; confirm against gisa_set_iam()'s contract — the
 * alert list is drained first so no stale reference survives.
 */
void kvm_s390_gisa_destroy(struct kvm *kvm)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_s390_gisa *gisa = gi->origin;

	if (!gi->origin)
		return;
	WARN(gi->alert.mask != 0x00,
	     "unexpected non zero alert.mask 0x%02x",
	     gi->alert.mask);
	gi->alert.mask = 0x00;
	if (gisa_set_iam(gi->origin, gi->alert.mask))
		process_gib_alert_list();
	hrtimer_cancel(&gi->timer);
	gi->origin = NULL;
	VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
}
  2834. void kvm_s390_gisa_disable(struct kvm *kvm)
  2835. {
  2836. struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
  2837. struct kvm_vcpu *vcpu;
  2838. unsigned long i;
  2839. if (!gi->origin)
  2840. return;
  2841. kvm_for_each_vcpu(i, vcpu, kvm) {
  2842. mutex_lock(&vcpu->mutex);
  2843. vcpu->arch.sie_block->eca &= ~ECA_AIV;
  2844. vcpu->arch.sie_block->gd = 0U;
  2845. mutex_unlock(&vcpu->mutex);
  2846. VCPU_EVENT(vcpu, 3, "AIV disabled for cpu %03u", vcpu->vcpu_id);
  2847. }
  2848. kvm_s390_gisa_destroy(kvm);
  2849. }
  2850. /**
  2851. * kvm_s390_gisc_register - register a guest ISC
  2852. *
  2853. * @kvm: the kernel vm to work with
  2854. * @gisc: the guest interruption sub class to register
  2855. *
  2856. * The function extends the vm specific alert mask to use.
  2857. * The effective IAM mask in the GISA is updated as well
  2858. * in case the GISA is not part of the GIB alert list.
  2859. * It will be updated latest when the IAM gets restored
  2860. * by gisa_get_ipm_or_restore_iam().
  2861. *
  2862. * Returns: the nonspecific ISC (NISC) the gib alert mechanism
  2863. * has registered with the channel subsystem.
  2864. * -ENODEV in case the vm uses no GISA
  2865. * -ERANGE in case the guest ISC is invalid
  2866. */
  2867. int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
  2868. {
  2869. struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
  2870. if (!gi->origin)
  2871. return -ENODEV;
  2872. if (gisc > MAX_ISC)
  2873. return -ERANGE;
  2874. spin_lock(&gi->alert.ref_lock);
  2875. gi->alert.ref_count[gisc]++;
  2876. if (gi->alert.ref_count[gisc] == 1) {
  2877. gi->alert.mask |= 0x80 >> gisc;
  2878. gisa_set_iam(gi->origin, gi->alert.mask);
  2879. }
  2880. spin_unlock(&gi->alert.ref_lock);
  2881. return gib->nisc;
  2882. }
  2883. EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);
/**
 * kvm_s390_gisc_unregister - unregister a guest ISC
 *
 * @kvm:  the kernel vm to work with
 * @gisc: the guest interruption sub class to unregister
 *
 * The function reduces the vm specific alert mask to use.
 * The effective IAM mask in the GISA is updated as well
 * in case the GISA is not part of the GIB alert list.
 * It will be updated latest when the IAM gets restored
 * by gisa_get_ipm_or_restore_iam().
 *
 * Returns: 0 on successful unregistration
 *          -ENODEV in case the vm uses no GISA
 *          -ERANGE in case the guest ISC is invalid
 *          -EINVAL in case the guest ISC is not registered
 */
int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	int rc = 0;

	if (!gi->origin)
		return -ENODEV;
	if (gisc > MAX_ISC)
		return -ERANGE;

	spin_lock(&gi->alert.ref_lock);
	if (gi->alert.ref_count[gisc] == 0) {
		rc = -EINVAL;
		goto out;
	}
	gi->alert.ref_count[gisc]--;
	/* Last user gone: shrink the alert mask accordingly. */
	if (gi->alert.ref_count[gisc] == 0) {
		gi->alert.mask &= ~(0x80 >> gisc);
		gisa_set_iam(gi->origin, gi->alert.mask);
	}
out:
	spin_unlock(&gi->alert.ref_lock);

	return rc;
}
EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);
/*
 * Forward an adapter event notification for summary indicator @si to
 * the owning guest: set the ISC's bit in its GISA IPM and kick an idle
 * vcpu via the GISA timer.
 *
 * NOTE(review): the GAIT entry lookup adds si * sizeof(struct
 * zpci_gaite) to a pointer already typed struct zpci_gaite *, which
 * scales the offset twice; the conventional form would be
 * &aift->gait[si].  Confirm the intended gait layout before changing.
 */
static void aen_host_forward(unsigned long si)
{
	struct kvm_s390_gisa_interrupt *gi;
	struct zpci_gaite *gaite;
	struct kvm *kvm;

	gaite = (struct zpci_gaite *)aift->gait +
		(si * sizeof(struct zpci_gaite));
	if (gaite->count == 0)
		return;
	/* Set the adapter interruption summary bit, if one is in use. */
	if (gaite->aisb != 0)
		set_bit_inv(gaite->aisbo, phys_to_virt(gaite->aisb));

	kvm = kvm_s390_pci_si_to_kvm(aift, si);
	if (!kvm)
		return;
	gi = &kvm->arch.gisa_int;

	/* Forward only if the ISC is not fully suppressed (AIS). */
	if (!(gi->origin->g1.simm & AIS_MODE_MASK(gaite->gisc)) ||
	    !(gi->origin->g1.nimm & AIS_MODE_MASK(gaite->gisc))) {
		gisa_set_ipm_gisc(gi->origin, gaite->gisc);
		if (hrtimer_active(&gi->timer))
			hrtimer_cancel(&gi->timer);
		hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
		kvm->stat.aen_forward++;
	}
}
/*
 * Scan the adapter summary indicator bit vector for ISC @isc and
 * forward every pending adapter event to its guest.  The vector is
 * re-scanned after re-enabling adapter interrupts so events arriving
 * mid-scan are not lost; the loop ends only when interrupts are on and
 * a full pass found no set bit.
 */
static void aen_process_gait(u8 isc)
{
	bool found = false, first = true;
	union zpci_sic_iib iib = {{0}};
	unsigned long si, flags;

	spin_lock_irqsave(&aift->gait_lock, flags);

	if (!aift->gait) {
		spin_unlock_irqrestore(&aift->gait_lock, flags);
		return;
	}

	for (si = 0;;) {
		/* Scan adapter summary indicator bit vector */
		si = airq_iv_scan(aift->sbv, si, airq_iv_end(aift->sbv));
		if (si == -1UL) {
			if (first || found) {
				/* Re-enable interrupts. */
				zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, isc,
						  &iib);
				first = found = false;
			} else {
				/* Interrupts on and all bits processed */
				break;
			}
			found = false;
			si = 0;
			/* Scan again after re-enabling interrupts */
			continue;
		}
		found = true;
		aen_host_forward(si);
	}

	spin_unlock_irqrestore(&aift->gait_lock, flags);
}
/*
 * GIB alert interruption handler.  With zPCI passthrough support built
 * in, a forward/error indication first processes the GAIT for the
 * reporting ISC; the GIB alert list is processed whenever it is (also)
 * indicated.
 */
static void gib_alert_irq_handler(struct airq_struct *airq,
				  struct tpi_info *tpi_info)
{
	struct tpi_adapter_info *info = (struct tpi_adapter_info *)tpi_info;

	inc_irq_stat(IRQIO_GAL);

	if ((info->forward || info->error) &&
	    IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		aen_process_gait(info->isc);
		if (info->aism != 0)
			process_gib_alert_list();
	} else {
		process_gib_alert_list();
	}
}
/* Adapter interrupt for GIB alerts; .isc is filled in at gib init. */
static struct airq_struct gib_alert_irq = {
	.handler = gib_alert_irq_handler,
};
/*
 * Tear down the global GIB: quiesce PCI adapter event notification (if
 * in use), detach the GIB from the AIV facility via chsc_sgib(0),
 * unregister the alert interrupt and free the GIB page.  Safe to call
 * when no GIB exists.
 */
void kvm_s390_gib_destroy(void)
{
	if (!gib)
		return;
	if (kvm_s390_pci_interp_allowed() && aift) {
		mutex_lock(&aift->aift_lock);
		kvm_s390_pci_aen_exit();
		mutex_unlock(&aift->aift_lock);
	}
	chsc_sgib(0);
	unregister_adapter_interrupt(&gib_alert_irq);
	free_page((unsigned long)gib);
	gib = NULL;
}
/*
 * Allocate and register the global GISA interruption block (GIB).
 *
 * A missing AIV facility is not an error: the function returns 0 and
 * leaves gib == NULL.  Otherwise the GIB page is allocated with
 * GFP_DMA (below-2G, addressable by the channel subsystem), the alert
 * interrupt for @nisc is registered, the GIB is associated with the
 * AIV facility, and — where allowed — PCI adapter event notification
 * is initialized.
 *
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
int __init kvm_s390_gib_init(u8 nisc)
{
	u32 gib_origin;
	int rc = 0;

	if (!css_general_characteristics.aiv) {
		KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
		goto out;
	}

	gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
	if (!gib) {
		rc = -ENOMEM;
		goto out;
	}

	gib_alert_irq.isc = nisc;
	if (register_adapter_interrupt(&gib_alert_irq)) {
		pr_err("Registering the GIB alert interruption handler failed\n");
		rc = -EIO;
		goto out_free_gib;
	}
	/* adapter interrupts used for AP (applicable here) don't use the LSI */
	*gib_alert_irq.lsi_ptr = 0xff;

	gib->nisc = nisc;
	gib_origin = virt_to_phys(gib);
	if (chsc_sgib(gib_origin)) {
		pr_err("Associating the GIB with the AIV facility failed\n");
		/* gib freed here; the out_free_gib free_page(0) is a no-op */
		free_page((unsigned long)gib);
		gib = NULL;
		rc = -EIO;
		goto out_unreg_gal;
	}

	if (kvm_s390_pci_interp_allowed()) {
		if (kvm_s390_pci_aen_init(nisc)) {
			pr_err("Initializing AEN for PCI failed\n");
			rc = -EIO;
			goto out_unreg_gal;
		}
	}

	KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
	goto out;

out_unreg_gal:
	unregister_adapter_interrupt(&gib_alert_irq);
out_free_gib:
	free_page((unsigned long)gib);
	gib = NULL;
out:
	return rc;
}