intercept.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * in-kernel handling for sie intercepts
  4. *
  5. * Copyright IBM Corp. 2008, 2020
  6. *
  7. * Author(s): Carsten Otte <cotte@de.ibm.com>
  8. * Christian Borntraeger <borntraeger@de.ibm.com>
  9. */
  10. #include <linux/kvm_host.h>
  11. #include <linux/errno.h>
  12. #include <linux/pagemap.h>
  13. #include <asm/asm-offsets.h>
  14. #include <asm/irq.h>
  15. #include <asm/sysinfo.h>
  16. #include <asm/uv.h>
  17. #include "kvm-s390.h"
  18. #include "gaccess.h"
  19. #include "trace.h"
  20. #include "trace-s390.h"
  21. u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
  22. {
  23. struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
  24. u8 ilen = 0;
  25. switch (vcpu->arch.sie_block->icptcode) {
  26. case ICPT_INST:
  27. case ICPT_INSTPROGI:
  28. case ICPT_OPEREXC:
  29. case ICPT_PARTEXEC:
  30. case ICPT_IOINST:
  31. /* instruction only stored for these icptcodes */
  32. ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
  33. /* Use the length of the EXECUTE instruction if necessary */
  34. if (sie_block->icptstatus & 1) {
  35. ilen = (sie_block->icptstatus >> 4) & 0x6;
  36. if (!ilen)
  37. ilen = 4;
  38. }
  39. break;
  40. case ICPT_PROGI:
  41. /* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
  42. ilen = vcpu->arch.sie_block->pgmilc & 0x6;
  43. break;
  44. }
  45. return ilen;
  46. }
/*
 * handle_stop - handle a stop intercept (ICPT_STOP)
 *
 * Stops the vcpu (and stores its status first if requested by SIGP),
 * unless a non-stop interrupt is still pending or the stop irq has
 * meanwhile been delivered or canceled.
 */
static int handle_stop(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	uint8_t flags, stop_pending;

	vcpu->stat.exit_stop_request++;

	/* delay the stop if any non-stop irq is pending */
	if (kvm_s390_vcpu_has_irq(vcpu, 1))
		return 0;

	/* avoid races with the injection/SIGP STOP code */
	spin_lock(&li->lock);
	/* snapshot flags and pending state atomically under li->lock */
	flags = li->irq.stop.flags;
	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
	spin_unlock(&li->lock);

	trace_kvm_s390_stop_request(stop_pending, flags);
	/* the stop request may already have gone away — nothing to do then */
	if (!stop_pending)
		return 0;

	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc)
			return rc;
	}

	/*
	 * no need to check the return value of vcpu_stop as it can only have
	 * an error for protvirt, but protvirt means user cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	/* NOTE(review): -EOPNOTSUPP presumably makes the caller drop to
	 * userspace after the stop — confirm against kvm_handle_sie_intercept
	 * callers. */
	return -EOPNOTSUPP;
}
  78. static int handle_validity(struct kvm_vcpu *vcpu)
  79. {
  80. int viwhy = vcpu->arch.sie_block->ipb >> 16;
  81. vcpu->stat.exit_validity++;
  82. trace_kvm_s390_intercept_validity(vcpu, viwhy);
  83. KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
  84. current->pid, vcpu->kvm);
  85. /* do not warn on invalid runtime instrumentation mode */
  86. WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
  87. viwhy);
  88. return -EINVAL;
  89. }
/*
 * handle_instruction - dispatch an instruction intercept (ICPT_INST)
 *
 * The first opcode byte (ipa >> 8) selects the in-kernel emulation
 * handler; the handlers decode the rest of the instruction from ipa/ipb
 * themselves.  Returns -EOPNOTSUPP for opcodes not emulated here.
 */
static int handle_instruction(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_instruction++;
	trace_kvm_s390_intercept_instruction(vcpu,
					     vcpu->arch.sie_block->ipa,
					     vcpu->arch.sie_block->ipb);

	switch (vcpu->arch.sie_block->ipa >> 8) {
	case 0x01:
		return kvm_s390_handle_01(vcpu);
	case 0x82:
		return kvm_s390_handle_lpsw(vcpu);
	case 0x83:
		return kvm_s390_handle_diag(vcpu);
	case 0xaa:
		return kvm_s390_handle_aa(vcpu);
	case 0xae:
		return kvm_s390_handle_sigp(vcpu);
	case 0xb2:
		return kvm_s390_handle_b2(vcpu);
	case 0xb6:
		return kvm_s390_handle_stctl(vcpu);
	case 0xb7:
		return kvm_s390_handle_lctl(vcpu);
	case 0xb9:
		return kvm_s390_handle_b9(vcpu);
	case 0xe3:
		return kvm_s390_handle_e3(vcpu);
	case 0xe5:
		return kvm_s390_handle_e5(vcpu);
	case 0xeb:
		return kvm_s390_handle_eb(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
/*
 * inject_prog_on_prog_intercept - re-inject the intercepted program
 * interruption into the guest.
 *
 * Depending on the program-interruption code (iprcc), copies the
 * additional interruption information from the SIE block into the pgm
 * irq before injecting it.
 */
static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = vcpu->arch.sie_block->iprcc,
		/* the PSW has already been rewound */
		.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
	};

	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
	/* these exceptions carry a translation-exception code */
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		break;
	/* these exceptions carry an exception access id */
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	/* DAT exceptions carry TEC plus both access ids */
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		pgm_info.op_access_id = vcpu->arch.sie_block->oai;
		break;
	case PGM_MONITOR:
		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
		break;
	case PGM_PROTECTION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	default:
		break;
	}

	/* a PER event may be indicated concurrently with the exception */
	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
		pgm_info.per_code = vcpu->arch.sie_block->perc;
		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
	}
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
  186. /*
  187. * restore ITDB to program-interruption TDB in guest lowcore
  188. * and set TX abort indication if required
  189. */
  190. static int handle_itdb(struct kvm_vcpu *vcpu)
  191. {
  192. struct kvm_s390_itdb *itdb;
  193. int rc;
  194. if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
  195. return 0;
  196. if (current->thread.per_flags & PER_FLAG_NO_TE)
  197. return 0;
  198. itdb = phys_to_virt(vcpu->arch.sie_block->itdba);
  199. rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
  200. if (rc)
  201. return rc;
  202. memset(itdb, 0, sizeof(*itdb));
  203. return 0;
  204. }
/* true iff the program interruption also indicates a PER event */
#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)

/*
 * Decide whether the intercepted PER event must be processed by the
 * guest-debug code before the program interruption is re-injected.
 */
static bool should_handle_per_event(const struct kvm_vcpu *vcpu)
{
	if (!guestdbg_enabled(vcpu) || !per_event(vcpu))
		return false;
	if (guestdbg_sstep_enabled(vcpu) &&
	    vcpu->arch.sie_block->iprcc != PGM_PER) {
		/*
		 * __vcpu_run() will exit after delivering the concurrently
		 * indicated condition.
		 */
		return false;
	}
	return true;
}
/*
 * handle_prog - handle a program-interruption intercept (ICPT_PROGI)
 *
 * Optionally processes PER events for guest debugging, restores the
 * transaction diagnostic block and re-injects the program interruption
 * into the guest.
 */
static int handle_prog(struct kvm_vcpu *vcpu)
{
	psw_t psw;
	int rc;

	vcpu->stat.exit_program_interruption++;

	/*
	 * Intercept 8 indicates a loop of specification exceptions
	 * for protected guests.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EOPNOTSUPP;

	if (should_handle_per_event(vcpu)) {
		rc = kvm_s390_handle_per_event(vcpu);
		if (rc)
			return rc;
		/* the interrupt might have been filtered out completely */
		if (vcpu->arch.sie_block->iprcc == 0)
			return 0;
	}

	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
		/* check the pgm new PSW the guest would be restarted with */
		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
		if (rc)
			return rc;
		/* Avoid endless loops of specification exceptions */
		if (!is_valid_psw(&psw))
			return -EOPNOTSUPP;
	}

	rc = handle_itdb(vcpu);
	if (rc)
		return rc;

	return inject_prog_on_prog_intercept(vcpu);
}
/**
 * handle_external_interrupt - used for external interruption interceptions
 * @vcpu: virtual cpu
 *
 * This interception occurs if:
 * - the CPUSTAT_EXT_INT bit was already set when the external interrupt
 *   occurred. In this case, the interrupt needs to be injected manually to
 *   preserve interrupt priority.
 * - the external new PSW has external interrupts enabled, which will cause an
 *   interruption loop. We drop to userspace in this case.
 *
 * The latter case can be detected by inspecting the external mask bit in the
 * external new psw.
 *
 * Under PV, only the latter case can occur, since interrupt priorities are
 * handled in the ultravisor.
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	u16 eic = vcpu->arch.sie_block->eic;
	struct kvm_s390_irq irq;
	psw_t newpsw;
	int rc;

	vcpu->stat.exit_external_interrupt++;

	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		/* under PV, the current guest PSW is used for the loop check
		 * (see function comment: only the loop case can occur) */
		newpsw = vcpu->arch.sie_block->gpsw;
	} else {
		rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
		if (rc)
			return rc;
	}

	/*
	 * Clock comparator or timer interrupt with external interrupt enabled
	 * will cause interrupt loop. Drop to userspace.
	 */
	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
	    (newpsw.mask & PSW_MASK_EXT))
		return -EOPNOTSUPP;

	switch (eic) {
	case EXT_IRQ_CLK_COMP:
		irq.type = KVM_S390_INT_CLOCK_COMP;
		break;
	case EXT_IRQ_CPU_TIMER:
		irq.type = KVM_S390_INT_CPU_TIMER;
		break;
	case EXT_IRQ_EXTERNAL_CALL:
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
		/* ignore if another external call is already pending */
		if (rc == -EBUSY)
			return 0;
		return rc;
	default:
		return -EOPNOTSUPP;
	}

	/* manually re-inject the interrupt to preserve priority */
	return kvm_s390_inject_vcpu(vcpu, &irq);
}
/**
 * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
 * @vcpu: virtual cpu
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
	unsigned long srcaddr, dstaddr;
	int reg1, reg2, rc;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* Ensure that the source is paged-in, no actual access -> no key checking */
	rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg2],
					      reg2, &srcaddr, GACC_FETCH, 0);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
	if (rc != 0)
		return rc;

	/* Ensure that the destination is paged-in, no actual access -> no key checking */
	rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg1],
					      reg1, &dstaddr, GACC_STORE, 0);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
	if (rc != 0)
		return rc;

	/* both pages mapped: let the guest re-execute MVPG */
	kvm_s390_retry_instr(vcpu);
	return 0;
}
  344. static int handle_partial_execution(struct kvm_vcpu *vcpu)
  345. {
  346. vcpu->stat.exit_pei++;
  347. if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
  348. return handle_mvpg_pei(vcpu);
  349. if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
  350. return kvm_s390_handle_sigp_pei(vcpu);
  351. return -EOPNOTSUPP;
  352. }
/*
 * Handle the sthyi instruction that provides the guest with system
 * information, like current CPU resources available at each level of
 * the machine.
 */
int handle_sthyi(struct kvm_vcpu *vcpu)
{
	int reg1, reg2, cc = 0, r = 0;
	u64 code, addr, rc = 0;
	struct sthyi_sctns *sctns = NULL;

	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
	code = vcpu->run->s.regs.gprs[reg1];
	addr = vcpu->run->s.regs.gprs[reg2];

	vcpu->stat.instruction_sthyi++;
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

	/* r1 and r2 must be distinct even-numbered registers */
	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* non-zero function code: report cc 3 / return code 4 */
	if (code & 0xffff) {
		cc = 3;
		rc = 4;
		goto out;
	}

	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	sctns = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sctns)
		return -ENOMEM;

	cc = sthyi_fill(sctns, &rc);
	if (cc < 0) {
		free_page((unsigned long)sctns);
		return cc;
	}
out:
	if (!cc) {
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			/* protected guests receive the data via the SIDA */
			memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
		} else {
			r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
			if (r) {
				free_page((unsigned long)sctns);
				return kvm_s390_inject_prog_cond(vcpu, r);
			}
		}
	}

	/* no-op if we jumped here with cc == 3 and sctns still NULL */
	free_page((unsigned long)sctns);
	/* return code goes into the register following the address register */
	vcpu->run->s.regs.gprs[reg2 + 1] = rc;
	kvm_s390_set_psw_cc(vcpu, cc);
	return r;
}
/*
 * handle_operexc - handle an operation-exception intercept (ICPT_OPEREXC)
 *
 * Emulates STHYI in the kernel, optionally forwards instruction 0x0000
 * to userspace, and otherwise injects an operation exception — unless
 * doing so would obviously loop forever.
 */
static int handle_operexc(struct kvm_vcpu *vcpu)
{
	psw_t oldpsw, newpsw;
	int rc;

	vcpu->stat.exit_operation_exception++;
	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
				      vcpu->arch.sie_block->ipb);

	/* STHYI (0xb256) is emulated in the kernel */
	if (vcpu->arch.sie_block->ipa == 0xb256)
		return handle_sthyi(vcpu);

	/* userspace asked to handle instruction 0x0000 itself */
	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
		return -EOPNOTSUPP;
	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;

	/*
	 * Avoid endless loops of operation exceptions, if the pgm new
	 * PSW will cause a new operation exception.
	 * The heuristic checks if the pgm new psw is within 6 bytes before
	 * the faulting psw address (with same DAT, AS settings) and the
	 * new psw is not a wait psw and the fault was not triggered by
	 * problem state.
	 */
	oldpsw = vcpu->arch.sie_block->gpsw;
	if (oldpsw.addr - newpsw.addr <= 6 &&
	    !(newpsw.mask & PSW_MASK_WAIT) &&
	    !(oldpsw.mask & PSW_MASK_PSTATE) &&
	    (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
	    (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
		return -EOPNOTSUPP;

	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
  436. static int handle_pv_spx(struct kvm_vcpu *vcpu)
  437. {
  438. u32 pref = *(u32 *)sida_addr(vcpu->arch.sie_block);
  439. kvm_s390_set_prefix(vcpu, pref);
  440. trace_kvm_s390_handle_prefix(vcpu, 1, pref);
  441. return 0;
  442. }
/*
 * handle_pv_sclp - notification intercept for SERVICE CALL under
 * protected virtualization: queue a service-signal interrupt with a
 * fake SCCB address (see comment below for why this is safe).
 */
static int handle_pv_sclp(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	/*
	 * 2 cases:
	 * a: an sccb answering interrupt was already pending or in flight.
	 *    As the sccb value is not known we can simply set some value to
	 *    trigger delivery of a saved SCCB. UV will then use its saved
	 *    copy of the SCCB value.
	 * b: an error SCCB interrupt needs to be injected so we also inject
	 *    a fake SCCB address. Firmware will use the proper one.
	 * This makes sure, that both errors and real sccb returns will only
	 * be delivered after a notification intercept (instruction has
	 * finished) but not after others.
	 */
	fi->srv_signal.ext_params |= 0x43000;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
/*
 * handle_pv_uvc - notification intercept for an Ultravisor Call issued
 * by a protected guest: perform the corresponding unpin of the shared
 * page on the host side.
 */
static int handle_pv_uvc(struct kvm_vcpu *vcpu)
{
	/* the guest's UV call control block is provided in the SIDA */
	struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block);
	struct uv_cb_cts uvcb = {
		.header.cmd	= UVC_CMD_UNPIN_PAGE_SHARED,
		.header.len	= sizeof(uvcb),
		.guest_handle	= kvm_s390_pv_get_handle(vcpu->kvm),
		.gaddr		= guest_uvcb->paddr,
	};
	int rc;

	if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
		WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
			  guest_uvcb->header.cmd);
		return 0;
	}

	rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
	/*
	 * If the unpin did not succeed, the guest will exit again for the UVC
	 * and we will retry the unpin.
	 */
	if (rc == -EINVAL)
		return 0;
	/*
	 * If we got -EAGAIN here, we simply return it. It will eventually
	 * get propagated all the way to userspace, which should then try
	 * again.
	 */
	return rc;
}
  494. static int handle_pv_notification(struct kvm_vcpu *vcpu)
  495. {
  496. int ret;
  497. if (vcpu->arch.sie_block->ipa == 0xb210)
  498. return handle_pv_spx(vcpu);
  499. if (vcpu->arch.sie_block->ipa == 0xb220)
  500. return handle_pv_sclp(vcpu);
  501. if (vcpu->arch.sie_block->ipa == 0xb9a4)
  502. return handle_pv_uvc(vcpu);
  503. if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {
  504. /*
  505. * Besides external call, other SIGP orders also cause a
  506. * 108 (pv notify) intercept. In contrast to external call,
  507. * these orders need to be emulated and hence the appropriate
  508. * place to handle them is in handle_instruction().
  509. * So first try kvm_s390_handle_sigp_pei() and if that isn't
  510. * successful, go on with handle_instruction().
  511. */
  512. ret = kvm_s390_handle_sigp_pei(vcpu);
  513. if (!ret)
  514. return ret;
  515. }
  516. return handle_instruction(vcpu);
  517. }
  518. static bool should_handle_per_ifetch(const struct kvm_vcpu *vcpu, int rc)
  519. {
  520. /* Process PER, also if the instruction is processed in user space. */
  521. if (!(vcpu->arch.sie_block->icptstatus & 0x02))
  522. return false;
  523. if (rc != 0 && rc != -EOPNOTSUPP)
  524. return false;
  525. if (guestdbg_sstep_enabled(vcpu) && vcpu->arch.local_int.pending_irqs)
  526. /* __vcpu_run() will exit after delivering the interrupt. */
  527. return false;
  528. return true;
  529. }
/*
 * kvm_handle_sie_intercept - dispatch a SIE exit to the handler for its
 * interception code.
 *
 * Returns 0 when the intercept was handled in the kernel, -EOPNOTSUPP
 * when it cannot be handled here, or a negative error code.  A pending
 * PER instruction-fetch event is processed after the intercept handler
 * where required.
 */
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	int rc, per_rc = 0;

	/* user-controlled vcpus are not handled in the kernel */
	if (kvm_is_ucontrol(vcpu->kvm))
		return -EOPNOTSUPP;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_EXTREQ:
		/* only account the exit, no further handling needed here */
		vcpu->stat.exit_external_request++;
		return 0;
	case ICPT_IOREQ:
		vcpu->stat.exit_io_request++;
		return 0;
	case ICPT_INST:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PROGI:
		return handle_prog(vcpu);
	case ICPT_EXTINT:
		return handle_external_interrupt(vcpu);
	case ICPT_WAIT:
		return kvm_s390_handle_wait(vcpu);
	case ICPT_VALIDITY:
		return handle_validity(vcpu);
	case ICPT_STOP:
		return handle_stop(vcpu);
	case ICPT_OPEREXC:
		rc = handle_operexc(vcpu);
		break;
	case ICPT_PARTEXEC:
		rc = handle_partial_execution(vcpu);
		break;
	case ICPT_KSS:
		/* Instruction will be redriven, skip the PER check. */
		return kvm_s390_skey_check_enable(vcpu);
	case ICPT_MCHKREQ:
	case ICPT_INT_ENABLE:
		/*
		 * PSW bit 13 or a CR (0, 6, 14) changed and we might
		 * now be able to deliver interrupts. The pre-run code
		 * will take care of this.
		 */
		rc = 0;
		break;
	case ICPT_PV_INSTR:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PV_NOTIFY:
		rc = handle_pv_notification(vcpu);
		break;
	case ICPT_PV_PREF:
		rc = 0;
		/* re-secure both pages of the 8k prefix area */
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu));
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* process PER, reporting the handler's rc unless PER itself fails */
	if (should_handle_per_ifetch(vcpu, rc))
		per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
	return per_rc ? per_rc : rc;
}