emulate_loadstore.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"
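
/*
 * If the guest has the relevant facility disabled in its MSR, queue the
 * corresponding "unavailable" interrupt so the guest can enable it and
 * retry, and return true so the caller skips emulating the access.
 */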
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
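/*
 * Emulate a guest load or store that faulted on an emulated (MMIO)
 * region: decode the trapping instruction with analyse_instr(), set up
 * the vcpu->arch.mmio_* state consumed by the generic MMIO handlers,
 * and report the emulation result to the caller.
 */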
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	/*
	 * if mmio_vsx_tx_sx_enabled == 0, copy data between
	 * VSR[0..31] and memory
	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
	 * VSR[32..63] and memory
	 */
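	/* Reset the per-access MMIO state left over from earlier emulation. */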
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
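
	/*
	 * analyse_instr() decodes the instruction into op (type, access
	 * size, registers and effective address); it sees the guest MSR
	 * through the regs copy updated above.
	 */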
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;
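
			/*
			 * A 16-byte lvx is carried out as two 8-byte MMIO
			 * loads; smaller element accesses transfer a single
			 * element of the given size.
			 */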
			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(run,
						vcpu, KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(run, vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;
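
			/*
			 * Accesses narrower than the element size (e.g. lxsspx)
			 * need one conversion-sized transfer; otherwise the
			 * register is moved one element at a time.
			 */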
			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* if need byte reverse, op.val has been reversed by
			 * analyse_instr().
			 */
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
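
			/* As for FP stores, flush the VMX registers first so
			 * the store reads current values from vcpu->arch.
			 */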
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);

			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}
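
	/*
	 * Nothing above recognised the access: leave the PC unchanged and
	 * send the guest a program interrupt.
	 */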
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}