sstep.c 82 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Single-step support.
  4. *
  5. * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/kprobes.h>
  9. #include <linux/ptrace.h>
  10. #include <linux/prefetch.h>
  11. #include <asm/sstep.h>
  12. #include <asm/processor.h>
  13. #include <linux/uaccess.h>
  14. #include <asm/cpu_has_feature.h>
  15. #include <asm/cputable.h>
  16. #include <asm/disassemble.h>
  17. #ifdef CONFIG_PPC64
  18. /* Bits in SRR1 that are copied from MSR */
  19. #define MSR_MASK 0xffffffff87c0ffffUL
  20. #else
  21. #define MSR_MASK 0x87c0ffff
  22. #endif
  23. /* Bits in XER */
  24. #define XER_SO 0x80000000U
  25. #define XER_OV 0x40000000U
  26. #define XER_CA 0x20000000U
  27. #define XER_OV32 0x00080000U
  28. #define XER_CA32 0x00040000U
  29. #ifdef CONFIG_VSX
  30. #define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe))
  31. #endif
  32. #ifdef CONFIG_PPC_FPU
  33. /*
  34. * Functions in ldstfp.S
  35. */
  36. extern void get_fpr(int rn, double *p);
  37. extern void put_fpr(int rn, const double *p);
  38. extern void get_vr(int rn, __vector128 *p);
  39. extern void put_vr(int rn, __vector128 *p);
  40. extern void load_vsrn(int vsr, const void *p);
  41. extern void store_vsrn(int vsr, void *p);
  42. extern void conv_sp_to_dp(const float *sp, double *dp);
  43. extern void conv_dp_to_sp(const double *dp, float *sp);
  44. #endif
  45. #ifdef __powerpc64__
  46. /*
  47. * Functions in quad.S
  48. */
  49. extern int do_lq(unsigned long ea, unsigned long *regs);
  50. extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
  51. extern int do_lqarx(unsigned long ea, unsigned long *regs);
  52. extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
  53. unsigned int *crp);
  54. #endif
  55. #ifdef __LITTLE_ENDIAN__
  56. #define IS_LE 1
  57. #define IS_BE 0
  58. #else
  59. #define IS_LE 0
  60. #define IS_BE 1
  61. #endif
  62. /*
  63. * Emulate the truncation of 64 bit values in 32-bit mode.
  64. */
  65. static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
  66. unsigned long val)
  67. {
  68. if ((msr & MSR_64BIT) == 0)
  69. val &= 0xffffffffUL;
  70. return val;
  71. }
/*
 * Determine whether a conditional branch instruction would branch.
 * Returns 1 if the branch would be taken, 0 otherwise.  Does not modify
 * regs; when the instruction form decrements CTR, that side effect is
 * recorded in op->type (DECCTR) for the caller to apply.
 */
static nokprobe_inline int branch_taken(unsigned int instr,
					const struct pt_regs *regs,
					struct instruction_op *op)
{
	unsigned int bo = (instr >> 21) & 0x1f;	/* BO field of the branch */
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		op->type |= DECCTR;
		/*
		 * Branch on the decremented CTR: taken when the
		 * "CTR == 0" sense selected by BO bit 1 matches the
		 * counter after decrement (ctr == 1 now means it will
		 * be 0 after the decrement the caller performs).
		 */
		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR: must equal the sense in BO bit 3 */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
/*
 * Check that an nb-byte access at ea is permitted.  Kernel-mode
 * accesses are always allowed.  For user mode, on failure regs->dar
 * is set so the caller can report a faulting address: if the start
 * of the access is valid but the access runs past the end of the
 * user region, DAR points at the last user-region byte, otherwise
 * at ea itself.  Returns 1 if OK, 0 if not.
 */
static nokprobe_inline long address_ok(struct pt_regs *regs,
				       unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	if (access_ok((void __user *)ea, nb))
		return 1;
	if (access_ok((void __user *)ea, 1))
		/* Access overlaps the end of the user region */
		regs->dar = TASK_SIZE_MAX - 1;
	else
		regs->dar = ea;
	return 0;
}
  109. /*
  110. * Calculate effective address for a D-form instruction
  111. */
  112. static nokprobe_inline unsigned long dform_ea(unsigned int instr,
  113. const struct pt_regs *regs)
  114. {
  115. int ra;
  116. unsigned long ea;
  117. ra = (instr >> 16) & 0x1f;
  118. ea = (signed short) instr; /* sign-extend */
  119. if (ra)
  120. ea += regs->gpr[ra];
  121. return ea;
  122. }
  123. #ifdef __powerpc64__
  124. /*
  125. * Calculate effective address for a DS-form instruction
  126. */
  127. static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
  128. const struct pt_regs *regs)
  129. {
  130. int ra;
  131. unsigned long ea;
  132. ra = (instr >> 16) & 0x1f;
  133. ea = (signed short) (instr & ~3); /* sign-extend */
  134. if (ra)
  135. ea += regs->gpr[ra];
  136. return ea;
  137. }
  138. /*
  139. * Calculate effective address for a DQ-form instruction
  140. */
  141. static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
  142. const struct pt_regs *regs)
  143. {
  144. int ra;
  145. unsigned long ea;
  146. ra = (instr >> 16) & 0x1f;
  147. ea = (signed short) (instr & ~0xf); /* sign-extend */
  148. if (ra)
  149. ea += regs->gpr[ra];
  150. return ea;
  151. }
  152. #endif /* __powerpc64 */
  153. /*
  154. * Calculate effective address for an X-form instruction
  155. */
  156. static nokprobe_inline unsigned long xform_ea(unsigned int instr,
  157. const struct pt_regs *regs)
  158. {
  159. int ra, rb;
  160. unsigned long ea;
  161. ra = (instr >> 16) & 0x1f;
  162. rb = (instr >> 11) & 0x1f;
  163. ea = regs->gpr[rb];
  164. if (ra)
  165. ea += regs->gpr[ra];
  166. return ea;
  167. }
/*
 * Calculate effective address for a MLS:D-form / 8LS:D-form
 * prefixed instruction.  The 34-bit displacement is split across
 * the prefix (18 bits) and the suffix (16 bits).
 */
static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
						  unsigned int suffix,
						  const struct pt_regs *regs)
{
	int ra, prefix_r;
	unsigned int dd;
	unsigned long ea, d0, d1, d;

	prefix_r = GET_PREFIX_R(instr);
	ra = GET_PREFIX_RA(suffix);

	/* reassemble the 34-bit displacement from prefix and suffix */
	d0 = instr & 0x3ffff;
	d1 = suffix & 0xffff;
	d = (d0 << 16) | d1;

	/*
	 * sign extend a 34 bit number: drop the low two bits, let the
	 * signed-int conversion replicate bit 33, then restore them.
	 */
	dd = (unsigned int)(d >> 2);
	ea = (signed int)dd;
	ea = (ea << 2) | (d & 0x3);

	if (!prefix_r && ra)
		ea += regs->gpr[ra];	/* base-register addressing */
	else if (!prefix_r && !ra)
		; /* Leave ea as is */
	else if (prefix_r)
		ea += regs->nip;	/* PC-relative addressing */

	/*
	 * (prefix_r && ra) is an invalid form. Should already be
	 * checked for by caller!
	 */
	return ea;
}
  202. /*
  203. * Return the largest power of 2, not greater than sizeof(unsigned long),
  204. * such that x is a multiple of it.
  205. */
  206. static nokprobe_inline unsigned long max_align(unsigned long x)
  207. {
  208. x |= sizeof(unsigned long);
  209. return x & -x; /* isolates rightmost bit */
  210. }
  211. static nokprobe_inline unsigned long byterev_2(unsigned long x)
  212. {
  213. return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
  214. }
  215. static nokprobe_inline unsigned long byterev_4(unsigned long x)
  216. {
  217. return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
  218. ((x & 0xff00) << 8) | ((x & 0xff) << 24);
  219. }
  220. #ifdef __powerpc64__
  221. static nokprobe_inline unsigned long byterev_8(unsigned long x)
  222. {
  223. return (byterev_4(x) << 32) | byterev_4(x >> 32);
  224. }
  225. #endif
/*
 * Reverse the bytes of a naturally aligned datum of nb bytes in place.
 * nb must be 2, 4, 8, 16 or 32 (8/16/32 only on 64-bit builds); any
 * other size warns once and leaves the buffer untouched.
 */
static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
{
	switch (nb) {
	case 2:
		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
		break;
	case 4:
		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
		break;
#ifdef __powerpc64__
	case 8:
		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
		break;
	case 16: {
		/* swap the two doublewords, reversing the bytes in each */
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;
		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}
	case 32: {
		/* 32-byte reversal: exchange 0<->3 and 1<->2, reversing each */
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;
		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[3]);
		up[3] = tmp;
		tmp = byterev_8(up[2]);
		up[2] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
	}
}
/*
 * Read an aligned datum of nb bytes (1, 2, 4 or 8) at ea into *dest,
 * zero-extended.  The caller must already have validated the address
 * and, for user addresses, opened a user read access window (see
 * read_mem_aligned()).  On a fault, regs->dar is set to ea and
 * -EFAULT is returned.
 */
static __always_inline int
__read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	unsigned long x = 0;

	switch (nb) {
	case 1:
		unsafe_get_user(x, (unsigned char __user *)ea, Efault);
		break;
	case 2:
		unsafe_get_user(x, (unsigned short __user *)ea, Efault);
		break;
	case 4:
		unsafe_get_user(x, (unsigned int __user *)ea, Efault);
		break;
#ifdef __powerpc64__
	case 8:
		unsafe_get_user(x, (unsigned long __user *)ea, Efault);
		break;
#endif
	}
	*dest = x;
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}
  289. static nokprobe_inline int
  290. read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
  291. {
  292. int err;
  293. if (is_kernel_addr(ea))
  294. return __read_mem_aligned(dest, ea, nb, regs);
  295. if (user_read_access_begin((void __user *)ea, nb)) {
  296. err = __read_mem_aligned(dest, ea, nb, regs);
  297. user_read_access_end();
  298. } else {
  299. err = -EFAULT;
  300. regs->dar = ea;
  301. }
  302. return err;
  303. }
/*
 * Copy from userspace to a buffer, using the largest possible
 * aligned accesses, up to sizeof(long).  The caller must have
 * validated the range and opened a user read access window (see
 * copy_mem_in()).  On a fault, regs->dar is set to the address of
 * the chunk that faulted and -EFAULT is returned.
 */
static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int c;	/* chunk size for this iteration: 1, 2, 4 or 8 */

	for (; nb > 0; nb -= c) {
		/* biggest power-of-2 size both the address and count allow */
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			unsafe_get_user(*dest, (u8 __user *)ea, Efault);
			break;
		case 2:
			unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault);
			break;
		case 4:
			unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault);
			break;
#ifdef __powerpc64__
		case 8:
			unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault);
			break;
#endif
		}
		dest += c;
		ea += c;
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}
  339. static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
  340. {
  341. int err;
  342. if (is_kernel_addr(ea))
  343. return __copy_mem_in(dest, ea, nb, regs);
  344. if (user_read_access_begin((void __user *)ea, nb)) {
  345. err = __copy_mem_in(dest, ea, nb, regs);
  346. user_read_access_end();
  347. } else {
  348. err = -EFAULT;
  349. regs->dar = ea;
  350. }
  351. return err;
  352. }
  353. static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
  354. unsigned long ea, int nb,
  355. struct pt_regs *regs)
  356. {
  357. union {
  358. unsigned long ul;
  359. u8 b[sizeof(unsigned long)];
  360. } u;
  361. int i;
  362. int err;
  363. u.ul = 0;
  364. i = IS_BE ? sizeof(unsigned long) - nb : 0;
  365. err = copy_mem_in(&u.b[i], ea, nb, regs);
  366. if (!err)
  367. *dest = u.ul;
  368. return err;
  369. }
  370. /*
  371. * Read memory at address ea for nb bytes, return 0 for success
  372. * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
  373. * If nb < sizeof(long), the result is right-justified on BE systems.
  374. */
  375. static int read_mem(unsigned long *dest, unsigned long ea, int nb,
  376. struct pt_regs *regs)
  377. {
  378. if (!address_ok(regs, ea, nb))
  379. return -EFAULT;
  380. if ((ea & (nb - 1)) == 0)
  381. return read_mem_aligned(dest, ea, nb, regs);
  382. return read_mem_unaligned(dest, ea, nb, regs);
  383. }
  384. NOKPROBE_SYMBOL(read_mem);
/*
 * Write an aligned datum of nb bytes (1, 2, 4 or 8) of val to ea.
 * The caller must already have validated the address and, for user
 * addresses, opened a user write access window (see
 * write_mem_aligned()).  On a fault, regs->dar is set to ea and
 * -EFAULT is returned.
 */
static __always_inline int
__write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
{
	switch (nb) {
	case 1:
		unsafe_put_user(val, (unsigned char __user *)ea, Efault);
		break;
	case 2:
		unsafe_put_user(val, (unsigned short __user *)ea, Efault);
		break;
	case 4:
		unsafe_put_user(val, (unsigned int __user *)ea, Efault);
		break;
#ifdef __powerpc64__
	case 8:
		unsafe_put_user(val, (unsigned long __user *)ea, Efault);
		break;
#endif
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}
  409. static nokprobe_inline int
  410. write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
  411. {
  412. int err;
  413. if (is_kernel_addr(ea))
  414. return __write_mem_aligned(val, ea, nb, regs);
  415. if (user_write_access_begin((void __user *)ea, nb)) {
  416. err = __write_mem_aligned(val, ea, nb, regs);
  417. user_write_access_end();
  418. } else {
  419. err = -EFAULT;
  420. regs->dar = ea;
  421. }
  422. return err;
  423. }
/*
 * Copy from a buffer to userspace, using the largest possible
 * aligned accesses, up to sizeof(long).  The caller must have
 * validated the range and opened a user write access window (see
 * copy_mem_out()).  On a fault, regs->dar is set to the address of
 * the chunk that faulted and -EFAULT is returned.
 */
static __always_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int c;	/* chunk size for this iteration: 1, 2, 4 or 8 */

	for (; nb > 0; nb -= c) {
		/* biggest power-of-2 size both the address and count allow */
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			unsafe_put_user(*dest, (u8 __user *)ea, Efault);
			break;
		case 2:
			unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault);
			break;
		case 4:
			unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault);
			break;
#ifdef __powerpc64__
		case 8:
			unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault);
			break;
#endif
		}
		dest += c;
		ea += c;
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}
  459. static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
  460. {
  461. int err;
  462. if (is_kernel_addr(ea))
  463. return __copy_mem_out(dest, ea, nb, regs);
  464. if (user_write_access_begin((void __user *)ea, nb)) {
  465. err = __copy_mem_out(dest, ea, nb, regs);
  466. user_write_access_end();
  467. } else {
  468. err = -EFAULT;
  469. regs->dar = ea;
  470. }
  471. return err;
  472. }
  473. static nokprobe_inline int write_mem_unaligned(unsigned long val,
  474. unsigned long ea, int nb,
  475. struct pt_regs *regs)
  476. {
  477. union {
  478. unsigned long ul;
  479. u8 b[sizeof(unsigned long)];
  480. } u;
  481. int i;
  482. u.ul = val;
  483. i = IS_BE ? sizeof(unsigned long) - nb : 0;
  484. return copy_mem_out(&u.b[i], ea, nb, regs);
  485. }
  486. /*
  487. * Write memory at address ea for nb bytes, return 0 for success
  488. * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
  489. */
  490. static int write_mem(unsigned long val, unsigned long ea, int nb,
  491. struct pt_regs *regs)
  492. {
  493. if (!address_ok(regs, ea, nb))
  494. return -EFAULT;
  495. if ((ea & (nb - 1)) == 0)
  496. return write_mem_aligned(val, ea, nb, regs);
  497. return write_mem_unaligned(val, ea, nb, regs);
  498. }
  499. NOKPROBE_SYMBOL(write_mem);
  500. #ifdef CONFIG_PPC_FPU
/*
 * These access either the real FP register or the image in the
 * thread_struct, depending on regs->msr & MSR_FP.
 *
 * Load nb bytes at ea into FP register op->reg, with optional
 * single->double conversion (FPCONV), sign extension (SIGNEXT),
 * and byte reversal for cross-endian accesses.  Sizes larger than
 * the 16-byte union are rejected with -EINVAL.
 */
static int do_fp_load(struct instruction_op *op, unsigned long ea,
		      struct pt_regs *regs, bool cross_endian)
{
	int err, rn, nb;
	union {
		int i;
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (nb > sizeof(u))
		return -EINVAL;
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	err = copy_mem_in(u.b, ea, nb, regs);
	if (err)
		return err;
	if (unlikely(cross_endian)) {
		/* reverse each 8-byte doubleword separately */
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	preempt_disable();
	if (nb == 4) {
		/* 4-byte load: convert to double or extend to 64 bits */
		if (op->type & FPCONV)
			conv_sp_to_dp(&u.f, &u.d[0]);
		else if (op->type & SIGNEXT)
			u.l[0] = u.i;
		else
			u.l[0] = u.u;
	}
	/* write the live FPR if FP is enabled, else the thread_struct image */
	if (regs->msr & MSR_FP)
		put_fpr(rn, &u.d[0]);
	else
		current->thread.TS_FPR(rn) = u.l[0];
	if (nb == 16) {
		/* lfdp: second doubleword goes into the odd-numbered FPR */
		rn |= 1;
		if (regs->msr & MSR_FP)
			put_fpr(rn, &u.d[1]);
		else
			current->thread.TS_FPR(rn) = u.l[1];
	}
	preempt_enable();
	return 0;
}
NOKPROBE_SYMBOL(do_fp_load);
/*
 * Store FP register op->reg (or its thread_struct image when the FP
 * unit is disabled) to nb bytes at ea, with optional double->single
 * conversion (FPCONV) and byte reversal for cross-endian accesses.
 * Sizes larger than the 16-byte union are rejected with -EINVAL.
 */
static int do_fp_store(struct instruction_op *op, unsigned long ea,
		       struct pt_regs *regs, bool cross_endian)
{
	int rn, nb;
	union {
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (nb > sizeof(u))
		return -EINVAL;
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	preempt_disable();
	/* read the live FPR if FP is enabled, else the thread_struct image */
	if (regs->msr & MSR_FP)
		get_fpr(rn, &u.d[0]);
	else
		u.l[0] = current->thread.TS_FPR(rn);
	if (nb == 4) {
		/* 4-byte store: convert to single or truncate to 32 bits */
		if (op->type & FPCONV)
			conv_dp_to_sp(&u.d[0], &u.f);
		else
			u.u = u.l[0];
	}
	if (nb == 16) {
		/* stfdp: second doubleword comes from the odd-numbered FPR */
		rn |= 1;
		if (regs->msr & MSR_FP)
			get_fpr(rn, &u.d[1]);
		else
			u.l[1] = current->thread.TS_FPR(rn);
	}
	preempt_enable();
	if (unlikely(cross_endian)) {
		/* reverse each 8-byte doubleword separately */
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	return copy_mem_out(u.b, ea, nb, regs);
}
NOKPROBE_SYMBOL(do_fp_store);
  600. #endif
  601. #ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
/*
 * Load a size-byte element into vector register rn, placing the bytes
 * at their natural offset within the 16-byte register image (the
 * effective address is truncated to a multiple of size first).
 */
static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
				       int size, struct pt_regs *regs,
				       bool cross_endian)
{
	int err;
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u = {};

	if (size > sizeof(u))
		return -EINVAL;
	/* validate the containing 16-byte quantity */
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);
	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
	if (err)
		return err;
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
	preempt_disable();
	/* write the live VR if VEC is enabled, else the thread_struct image */
	if (regs->msr & MSR_VEC)
		put_vr(rn, &u.v);
	else
		current->thread.vr_state.vr[rn] = u.v;
	preempt_enable();
	return 0;
}
/*
 * Store VMX register rn (its "size"-byte slice) to memory at ea.
 * Mirror of do_vec_load(): fetch the register value from the live VR
 * if MSR_VEC is set, otherwise from the thread's saved vr_state, then
 * write out the naturally-aligned slice.
 * Returns 0, -EINVAL for an oversized request, or -EFAULT.
 */
static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
					int size, struct pt_regs *regs,
					bool cross_endian)
{
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u;

	if (size > sizeof(u))
		return -EINVAL;
	/* validate the whole 16-byte naturally-aligned quadword */
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);

	preempt_disable();	/* keep MSR_VEC state stable while we read VRs */
	if (regs->msr & MSR_VEC)
		get_vr(rn, &u.v);
	else
		u.v = current->thread.vr_state.vr[rn];
	preempt_enable();
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
  655. #endif /* CONFIG_ALTIVEC */
  656. #ifdef __powerpc64__
/*
 * Emulate lq: load a quadword into the GPR pair reg/reg+1.
 * A 16-byte-aligned access goes through do_lq() so it can be done
 * atomically; otherwise fall back to two 8-byte reads, using IS_LE/IS_BE
 * to put the halves in the architecturally correct registers.
 */
static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
				      int reg, bool cross_endian)
{
	int err;

	if (!address_ok(regs, ea, 16))
		return -EFAULT;

	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0) {
		err = do_lq(ea, &regs->gpr[reg]);
	} else {
		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
		if (!err)
			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
	}
	/* byte-swap the whole 16-byte value for a cross-endian access */
	if (!err && unlikely(cross_endian))
		do_byte_reverse(&regs->gpr[reg], 16);
	return err;
}
/*
 * Emulate stq: store the GPR pair reg/reg+1 as a quadword at ea.
 * The register values are copied into a local pair first so a
 * cross-endian byte reversal never touches the live GPRs.  An aligned
 * store goes through do_stq() so it can be done atomically.
 */
static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
				       int reg, bool cross_endian)
{
	int err;
	unsigned long vals[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;

	vals[0] = regs->gpr[reg];
	vals[1] = regs->gpr[reg + 1];
	if (unlikely(cross_endian))
		do_byte_reverse(vals, 16);

	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_stq(ea, vals[0], vals[1]);

	err = write_mem(vals[IS_LE], ea, 8, regs);
	if (!err)
		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
	return err;
}
  694. #endif /* __powerpc64 */
  695. #ifdef CONFIG_VSX
/*
 * Decode the raw bytes at "mem" into the VSX register image *reg
 * according to the element size and flags in *op.  "rev" requests a
 * per-element byte reversal (cross-endian access).  The element-size
 * cases mirror the instruction families named in the comments below;
 * all indexing uses IS_LE/IS_BE so one body serves both host endians.
 */
static nokprobe_inline void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
					     const void *mem, bool rev)
{
	int size, read_size;
	int i, j;
	const unsigned int *wp;
	const unsigned short *hp;
	const unsigned char *bp;

	size = GETSIZE(op->type);
	/* start from zero so short loads leave the rest of the reg clear */
	reg->d[0] = reg->d[1] = 0;

	switch (op->element_size) {
	case 32:
		/* [p]lxvp[x] */
	case 16:
		/* whole vector; lxv[x] or lxvl[l] */
		if (size == 0)
			break;
		memcpy(reg, mem, size);
		/* "load left" ops are defined in BE order; flip on LE hosts */
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev)
			do_byte_reverse(reg, size);
		break;
	case 8:
		/* scalar loads, lxvd2x, lxvdsx */
		read_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - read_size;
		memcpy(&reg->b[i], mem, read_size);
		if (rev)
			do_byte_reverse(&reg->b[i], 8);
		if (size < 8) {
			if (op->type & SIGNEXT) {
				/* size == 4 is the only case here */
				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
			} else if (op->vsx_flags & VSX_FPCONV) {
				/* single-precision load: widen to double */
				preempt_disable();
				conv_sp_to_dp(&reg->fp[1 + IS_LE],
					      &reg->dp[IS_LE]);
				preempt_enable();
			}
		} else {
			if (size == 16) {
				/* second doubleword of a 16-byte load */
				unsigned long v = *(unsigned long *)(mem + 8);
				reg->d[IS_BE] = !rev ? v : byterev_8(v);
			} else if (op->vsx_flags & VSX_SPLAT)
				/* lxvdsx: duplicate the doubleword */
				reg->d[IS_BE] = reg->d[IS_LE];
		}
		break;
	case 4:
		/* lxvw4x, lxvwsx */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
		}
		if (op->vsx_flags & VSX_SPLAT) {
			/* lxvwsx: replicate the first word into the rest */
			u32 val = reg->w[IS_LE ? 3 : 0];
			for (; j < 4; ++j) {
				i = IS_LE ? 3 - j : j;
				reg->w[i] = val;
			}
		}
		break;
	case 2:
		/* lxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
		}
		break;
	case 1:
		/* lxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			reg->b[i] = *bp++;
		}
		break;
	}
}
/*
 * Inverse of emulate_vsx_load(): serialise the VSX register image *reg
 * into raw bytes at "mem" according to the element size and flags in
 * *op.  "rev" requests a per-element byte reversal (cross-endian
 * access).  Reversed forms go through a local buffer ("buf"/"buf32")
 * so the caller's register image is never modified.
 */
static nokprobe_inline void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
					      void *mem, bool rev)
{
	int size, write_size;
	int i, j;
	union vsx_reg buf;
	unsigned int *wp;
	unsigned short *hp;
	unsigned char *bp;

	size = GETSIZE(op->type);

	switch (op->element_size) {
	case 32:
		/* [p]stxvp[x] */
		if (size == 0)
			break;
		if (rev) {
			/* reverse 32 bytes */
			union vsx_reg buf32[2];
			buf32[0].d[0] = byterev_8(reg[1].d[1]);
			buf32[0].d[1] = byterev_8(reg[1].d[0]);
			buf32[1].d[0] = byterev_8(reg[0].d[1]);
			buf32[1].d[1] = byterev_8(reg[0].d[0]);
			memcpy(mem, buf32, size);
		} else {
			memcpy(mem, reg, size);
		}
		break;
	case 16:
		/* stxv, stxvx, stxvl, stxvll */
		if (size == 0)
			break;
		/* "store left" ops are defined in BE order; flip on LE hosts */
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev) {
			/* reverse 16 bytes */
			buf.d[0] = byterev_8(reg->d[1]);
			buf.d[1] = byterev_8(reg->d[0]);
			reg = &buf;
		}
		memcpy(mem, reg, size);
		break;
	case 8:
		/* scalar stores, stxvd2x */
		write_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - write_size;
		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
			/* narrow double to single precision before storing */
			buf.d[0] = buf.d[1] = 0;
			preempt_disable();
			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
			preempt_enable();
			reg = &buf;
		}
		memcpy(mem, &reg->b[i], write_size);
		if (size == 16)
			memcpy(mem + 8, &reg->d[IS_BE], 8);
		if (unlikely(rev)) {
			do_byte_reverse(mem, write_size);
			if (size == 16)
				do_byte_reverse(mem + 8, 8);
		}
		break;
	case 4:
		/* stxvw4x */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
		}
		break;
	case 2:
		/* stxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
		}
		break;
	case 1:
		/* stvxb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			*bp++ = reg->b[i];
		}
		break;
	}
}
/*
 * Perform a VSX load: read up to 32 bytes from ea, decode them with
 * emulate_vsx_load(), then deposit the result into one or two VSX
 * registers.  VSRs 0-31 overlay the FP registers and VSRs 32-63 the
 * VRs, hence the split on reg < 32; each half writes either the live
 * register (MSR bit set) or the thread's saved state.
 * Returns 0 or -EFAULT.
 */
static nokprobe_inline int do_vsx_load(struct instruction_op *op,
				       unsigned long ea, struct pt_regs *regs,
				       bool cross_endian)
{
	int reg = op->reg;
	int i, j, nr_vsx_regs;
	u8 mem[32];
	union vsx_reg buf[2];
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
		return -EFAULT;

	/* a 32-byte (lxvp-style) access targets two consecutive VSRs */
	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
	emulate_vsx_load(op, buf, mem, cross_endian);

	preempt_disable();	/* keep MSR_FP/MSR_VEC state stable */
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			for (i = 0; i < nr_vsx_regs; i++) {
				/* register pair order is reversed on LE */
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				load_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
				current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
			}
		}
	} else {
		if (regs->msr & MSR_VEC) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				load_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
			}
		}
	}
	preempt_enable();
	return 0;
}
/*
 * Perform a VSX store: gather one or two VSX registers into buf
 * (from the live registers or the thread's saved state, mirroring
 * do_vsx_load()), serialise with emulate_vsx_store(), then write up
 * to 32 bytes to ea.  Returns 0 or -EFAULT.
 */
static nokprobe_inline int do_vsx_store(struct instruction_op *op,
					unsigned long ea, struct pt_regs *regs,
					bool cross_endian)
{
	int reg = op->reg;
	int i, j, nr_vsx_regs;
	u8 mem[32];
	union vsx_reg buf[2];
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size))
		return -EFAULT;

	/* a 32-byte (stxvp-style) access covers two consecutive VSRs */
	nr_vsx_regs = max(1ul, size / sizeof(__vector128));

	preempt_disable();	/* keep MSR_FP/MSR_VEC state stable */
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			for (i = 0; i < nr_vsx_regs; i++) {
				/* register pair order is reversed on LE */
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				store_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
				buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
			}
		}
	} else {
		if (regs->msr & MSR_VEC) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				store_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
			}
		}
	}
	preempt_enable();

	emulate_vsx_store(op, buf, mem, cross_endian);
	return copy_mem_out(mem, ea, size, regs);
}
  952. #endif /* CONFIG_VSX */
/*
 * Zero one L1 cache line starting at ea, one long at a time.
 * Uses unsafe_put_user(), so the caller must already have opened
 * user access (or ea must be a kernel address).
 * Returns 0 on success, -EFAULT if any store faults.
 */
static __always_inline int __emulate_dcbz(unsigned long ea)
{
	unsigned long i;
	unsigned long size = l1_dcache_bytes();

	for (i = 0; i < size; i += sizeof(long))
		unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault);

	return 0;

Efault:	/* jump target for a faulting unsafe_put_user() */
	return -EFAULT;
}
/*
 * Emulate the dcbz instruction: zero the entire L1 cache line
 * containing ea.  Kernel addresses are zeroed directly; user addresses
 * are bracketed by user_write_access_begin()/end().
 * Returns 0 on success or -EFAULT, in which case regs->dar is set to
 * the (line-aligned) faulting address.
 */
int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long size = l1_dcache_bytes();

	ea = truncate_if_32bit(regs->msr, ea);
	/* round down to the start of the cache line */
	ea &= ~(size - 1);
	if (!address_ok(regs, ea, size))
		return -EFAULT;

	if (is_kernel_addr(ea)) {
		err = __emulate_dcbz(ea);
	} else if (user_write_access_begin((void __user *)ea, size)) {
		err = __emulate_dcbz(ea);
		user_write_access_end();
	} else {
		err = -EFAULT;
	}

	if (err)
		regs->dar = ea;
	return err;
}
NOKPROBE_SYMBOL(emulate_dcbz);
/*
 * Issue a store-type instruction "op x,0,addr" (used for conditional
 * stores) and capture CR into "cr" so the caller can test the result
 * bits.  A faulting store branches to the fixup, which sets err to
 * -EFAULT.  ".machine power8" lets the assembler accept newer store
 * variants regardless of the build's default -mcpu.
 */
#define __put_user_asmx(x, addr, err, op, cr) \
	__asm__ __volatile__( \
		".machine push\n" \
		".machine power8\n" \
		"1: " op " %2,0,%3\n" \
		".machine pop\n" \
		" mfcr %1\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: li %0,%4\n" \
		" b 2b\n" \
		".previous\n" \
		EX_TABLE(1b, 3b) \
		: "=r" (err), "=r" (cr) \
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
/*
 * Issue a load-type instruction "op x,0,addr" (used for load-reserve
 * style accesses), loading into x.  A faulting load branches to the
 * fixup, which sets err to -EFAULT.  ".machine power8" lets the
 * assembler accept newer load variants regardless of -mcpu.
 */
#define __get_user_asmx(x, addr, err, op) \
	__asm__ __volatile__( \
		".machine push\n" \
		".machine power8\n" \
		"1: "op" %1,0,%2\n" \
		".machine pop\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: li %0,%3\n" \
		" b 2b\n" \
		".previous\n" \
		EX_TABLE(1b, 3b) \
		: "=r" (err), "=r" (x) \
		: "r" (addr), "i" (-EFAULT), "0" (err))
/*
 * Issue a cache-management instruction "op 0,addr" on a possibly
 * user-space address.  A fault branches to the fixup, which sets err
 * to -EFAULT instead of oopsing.
 */
#define __cacheop_user_asmx(addr, err, op) \
	__asm__ __volatile__( \
		"1: "op" 0,%1\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: li %0,%3\n" \
		" b 2b\n" \
		".previous\n" \
		EX_TABLE(1b, 3b) \
		: "=r" (err) \
		: "r" (addr), "i" (-EFAULT), "0" (err))
  1024. static nokprobe_inline void set_cr0(const struct pt_regs *regs,
  1025. struct instruction_op *op)
  1026. {
  1027. long val = op->val;
  1028. op->type |= SETCC;
  1029. op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
  1030. if (!(regs->msr & MSR_64BIT))
  1031. val = (int) val;
  1032. if (val < 0)
  1033. op->ccval |= 0x80000000;
  1034. else if (val > 0)
  1035. op->ccval |= 0x40000000;
  1036. else
  1037. op->ccval |= 0x20000000;
  1038. }
  1039. static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
  1040. {
  1041. if (cpu_has_feature(CPU_FTR_ARCH_300)) {
  1042. if (val)
  1043. op->xerval |= XER_CA32;
  1044. else
  1045. op->xerval &= ~XER_CA32;
  1046. }
  1047. }
  1048. static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
  1049. struct instruction_op *op, int rd,
  1050. unsigned long val1, unsigned long val2,
  1051. unsigned long carry_in)
  1052. {
  1053. unsigned long val = val1 + val2;
  1054. if (carry_in)
  1055. ++val;
  1056. op->type = COMPUTE | SETREG | SETXER;
  1057. op->reg = rd;
  1058. op->val = val;
  1059. val = truncate_if_32bit(regs->msr, val);
  1060. val1 = truncate_if_32bit(regs->msr, val1);
  1061. op->xerval = regs->xer;
  1062. if (val < val1 || (carry_in && val == val1))
  1063. op->xerval |= XER_CA;
  1064. else
  1065. op->xerval &= ~XER_CA;
  1066. set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
  1067. (carry_in && (unsigned int)val == (unsigned int)val1));
  1068. }
  1069. static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
  1070. struct instruction_op *op,
  1071. long v1, long v2, int crfld)
  1072. {
  1073. unsigned int crval, shift;
  1074. op->type = COMPUTE | SETCC;
  1075. crval = (regs->xer >> 31) & 1; /* get SO bit */
  1076. if (v1 < v2)
  1077. crval |= 8;
  1078. else if (v1 > v2)
  1079. crval |= 4;
  1080. else
  1081. crval |= 2;
  1082. shift = (7 - crfld) * 4;
  1083. op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
  1084. }
  1085. static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
  1086. struct instruction_op *op,
  1087. unsigned long v1,
  1088. unsigned long v2, int crfld)
  1089. {
  1090. unsigned int crval, shift;
  1091. op->type = COMPUTE | SETCC;
  1092. crval = (regs->xer >> 31) & 1; /* get SO bit */
  1093. if (v1 < v2)
  1094. crval |= 8;
  1095. else if (v1 > v2)
  1096. crval |= 4;
  1097. else
  1098. crval |= 2;
  1099. shift = (7 - crfld) * 4;
  1100. op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
  1101. }
  1102. static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
  1103. struct instruction_op *op,
  1104. unsigned long v1, unsigned long v2)
  1105. {
  1106. unsigned long long out_val, mask;
  1107. int i;
  1108. out_val = 0;
  1109. for (i = 0; i < 8; i++) {
  1110. mask = 0xffUL << (i * 8);
  1111. if ((v1 & mask) == (v2 & mask))
  1112. out_val |= mask;
  1113. }
  1114. op->val = out_val;
  1115. }
  1116. /*
  1117. * The size parameter is used to adjust the equivalent popcnt instruction.
  1118. * popcntb = 8, popcntw = 32, popcntd = 64
  1119. */
  1120. static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
  1121. struct instruction_op *op,
  1122. unsigned long v1, int size)
  1123. {
  1124. unsigned long long out = v1;
  1125. out -= (out >> 1) & 0x5555555555555555ULL;
  1126. out = (0x3333333333333333ULL & out) +
  1127. (0x3333333333333333ULL & (out >> 2));
  1128. out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
  1129. if (size == 8) { /* popcntb */
  1130. op->val = out;
  1131. return;
  1132. }
  1133. out += out >> 8;
  1134. out += out >> 16;
  1135. if (size == 32) { /* popcntw */
  1136. op->val = out & 0x0000003f0000003fULL;
  1137. return;
  1138. }
  1139. out = (out + (out >> 32)) & 0x7f;
  1140. op->val = out; /* popcntd */
  1141. }
  1142. #ifdef CONFIG_PPC64
  1143. static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
  1144. struct instruction_op *op,
  1145. unsigned long v1, unsigned long v2)
  1146. {
  1147. unsigned char perm, idx;
  1148. unsigned int i;
  1149. perm = 0;
  1150. for (i = 0; i < 8; i++) {
  1151. idx = (v1 >> (i * 8)) & 0xff;
  1152. if (idx < 64)
  1153. if (v2 & PPC_BIT(idx))
  1154. perm |= 1 << i;
  1155. }
  1156. op->val = perm;
  1157. }
  1158. #endif /* CONFIG_PPC64 */
  1159. /*
  1160. * The size parameter adjusts the equivalent prty instruction.
  1161. * prtyw = 32, prtyd = 64
  1162. */
  1163. static nokprobe_inline void do_prty(const struct pt_regs *regs,
  1164. struct instruction_op *op,
  1165. unsigned long v, int size)
  1166. {
  1167. unsigned long long res = v ^ (v >> 8);
  1168. res ^= res >> 16;
  1169. if (size == 32) { /* prtyw */
  1170. op->val = res & 0x0000000100000001ULL;
  1171. return;
  1172. }
  1173. res ^= res >> 32;
  1174. op->val = res & 1; /*prtyd */
  1175. }
  1176. static nokprobe_inline int trap_compare(long v1, long v2)
  1177. {
  1178. int ret = 0;
  1179. if (v1 < v2)
  1180. ret |= 0x10;
  1181. else if (v1 > v2)
  1182. ret |= 0x08;
  1183. else
  1184. ret |= 0x04;
  1185. if ((unsigned long)v1 < (unsigned long)v2)
  1186. ret |= 0x02;
  1187. else if ((unsigned long)v1 > (unsigned long)v2)
  1188. ret |= 0x01;
  1189. return ret;
  1190. }
  1191. /*
  1192. * Elements of 32-bit rotate and mask instructions.
  1193. */
/* Mask with 1 bits from IBM bit mb through me inclusive (wrapping when
 * me < mb), as used by the rlwinm family. */
#define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
	((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
/* 64-bit masks: left-justified run starting at mb / right-justified run
 * ending at me, and their combination for rldic-style masks. */
#define MASK64_L(mb) (~0UL >> (mb))
#define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
/* Duplicate the low 32 bits into the high half (32-bit rotate source). */
#define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x) (x)
#endif
/* Rotate left by n; the n == 0 case is special-cased because shifting
 * by the full word width would be undefined behaviour. */
#define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
  1205. /*
  1206. * Decode an instruction, and return information about it in *op
  1207. * without changing *regs.
  1208. * Integer arithmetic and logical instructions, branches, and barrier
  1209. * instructions can be emulated just using the information in *op.
  1210. *
  1211. * Return value is 1 if the instruction can be emulated just by
  1212. * updating *regs with the information in *op, -1 if we need the
  1213. * GPRs but *regs doesn't contain the full register set, or 0
  1214. * otherwise.
  1215. */
  1216. int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
  1217. ppc_inst_t instr)
  1218. {
  1219. #ifdef CONFIG_PPC64
  1220. unsigned int suffixopcode, prefixtype, prefix_r;
  1221. #endif
  1222. unsigned int opcode, ra, rb, rc, rd, spr, u;
  1223. unsigned long int imm;
  1224. unsigned long int val, val2;
  1225. unsigned int mb, me, sh;
  1226. unsigned int word, suffix;
  1227. long ival;
  1228. word = ppc_inst_val(instr);
  1229. suffix = ppc_inst_suffix(instr);
  1230. op->type = COMPUTE;
  1231. opcode = ppc_inst_primary_opcode(instr);
  1232. switch (opcode) {
  1233. case 16: /* bc */
  1234. op->type = BRANCH;
  1235. imm = (signed short)(word & 0xfffc);
  1236. if ((word & 2) == 0)
  1237. imm += regs->nip;
  1238. op->val = truncate_if_32bit(regs->msr, imm);
  1239. if (word & 1)
  1240. op->type |= SETLK;
  1241. if (branch_taken(word, regs, op))
  1242. op->type |= BRTAKEN;
  1243. return 1;
  1244. case 17: /* sc */
  1245. if ((word & 0xfe2) == 2)
  1246. op->type = SYSCALL;
  1247. else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
  1248. (word & 0xfe3) == 1) { /* scv */
  1249. op->type = SYSCALL_VECTORED_0;
  1250. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1251. goto unknown_opcode;
  1252. } else
  1253. op->type = UNKNOWN;
  1254. return 0;
  1255. case 18: /* b */
  1256. op->type = BRANCH | BRTAKEN;
  1257. imm = word & 0x03fffffc;
  1258. if (imm & 0x02000000)
  1259. imm -= 0x04000000;
  1260. if ((word & 2) == 0)
  1261. imm += regs->nip;
  1262. op->val = truncate_if_32bit(regs->msr, imm);
  1263. if (word & 1)
  1264. op->type |= SETLK;
  1265. return 1;
  1266. case 19:
  1267. switch ((word >> 1) & 0x3ff) {
  1268. case 0: /* mcrf */
  1269. op->type = COMPUTE + SETCC;
  1270. rd = 7 - ((word >> 23) & 0x7);
  1271. ra = 7 - ((word >> 18) & 0x7);
  1272. rd *= 4;
  1273. ra *= 4;
  1274. val = (regs->ccr >> ra) & 0xf;
  1275. op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
  1276. return 1;
  1277. case 16: /* bclr */
  1278. case 528: /* bcctr */
  1279. op->type = BRANCH;
  1280. imm = (word & 0x400)? regs->ctr: regs->link;
  1281. op->val = truncate_if_32bit(regs->msr, imm);
  1282. if (word & 1)
  1283. op->type |= SETLK;
  1284. if (branch_taken(word, regs, op))
  1285. op->type |= BRTAKEN;
  1286. return 1;
  1287. case 18: /* rfid, scary */
  1288. if (user_mode(regs))
  1289. goto priv;
  1290. op->type = RFI;
  1291. return 0;
  1292. case 150: /* isync */
  1293. op->type = BARRIER | BARRIER_ISYNC;
  1294. return 1;
  1295. case 33: /* crnor */
  1296. case 129: /* crandc */
  1297. case 193: /* crxor */
  1298. case 225: /* crnand */
  1299. case 257: /* crand */
  1300. case 289: /* creqv */
  1301. case 417: /* crorc */
  1302. case 449: /* cror */
  1303. op->type = COMPUTE + SETCC;
  1304. ra = (word >> 16) & 0x1f;
  1305. rb = (word >> 11) & 0x1f;
  1306. rd = (word >> 21) & 0x1f;
  1307. ra = (regs->ccr >> (31 - ra)) & 1;
  1308. rb = (regs->ccr >> (31 - rb)) & 1;
  1309. val = (word >> (6 + ra * 2 + rb)) & 1;
  1310. op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
  1311. (val << (31 - rd));
  1312. return 1;
  1313. }
  1314. break;
  1315. case 31:
  1316. switch ((word >> 1) & 0x3ff) {
  1317. case 598: /* sync */
  1318. op->type = BARRIER + BARRIER_SYNC;
  1319. #ifdef __powerpc64__
  1320. switch ((word >> 21) & 3) {
  1321. case 1: /* lwsync */
  1322. op->type = BARRIER + BARRIER_LWSYNC;
  1323. break;
  1324. case 2: /* ptesync */
  1325. op->type = BARRIER + BARRIER_PTESYNC;
  1326. break;
  1327. }
  1328. #endif
  1329. return 1;
  1330. case 854: /* eieio */
  1331. op->type = BARRIER + BARRIER_EIEIO;
  1332. return 1;
  1333. }
  1334. break;
  1335. }
  1336. rd = (word >> 21) & 0x1f;
  1337. ra = (word >> 16) & 0x1f;
  1338. rb = (word >> 11) & 0x1f;
  1339. rc = (word >> 6) & 0x1f;
  1340. switch (opcode) {
  1341. #ifdef __powerpc64__
  1342. case 1:
  1343. if (!cpu_has_feature(CPU_FTR_ARCH_31))
  1344. goto unknown_opcode;
  1345. prefix_r = GET_PREFIX_R(word);
  1346. ra = GET_PREFIX_RA(suffix);
  1347. rd = (suffix >> 21) & 0x1f;
  1348. op->reg = rd;
  1349. op->val = regs->gpr[rd];
  1350. suffixopcode = get_op(suffix);
  1351. prefixtype = (word >> 24) & 0x3;
  1352. switch (prefixtype) {
  1353. case 2:
  1354. if (prefix_r && ra)
  1355. return 0;
  1356. switch (suffixopcode) {
  1357. case 14: /* paddi */
  1358. op->type = COMPUTE | PREFIXED;
  1359. op->val = mlsd_8lsd_ea(word, suffix, regs);
  1360. goto compute_done;
  1361. }
  1362. }
  1363. break;
  1364. case 2: /* tdi */
  1365. if (rd & trap_compare(regs->gpr[ra], (short) word))
  1366. goto trap;
  1367. return 1;
  1368. #endif
  1369. case 3: /* twi */
  1370. if (rd & trap_compare((int)regs->gpr[ra], (short) word))
  1371. goto trap;
  1372. return 1;
  1373. #ifdef __powerpc64__
  1374. case 4:
  1375. /*
  1376. * There are very many instructions with this primary opcode
  1377. * introduced in the ISA as early as v2.03. However, the ones
  1378. * we currently emulate were all introduced with ISA 3.0
  1379. */
  1380. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1381. goto unknown_opcode;
  1382. switch (word & 0x3f) {
  1383. case 48: /* maddhd */
  1384. asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
  1385. "=r" (op->val) : "r" (regs->gpr[ra]),
  1386. "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
  1387. goto compute_done;
  1388. case 49: /* maddhdu */
  1389. asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
  1390. "=r" (op->val) : "r" (regs->gpr[ra]),
  1391. "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
  1392. goto compute_done;
  1393. case 51: /* maddld */
  1394. asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
  1395. "=r" (op->val) : "r" (regs->gpr[ra]),
  1396. "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
  1397. goto compute_done;
  1398. }
  1399. /*
  1400. * There are other instructions from ISA 3.0 with the same
  1401. * primary opcode which do not have emulation support yet.
  1402. */
  1403. goto unknown_opcode;
  1404. #endif
  1405. case 7: /* mulli */
  1406. op->val = regs->gpr[ra] * (short) word;
  1407. goto compute_done;
  1408. case 8: /* subfic */
  1409. imm = (short) word;
  1410. add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
  1411. return 1;
  1412. case 10: /* cmpli */
  1413. imm = (unsigned short) word;
  1414. val = regs->gpr[ra];
  1415. #ifdef __powerpc64__
  1416. if ((rd & 1) == 0)
  1417. val = (unsigned int) val;
  1418. #endif
  1419. do_cmp_unsigned(regs, op, val, imm, rd >> 2);
  1420. return 1;
  1421. case 11: /* cmpi */
  1422. imm = (short) word;
  1423. val = regs->gpr[ra];
  1424. #ifdef __powerpc64__
  1425. if ((rd & 1) == 0)
  1426. val = (int) val;
  1427. #endif
  1428. do_cmp_signed(regs, op, val, imm, rd >> 2);
  1429. return 1;
  1430. case 12: /* addic */
  1431. imm = (short) word;
  1432. add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
  1433. return 1;
  1434. case 13: /* addic. */
  1435. imm = (short) word;
  1436. add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
  1437. set_cr0(regs, op);
  1438. return 1;
  1439. case 14: /* addi */
  1440. imm = (short) word;
  1441. if (ra)
  1442. imm += regs->gpr[ra];
  1443. op->val = imm;
  1444. goto compute_done;
  1445. case 15: /* addis */
  1446. imm = ((short) word) << 16;
  1447. if (ra)
  1448. imm += regs->gpr[ra];
  1449. op->val = imm;
  1450. goto compute_done;
  1451. case 19:
  1452. if (((word >> 1) & 0x1f) == 2) {
  1453. /* addpcis */
  1454. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1455. goto unknown_opcode;
  1456. imm = (short) (word & 0xffc1); /* d0 + d2 fields */
  1457. imm |= (word >> 15) & 0x3e; /* d1 field */
  1458. op->val = regs->nip + (imm << 16) + 4;
  1459. goto compute_done;
  1460. }
  1461. op->type = UNKNOWN;
  1462. return 0;
  1463. case 20: /* rlwimi */
  1464. mb = (word >> 6) & 0x1f;
  1465. me = (word >> 1) & 0x1f;
  1466. val = DATA32(regs->gpr[rd]);
  1467. imm = MASK32(mb, me);
  1468. op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
  1469. goto logical_done;
  1470. case 21: /* rlwinm */
  1471. mb = (word >> 6) & 0x1f;
  1472. me = (word >> 1) & 0x1f;
  1473. val = DATA32(regs->gpr[rd]);
  1474. op->val = ROTATE(val, rb) & MASK32(mb, me);
  1475. goto logical_done;
  1476. case 23: /* rlwnm */
  1477. mb = (word >> 6) & 0x1f;
  1478. me = (word >> 1) & 0x1f;
  1479. rb = regs->gpr[rb] & 0x1f;
  1480. val = DATA32(regs->gpr[rd]);
  1481. op->val = ROTATE(val, rb) & MASK32(mb, me);
  1482. goto logical_done;
  1483. case 24: /* ori */
  1484. op->val = regs->gpr[rd] | (unsigned short) word;
  1485. goto logical_done_nocc;
  1486. case 25: /* oris */
  1487. imm = (unsigned short) word;
  1488. op->val = regs->gpr[rd] | (imm << 16);
  1489. goto logical_done_nocc;
  1490. case 26: /* xori */
  1491. op->val = regs->gpr[rd] ^ (unsigned short) word;
  1492. goto logical_done_nocc;
  1493. case 27: /* xoris */
  1494. imm = (unsigned short) word;
  1495. op->val = regs->gpr[rd] ^ (imm << 16);
  1496. goto logical_done_nocc;
  1497. case 28: /* andi. */
  1498. op->val = regs->gpr[rd] & (unsigned short) word;
  1499. set_cr0(regs, op);
  1500. goto logical_done_nocc;
  1501. case 29: /* andis. */
  1502. imm = (unsigned short) word;
  1503. op->val = regs->gpr[rd] & (imm << 16);
  1504. set_cr0(regs, op);
  1505. goto logical_done_nocc;
  1506. #ifdef __powerpc64__
  1507. case 30: /* rld* */
  1508. mb = ((word >> 6) & 0x1f) | (word & 0x20);
  1509. val = regs->gpr[rd];
  1510. if ((word & 0x10) == 0) {
  1511. sh = rb | ((word & 2) << 4);
  1512. val = ROTATE(val, sh);
  1513. switch ((word >> 2) & 3) {
  1514. case 0: /* rldicl */
  1515. val &= MASK64_L(mb);
  1516. break;
  1517. case 1: /* rldicr */
  1518. val &= MASK64_R(mb);
  1519. break;
  1520. case 2: /* rldic */
  1521. val &= MASK64(mb, 63 - sh);
  1522. break;
  1523. case 3: /* rldimi */
  1524. imm = MASK64(mb, 63 - sh);
  1525. val = (regs->gpr[ra] & ~imm) |
  1526. (val & imm);
  1527. }
  1528. op->val = val;
  1529. goto logical_done;
  1530. } else {
  1531. sh = regs->gpr[rb] & 0x3f;
  1532. val = ROTATE(val, sh);
  1533. switch ((word >> 1) & 7) {
  1534. case 0: /* rldcl */
  1535. op->val = val & MASK64_L(mb);
  1536. goto logical_done;
  1537. case 1: /* rldcr */
  1538. op->val = val & MASK64_R(mb);
  1539. goto logical_done;
  1540. }
  1541. }
  1542. #endif
  1543. op->type = UNKNOWN; /* illegal instruction */
  1544. return 0;
  1545. case 31:
  1546. /* isel occupies 32 minor opcodes */
  1547. if (((word >> 1) & 0x1f) == 15) {
  1548. mb = (word >> 6) & 0x1f; /* bc field */
  1549. val = (regs->ccr >> (31 - mb)) & 1;
  1550. val2 = (ra) ? regs->gpr[ra] : 0;
  1551. op->val = (val) ? val2 : regs->gpr[rb];
  1552. goto compute_done;
  1553. }
  1554. switch ((word >> 1) & 0x3ff) {
  1555. case 4: /* tw */
  1556. if (rd == 0x1f ||
  1557. (rd & trap_compare((int)regs->gpr[ra],
  1558. (int)regs->gpr[rb])))
  1559. goto trap;
  1560. return 1;
  1561. #ifdef __powerpc64__
  1562. case 68: /* td */
  1563. if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
  1564. goto trap;
  1565. return 1;
  1566. #endif
  1567. case 83: /* mfmsr */
  1568. if (user_mode(regs))
  1569. goto priv;
  1570. op->type = MFMSR;
  1571. op->reg = rd;
  1572. return 0;
  1573. case 146: /* mtmsr */
  1574. if (user_mode(regs))
  1575. goto priv;
  1576. op->type = MTMSR;
  1577. op->reg = rd;
  1578. op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
  1579. return 0;
  1580. #ifdef CONFIG_PPC64
  1581. case 178: /* mtmsrd */
  1582. if (user_mode(regs))
  1583. goto priv;
  1584. op->type = MTMSR;
  1585. op->reg = rd;
  1586. /* only MSR_EE and MSR_RI get changed if bit 15 set */
  1587. /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
  1588. imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
  1589. op->val = imm;
  1590. return 0;
  1591. #endif
  1592. case 19: /* mfcr */
  1593. imm = 0xffffffffUL;
  1594. if ((word >> 20) & 1) {
  1595. imm = 0xf0000000UL;
  1596. for (sh = 0; sh < 8; ++sh) {
  1597. if (word & (0x80000 >> sh))
  1598. break;
  1599. imm >>= 4;
  1600. }
  1601. }
  1602. op->val = regs->ccr & imm;
  1603. goto compute_done;
  1604. case 128: /* setb */
  1605. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1606. goto unknown_opcode;
  1607. /*
  1608. * 'ra' encodes the CR field number (bfa) in the top 3 bits.
  1609. * Since each CR field is 4 bits,
  1610. * we can simply mask off the bottom two bits (bfa * 4)
  1611. * to yield the first bit in the CR field.
  1612. */
  1613. ra = ra & ~0x3;
  1614. /* 'val' stores bits of the CR field (bfa) */
  1615. val = regs->ccr >> (CR0_SHIFT - ra);
  1616. /* checks if the LT bit of CR field (bfa) is set */
  1617. if (val & 8)
  1618. op->val = -1;
  1619. /* checks if the GT bit of CR field (bfa) is set */
  1620. else if (val & 4)
  1621. op->val = 1;
  1622. else
  1623. op->val = 0;
  1624. goto compute_done;
  1625. case 144: /* mtcrf */
  1626. op->type = COMPUTE + SETCC;
  1627. imm = 0xf0000000UL;
  1628. val = regs->gpr[rd];
  1629. op->ccval = regs->ccr;
  1630. for (sh = 0; sh < 8; ++sh) {
  1631. if (word & (0x80000 >> sh))
  1632. op->ccval = (op->ccval & ~imm) |
  1633. (val & imm);
  1634. imm >>= 4;
  1635. }
  1636. return 1;
  1637. case 339: /* mfspr */
  1638. spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
  1639. op->type = MFSPR;
  1640. op->reg = rd;
  1641. op->spr = spr;
  1642. if (spr == SPRN_XER || spr == SPRN_LR ||
  1643. spr == SPRN_CTR)
  1644. return 1;
  1645. return 0;
  1646. case 467: /* mtspr */
  1647. spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
  1648. op->type = MTSPR;
  1649. op->val = regs->gpr[rd];
  1650. op->spr = spr;
  1651. if (spr == SPRN_XER || spr == SPRN_LR ||
  1652. spr == SPRN_CTR)
  1653. return 1;
  1654. return 0;
  1655. /*
  1656. * Compare instructions
  1657. */
  1658. case 0: /* cmp */
  1659. val = regs->gpr[ra];
  1660. val2 = regs->gpr[rb];
  1661. #ifdef __powerpc64__
  1662. if ((rd & 1) == 0) {
  1663. /* word (32-bit) compare */
  1664. val = (int) val;
  1665. val2 = (int) val2;
  1666. }
  1667. #endif
  1668. do_cmp_signed(regs, op, val, val2, rd >> 2);
  1669. return 1;
  1670. case 32: /* cmpl */
  1671. val = regs->gpr[ra];
  1672. val2 = regs->gpr[rb];
  1673. #ifdef __powerpc64__
  1674. if ((rd & 1) == 0) {
  1675. /* word (32-bit) compare */
  1676. val = (unsigned int) val;
  1677. val2 = (unsigned int) val2;
  1678. }
  1679. #endif
  1680. do_cmp_unsigned(regs, op, val, val2, rd >> 2);
  1681. return 1;
  1682. case 508: /* cmpb */
  1683. do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
  1684. goto logical_done_nocc;
  1685. /*
  1686. * Arithmetic instructions
  1687. */
  1688. case 8: /* subfc */
  1689. add_with_carry(regs, op, rd, ~regs->gpr[ra],
  1690. regs->gpr[rb], 1);
  1691. goto arith_done;
  1692. #ifdef __powerpc64__
  1693. case 9: /* mulhdu */
  1694. asm("mulhdu %0,%1,%2" : "=r" (op->val) :
  1695. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  1696. goto arith_done;
  1697. #endif
  1698. case 10: /* addc */
  1699. add_with_carry(regs, op, rd, regs->gpr[ra],
  1700. regs->gpr[rb], 0);
  1701. goto arith_done;
  1702. case 11: /* mulhwu */
  1703. asm("mulhwu %0,%1,%2" : "=r" (op->val) :
  1704. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  1705. goto arith_done;
  1706. case 40: /* subf */
  1707. op->val = regs->gpr[rb] - regs->gpr[ra];
  1708. goto arith_done;
  1709. #ifdef __powerpc64__
  1710. case 73: /* mulhd */
  1711. asm("mulhd %0,%1,%2" : "=r" (op->val) :
  1712. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  1713. goto arith_done;
  1714. #endif
  1715. case 75: /* mulhw */
  1716. asm("mulhw %0,%1,%2" : "=r" (op->val) :
  1717. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  1718. goto arith_done;
  1719. case 104: /* neg */
  1720. op->val = -regs->gpr[ra];
  1721. goto arith_done;
  1722. case 136: /* subfe */
  1723. add_with_carry(regs, op, rd, ~regs->gpr[ra],
  1724. regs->gpr[rb], regs->xer & XER_CA);
  1725. goto arith_done;
  1726. case 138: /* adde */
  1727. add_with_carry(regs, op, rd, regs->gpr[ra],
  1728. regs->gpr[rb], regs->xer & XER_CA);
  1729. goto arith_done;
  1730. case 200: /* subfze */
  1731. add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
  1732. regs->xer & XER_CA);
  1733. goto arith_done;
  1734. case 202: /* addze */
  1735. add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
  1736. regs->xer & XER_CA);
  1737. goto arith_done;
  1738. case 232: /* subfme */
  1739. add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
  1740. regs->xer & XER_CA);
  1741. goto arith_done;
  1742. #ifdef __powerpc64__
  1743. case 233: /* mulld */
  1744. op->val = regs->gpr[ra] * regs->gpr[rb];
  1745. goto arith_done;
  1746. #endif
  1747. case 234: /* addme */
  1748. add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
  1749. regs->xer & XER_CA);
  1750. goto arith_done;
  1751. case 235: /* mullw */
  1752. op->val = (long)(int) regs->gpr[ra] *
  1753. (int) regs->gpr[rb];
  1754. goto arith_done;
  1755. #ifdef __powerpc64__
  1756. case 265: /* modud */
  1757. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1758. goto unknown_opcode;
  1759. op->val = regs->gpr[ra] % regs->gpr[rb];
  1760. goto compute_done;
  1761. #endif
  1762. case 266: /* add */
  1763. op->val = regs->gpr[ra] + regs->gpr[rb];
  1764. goto arith_done;
  1765. case 267: /* moduw */
  1766. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1767. goto unknown_opcode;
  1768. op->val = (unsigned int) regs->gpr[ra] %
  1769. (unsigned int) regs->gpr[rb];
  1770. goto compute_done;
  1771. #ifdef __powerpc64__
  1772. case 457: /* divdu */
  1773. op->val = regs->gpr[ra] / regs->gpr[rb];
  1774. goto arith_done;
  1775. #endif
  1776. case 459: /* divwu */
  1777. op->val = (unsigned int) regs->gpr[ra] /
  1778. (unsigned int) regs->gpr[rb];
  1779. goto arith_done;
  1780. #ifdef __powerpc64__
  1781. case 489: /* divd */
  1782. op->val = (long int) regs->gpr[ra] /
  1783. (long int) regs->gpr[rb];
  1784. goto arith_done;
  1785. #endif
  1786. case 491: /* divw */
  1787. op->val = (int) regs->gpr[ra] /
  1788. (int) regs->gpr[rb];
  1789. goto arith_done;
  1790. #ifdef __powerpc64__
  1791. case 425: /* divde[.] */
  1792. asm volatile(PPC_DIVDE(%0, %1, %2) :
  1793. "=r" (op->val) : "r" (regs->gpr[ra]),
  1794. "r" (regs->gpr[rb]));
  1795. goto arith_done;
  1796. case 393: /* divdeu[.] */
  1797. asm volatile(PPC_DIVDEU(%0, %1, %2) :
  1798. "=r" (op->val) : "r" (regs->gpr[ra]),
  1799. "r" (regs->gpr[rb]));
  1800. goto arith_done;
  1801. #endif
  1802. case 755: /* darn */
  1803. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1804. goto unknown_opcode;
  1805. switch (ra & 0x3) {
  1806. case 0:
  1807. /* 32-bit conditioned */
  1808. asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
  1809. goto compute_done;
  1810. case 1:
  1811. /* 64-bit conditioned */
  1812. asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
  1813. goto compute_done;
  1814. case 2:
  1815. /* 64-bit raw */
  1816. asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
  1817. goto compute_done;
  1818. }
  1819. goto unknown_opcode;
  1820. #ifdef __powerpc64__
  1821. case 777: /* modsd */
  1822. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1823. goto unknown_opcode;
  1824. op->val = (long int) regs->gpr[ra] %
  1825. (long int) regs->gpr[rb];
  1826. goto compute_done;
  1827. #endif
  1828. case 779: /* modsw */
  1829. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1830. goto unknown_opcode;
  1831. op->val = (int) regs->gpr[ra] %
  1832. (int) regs->gpr[rb];
  1833. goto compute_done;
  1834. /*
  1835. * Logical instructions
  1836. */
  1837. case 26: /* cntlzw */
  1838. val = (unsigned int) regs->gpr[rd];
  1839. op->val = ( val ? __builtin_clz(val) : 32 );
  1840. goto logical_done;
  1841. #ifdef __powerpc64__
  1842. case 58: /* cntlzd */
  1843. val = regs->gpr[rd];
  1844. op->val = ( val ? __builtin_clzl(val) : 64 );
  1845. goto logical_done;
  1846. #endif
  1847. case 28: /* and */
  1848. op->val = regs->gpr[rd] & regs->gpr[rb];
  1849. goto logical_done;
  1850. case 60: /* andc */
  1851. op->val = regs->gpr[rd] & ~regs->gpr[rb];
  1852. goto logical_done;
  1853. case 122: /* popcntb */
  1854. do_popcnt(regs, op, regs->gpr[rd], 8);
  1855. goto logical_done_nocc;
  1856. case 124: /* nor */
  1857. op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
  1858. goto logical_done;
  1859. case 154: /* prtyw */
  1860. do_prty(regs, op, regs->gpr[rd], 32);
  1861. goto logical_done_nocc;
  1862. case 186: /* prtyd */
  1863. do_prty(regs, op, regs->gpr[rd], 64);
  1864. goto logical_done_nocc;
  1865. #ifdef CONFIG_PPC64
  1866. case 252: /* bpermd */
  1867. do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
  1868. goto logical_done_nocc;
  1869. #endif
1870. case 284: /* eqv */
  1871. op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
  1872. goto logical_done;
  1873. case 316: /* xor */
  1874. op->val = regs->gpr[rd] ^ regs->gpr[rb];
  1875. goto logical_done;
  1876. case 378: /* popcntw */
  1877. do_popcnt(regs, op, regs->gpr[rd], 32);
  1878. goto logical_done_nocc;
  1879. case 412: /* orc */
  1880. op->val = regs->gpr[rd] | ~regs->gpr[rb];
  1881. goto logical_done;
  1882. case 444: /* or */
  1883. op->val = regs->gpr[rd] | regs->gpr[rb];
  1884. goto logical_done;
  1885. case 476: /* nand */
  1886. op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
  1887. goto logical_done;
  1888. #ifdef CONFIG_PPC64
  1889. case 506: /* popcntd */
  1890. do_popcnt(regs, op, regs->gpr[rd], 64);
  1891. goto logical_done_nocc;
  1892. #endif
  1893. case 538: /* cnttzw */
  1894. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1895. goto unknown_opcode;
  1896. val = (unsigned int) regs->gpr[rd];
  1897. op->val = (val ? __builtin_ctz(val) : 32);
  1898. goto logical_done;
  1899. #ifdef __powerpc64__
  1900. case 570: /* cnttzd */
  1901. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1902. goto unknown_opcode;
  1903. val = regs->gpr[rd];
  1904. op->val = (val ? __builtin_ctzl(val) : 64);
  1905. goto logical_done;
  1906. #endif
  1907. case 922: /* extsh */
  1908. op->val = (signed short) regs->gpr[rd];
  1909. goto logical_done;
  1910. case 954: /* extsb */
  1911. op->val = (signed char) regs->gpr[rd];
  1912. goto logical_done;
  1913. #ifdef __powerpc64__
  1914. case 986: /* extsw */
  1915. op->val = (signed int) regs->gpr[rd];
  1916. goto logical_done;
  1917. #endif
  1918. /*
  1919. * Shift instructions
  1920. */
  1921. case 24: /* slw */
  1922. sh = regs->gpr[rb] & 0x3f;
  1923. if (sh < 32)
  1924. op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
  1925. else
  1926. op->val = 0;
  1927. goto logical_done;
  1928. case 536: /* srw */
  1929. sh = regs->gpr[rb] & 0x3f;
  1930. if (sh < 32)
  1931. op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
  1932. else
  1933. op->val = 0;
  1934. goto logical_done;
  1935. case 792: /* sraw */
  1936. op->type = COMPUTE + SETREG + SETXER;
  1937. sh = regs->gpr[rb] & 0x3f;
  1938. ival = (signed int) regs->gpr[rd];
  1939. op->val = ival >> (sh < 32 ? sh : 31);
  1940. op->xerval = regs->xer;
  1941. if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
  1942. op->xerval |= XER_CA;
  1943. else
  1944. op->xerval &= ~XER_CA;
  1945. set_ca32(op, op->xerval & XER_CA);
  1946. goto logical_done;
  1947. case 824: /* srawi */
  1948. op->type = COMPUTE + SETREG + SETXER;
  1949. sh = rb;
  1950. ival = (signed int) regs->gpr[rd];
  1951. op->val = ival >> sh;
  1952. op->xerval = regs->xer;
  1953. if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
  1954. op->xerval |= XER_CA;
  1955. else
  1956. op->xerval &= ~XER_CA;
  1957. set_ca32(op, op->xerval & XER_CA);
  1958. goto logical_done;
  1959. #ifdef __powerpc64__
  1960. case 27: /* sld */
  1961. sh = regs->gpr[rb] & 0x7f;
  1962. if (sh < 64)
  1963. op->val = regs->gpr[rd] << sh;
  1964. else
  1965. op->val = 0;
  1966. goto logical_done;
  1967. case 539: /* srd */
  1968. sh = regs->gpr[rb] & 0x7f;
  1969. if (sh < 64)
  1970. op->val = regs->gpr[rd] >> sh;
  1971. else
  1972. op->val = 0;
  1973. goto logical_done;
  1974. case 794: /* srad */
  1975. op->type = COMPUTE + SETREG + SETXER;
  1976. sh = regs->gpr[rb] & 0x7f;
  1977. ival = (signed long int) regs->gpr[rd];
  1978. op->val = ival >> (sh < 64 ? sh : 63);
  1979. op->xerval = regs->xer;
  1980. if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
  1981. op->xerval |= XER_CA;
  1982. else
  1983. op->xerval &= ~XER_CA;
  1984. set_ca32(op, op->xerval & XER_CA);
  1985. goto logical_done;
  1986. case 826: /* sradi with sh_5 = 0 */
  1987. case 827: /* sradi with sh_5 = 1 */
  1988. op->type = COMPUTE + SETREG + SETXER;
  1989. sh = rb | ((word & 2) << 4);
  1990. ival = (signed long int) regs->gpr[rd];
  1991. op->val = ival >> sh;
  1992. op->xerval = regs->xer;
  1993. if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
  1994. op->xerval |= XER_CA;
  1995. else
  1996. op->xerval &= ~XER_CA;
  1997. set_ca32(op, op->xerval & XER_CA);
  1998. goto logical_done;
  1999. case 890: /* extswsli with sh_5 = 0 */
  2000. case 891: /* extswsli with sh_5 = 1 */
  2001. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2002. goto unknown_opcode;
  2003. op->type = COMPUTE + SETREG;
  2004. sh = rb | ((word & 2) << 4);
  2005. val = (signed int) regs->gpr[rd];
  2006. if (sh)
  2007. op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
  2008. else
  2009. op->val = val;
  2010. goto logical_done;
  2011. #endif /* __powerpc64__ */
  2012. /*
  2013. * Cache instructions
  2014. */
  2015. case 54: /* dcbst */
  2016. op->type = MKOP(CACHEOP, DCBST, 0);
  2017. op->ea = xform_ea(word, regs);
  2018. return 0;
  2019. case 86: /* dcbf */
  2020. op->type = MKOP(CACHEOP, DCBF, 0);
  2021. op->ea = xform_ea(word, regs);
  2022. return 0;
  2023. case 246: /* dcbtst */
  2024. op->type = MKOP(CACHEOP, DCBTST, 0);
  2025. op->ea = xform_ea(word, regs);
  2026. op->reg = rd;
  2027. return 0;
2028. case 278: /* dcbt */
2029. op->type = MKOP(CACHEOP, DCBTST, 0); /* NOTE(review): DCBTST here looks copy-pasted from case 246 (dcbtst); dcbt should use the DCBT subtype — confirm */
  2030. op->ea = xform_ea(word, regs);
  2031. op->reg = rd;
  2032. return 0;
  2033. case 982: /* icbi */
  2034. op->type = MKOP(CACHEOP, ICBI, 0);
  2035. op->ea = xform_ea(word, regs);
  2036. return 0;
  2037. case 1014: /* dcbz */
  2038. op->type = MKOP(CACHEOP, DCBZ, 0);
  2039. op->ea = xform_ea(word, regs);
  2040. return 0;
  2041. }
  2042. break;
  2043. }
  2044. /*
  2045. * Loads and stores.
  2046. */
  2047. op->type = UNKNOWN;
  2048. op->update_reg = ra;
  2049. op->reg = rd;
  2050. op->val = regs->gpr[rd];
  2051. u = (word >> 20) & UPDATE;
  2052. op->vsx_flags = 0;
  2053. switch (opcode) {
  2054. case 31:
  2055. u = word & UPDATE;
  2056. op->ea = xform_ea(word, regs);
  2057. switch ((word >> 1) & 0x3ff) {
  2058. case 20: /* lwarx */
  2059. op->type = MKOP(LARX, 0, 4);
  2060. break;
  2061. case 150: /* stwcx. */
  2062. op->type = MKOP(STCX, 0, 4);
  2063. break;
  2064. #ifdef CONFIG_PPC_HAS_LBARX_LHARX
  2065. case 52: /* lbarx */
  2066. op->type = MKOP(LARX, 0, 1);
  2067. break;
  2068. case 694: /* stbcx. */
  2069. op->type = MKOP(STCX, 0, 1);
  2070. break;
  2071. case 116: /* lharx */
  2072. op->type = MKOP(LARX, 0, 2);
  2073. break;
  2074. case 726: /* sthcx. */
  2075. op->type = MKOP(STCX, 0, 2);
  2076. break;
  2077. #endif
  2078. #ifdef __powerpc64__
  2079. case 84: /* ldarx */
  2080. op->type = MKOP(LARX, 0, 8);
  2081. break;
  2082. case 214: /* stdcx. */
  2083. op->type = MKOP(STCX, 0, 8);
  2084. break;
  2085. case 276: /* lqarx */
  2086. if (!((rd & 1) || rd == ra || rd == rb))
  2087. op->type = MKOP(LARX, 0, 16);
  2088. break;
  2089. case 182: /* stqcx. */
  2090. if (!(rd & 1))
  2091. op->type = MKOP(STCX, 0, 16);
  2092. break;
  2093. #endif
  2094. case 23: /* lwzx */
  2095. case 55: /* lwzux */
  2096. op->type = MKOP(LOAD, u, 4);
  2097. break;
  2098. case 87: /* lbzx */
  2099. case 119: /* lbzux */
  2100. op->type = MKOP(LOAD, u, 1);
  2101. break;
  2102. #ifdef CONFIG_ALTIVEC
  2103. /*
  2104. * Note: for the load/store vector element instructions,
  2105. * bits of the EA say which field of the VMX register to use.
  2106. */
  2107. case 7: /* lvebx */
  2108. op->type = MKOP(LOAD_VMX, 0, 1);
  2109. op->element_size = 1;
  2110. break;
  2111. case 39: /* lvehx */
  2112. op->type = MKOP(LOAD_VMX, 0, 2);
  2113. op->element_size = 2;
  2114. break;
  2115. case 71: /* lvewx */
  2116. op->type = MKOP(LOAD_VMX, 0, 4);
  2117. op->element_size = 4;
  2118. break;
  2119. case 103: /* lvx */
  2120. case 359: /* lvxl */
  2121. op->type = MKOP(LOAD_VMX, 0, 16);
  2122. op->element_size = 16;
  2123. break;
  2124. case 135: /* stvebx */
  2125. op->type = MKOP(STORE_VMX, 0, 1);
  2126. op->element_size = 1;
  2127. break;
  2128. case 167: /* stvehx */
  2129. op->type = MKOP(STORE_VMX, 0, 2);
  2130. op->element_size = 2;
  2131. break;
  2132. case 199: /* stvewx */
  2133. op->type = MKOP(STORE_VMX, 0, 4);
  2134. op->element_size = 4;
  2135. break;
2136. case 231: /* stvx */
2137. case 487: /* stvxl */
2138. op->type = MKOP(STORE_VMX, 0, 16); /* NOTE(review): the mirror lvx/lvxl case sets element_size = 16 but this store path doesn't — verify the omission is intentional */
2139. break;
  2140. #endif /* CONFIG_ALTIVEC */
  2141. #ifdef __powerpc64__
  2142. case 21: /* ldx */
  2143. case 53: /* ldux */
  2144. op->type = MKOP(LOAD, u, 8);
  2145. break;
  2146. case 149: /* stdx */
  2147. case 181: /* stdux */
  2148. op->type = MKOP(STORE, u, 8);
  2149. break;
  2150. #endif
  2151. case 151: /* stwx */
  2152. case 183: /* stwux */
  2153. op->type = MKOP(STORE, u, 4);
  2154. break;
  2155. case 215: /* stbx */
  2156. case 247: /* stbux */
  2157. op->type = MKOP(STORE, u, 1);
  2158. break;
  2159. case 279: /* lhzx */
  2160. case 311: /* lhzux */
  2161. op->type = MKOP(LOAD, u, 2);
  2162. break;
  2163. #ifdef __powerpc64__
  2164. case 341: /* lwax */
  2165. case 373: /* lwaux */
  2166. op->type = MKOP(LOAD, SIGNEXT | u, 4);
  2167. break;
  2168. #endif
  2169. case 343: /* lhax */
  2170. case 375: /* lhaux */
  2171. op->type = MKOP(LOAD, SIGNEXT | u, 2);
  2172. break;
  2173. case 407: /* sthx */
  2174. case 439: /* sthux */
  2175. op->type = MKOP(STORE, u, 2);
  2176. break;
  2177. #ifdef __powerpc64__
  2178. case 532: /* ldbrx */
  2179. op->type = MKOP(LOAD, BYTEREV, 8);
  2180. break;
  2181. #endif
  2182. case 533: /* lswx */
  2183. op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
  2184. break;
  2185. case 534: /* lwbrx */
  2186. op->type = MKOP(LOAD, BYTEREV, 4);
  2187. break;
  2188. case 597: /* lswi */
  2189. if (rb == 0)
  2190. rb = 32; /* # bytes to load */
  2191. op->type = MKOP(LOAD_MULTI, 0, rb);
  2192. op->ea = ra ? regs->gpr[ra] : 0;
  2193. break;
  2194. #ifdef CONFIG_PPC_FPU
  2195. case 535: /* lfsx */
  2196. case 567: /* lfsux */
  2197. op->type = MKOP(LOAD_FP, u | FPCONV, 4);
  2198. break;
  2199. case 599: /* lfdx */
  2200. case 631: /* lfdux */
  2201. op->type = MKOP(LOAD_FP, u, 8);
  2202. break;
  2203. case 663: /* stfsx */
  2204. case 695: /* stfsux */
  2205. op->type = MKOP(STORE_FP, u | FPCONV, 4);
  2206. break;
  2207. case 727: /* stfdx */
  2208. case 759: /* stfdux */
  2209. op->type = MKOP(STORE_FP, u, 8);
  2210. break;
  2211. #ifdef __powerpc64__
  2212. case 791: /* lfdpx */
  2213. op->type = MKOP(LOAD_FP, 0, 16);
  2214. break;
  2215. case 855: /* lfiwax */
  2216. op->type = MKOP(LOAD_FP, SIGNEXT, 4);
  2217. break;
  2218. case 887: /* lfiwzx */
  2219. op->type = MKOP(LOAD_FP, 0, 4);
  2220. break;
  2221. case 919: /* stfdpx */
  2222. op->type = MKOP(STORE_FP, 0, 16);
  2223. break;
  2224. case 983: /* stfiwx */
  2225. op->type = MKOP(STORE_FP, 0, 4);
  2226. break;
2227. #endif /* __powerpc64__ */
  2228. #endif /* CONFIG_PPC_FPU */
  2229. #ifdef __powerpc64__
  2230. case 660: /* stdbrx */
  2231. op->type = MKOP(STORE, BYTEREV, 8);
  2232. op->val = byterev_8(regs->gpr[rd]);
  2233. break;
  2234. #endif
  2235. case 661: /* stswx */
  2236. op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
  2237. break;
  2238. case 662: /* stwbrx */
  2239. op->type = MKOP(STORE, BYTEREV, 4);
  2240. op->val = byterev_4(regs->gpr[rd]);
  2241. break;
  2242. case 725: /* stswi */
  2243. if (rb == 0)
  2244. rb = 32; /* # bytes to store */
  2245. op->type = MKOP(STORE_MULTI, 0, rb);
  2246. op->ea = ra ? regs->gpr[ra] : 0;
  2247. break;
  2248. case 790: /* lhbrx */
  2249. op->type = MKOP(LOAD, BYTEREV, 2);
  2250. break;
  2251. case 918: /* sthbrx */
  2252. op->type = MKOP(STORE, BYTEREV, 2);
  2253. op->val = byterev_2(regs->gpr[rd]);
  2254. break;
  2255. #ifdef CONFIG_VSX
  2256. case 12: /* lxsiwzx */
  2257. op->reg = rd | ((word & 1) << 5);
  2258. op->type = MKOP(LOAD_VSX, 0, 4);
  2259. op->element_size = 8;
  2260. break;
  2261. case 76: /* lxsiwax */
  2262. op->reg = rd | ((word & 1) << 5);
  2263. op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
  2264. op->element_size = 8;
  2265. break;
  2266. case 140: /* stxsiwx */
  2267. op->reg = rd | ((word & 1) << 5);
  2268. op->type = MKOP(STORE_VSX, 0, 4);
  2269. op->element_size = 8;
  2270. break;
  2271. case 268: /* lxvx */
  2272. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2273. goto unknown_opcode;
  2274. op->reg = rd | ((word & 1) << 5);
  2275. op->type = MKOP(LOAD_VSX, 0, 16);
  2276. op->element_size = 16;
  2277. op->vsx_flags = VSX_CHECK_VEC;
  2278. break;
  2279. case 269: /* lxvl */
  2280. case 301: { /* lxvll */
  2281. int nb;
  2282. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2283. goto unknown_opcode;
  2284. op->reg = rd | ((word & 1) << 5);
  2285. op->ea = ra ? regs->gpr[ra] : 0;
  2286. nb = regs->gpr[rb] & 0xff;
  2287. if (nb > 16)
  2288. nb = 16;
  2289. op->type = MKOP(LOAD_VSX, 0, nb);
  2290. op->element_size = 16;
  2291. op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
  2292. VSX_CHECK_VEC;
  2293. break;
  2294. }
  2295. case 332: /* lxvdsx */
  2296. op->reg = rd | ((word & 1) << 5);
  2297. op->type = MKOP(LOAD_VSX, 0, 8);
  2298. op->element_size = 8;
  2299. op->vsx_flags = VSX_SPLAT;
  2300. break;
  2301. case 333: /* lxvpx */
  2302. if (!cpu_has_feature(CPU_FTR_ARCH_31))
  2303. goto unknown_opcode;
  2304. op->reg = VSX_REGISTER_XTP(rd);
  2305. op->type = MKOP(LOAD_VSX, 0, 32);
  2306. op->element_size = 32;
  2307. break;
  2308. case 364: /* lxvwsx */
  2309. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2310. goto unknown_opcode;
  2311. op->reg = rd | ((word & 1) << 5);
  2312. op->type = MKOP(LOAD_VSX, 0, 4);
  2313. op->element_size = 4;
  2314. op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
  2315. break;
  2316. case 396: /* stxvx */
  2317. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2318. goto unknown_opcode;
  2319. op->reg = rd | ((word & 1) << 5);
  2320. op->type = MKOP(STORE_VSX, 0, 16);
  2321. op->element_size = 16;
  2322. op->vsx_flags = VSX_CHECK_VEC;
  2323. break;
  2324. case 397: /* stxvl */
  2325. case 429: { /* stxvll */
  2326. int nb;
  2327. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2328. goto unknown_opcode;
  2329. op->reg = rd | ((word & 1) << 5);
  2330. op->ea = ra ? regs->gpr[ra] : 0;
  2331. nb = regs->gpr[rb] & 0xff;
  2332. if (nb > 16)
  2333. nb = 16;
  2334. op->type = MKOP(STORE_VSX, 0, nb);
  2335. op->element_size = 16;
  2336. op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
  2337. VSX_CHECK_VEC;
  2338. break;
  2339. }
  2340. case 461: /* stxvpx */
  2341. if (!cpu_has_feature(CPU_FTR_ARCH_31))
  2342. goto unknown_opcode;
  2343. op->reg = VSX_REGISTER_XTP(rd);
  2344. op->type = MKOP(STORE_VSX, 0, 32);
  2345. op->element_size = 32;
  2346. break;
  2347. case 524: /* lxsspx */
  2348. op->reg = rd | ((word & 1) << 5);
  2349. op->type = MKOP(LOAD_VSX, 0, 4);
  2350. op->element_size = 8;
  2351. op->vsx_flags = VSX_FPCONV;
  2352. break;
  2353. case 588: /* lxsdx */
  2354. op->reg = rd | ((word & 1) << 5);
  2355. op->type = MKOP(LOAD_VSX, 0, 8);
  2356. op->element_size = 8;
  2357. break;
  2358. case 652: /* stxsspx */
  2359. op->reg = rd | ((word & 1) << 5);
  2360. op->type = MKOP(STORE_VSX, 0, 4);
  2361. op->element_size = 8;
  2362. op->vsx_flags = VSX_FPCONV;
  2363. break;
  2364. case 716: /* stxsdx */
  2365. op->reg = rd | ((word & 1) << 5);
  2366. op->type = MKOP(STORE_VSX, 0, 8);
  2367. op->element_size = 8;
  2368. break;
  2369. case 780: /* lxvw4x */
  2370. op->reg = rd | ((word & 1) << 5);
  2371. op->type = MKOP(LOAD_VSX, 0, 16);
  2372. op->element_size = 4;
  2373. break;
  2374. case 781: /* lxsibzx */
  2375. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2376. goto unknown_opcode;
  2377. op->reg = rd | ((word & 1) << 5);
  2378. op->type = MKOP(LOAD_VSX, 0, 1);
  2379. op->element_size = 8;
  2380. op->vsx_flags = VSX_CHECK_VEC;
  2381. break;
  2382. case 812: /* lxvh8x */
  2383. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2384. goto unknown_opcode;
  2385. op->reg = rd | ((word & 1) << 5);
  2386. op->type = MKOP(LOAD_VSX, 0, 16);
  2387. op->element_size = 2;
  2388. op->vsx_flags = VSX_CHECK_VEC;
  2389. break;
  2390. case 813: /* lxsihzx */
  2391. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2392. goto unknown_opcode;
  2393. op->reg = rd | ((word & 1) << 5);
  2394. op->type = MKOP(LOAD_VSX, 0, 2);
  2395. op->element_size = 8;
  2396. op->vsx_flags = VSX_CHECK_VEC;
  2397. break;
  2398. case 844: /* lxvd2x */
  2399. op->reg = rd | ((word & 1) << 5);
  2400. op->type = MKOP(LOAD_VSX, 0, 16);
  2401. op->element_size = 8;
  2402. break;
  2403. case 876: /* lxvb16x */
  2404. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2405. goto unknown_opcode;
  2406. op->reg = rd | ((word & 1) << 5);
  2407. op->type = MKOP(LOAD_VSX, 0, 16);
  2408. op->element_size = 1;
  2409. op->vsx_flags = VSX_CHECK_VEC;
  2410. break;
  2411. case 908: /* stxvw4x */
  2412. op->reg = rd | ((word & 1) << 5);
  2413. op->type = MKOP(STORE_VSX, 0, 16);
  2414. op->element_size = 4;
  2415. break;
  2416. case 909: /* stxsibx */
  2417. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2418. goto unknown_opcode;
  2419. op->reg = rd | ((word & 1) << 5);
  2420. op->type = MKOP(STORE_VSX, 0, 1);
  2421. op->element_size = 8;
  2422. op->vsx_flags = VSX_CHECK_VEC;
  2423. break;
  2424. case 940: /* stxvh8x */
  2425. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2426. goto unknown_opcode;
  2427. op->reg = rd | ((word & 1) << 5);
  2428. op->type = MKOP(STORE_VSX, 0, 16);
  2429. op->element_size = 2;
  2430. op->vsx_flags = VSX_CHECK_VEC;
  2431. break;
  2432. case 941: /* stxsihx */
  2433. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2434. goto unknown_opcode;
  2435. op->reg = rd | ((word & 1) << 5);
  2436. op->type = MKOP(STORE_VSX, 0, 2);
  2437. op->element_size = 8;
  2438. op->vsx_flags = VSX_CHECK_VEC;
  2439. break;
  2440. case 972: /* stxvd2x */
  2441. op->reg = rd | ((word & 1) << 5);
  2442. op->type = MKOP(STORE_VSX, 0, 16);
  2443. op->element_size = 8;
  2444. break;
  2445. case 1004: /* stxvb16x */
  2446. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2447. goto unknown_opcode;
  2448. op->reg = rd | ((word & 1) << 5);
  2449. op->type = MKOP(STORE_VSX, 0, 16);
  2450. op->element_size = 1;
  2451. op->vsx_flags = VSX_CHECK_VEC;
  2452. break;
  2453. #endif /* CONFIG_VSX */
  2454. }
  2455. break;
  2456. case 32: /* lwz */
  2457. case 33: /* lwzu */
  2458. op->type = MKOP(LOAD, u, 4);
  2459. op->ea = dform_ea(word, regs);
  2460. break;
  2461. case 34: /* lbz */
  2462. case 35: /* lbzu */
  2463. op->type = MKOP(LOAD, u, 1);
  2464. op->ea = dform_ea(word, regs);
  2465. break;
  2466. case 36: /* stw */
  2467. case 37: /* stwu */
  2468. op->type = MKOP(STORE, u, 4);
  2469. op->ea = dform_ea(word, regs);
  2470. break;
  2471. case 38: /* stb */
  2472. case 39: /* stbu */
  2473. op->type = MKOP(STORE, u, 1);
  2474. op->ea = dform_ea(word, regs);
  2475. break;
  2476. case 40: /* lhz */
  2477. case 41: /* lhzu */
  2478. op->type = MKOP(LOAD, u, 2);
  2479. op->ea = dform_ea(word, regs);
  2480. break;
  2481. case 42: /* lha */
  2482. case 43: /* lhau */
  2483. op->type = MKOP(LOAD, SIGNEXT | u, 2);
  2484. op->ea = dform_ea(word, regs);
  2485. break;
  2486. case 44: /* sth */
  2487. case 45: /* sthu */
  2488. op->type = MKOP(STORE, u, 2);
  2489. op->ea = dform_ea(word, regs);
  2490. break;
  2491. case 46: /* lmw */
  2492. if (ra >= rd)
  2493. break; /* invalid form, ra in range to load */
  2494. op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
  2495. op->ea = dform_ea(word, regs);
  2496. break;
  2497. case 47: /* stmw */
  2498. op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
  2499. op->ea = dform_ea(word, regs);
  2500. break;
  2501. #ifdef CONFIG_PPC_FPU
  2502. case 48: /* lfs */
  2503. case 49: /* lfsu */
  2504. op->type = MKOP(LOAD_FP, u | FPCONV, 4);
  2505. op->ea = dform_ea(word, regs);
  2506. break;
  2507. case 50: /* lfd */
  2508. case 51: /* lfdu */
  2509. op->type = MKOP(LOAD_FP, u, 8);
  2510. op->ea = dform_ea(word, regs);
  2511. break;
  2512. case 52: /* stfs */
  2513. case 53: /* stfsu */
  2514. op->type = MKOP(STORE_FP, u | FPCONV, 4);
  2515. op->ea = dform_ea(word, regs);
  2516. break;
  2517. case 54: /* stfd */
  2518. case 55: /* stfdu */
  2519. op->type = MKOP(STORE_FP, u, 8);
  2520. op->ea = dform_ea(word, regs);
  2521. break;
  2522. #endif
  2523. #ifdef __powerpc64__
  2524. case 56: /* lq */
  2525. if (!((rd & 1) || (rd == ra)))
  2526. op->type = MKOP(LOAD, 0, 16);
  2527. op->ea = dqform_ea(word, regs);
  2528. break;
  2529. #endif
  2530. #ifdef CONFIG_VSX
  2531. case 57: /* lfdp, lxsd, lxssp */
  2532. op->ea = dsform_ea(word, regs);
  2533. switch (word & 3) {
  2534. case 0: /* lfdp */
  2535. if (rd & 1)
  2536. break; /* reg must be even */
  2537. op->type = MKOP(LOAD_FP, 0, 16);
  2538. break;
  2539. case 2: /* lxsd */
  2540. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2541. goto unknown_opcode;
  2542. op->reg = rd + 32;
  2543. op->type = MKOP(LOAD_VSX, 0, 8);
  2544. op->element_size = 8;
  2545. op->vsx_flags = VSX_CHECK_VEC;
  2546. break;
  2547. case 3: /* lxssp */
  2548. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2549. goto unknown_opcode;
  2550. op->reg = rd + 32;
  2551. op->type = MKOP(LOAD_VSX, 0, 4);
  2552. op->element_size = 8;
  2553. op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
  2554. break;
  2555. }
  2556. break;
  2557. #endif /* CONFIG_VSX */
  2558. #ifdef __powerpc64__
  2559. case 58: /* ld[u], lwa */
  2560. op->ea = dsform_ea(word, regs);
  2561. switch (word & 3) {
  2562. case 0: /* ld */
  2563. op->type = MKOP(LOAD, 0, 8);
  2564. break;
  2565. case 1: /* ldu */
  2566. op->type = MKOP(LOAD, UPDATE, 8);
  2567. break;
  2568. case 2: /* lwa */
  2569. op->type = MKOP(LOAD, SIGNEXT, 4);
  2570. break;
  2571. }
  2572. break;
  2573. #endif
  2574. #ifdef CONFIG_VSX
  2575. case 6:
  2576. if (!cpu_has_feature(CPU_FTR_ARCH_31))
  2577. goto unknown_opcode;
  2578. op->ea = dqform_ea(word, regs);
  2579. op->reg = VSX_REGISTER_XTP(rd);
  2580. op->element_size = 32;
  2581. switch (word & 0xf) {
  2582. case 0: /* lxvp */
  2583. op->type = MKOP(LOAD_VSX, 0, 32);
  2584. break;
  2585. case 1: /* stxvp */
  2586. op->type = MKOP(STORE_VSX, 0, 32);
  2587. break;
  2588. }
  2589. break;
  2590. case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
  2591. switch (word & 7) {
  2592. case 0: /* stfdp with LSB of DS field = 0 */
  2593. case 4: /* stfdp with LSB of DS field = 1 */
  2594. op->ea = dsform_ea(word, regs);
  2595. op->type = MKOP(STORE_FP, 0, 16);
  2596. break;
  2597. case 1: /* lxv */
  2598. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2599. goto unknown_opcode;
  2600. op->ea = dqform_ea(word, regs);
  2601. if (word & 8)
  2602. op->reg = rd + 32;
  2603. op->type = MKOP(LOAD_VSX, 0, 16);
  2604. op->element_size = 16;
  2605. op->vsx_flags = VSX_CHECK_VEC;
  2606. break;
  2607. case 2: /* stxsd with LSB of DS field = 0 */
  2608. case 6: /* stxsd with LSB of DS field = 1 */
  2609. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2610. goto unknown_opcode;
  2611. op->ea = dsform_ea(word, regs);
  2612. op->reg = rd + 32;
  2613. op->type = MKOP(STORE_VSX, 0, 8);
  2614. op->element_size = 8;
  2615. op->vsx_flags = VSX_CHECK_VEC;
  2616. break;
  2617. case 3: /* stxssp with LSB of DS field = 0 */
  2618. case 7: /* stxssp with LSB of DS field = 1 */
  2619. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2620. goto unknown_opcode;
  2621. op->ea = dsform_ea(word, regs);
  2622. op->reg = rd + 32;
  2623. op->type = MKOP(STORE_VSX, 0, 4);
  2624. op->element_size = 8;
  2625. op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
  2626. break;
  2627. case 5: /* stxv */
  2628. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2629. goto unknown_opcode;
  2630. op->ea = dqform_ea(word, regs);
  2631. if (word & 8)
  2632. op->reg = rd + 32;
  2633. op->type = MKOP(STORE_VSX, 0, 16);
  2634. op->element_size = 16;
  2635. op->vsx_flags = VSX_CHECK_VEC;
  2636. break;
  2637. }
  2638. break;
  2639. #endif /* CONFIG_VSX */
  2640. #ifdef __powerpc64__
  2641. case 62: /* std[u] */
  2642. op->ea = dsform_ea(word, regs);
  2643. switch (word & 3) {
  2644. case 0: /* std */
  2645. op->type = MKOP(STORE, 0, 8);
  2646. break;
  2647. case 1: /* stdu */
  2648. op->type = MKOP(STORE, UPDATE, 8);
  2649. break;
  2650. case 2: /* stq */
  2651. if (!(rd & 1))
  2652. op->type = MKOP(STORE, 0, 16);
  2653. break;
  2654. }
  2655. break;
  2656. case 1: /* Prefixed instructions */
  2657. if (!cpu_has_feature(CPU_FTR_ARCH_31))
  2658. goto unknown_opcode;
  2659. prefix_r = GET_PREFIX_R(word);
  2660. ra = GET_PREFIX_RA(suffix);
  2661. op->update_reg = ra;
  2662. rd = (suffix >> 21) & 0x1f;
  2663. op->reg = rd;
  2664. op->val = regs->gpr[rd];
  2665. suffixopcode = get_op(suffix);
  2666. prefixtype = (word >> 24) & 0x3;
  2667. switch (prefixtype) {
  2668. case 0: /* Type 00 Eight-Byte Load/Store */
  2669. if (prefix_r && ra)
  2670. break;
  2671. op->ea = mlsd_8lsd_ea(word, suffix, regs);
  2672. switch (suffixopcode) {
  2673. case 41: /* plwa */
  2674. op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
  2675. break;
  2676. #ifdef CONFIG_VSX
  2677. case 42: /* plxsd */
  2678. op->reg = rd + 32;
  2679. op->type = MKOP(LOAD_VSX, PREFIXED, 8);
  2680. op->element_size = 8;
  2681. op->vsx_flags = VSX_CHECK_VEC;
  2682. break;
  2683. case 43: /* plxssp */
  2684. op->reg = rd + 32;
  2685. op->type = MKOP(LOAD_VSX, PREFIXED, 4);
  2686. op->element_size = 8;
  2687. op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
  2688. break;
  2689. case 46: /* pstxsd */
  2690. op->reg = rd + 32;
  2691. op->type = MKOP(STORE_VSX, PREFIXED, 8);
  2692. op->element_size = 8;
  2693. op->vsx_flags = VSX_CHECK_VEC;
  2694. break;
  2695. case 47: /* pstxssp */
  2696. op->reg = rd + 32;
  2697. op->type = MKOP(STORE_VSX, PREFIXED, 4);
  2698. op->element_size = 8;
  2699. op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
  2700. break;
  2701. case 51: /* plxv1 */
  2702. op->reg += 32;
  2703. fallthrough;
  2704. case 50: /* plxv0 */
  2705. op->type = MKOP(LOAD_VSX, PREFIXED, 16);
  2706. op->element_size = 16;
  2707. op->vsx_flags = VSX_CHECK_VEC;
  2708. break;
  2709. case 55: /* pstxv1 */
  2710. op->reg = rd + 32;
  2711. fallthrough;
  2712. case 54: /* pstxv0 */
  2713. op->type = MKOP(STORE_VSX, PREFIXED, 16);
  2714. op->element_size = 16;
  2715. op->vsx_flags = VSX_CHECK_VEC;
  2716. break;
  2717. #endif /* CONFIG_VSX */
  2718. case 56: /* plq */
  2719. op->type = MKOP(LOAD, PREFIXED, 16);
  2720. break;
  2721. case 57: /* pld */
  2722. op->type = MKOP(LOAD, PREFIXED, 8);
  2723. break;
  2724. #ifdef CONFIG_VSX
  2725. case 58: /* plxvp */
  2726. op->reg = VSX_REGISTER_XTP(rd);
  2727. op->type = MKOP(LOAD_VSX, PREFIXED, 32);
  2728. op->element_size = 32;
  2729. break;
  2730. #endif /* CONFIG_VSX */
  2731. case 60: /* pstq */
  2732. op->type = MKOP(STORE, PREFIXED, 16);
  2733. break;
  2734. case 61: /* pstd */
  2735. op->type = MKOP(STORE, PREFIXED, 8);
  2736. break;
  2737. #ifdef CONFIG_VSX
  2738. case 62: /* pstxvp */
  2739. op->reg = VSX_REGISTER_XTP(rd);
  2740. op->type = MKOP(STORE_VSX, PREFIXED, 32);
  2741. op->element_size = 32;
  2742. break;
  2743. #endif /* CONFIG_VSX */
  2744. }
  2745. break;
  2746. case 1: /* Type 01 Eight-Byte Register-to-Register */
  2747. break;
  2748. case 2: /* Type 10 Modified Load/Store */
  2749. if (prefix_r && ra)
  2750. break;
  2751. op->ea = mlsd_8lsd_ea(word, suffix, regs);
  2752. switch (suffixopcode) {
  2753. case 32: /* plwz */
  2754. op->type = MKOP(LOAD, PREFIXED, 4);
  2755. break;
  2756. case 34: /* plbz */
  2757. op->type = MKOP(LOAD, PREFIXED, 1);
  2758. break;
  2759. case 36: /* pstw */
  2760. op->type = MKOP(STORE, PREFIXED, 4);
  2761. break;
  2762. case 38: /* pstb */
  2763. op->type = MKOP(STORE, PREFIXED, 1);
  2764. break;
  2765. case 40: /* plhz */
  2766. op->type = MKOP(LOAD, PREFIXED, 2);
  2767. break;
  2768. case 42: /* plha */
  2769. op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
  2770. break;
  2771. case 44: /* psth */
  2772. op->type = MKOP(STORE, PREFIXED, 2);
  2773. break;
  2774. case 48: /* plfs */
  2775. op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
  2776. break;
  2777. case 50: /* plfd */
  2778. op->type = MKOP(LOAD_FP, PREFIXED, 8);
  2779. break;
  2780. case 52: /* pstfs */
  2781. op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
  2782. break;
  2783. case 54: /* pstfd */
  2784. op->type = MKOP(STORE_FP, PREFIXED, 8);
  2785. break;
  2786. }
  2787. break;
  2788. case 3: /* Type 11 Modified Register-to-Register */
  2789. break;
  2790. }
  2791. #endif /* __powerpc64__ */
  2792. }
  2793. if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
  2794. switch (GETTYPE(op->type)) {
  2795. case LOAD:
  2796. if (ra == rd)
  2797. goto unknown_opcode;
  2798. fallthrough;
  2799. case STORE:
  2800. case LOAD_FP:
  2801. case STORE_FP:
  2802. if (ra == 0)
  2803. goto unknown_opcode;
  2804. }
  2805. }
  2806. #ifdef CONFIG_VSX
  2807. if ((GETTYPE(op->type) == LOAD_VSX ||
  2808. GETTYPE(op->type) == STORE_VSX) &&
  2809. !cpu_has_feature(CPU_FTR_VSX)) {
  2810. return -1;
  2811. }
  2812. #endif /* CONFIG_VSX */
  2813. return 0;
  2814. unknown_opcode:
  2815. op->type = UNKNOWN;
  2816. return 0;
  2817. logical_done:
  2818. if (word & 1)
  2819. set_cr0(regs, op);
  2820. logical_done_nocc:
  2821. op->reg = ra;
  2822. op->type |= SETREG;
  2823. return 1;
  2824. arith_done:
  2825. if (word & 1)
  2826. set_cr0(regs, op);
  2827. compute_done:
  2828. op->reg = rd;
  2829. op->type |= SETREG;
  2830. return 1;
  2831. priv:
  2832. op->type = INTERRUPT | 0x700;
  2833. op->val = SRR1_PROGPRIV;
  2834. return 0;
  2835. trap:
  2836. op->type = INTERRUPT | 0x700;
  2837. op->val = SRR1_PROGTRAP;
  2838. return 0;
  2839. }
  2840. EXPORT_SYMBOL_GPL(analyse_instr);
  2841. NOKPROBE_SYMBOL(analyse_instr);
/*
 * On PPC32 the kernel always uses stwu with r1 to change the stack
 * pointer, so an emulated store through r1 could corrupt the exception
 * frame. An exception frame trampoline is therefore pushed below the
 * kprobed function's stack, and here we only update gpr[1] without
 * performing the real store. The real store is done safely in the
 * exception return code, which checks the TIF_EMULATE_STACK_STORE flag.
 */
  2850. static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
  2851. {
  2852. /*
  2853. * Check if we already set since that means we'll
  2854. * lose the previous value.
  2855. */
  2856. WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
  2857. set_thread_flag(TIF_EMULATE_STACK_STORE);
  2858. return 0;
  2859. }
  2860. static nokprobe_inline void do_signext(unsigned long *valp, int size)
  2861. {
  2862. switch (size) {
  2863. case 2:
  2864. *valp = (signed short) *valp;
  2865. break;
  2866. case 4:
  2867. *valp = (signed int) *valp;
  2868. break;
  2869. }
  2870. }
  2871. static nokprobe_inline void do_byterev(unsigned long *valp, int size)
  2872. {
  2873. switch (size) {
  2874. case 2:
  2875. *valp = byterev_2(*valp);
  2876. break;
  2877. case 4:
  2878. *valp = byterev_4(*valp);
  2879. break;
  2880. #ifdef __powerpc64__
  2881. case 8:
  2882. *valp = byterev_8(*valp);
  2883. break;
  2884. #endif
  2885. }
  2886. }
  2887. /*
  2888. * Emulate an instruction that can be executed just by updating
  2889. * fields in *regs.
  2890. */
void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
{
	unsigned long next_pc;

	/* Default successor address; 32-bit tasks get a truncated NIP. */
	next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
	switch (GETTYPE(op->type)) {
	case COMPUTE:
		/* Write back whichever result targets analyse_instr flagged. */
		if (op->type & SETREG)
			regs->gpr[op->reg] = op->val;
		if (op->type & SETCC)
			regs->ccr = op->ccval;
		if (op->type & SETXER)
			regs->xer = op->xerval;
		break;
	case BRANCH:
		/* LR gets the address of the instruction *after* the branch. */
		if (op->type & SETLK)
			regs->link = next_pc;
		/* op->val holds the branch target when the branch is taken. */
		if (op->type & BRTAKEN)
			next_pc = op->val;
		if (op->type & DECCTR)
			--regs->ctr;
		break;
	case BARRIER:
		/* Execute the real barrier instruction on the task's behalf. */
		switch (op->type & BARRIER_MASK) {
		case BARRIER_SYNC:
			mb();
			break;
		case BARRIER_ISYNC:
			isync();
			break;
		case BARRIER_EIEIO:
			eieio();
			break;
#ifdef CONFIG_PPC64
		case BARRIER_LWSYNC:
			asm volatile("lwsync" : : : "memory");
			break;
		case BARRIER_PTESYNC:
			asm volatile("ptesync" : : : "memory");
			break;
#endif
		}
		break;
	case MFSPR:
		/* Only the SPRs analyse_instr emulates can appear here. */
		switch (op->spr) {
		case SPRN_XER:
			/* XER is architecturally 32 bits wide. */
			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->gpr[op->reg] = regs->link;
			break;
		case SPRN_CTR:
			regs->gpr[op->reg] = regs->ctr;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;
	case MTSPR:
		switch (op->spr) {
		case SPRN_XER:
			regs->xer = op->val & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->link = op->val;
			break;
		case SPRN_CTR:
			regs->ctr = op->val;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;
	default:
		WARN_ON_ONCE(1);
	}
	regs_set_return_ip(regs, next_pc);
}
  2968. NOKPROBE_SYMBOL(emulate_update_regs);
  2969. /*
  2970. * Emulate a previously-analysed load or store instruction.
  2971. * Return values are:
  2972. * 0 = instruction emulated successfully
  2973. * -EFAULT = address out of range or access faulted (regs->dar
  2974. * contains the faulting address)
  2975. * -EACCES = misaligned access, instruction requires alignment
  2976. * -EINVAL = unknown operation in *op
  2977. */
int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
{
	int err, size, type;
	int i, rd, nb;
	unsigned int cr;
	unsigned long val;
	unsigned long ea;
	bool cross_endian;

	err = 0;
	size = GETSIZE(op->type);
	type = GETTYPE(op->type);
	/* Data must be byte-swapped if the task's endianness differs from the kernel's. */
	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
	ea = truncate_if_32bit(regs->msr, op->ea);
	switch (type) {
	case LARX:
		/* Load-and-reserve: must be naturally aligned. */
		if (ea & (size - 1))
			return -EACCES;		/* can't handle misaligned */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		err = 0;
		val = 0;
		switch (size) {
#ifdef CONFIG_PPC_HAS_LBARX_LHARX
		case 1:
			__get_user_asmx(val, ea, err, "lbarx");
			break;
		case 2:
			__get_user_asmx(val, ea, err, "lharx");
			break;
#endif
		case 4:
			__get_user_asmx(val, ea, err, "lwarx");
			break;
#ifdef __powerpc64__
		case 8:
			__get_user_asmx(val, ea, err, "ldarx");
			break;
		case 16:
			/* lqarx writes the even/odd GPR pair directly. */
			err = do_lqarx(ea, &regs->gpr[op->reg]);
			break;
#endif
		default:
			return -EINVAL;
		}
		if (err) {
			regs->dar = ea;
			break;
		}
		/* size == 16 already stored its result in do_lqarx(). */
		if (size < 16)
			regs->gpr[op->reg] = val;
		break;
	case STCX:
		/* Store-conditional: must be naturally aligned. */
		if (ea & (size - 1))
			return -EACCES;		/* can't handle misaligned */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		err = 0;
		switch (size) {
#ifdef __powerpc64__
		case 1:
			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
			break;
		case 2:
			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
			break;
#endif
		case 4:
			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
			break;
#ifdef __powerpc64__
		case 8:
			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
			break;
		case 16:
			err = do_stqcx(ea, regs->gpr[op->reg],
				       regs->gpr[op->reg + 1], &cr);
			break;
#endif
		default:
			return -EINVAL;
		}
		/*
		 * Propagate CR0 from the real st[bhwdq]cx. (success bit) and
		 * fold in the task's XER SO bit, as the hardware would.
		 */
		if (!err)
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
		else
			regs->dar = ea;
		break;
	case LOAD:
#ifdef __powerpc64__
		/* lq needs a GPR pair; handled by its own helper. */
		if (size == 16) {
			err = emulate_lq(regs, ea, op->reg, cross_endian);
			break;
		}
#endif
		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
		if (!err) {
			if (op->type & SIGNEXT)
				do_signext(&regs->gpr[op->reg], size);
			/* Byte-reverse iff exactly one of BYTEREV/cross_endian applies. */
			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
				do_byterev(&regs->gpr[op->reg], size);
		}
		break;
#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		/*
		 * If the instruction is in userspace, we can emulate it even
		 * if the VMX state is not live, because we have the state
		 * stored in the thread_struct. If the instruction is in
		 * the kernel, we must not touch the state in the thread_struct.
		 */
		if (!user_mode(regs) && !(regs->msr & MSR_FP))
			return 0;
		err = do_fp_load(op, ea, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		if (!user_mode(regs) && !(regs->msr & MSR_VEC))
			return 0;
		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX: {
		unsigned long msrbit = MSR_VSX;

		/*
		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
		 * when the target of the instruction is a vector register.
		 */
		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
			msrbit = MSR_VEC;
		if (!user_mode(regs) && !(regs->msr & msrbit))
			return 0;
		err = do_vsx_load(op, ea, regs, cross_endian);
		break;
	}
#endif
	case LOAD_MULTI:
		/* lmw/lsw[ix]: one 32-bit word (or tail fragment) per GPR. */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		rd = op->reg;
		for (i = 0; i < size; i += 4) {
			unsigned int v32 = 0;

			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
			if (err)
				break;
			if (unlikely(cross_endian))
				v32 = byterev_4(v32);
			regs->gpr[rd] = v32;
			ea += 4;
			/* reg number wraps from 31 to 0 for lsw[ix] */
			rd = (rd + 1) & 0x1f;
		}
		break;
	case STORE:
#ifdef __powerpc64__
		/* stq stores a GPR pair; handled by its own helper. */
		if (size == 16) {
			err = emulate_stq(regs, ea, op->reg, cross_endian);
			break;
		}
#endif
		/*
		 * A kernel stwu/stdu via r1 within the current interrupt frame
		 * would clobber that frame; defer it (see handle_stack_update).
		 */
		if ((op->type & UPDATE) && size == sizeof(long) &&
		    op->reg == 1 && op->update_reg == 1 && !user_mode(regs) &&
		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(ea, regs);
			break;
		}
		if (unlikely(cross_endian))
			do_byterev(&op->val, size);
		err = write_mem(op->val, ea, size, regs);
		break;
#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		if (!user_mode(regs) && !(regs->msr & MSR_FP))
			return 0;
		err = do_fp_store(op, ea, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		if (!user_mode(regs) && !(regs->msr & MSR_VEC))
			return 0;
		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
		break;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX: {
		unsigned long msrbit = MSR_VSX;

		/*
		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
		 * when the target of the instruction is a vector register.
		 */
		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
			msrbit = MSR_VEC;
		if (!user_mode(regs) && !(regs->msr & msrbit))
			return 0;
		err = do_vsx_store(op, ea, regs, cross_endian);
		break;
	}
#endif
	case STORE_MULTI:
		/* stmw/stsw[ix]: one 32-bit word (or tail fragment) per GPR. */
		if (!address_ok(regs, ea, size))
			return -EFAULT;
		rd = op->reg;
		for (i = 0; i < size; i += 4) {
			unsigned int v32 = regs->gpr[rd];

			nb = size - i;
			if (nb > 4)
				nb = 4;
			if (unlikely(cross_endian))
				v32 = byterev_4(v32);
			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
			if (err)
				break;
			ea += 4;
			/* reg number wraps from 31 to 0 for stsw[ix] */
			rd = (rd + 1) & 0x1f;
		}
		break;
	default:
		return -EINVAL;
	}
	if (err)
		return err;
	/* For update-form instructions, write the EA back to RA on success. */
	if (op->type & UPDATE)
		regs->gpr[op->update_reg] = op->ea;
	return 0;
}
  3210. NOKPROBE_SYMBOL(emulate_loadstore);
  3211. /*
  3212. * Emulate instructions that cause a transfer of control,
  3213. * loads and stores, and a few other instructions.
  3214. * Returns 1 if the step was emulated, 0 if not,
  3215. * or -1 if the instruction is one that should not be stepped,
  3216. * such as an rfid, or a mtmsrd that would clear MSR_RI.
  3217. */
int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
{
	struct instruction_op op;
	int r, err, type;
	unsigned long val;
	unsigned long ea;

	r = analyse_instr(&op, regs, instr);
	if (r < 0)
		return r;
	/* r > 0: analyse_instr already emulated it purely via *regs updates. */
	if (r > 0) {
		emulate_update_regs(regs, &op);
		return 1;
	}
	err = 0;
	type = GETTYPE(op.type);
	if (OP_IS_LOAD_STORE(type)) {
		err = emulate_loadstore(regs, &op);
		if (err)
			return 0;
		goto instr_done;
	}
	switch (type) {
	case CACHEOP:
		ea = truncate_if_32bit(regs->msr, op.ea);
		if (!address_ok(regs, ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(ea, err, "dcbf");
			break;
		case DCBTST:
			/* op.reg == 0 is the plain-hint TH field encoding. */
			if (op.reg == 0)
				prefetchw((void *) ea);
			break;
		case DCBT:
			if (op.reg == 0)
				prefetch((void *) ea);
			break;
		case ICBI:
			__cacheop_user_asmx(ea, err, "icbi");
			break;
		case DCBZ:
			err = emulate_dcbz(ea, regs);
			break;
		}
		if (err) {
			regs->dar = ea;
			return 0;
		}
		goto instr_done;
	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;
	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
		goto instr_done;
	case SYSCALL:	/* sc */
		/*
		 * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
		 * single step a system call instruction:
		 *
		 * Successful completion for an instruction means that the
		 * instruction caused no other interrupt. Thus a Trace
		 * interrupt never occurs for a System Call or System Call
		 * Vectored instruction, or for a Trap instruction that
		 * traps.
		 */
		return -1;
	case SYSCALL_VECTORED_0:	/* scv 0 */
		return -1;
	case RFI:
		return -1;
	}
	return 0;

 instr_done:
	/* Emulation succeeded: advance NIP past the (possibly prefixed) instruction. */
	regs_set_return_ip(regs,
		truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
	return 1;
}
  3305. NOKPROBE_SYMBOL(emulate_step);