/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"

#define MAX(a, b)		(((a) > (b)) ? (a) : (b))
#define MIN(a, b)		(((a) < (b)) ? (a) : (b))
#define REG_SAFE_BM_SIZE	ARRAY_SIZE(evergreen_reg_safe_bm)

int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
			   struct radeon_bo_list **cs_reloc);
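
/*
 * State captured from the command stream while it is parsed; most of it is
 * only validated lazily, when a draw is checked, rather than on every
 * register write (see evergreen_cs_track_check()).
 */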
struct evergreen_cs_track {
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	u32			row_size;
	/* value we track */
	u32			nsamples;		/* unused */
	struct radeon_bo	*cb_color_bo[12];
	u32			cb_color_bo_offset[12];
	struct radeon_bo	*cb_color_fmask_bo[8];	/* unused */
	struct radeon_bo	*cb_color_cmask_bo[8];	/* unused */
	u32			cb_color_info[12];
	u32			cb_color_view[12];
	u32			cb_color_pitch[12];
	u32			cb_color_slice[12];
	u32			cb_color_slice_idx[12];
	u32			cb_color_attrib[12];
	u32			cb_color_cmask_slice[8];/* unused */
	u32			cb_color_fmask_slice[8];/* unused */
	u32			cb_target_mask;
	u32			cb_shader_mask;		/* unused */
	u32			vgt_strmout_config;
	u32			vgt_strmout_buffer_config;
	struct radeon_bo	*vgt_strmout_bo[4];
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	u32			db_depth_control;
	u32			db_depth_view;
	u32			db_depth_slice;
	u32			db_depth_size;
	u32			db_z_info;
	u32			db_z_read_offset;
	u32			db_z_write_offset;
	struct radeon_bo	*db_z_read_bo;
	struct radeon_bo	*db_z_write_bo;
	u32			db_s_info;
	u32			db_s_read_offset;
	u32			db_s_write_offset;
	struct radeon_bo	*db_s_read_bo;
	struct radeon_bo	*db_s_write_bo;
	bool			sx_misc_kill_all_prims;
	bool			cb_dirty;
	bool			db_dirty;
	bool			streamout_dirty;
	u32			htile_offset;
	u32			htile_surface;
	struct radeon_bo	*htile_bo;
	unsigned long		indirect_draw_buffer_size;
	const unsigned		*reg_safe_bm;
};

static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
{
	if (tiling_flags & RADEON_TILING_MACRO)
		return ARRAY_2D_TILED_THIN1;
	else if (tiling_flags & RADEON_TILING_MICRO)
		return ARRAY_1D_TILED_THIN1;
	else
		return ARRAY_LINEAR_GENERAL;
}

static u32 evergreen_cs_get_num_banks(u32 nbanks)
{
	switch (nbanks) {
	case 2:
		return ADDR_SURF_2_BANK;
	case 4:
		return ADDR_SURF_4_BANK;
	case 8:
	default:
		return ADDR_SURF_8_BANK;
	case 16:
		return ADDR_SURF_16_BANK;
	}
}

static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0xfffffff;
		track->cb_color_slice_idx[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;

	track->db_depth_slice = 0xffffffff;
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}

struct eg_surface {
	/* value gathered from cs */
	unsigned	nbx;
	unsigned	nby;
	unsigned	format;
	unsigned	mode;
	unsigned	nbanks;
	unsigned	bankw;
	unsigned	bankh;
	unsigned	tsplit;
	unsigned	mtilea;
	unsigned	nsamples;
	/* output value */
	unsigned	bpe;
	unsigned	layer_size;
	unsigned	palign;
	unsigned	halign;
	unsigned long	base_align;
};
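
/*
 * The evergreen_surface_check_*() helpers below compute, per array mode,
 * the layer size and the pitch/height/base alignment a surface must honour,
 * then verify the values programmed by userspace against them.
 */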
static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
					  struct eg_surface *surf,
					  const char *prefix)
{
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = surf->bpe;
	surf->palign = 1;
	surf->halign = 1;
	return 0;
}

static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
						  struct eg_surface *surf,
						  const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

	palign = MAX(64, track->group_size / surf->bpe);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = track->group_size;
	surf->palign = palign;
	surf->halign = 1;
	if (surf->nbx & (palign - 1)) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nbx, palign);
		}
		return -EINVAL;
	}
	return 0;
}

static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
	palign = MAX(8, palign);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe;
	surf->base_align = track->group_size;
	surf->palign = palign;
	surf->halign = 8;
	if ((surf->nbx & (palign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
				 __func__, __LINE__, prefix, surf->nbx, palign,
				 track->group_size, surf->bpe, surf->nsamples);
		}
		return -EINVAL;
	}
	if ((surf->nby & (8 - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
				 __func__, __LINE__, prefix, surf->nby);
		}
		return -EINVAL;
	}
	return 0;
}
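
/*
 * 2D tiled surfaces are built from macro tiles whose pixel footprint
 * depends on bank width/height, the pipe and bank counts and the macro
 * tile aspect, so pitch and height must be macro-tile aligned; when an
 * 8x8 tile holds more bytes than the tile split, slice_pt below accounts
 * for the extra slices it is spread over.
 */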
static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign, halign, tileb, slice_pt;
	unsigned mtile_pr, mtile_ps, mtileb;

	tileb = 64 * surf->bpe * surf->nsamples;
	slice_pt = 1;
	if (tileb > surf->tsplit) {
		slice_pt = tileb / surf->tsplit;
	}
	tileb = tileb / slice_pt;
	/* macro tile width & height */
	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
	mtileb = (palign / 8) * (halign / 8) * tileb;
	mtile_pr = surf->nbx / palign;
	mtile_ps = (mtile_pr * surf->nby) / halign;
	surf->layer_size = mtile_ps * mtileb * slice_pt;
	surf->base_align = (palign / 8) * (halign / 8) * tileb;
	surf->palign = palign;
	surf->halign = halign;

	if ((surf->nbx & (palign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nbx, palign);
		}
		return -EINVAL;
	}
	if ((surf->nby & (halign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nby, halign);
		}
		return -EINVAL;
	}
	return 0;
}

static int evergreen_surface_check(struct radeon_cs_parser *p,
				   struct eg_surface *surf,
				   const char *prefix)
{
	/* some common value computed here */
	surf->bpe = r600_fmt_get_blocksize(surf->format);

	switch (surf->mode) {
	case ARRAY_LINEAR_GENERAL:
		return evergreen_surface_check_linear(p, surf, prefix);
	case ARRAY_LINEAR_ALIGNED:
		return evergreen_surface_check_linear_aligned(p, surf, prefix);
	case ARRAY_1D_TILED_THIN1:
		return evergreen_surface_check_1d(p, surf, prefix);
	case ARRAY_2D_TILED_THIN1:
		return evergreen_surface_check_2d(p, surf, prefix);
	default:
		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
			 __func__, __LINE__, prefix, surf->mode);
		return -EINVAL;
	}
	return -EINVAL;
}
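
/*
 * The NUM_BANKS, BANK_WIDTH, BANK_HEIGHT, MACRO_TILE_ASPECT and TILE_SPLIT
 * register fields are small encoded values; decode them into the actual
 * power-of-two quantities the surface checkers above expect. Only 2D tiled
 * surfaces use these fields, the other array modes return early.
 */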
static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
					      struct eg_surface *surf,
					      const char *prefix)
{
	switch (surf->mode) {
	case ARRAY_2D_TILED_THIN1:
		break;
	case ARRAY_LINEAR_GENERAL:
	case ARRAY_LINEAR_ALIGNED:
	case ARRAY_1D_TILED_THIN1:
		return 0;
	default:
		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
			 __func__, __LINE__, prefix, surf->mode);
		return -EINVAL;
	}

	switch (surf->nbanks) {
	case 0: surf->nbanks = 2; break;
	case 1: surf->nbanks = 4; break;
	case 2: surf->nbanks = 8; break;
	case 3: surf->nbanks = 16; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
			 __func__, __LINE__, prefix, surf->nbanks);
		return -EINVAL;
	}
	switch (surf->bankw) {
	case 0: surf->bankw = 1; break;
	case 1: surf->bankw = 2; break;
	case 2: surf->bankw = 4; break;
	case 3: surf->bankw = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
			 __func__, __LINE__, prefix, surf->bankw);
		return -EINVAL;
	}
	switch (surf->bankh) {
	case 0: surf->bankh = 1; break;
	case 1: surf->bankh = 2; break;
	case 2: surf->bankh = 4; break;
	case 3: surf->bankh = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
			 __func__, __LINE__, prefix, surf->bankh);
		return -EINVAL;
	}
	switch (surf->mtilea) {
	case 0: surf->mtilea = 1; break;
	case 1: surf->mtilea = 2; break;
	case 2: surf->mtilea = 4; break;
	case 3: surf->mtilea = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
			 __func__, __LINE__, prefix, surf->mtilea);
		return -EINVAL;
	}
	switch (surf->tsplit) {
	case 0: surf->tsplit = 64; break;
	case 1: surf->tsplit = 128; break;
	case 2: surf->tsplit = 256; break;
	case 3: surf->tsplit = 512; break;
	case 4: surf->tsplit = 1024; break;
	case 5: surf->tsplit = 2048; break;
	case 6: surf->tsplit = 4096; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
			 __func__, __LINE__, prefix, surf->tsplit);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
	pitch = track->cb_color_pitch[id];
	slice = track->cb_color_slice[id];
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
	surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
	surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
	surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
	surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
	surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
	surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
	surf.nsamples = 1;

	if (!r600_fmt_is_valid_color(surf.format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
			 __func__, __LINE__, surf.format,
			 id, track->cb_color_info[id]);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "cb");
	if (r) {
		return r;
	}

	r = evergreen_surface_check(p, &surf, "cb");
	if (r) {
		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, id, track->cb_color_pitch[id],
			 track->cb_color_slice[id], track->cb_color_attrib[id],
			 track->cb_color_info[id]);
		return r;
	}

	offset = track->cb_color_bo_offset[id] << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, id, offset, surf.base_align);
		return -EINVAL;
	}

	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
		/* old ddx are broken they allocate bo with w*h*bpp but
		 * program slice with ALIGN(h, 8), catch this and patch
		 * command stream.
		 */
		if (!surf.mode) {
			uint32_t *ib = p->ib.ptr;
			unsigned long tmp, nby, bsize, size, min = 0;

			/* find the height the ddx wants */
			if (surf.nby > 8) {
				min = surf.nby - 8;
			}
			bsize = radeon_bo_size(track->cb_color_bo[id]);
			tmp = track->cb_color_bo_offset[id] << 8;
			for (nby = surf.nby; nby > min; nby--) {
				size = nby * surf.nbx * surf.bpe * surf.nsamples;
				if ((tmp + size * mslice) <= bsize) {
					break;
				}
			}
			if (nby > min) {
				surf.nby = nby;
				slice = ((nby * surf.nbx) / 64) - 1;
				if (!evergreen_surface_check(p, &surf, "cb")) {
					/* check if this one works */
					tmp += surf.layer_size * mslice;
					if (tmp <= bsize) {
						ib[track->cb_color_slice_idx[id]] = slice;
						goto old_ddx_ok;
					}
				}
			}
		}
		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
			 "offset %d, max layer %d, bo size %ld, slice %d)\n",
			 __func__, __LINE__, id, surf.layer_size,
			 track->cb_color_bo_offset[id] << 8, mslice,
			 radeon_bo_size(track->cb_color_bo[id]), slice);
		dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
			 __func__, __LINE__, surf.nbx, surf.nby,
			 surf.mode, surf.bpe, surf.nsamples,
			 surf.bankw, surf.bankh,
			 surf.tsplit, surf.mtilea);
		return -EINVAL;
	}
old_ddx_ok:
	return 0;
}
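
/*
 * HTILE is the hyper-z metadata buffer: each 32-bit htile word covers an
 * 8x8 pixel block (hence the >> 3 on both dimensions and the * 4 below),
 * so the depth surface dimensions are first rounded up to the htile
 * granularity, which depends on the pipe count and on whether the htile
 * buffer is linear, before sizing the buffer.
 */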
static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
					     unsigned nbx, unsigned nby)
{
	struct evergreen_cs_track *track = p->track;
	unsigned long size;

	if (track->htile_bo == NULL) {
		dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
			 __func__, __LINE__, track->db_z_info);
		return -EINVAL;
	}

	if (G_028ABC_LINEAR(track->htile_surface)) {
		/* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
		nbx = round_up(nbx, 16 * 8);
		/* height is npipes htiles aligned == npipes * 8 pixel aligned */
		nby = round_up(nby, track->npipes * 8);
	} else {
		/* always assume 8x8 htile */
		/* align is htile align * 8, htile align vary according to
		 * number of pipe and tile width and nby
		 */
		switch (track->npipes) {
		case 8:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 64 * 8);
			nby = round_up(nby, 64 * 8);
			break;
		case 4:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 64 * 8);
			nby = round_up(nby, 32 * 8);
			break;
		case 2:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 32 * 8);
			nby = round_up(nby, 32 * 8);
			break;
		case 1:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 32 * 8);
			nby = round_up(nby, 16 * 8);
			break;
		default:
			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
				 __func__, __LINE__, track->npipes);
			return -EINVAL;
		}
	}
	/* compute number of htile */
	nbx = nbx >> 3;
	nby = nby >> 3;
	/* size must be aligned on npipes * 2K boundary */
	size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
	size += track->htile_offset;

	if (size > radeon_bo_size(track->htile_bo)) {
		dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
			 __func__, __LINE__, radeon_bo_size(track->htile_bo),
			 size, nbx, nby);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028044_FORMAT(track->db_s_info);
	surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	surf.nsamples = 1;

	if (surf.format != 1) {
		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	/* replace by color format so we can use same code */
	surf.format = V_028C70_COLOR_8;

	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
	if (r) {
		return r;
	}

	r = evergreen_surface_check(p, &surf, NULL);
	if (r) {
		/* old userspace doesn't compute proper depth/stencil alignment
		 * check that alignment against a bigger byte per elements and
		 * only report if that alignment is wrong too.
		 */
		surf.format = V_028C70_COLOR_8_8_8_8;
		r = evergreen_surface_check(p, &surf, "stencil");
		if (r) {
			dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
				 __func__, __LINE__, track->db_depth_size,
				 track->db_depth_slice, track->db_s_info, track->db_z_info);
		}
		return r;
	}

	offset = track->db_s_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_s_read_bo)) {
		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_s_read_offset << 8, mslice,
			 radeon_bo_size(track->db_s_read_bo));
		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_s_info, track->db_z_info);
		return -EINVAL;
	}

	offset = track->db_s_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_s_write_bo)) {
		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_s_write_offset << 8, mslice,
			 radeon_bo_size(track->db_s_write_bo));
		return -EINVAL;
	}

	/* hyperz */
	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
		if (r) {
			return r;
		}
	}

	return 0;
}

static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028040_FORMAT(track->db_z_info);
	surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	surf.nsamples = 1;

	/* replace by a color format of the same element size so we can
	 * reuse the color surface checkers */
	switch (surf.format) {
	case V_028040_Z_16:
		surf.format = V_028C70_COLOR_16;
		break;
	case V_028040_Z_24:
	case V_028040_Z_32_FLOAT:
		surf.format = V_028C70_COLOR_8_8_8_8;
		break;
	default:
		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}

	r = evergreen_surface_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}

	offset = track->db_z_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_z_read_bo)) {
		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_z_read_offset << 8, mslice,
			 radeon_bo_size(track->db_z_read_bo));
		return -EINVAL;
	}

	offset = track->db_z_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_z_write_bo)) {
		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_z_write_offset << 8, mslice,
			 radeon_bo_size(track->db_z_write_bo));
		return -EINVAL;
	}

	/* hyperz */
	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
		if (r) {
			return r;
		}
	}

	return 0;
}
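
/*
 * A texture resource is described by eight dwords in the IB; decode the
 * fields we need, validate the base level against the texture BO, then
 * walk the mip chain (minifying and re-aligning each level, and falling
 * back from 2D to 1D tiling once a level is smaller than a macro tile)
 * against the mipmap BO.
 */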
static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
					       struct radeon_bo *texture,
					       struct radeon_bo *mipmap,
					       unsigned idx)
{
	struct eg_surface surf;
	unsigned long toffset, moffset;
	unsigned dim, llevel, mslice, width, height, depth, i;
	u32 texdw[8];
	int r;

	texdw[0] = radeon_get_ib_value(p, idx + 0);
	texdw[1] = radeon_get_ib_value(p, idx + 1);
	texdw[2] = radeon_get_ib_value(p, idx + 2);
	texdw[3] = radeon_get_ib_value(p, idx + 3);
	texdw[4] = radeon_get_ib_value(p, idx + 4);
	texdw[5] = radeon_get_ib_value(p, idx + 5);
	texdw[6] = radeon_get_ib_value(p, idx + 6);
	texdw[7] = radeon_get_ib_value(p, idx + 7);
	dim = G_030000_DIM(texdw[0]);
	llevel = G_030014_LAST_LEVEL(texdw[5]);
	mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
	width = G_030000_TEX_WIDTH(texdw[0]) + 1;
	height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
	depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
	surf.format = G_03001C_DATA_FORMAT(texdw[7]);
	surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
	surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
	surf.nby = r600_fmt_get_nblocksy(surf.format, height);
	surf.mode = G_030004_ARRAY_MODE(texdw[1]);
	surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
	surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
	surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
	surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
	surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
	surf.nsamples = 1;
	toffset = texdw[2] << 8;
	moffset = texdw[3] << 8;

	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	switch (dim) {
	case V_030000_SQ_TEX_DIM_1D:
	case V_030000_SQ_TEX_DIM_2D:
	case V_030000_SQ_TEX_DIM_CUBEMAP:
	case V_030000_SQ_TEX_DIM_1D_ARRAY:
	case V_030000_SQ_TEX_DIM_2D_ARRAY:
		depth = 1;
		break;
	case V_030000_SQ_TEX_DIM_2D_MSAA:
	case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
		surf.nsamples = 1 << llevel;
		llevel = 0;
		depth = 1;
		break;
	case V_030000_SQ_TEX_DIM_3D:
		break;
	default:
		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
			 __func__, __LINE__, dim);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "texture");
	if (r) {
		return r;
	}

	/* align height */
	evergreen_surface_check(p, &surf, NULL);
	surf.nby = ALIGN(surf.nby, surf.halign);

	r = evergreen_surface_check(p, &surf, "texture");
	if (r) {
		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
			 texdw[5], texdw[6], texdw[7]);
		return r;
	}

	/* check texture size */
	if (toffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, toffset, surf.base_align);
		return -EINVAL;
	}
	if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, moffset, surf.base_align);
		return -EINVAL;
	}
	if (dim == SQ_TEX_DIM_3D) {
		toffset += surf.layer_size * depth;
	} else {
		toffset += surf.layer_size * mslice;
	}
	if (toffset > radeon_bo_size(texture)) {
		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
			 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)texdw[2] << 8, mslice,
			 depth, radeon_bo_size(texture),
			 surf.nbx, surf.nby);
		return -EINVAL;
	}

	if (!mipmap) {
		if (llevel) {
			dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
				 __func__, __LINE__);
			return -EINVAL;
		} else {
			return 0; /* everything's ok */
		}
	}

	/* check mipmap size */
	for (i = 1; i <= llevel; i++) {
		unsigned w, h, d;

		w = r600_mip_minify(width, i);
		h = r600_mip_minify(height, i);
		d = r600_mip_minify(depth, i);
		surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
		surf.nby = r600_fmt_get_nblocksy(surf.format, h);

		switch (surf.mode) {
		case ARRAY_2D_TILED_THIN1:
			if (surf.nbx < surf.palign || surf.nby < surf.halign) {
				surf.mode = ARRAY_1D_TILED_THIN1;
			}
			/* recompute alignment */
			evergreen_surface_check(p, &surf, NULL);
			break;
		case ARRAY_LINEAR_GENERAL:
		case ARRAY_LINEAR_ALIGNED:
		case ARRAY_1D_TILED_THIN1:
			break;
		default:
			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
				 __func__, __LINE__, surf.mode);
			return -EINVAL;
		}
		surf.nbx = ALIGN(surf.nbx, surf.palign);
		surf.nby = ALIGN(surf.nby, surf.halign);

		r = evergreen_surface_check(p, &surf, "mipmap");
		if (r) {
			return r;
		}

		if (dim == SQ_TEX_DIM_3D) {
			moffset += surf.layer_size * d;
		} else {
			moffset += surf.layer_size * mslice;
		}
		if (moffset > radeon_bo_size(mipmap)) {
			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
				 "offset %ld, coffset %ld, max layer %d, depth %d, "
				 "bo size %ld) level0 (%d %d %d)\n",
				 __func__, __LINE__, i, surf.layer_size,
				 (unsigned long)texdw[3] << 8, moffset, mslice,
				 d, radeon_bo_size(mipmap),
				 width, height, depth);
			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
				 __func__, __LINE__, surf.nbx, surf.nby,
				 surf.mode, surf.bpe, surf.nsamples,
				 surf.bankw, surf.bankh,
				 surf.tsplit, surf.mtilea);
			return -EINVAL;
		}
	}
	return 0;
}
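
/*
 * Run before a draw is allowed to execute: validate whatever state has been
 * marked dirty since the last check (streamout buffers, enabled color
 * targets, depth and stencil surfaces) against the sizes of the bound
 * buffer objects.
 */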
static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	unsigned tmp, i;
	int r;
	unsigned buffer_mask = 0;

	/* check streamout */
	if (track->streamout_dirty && track->vgt_strmout_config) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_config & (1 << i)) {
				buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
			}
		}

		for (i = 0; i < 4; i++) {
			if (buffer_mask & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
							(u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
							  i, offset,
							  radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
		track->streamout_dirty = false;
	}

	if (track->sx_misc_kill_all_prims)
		return 0;

	/* check that we have a cb for each enabled target
	 */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;
		for (i = 0; i < 8; i++) {
			u32 format = G_028C70_FORMAT(track->cb_color_info[i]);

			if (format != V_028C70_COLOR_INVALID &&
			    (tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
					return -EINVAL;
				}
				/* check cb */
				r = evergreen_cs_track_validate_cb(p, i);
				if (r) {
					return r;
				}
			}
		}
		track->cb_dirty = false;
	}

	if (track->db_dirty) {
		/* Check stencil buffer */
		if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
		    G_028800_STENCIL_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_stencil(p);
			if (r)
				return r;
		}
		/* Check depth buffer */
		if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
		    G_028800_Z_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_depth(p);
			if (r)
				return r;
		}
		track->db_dirty = false;
	}

	return 0;
}

/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * This is an Evergreen(+)-specific function for parsing VLINE packets.
 * Real work is done by r600_cs_common_vline_parse function.
 * Here we just set up ASIC-specific register table and call
 * the common implementation function.
 */
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	static uint32_t vline_start_end[6] = {
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
	};
	static uint32_t vline_status[6] = {
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
	};

	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}

static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		pr_err("Forbidden register 0x%04X in cs at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * evergreen_cs_handle_reg() - process registers that need special handling.
 * @p: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 */
  1009. static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
  1010. {
  1011. struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
  1012. struct radeon_bo_list *reloc;
  1013. u32 tmp, *ib;
  1014. int r;
  1015. ib = p->ib.ptr;
  1016. switch (reg) {
  1017. /* force following reg to 0 in an attempt to disable out buffer
  1018. * which will need us to better understand how it works to perform
  1019. * security check on it (Jerome)
  1020. */
  1021. case SQ_ESGS_RING_SIZE:
  1022. case SQ_GSVS_RING_SIZE:
  1023. case SQ_ESTMP_RING_SIZE:
  1024. case SQ_GSTMP_RING_SIZE:
  1025. case SQ_HSTMP_RING_SIZE:
  1026. case SQ_LSTMP_RING_SIZE:
  1027. case SQ_PSTMP_RING_SIZE:
  1028. case SQ_VSTMP_RING_SIZE:
  1029. case SQ_ESGS_RING_ITEMSIZE:
  1030. case SQ_ESTMP_RING_ITEMSIZE:
  1031. case SQ_GSTMP_RING_ITEMSIZE:
  1032. case SQ_GSVS_RING_ITEMSIZE:
  1033. case SQ_GS_VERT_ITEMSIZE:
  1034. case SQ_GS_VERT_ITEMSIZE_1:
  1035. case SQ_GS_VERT_ITEMSIZE_2:
  1036. case SQ_GS_VERT_ITEMSIZE_3:
  1037. case SQ_GSVS_RING_OFFSET_1:
  1038. case SQ_GSVS_RING_OFFSET_2:
  1039. case SQ_GSVS_RING_OFFSET_3:
  1040. case SQ_HSTMP_RING_ITEMSIZE:
  1041. case SQ_LSTMP_RING_ITEMSIZE:
  1042. case SQ_PSTMP_RING_ITEMSIZE:
  1043. case SQ_VSTMP_RING_ITEMSIZE:
  1044. case VGT_TF_RING_SIZE:
  1045. /* get value to populate the IB don't remove */
  1046. /*tmp =radeon_get_ib_value(p, idx);
  1047. ib[idx] = 0;*/
  1048. break;
  1049. case SQ_ESGS_RING_BASE:
  1050. case SQ_GSVS_RING_BASE:
  1051. case SQ_ESTMP_RING_BASE:
  1052. case SQ_GSTMP_RING_BASE:
  1053. case SQ_HSTMP_RING_BASE:
  1054. case SQ_LSTMP_RING_BASE:
  1055. case SQ_PSTMP_RING_BASE:
  1056. case SQ_VSTMP_RING_BASE:
  1057. r = radeon_cs_packet_next_reloc(p, &reloc, 0);
  1058. if (r) {
  1059. dev_warn(p->dev, "bad SET_CONTEXT_REG "
  1060. "0x%04X\n", reg);
  1061. return -EINVAL;
  1062. }
  1063. ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
  1064. break;
  1065. case DB_DEPTH_CONTROL:
  1066. track->db_depth_control = radeon_get_ib_value(p, idx);
  1067. track->db_dirty = true;
  1068. break;
  1069. case CAYMAN_DB_EQAA:
  1070. if (p->rdev->family < CHIP_CAYMAN) {
  1071. dev_warn(p->dev, "bad SET_CONTEXT_REG "
  1072. "0x%04X\n", reg);
  1073. return -EINVAL;
  1074. }
  1075. break;
  1076. case CAYMAN_DB_DEPTH_INFO:
  1077. if (p->rdev->family < CHIP_CAYMAN) {
  1078. dev_warn(p->dev, "bad SET_CONTEXT_REG "
  1079. "0x%04X\n", reg);
  1080. return -EINVAL;
  1081. }
  1082. break;
	case DB_Z_INFO:
		track->db_z_info = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] &= ~Z_ARRAY_MODE(0xf);
			track->db_z_info &= ~Z_ARRAY_MODE(0xf);
			ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
			track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= DB_TILE_SPLIT(tile_split) |
					   DB_BANK_WIDTH(bankw) |
					   DB_BANK_HEIGHT(bankh) |
					   DB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		track->db_dirty = true;
		break;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_02805C_DB_DEPTH_SLICE:
		track->db_depth_slice = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_Z_READ_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_Z_WRITE_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_STENCIL_READ_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
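		/* the four BASE registers are spaced 16 bytes apart, so the
		 * offset from BASE_0 divided by 16 yields the buffer index
		 */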
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		break;
	case CP_COHER_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case PA_SC_AA_CONFIG:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CAYMAN_PA_SC_AA_CONFIG:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CB_COLOR0_VIEW:
	case CB_COLOR1_VIEW:
	case CB_COLOR2_VIEW:
	case CB_COLOR3_VIEW:
	case CB_COLOR4_VIEW:
	case CB_COLOR5_VIEW:
	case CB_COLOR6_VIEW:
	case CB_COLOR7_VIEW:
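		/* CB_COLOR0..7 register blocks are 0x3c bytes apart; targets
		 * 8..11 live in tighter 0x1c blocks, handled separately below
		 */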
		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR8_VIEW:
	case CB_COLOR9_VIEW:
	case CB_COLOR10_VIEW:
	case CB_COLOR11_VIEW:
		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
		}
		track->cb_dirty = true;
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
		}
		track->cb_dirty = true;
		break;
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	case CB_COLOR0_ATTRIB:
	case CB_COLOR1_ATTRIB:
	case CB_COLOR2_ATTRIB:
	case CB_COLOR3_ATTRIB:
	case CB_COLOR4_ATTRIB:
	case CB_COLOR5_ATTRIB:
	case CB_COLOR6_ATTRIB:
	case CB_COLOR7_ATTRIB:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
		track->cb_color_attrib[tmp] = ib[idx];
		track->cb_dirty = true;
		break;
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
		track->cb_color_attrib[tmp] = ib[idx];
		track->cb_dirty = true;
		break;
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_dirty = true;
		break;
	case CB_COLOR8_BASE:
	case CB_COLOR9_BASE:
	case CB_COLOR10_BASE:
	case CB_COLOR11_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_dirty = true;
		break;
	case DB_HTILE_DATA_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		track->htile_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->htile_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_HTILE_SURFACE:
		/* 8x8 only */
		track->htile_surface = radeon_get_ib_value(p, idx);
		/* force 8x8 htile width and height */
		ib[idx] |= 3;
		track->db_dirty = true;
		break;
	case CB_IMMED0_BASE:
	case CB_IMMED1_BASE:
	case CB_IMMED2_BASE:
	case CB_IMMED3_BASE:
	case CB_IMMED4_BASE:
	case CB_IMMED5_BASE:
	case CB_IMMED6_BASE:
	case CB_IMMED7_BASE:
	case CB_IMMED8_BASE:
	case CB_IMMED9_BASE:
	case CB_IMMED10_BASE:
	case CB_IMMED11_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_PGM_START_HS:
	case SQ_PGM_START_LS:
	case SQ_CONST_MEM_BASE:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
	case SQ_ALU_CONST_CACHE_HS_0:
	case SQ_ALU_CONST_CACHE_HS_1:
	case SQ_ALU_CONST_CACHE_HS_2:
	case SQ_ALU_CONST_CACHE_HS_3:
	case SQ_ALU_CONST_CACHE_HS_4:
	case SQ_ALU_CONST_CACHE_HS_5:
	case SQ_ALU_CONST_CACHE_HS_6:
	case SQ_ALU_CONST_CACHE_HS_7:
	case SQ_ALU_CONST_CACHE_HS_8:
	case SQ_ALU_CONST_CACHE_HS_9:
	case SQ_ALU_CONST_CACHE_HS_10:
	case SQ_ALU_CONST_CACHE_HS_11:
	case SQ_ALU_CONST_CACHE_HS_12:
	case SQ_ALU_CONST_CACHE_HS_13:
	case SQ_ALU_CONST_CACHE_HS_14:
	case SQ_ALU_CONST_CACHE_HS_15:
	case SQ_ALU_CONST_CACHE_LS_0:
	case SQ_ALU_CONST_CACHE_LS_1:
	case SQ_ALU_CONST_CACHE_LS_2:
	case SQ_ALU_CONST_CACHE_LS_3:
	case SQ_ALU_CONST_CACHE_LS_4:
	case SQ_ALU_CONST_CACHE_LS_5:
	case SQ_ALU_CONST_CACHE_LS_6:
	case SQ_ALU_CONST_CACHE_LS_7:
	case SQ_ALU_CONST_CACHE_LS_8:
	case SQ_ALU_CONST_CACHE_LS_9:
	case SQ_ALU_CONST_CACHE_LS_10:
	case SQ_ALU_CONST_CACHE_LS_11:
	case SQ_ALU_CONST_CACHE_LS_12:
	case SQ_ALU_CONST_CACHE_LS_13:
	case SQ_ALU_CONST_CACHE_LS_14:
	case SQ_ALU_CONST_CACHE_LS_15:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONFIG_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case CAYMAN_SX_SCATTER_EXPORT_BASE:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MISC:
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

/**
 * evergreen_is_safe_reg() - check if register is authorized or not
 * @p: parser structure holding parsing context
 * @reg: register we are testing
 *
 * This function tests against reg_safe_bm and returns true
 * if the register is safe, false otherwise.
 */
static inline bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg)
{
	struct evergreen_cs_track *track = p->track;
	u32 m, i;
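
	/* one bitmap word covers 32 registers spaced 4 bytes apart: the
	 * word index is reg >> 7 and the bit index is (reg >> 2) & 31.
	 * A clear bit means the register is safe; a set bit, or any
	 * register beyond the bitmap, is reported unsafe so the caller
	 * runs evergreen_cs_handle_reg() on it.
	 */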
	i = (reg >> 7);
	if (unlikely(i >= REG_SAFE_BM_SIZE)) {
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(track->reg_safe_bm[i] & m))
		return true;

	return false;
}

static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_bo_list *reloc;
	struct evergreen_cs_track *track;
	uint32_t *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);
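	/* idx points at the first dword of the packet body, one past the
	 * PKT3 header; idx_value caches that dword for the opcodes keyed
	 * off it
	 */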

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		uint64_t offset;
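
		/* body layout: DW1 carries the low address bits in [31:4],
		 * DW2 the top address byte in [7:0] and PRED_OP in [18:16]
		 */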
		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case CAYMAN_PACKET3_DEALLOC_STATE:
		if (p->rdev->family < CHIP_CAYMAN) {
			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
	{
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_INDEX_BUFFER_SIZE:
	{
		if (pkt->count != 0) {
			DRM_ERROR("bad INDEX_BUFFER_SIZE\n");
			return -EINVAL;
		}
		break;
	}
	case PACKET3_DRAW_INDEX:
	{
		uint64_t offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_2:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
			 radeon_get_ib_value(p, idx+1) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset;
		ib[idx+2] = upper_32_bits(offset) & 0xff;

		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET_2:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_SET_BASE:
	{
		/*
		 * DW 1 HEADER  Header of the packet. Shader_Type in bit 1 of the Header
		 *              will correspond to the shader type of the Load, see Type-3 Packet.
		 *    2 BASE_INDEX  Bits [3:0] BASE_INDEX - Base Index specifies which base
		 *              address is specified in the last two DWs.
		 *              0001: DX11 Draw_Index_Indirect Patch Table Base: Base address
		 *              for Draw_Index_Indirect data.
		 *    3 ADDRESS_LO  Bits [31:3] - Lower bits of QWORD-Aligned Address.
		 *              Bits [2:0] - Reserved
		 *    4 ADDRESS_HI  Bits [31:8] - Reserved. Bits [7:0] - Upper bits of
		 *              Address [47:32]
		 */
		if (pkt->count != 2) {
			DRM_ERROR("bad SET_BASE\n");
			return -EINVAL;
		}

		/* currently only supporting setting indirect draw buffer base address */
		if (idx_value != 1) {
			DRM_ERROR("bad SET_BASE\n");
			return -EINVAL;
		}

		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad SET_BASE\n");
			return -EINVAL;
		}

		track->indirect_draw_buffer_size = radeon_bo_size(reloc->robj);

		ib[idx+1] = reloc->gpu_offset;
		ib[idx+2] = upper_32_bits(reloc->gpu_offset) & 0xff;
		break;
	}
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	{
		u64 size = pkt->opcode == PACKET3_DRAW_INDIRECT ? 16 : 20;

		/*
		 * DW 1 HEADER
		 *    2 DATA_OFFSET  Bits [31:0] + byte aligned offset where the required
		 *              data structure starts. Bits 1:0 are zero
		 *    3 DRAW_INITIATOR  Draw Initiator Register. Written to the
		 *              VGT_DRAW_INITIATOR register for the assigned context
		 */
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDIRECT\n");
			return -EINVAL;
		}

		if (idx_value + size > track->indirect_draw_buffer_size) {
			dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
				 idx_value, size, track->indirect_draw_buffer_size);
			return -EINVAL;
		}

		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DISPATCH_DIRECT:
		if (pkt->count != 3) {
			DRM_ERROR("bad DISPATCH_DIRECT\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DISPATCH_INDIRECT:
		if (pkt->count != 1) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff);
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}

			offset = reloc->gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else if (idx_value & 0x100) {
			DRM_ERROR("cannot use PFP on REG wait\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
	{
		u32 command, size, info;
		u64 offset, tmp;

		if (pkt->count != 4) {
			DRM_ERROR("bad CP DMA\n");
			return -EINVAL;
		}
		command = radeon_get_ib_value(p, idx+4);
		size = command & 0x1fffff;
		info = radeon_get_ib_value(p, idx+1);
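		/* info dword: bits [30:29] select the src address space
		 * (0 = memory, 1 = GDS, 2 = DATA) and bits [21:20] the dst
		 * space (0 = memory, 1 = GDS); the SAS/DAS command flags
		 * mark register/GDS transfers that bypass the relocs below
		 */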
		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
		    ((((info & 0x00300000) >> 20) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
		    ((((info & 0x60000000) >> 29) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
			/* non mem to mem copies requires dw aligned count */
			if (size % 4) {
				DRM_ERROR("CP DMA command requires dw count alignment\n");
				return -EINVAL;
			}
		}
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			/* GDS is ok */
			if (((info & 0x60000000) >> 29) != 1) {
				DRM_ERROR("CP DMA SAS not supported\n");
				return -EINVAL;
			}
		} else {
			if (command & PACKET3_CP_DMA_CMD_SAIC) {
				DRM_ERROR("CP DMA SAIC only supported for registers\n");
				return -EINVAL;
			}
			/* src address space is memory */
			if (((info & 0x60000000) >> 29) == 0) {
				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
				if (r) {
					DRM_ERROR("bad CP DMA SRC\n");
					return -EINVAL;
				}

				tmp = radeon_get_ib_value(p, idx) +
					((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

				offset = reloc->gpu_offset + tmp;

				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
					dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
						 tmp + size, radeon_bo_size(reloc->robj));
					return -EINVAL;
				}

				ib[idx] = offset;
				ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
			} else if (((info & 0x60000000) >> 29) != 2) {
				DRM_ERROR("bad CP DMA SRC_SEL\n");
				return -EINVAL;
			}
		}
		if (command & PACKET3_CP_DMA_CMD_DAS) {
			/* dst address space is register */
			/* GDS is ok */
			if (((info & 0x00300000) >> 20) != 1) {
				DRM_ERROR("CP DMA DAS not supported\n");
				return -EINVAL;
			}
		} else {
			/* dst address space is memory */
			if (command & PACKET3_CP_DMA_CMD_DAIC) {
				DRM_ERROR("CP DMA DAIC only supported for registers\n");
				return -EINVAL;
			}
			if (((info & 0x00300000) >> 20) == 0) {
				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
				if (r) {
					DRM_ERROR("bad CP DMA DST\n");
					return -EINVAL;
				}

				tmp = radeon_get_ib_value(p, idx+2) +
					((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);

				offset = reloc->gpu_offset + tmp;

				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
					dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
						 tmp + size, radeon_bo_size(reloc->robj));
					return -EINVAL;
				}

				ib[idx+2] = offset;
				ib[idx+3] = upper_32_bits(offset) & 0xff;
			} else {
				DRM_ERROR("bad CP DMA DST_SEL\n");
				return -EINVAL;
			}
		}
		break;
	}
	case PACKET3_PFP_SYNC_ME:
		if (pkt->count) {
			DRM_ERROR("bad PFP_SYNC_ME\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_EVENT_WRITE_EOS:
	{
		uint64_t offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
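		/* idx_value holds (reg - START) >> 2 from the packet, so
		 * recover the byte-addressed window and reject anything
		 * outside the CONFIG_REG range before walking it
		 */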
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
			if (evergreen_is_safe_reg(p, reg))
				continue;
			r = evergreen_cs_handle_reg(p, reg, idx);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
			if (evergreen_is_safe_reg(p, reg))
				continue;
			r = evergreen_cs_handle_reg(p, reg, idx);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
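		/* each resource descriptor is 8 dwords, hence the count % 8
		 * check above; validate every descriptor in the packet
		 */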
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 toffset, moffset;
			u32 size, offset, mip_address, tex_dim;

			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					ib[idx+1+(i*8)+1] |=
						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
					if (reloc->tiling_flags & RADEON_TILING_MACRO) {
						unsigned bankw, bankh, mtaspect, tile_split;

						evergreen_tiling_fields(reloc->tiling_flags,
									&bankw, &bankh, &mtaspect,
									&tile_split);
						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
						ib[idx+1+(i*8)+7] |=
							TEX_BANK_WIDTH(bankw) |
							TEX_BANK_HEIGHT(bankh) |
							MACRO_TILE_ASPECT(mtaspect) |
							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
					}
				}
				texture = reloc->robj;
				toffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);

				/* tex mip base */
				tex_dim = ib[idx+1+(i*8)+0] & 0x7;
				mip_address = ib[idx+1+(i*8)+3];

				if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
				    !mip_address &&
				    !radeon_cs_packet_next_is_pkt3_nop(p)) {
					/* MIP_ADDRESS should point to FMASK for an MSAA texture.
					 * It should be 0 if FMASK is disabled. */
					moffset = 0;
					mipmap = NULL;
				} else {
					r = radeon_cs_packet_next_reloc(p, &reloc, 0);
					if (r) {
						DRM_ERROR("bad SET_RESOURCE (tex)\n");
						return -EINVAL;
					}
					moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
					mipmap = reloc->robj;
				}
				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
				if (r)
					return r;
				ib[idx+1+(i*8)+2] += toffset;
				ib[idx+1+(i*8)+3] += moffset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;

				/* vtx base */
				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->gpu_offset + offset;
				ib[idx+1+(i*8)+0] = offset64;
				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
				  offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;

			/* SRC is memory. */
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!evergreen_is_safe_reg(p, reg)) {
				dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
					 reg, idx + 1);
				return -EINVAL;
			}
		}
		if (idx_value & 0x2) {
			u64 offset;

			/* DST is memory. */
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!evergreen_is_safe_reg(p, reg)) {
				dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
					 reg, idx + 3);
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_APPEND_CNT:
	{
		uint32_t areg;
		uint32_t allowed_reg_base;
		uint32_t source_sel;

		if (pkt->count != 2) {
			DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
			return -EINVAL;
		}

		allowed_reg_base = GDS_APPEND_COUNT_0;
		allowed_reg_base -= PACKET3_SET_CONTEXT_REG_START;
		allowed_reg_base >>= 2;
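		/* the register in bits [31:16] of the first body dword is a
		 * dword offset from SET_CONTEXT_REG_START; only the twelve
		 * GDS_APPEND_COUNT registers are accepted
		 */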
		areg = idx_value >> 16;
		if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
			dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
				 areg, idx);
			return -EINVAL;
		}

		source_sel = G_PACKET3_SET_APPEND_CNT_SRC_SELECT(idx_value);
		if (source_sel == PACKET3_SAC_SRC_SEL_MEM) {
			uint64_t offset;
			uint32_t swap;

			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad SET_APPEND_CNT (missing reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx + 1);
			swap = offset & 0x3;
			offset &= ~0x3;

			offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;

			offset += reloc->gpu_offset;
			ib[idx+1] = (offset & 0xfffffffc) | swap;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			DRM_ERROR("bad SET_APPEND_CNT (unsupported operation)\n");
			return -EINVAL;
		}
		break;
	}
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	u32 tmp;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		if (p->rdev->family >= CHIP_CAYMAN) {
			tmp = p->rdev->config.cayman.tile_config;
			track->reg_safe_bm = cayman_reg_safe_bm;
		} else {
			tmp = p->rdev->config.evergreen.tile_config;
			track->reg_safe_bm = evergreen_reg_safe_bm;
		}
		BUILD_BUG_ON(ARRAY_SIZE(cayman_reg_safe_bm) != REG_SAFE_BM_SIZE);
		BUILD_BUG_ON(ARRAY_SIZE(evergreen_reg_safe_bm) != REG_SAFE_BM_SIZE);
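
		/* tile_config packs the pipe count in bits [3:0], the bank
		 * count in [7:4], the group size in [11:8] and the row size
		 * in [15:12]; decode them into the tracker
		 */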
		switch (tmp & 0xf) {
		case 0:
			track->npipes = 1;
			break;
		case 1:
		default:
			track->npipes = 2;
			break;
		case 2:
			track->npipes = 4;
			break;
		case 3:
			track->npipes = 8;
			break;
		}
		switch ((tmp & 0xf0) >> 4) {
		case 0:
			track->nbanks = 4;
			break;
		case 1:
		default:
			track->nbanks = 8;
			break;
		case 2:
			track->nbanks = 16;
			break;
		}
		switch ((tmp & 0xf00) >> 8) {
		case 0:
			track->group_size = 256;
			break;
		case 1:
		default:
			track->group_size = 512;
			break;
		}
		switch ((tmp & 0xf000) >> 12) {
		case 0:
			track->row_size = 1;
			break;
		case 1:
		default:
			track->row_size = 2;
			break;
		case 2:
			track->row_size = 4;
			break;
		}
		p->track = track;
	}
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case RADEON_PACKET_TYPE2:
			break;
		case RADEON_PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunk_ib->length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		pr_info("%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}

/**
 * evergreen_dma_cs_parse() - parse the DMA IB
 * @p: parser structure holding parsing context.
 *
 * Parses the DMA IB from the CS ioctl and updates
 * the GPU addresses based on the reloc information and
 * checks for errors. (Evergreen-Cayman)
 * Returns 0 for success and an error on failure.
 */
int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
	u32 header, cmd, count, sub_cmd;
	uint32_t *ib = p->ib.ptr;
	u32 idx;
	u64 src_offset, dst_offset, dst2_offset;
	int r;

	do {
		if (p->idx >= ib_chunk->length_dw) {
			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
				  p->idx, ib_chunk->length_dw);
			return -EINVAL;
		}
		idx = p->idx;
		header = radeon_get_ib_value(p, idx);
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		sub_cmd = GET_DMA_SUB_CMD(header);
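		/* cmd, count and sub_cmd are unpacked from the packet
		 * header; sub_cmd selects the linear/tiled variant handled
		 * in the switches below
		 */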
  2658. switch (cmd) {
  2659. case DMA_PACKET_WRITE:
  2660. r = r600_dma_cs_next_reloc(p, &dst_reloc);
  2661. if (r) {
  2662. DRM_ERROR("bad DMA_PACKET_WRITE\n");
  2663. return -EINVAL;
  2664. }
  2665. switch (sub_cmd) {
  2666. /* tiled */
  2667. case 8:
  2668. dst_offset = radeon_get_ib_value(p, idx+1);
  2669. dst_offset <<= 8;
  2670. ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
  2671. p->idx += count + 7;
  2672. break;
  2673. /* linear */
  2674. case 0:
  2675. dst_offset = radeon_get_ib_value(p, idx+1);
  2676. dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
  2677. ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
  2678. ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
  2679. p->idx += count + 3;
  2680. break;
  2681. default:
  2682. DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
  2683. return -EINVAL;
  2684. }
  2685. if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
  2686. dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
  2687. dst_offset, radeon_bo_size(dst_reloc->robj));
  2688. return -EINVAL;
  2689. }
  2690. break;
  2691. case DMA_PACKET_COPY:
  2692. r = r600_dma_cs_next_reloc(p, &src_reloc);
  2693. if (r) {
  2694. DRM_ERROR("bad DMA_PACKET_COPY\n");
  2695. return -EINVAL;
  2696. }
  2697. r = r600_dma_cs_next_reloc(p, &dst_reloc);
  2698. if (r) {
  2699. DRM_ERROR("bad DMA_PACKET_COPY\n");
  2700. return -EINVAL;
  2701. }
  2702. switch (sub_cmd) {
  2703. /* Copy L2L, DW aligned */
  2704. case 0x00:
  2705. /* L2L, dw */
  2706. src_offset = radeon_get_ib_value(p, idx+2);
  2707. src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
  2708. dst_offset = radeon_get_ib_value(p, idx+1);
  2709. dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
  2710. if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
  2711. dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
  2712. src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
  2713. return -EINVAL;
  2714. }
  2715. if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
  2716. dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
  2717. dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
  2718. return -EINVAL;
  2719. }
  2720. ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
  2721. ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
  2722. ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
  2723. ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
  2724. p->idx += 5;
  2725. break;
  2726. /* Copy L2T/T2L */
  2727. case 0x08:
  2728. /* detile bit */
  2729. if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
  2730. /* tiled src, linear dst */
  2731. src_offset = radeon_get_ib_value(p, idx+1);
  2732. src_offset <<= 8;
  2733. ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
  2734. dst_offset = radeon_get_ib_value(p, idx + 7);
  2735. dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
  2736. ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
  2737. ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
  2738. } else {
  2739. /* linear src, tiled dst */
  2740. src_offset = radeon_get_ib_value(p, idx+7);
  2741. src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
  2742. ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
  2743. ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
  2744. dst_offset = radeon_get_ib_value(p, idx+1);
  2745. dst_offset <<= 8;
  2746. ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
  2747. }
  2748. if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
  2749. dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
  2750. src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
  2751. return -EINVAL;
  2752. }
  2753. if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
  2754. dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
  2755. dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
  2756. return -EINVAL;
  2757. }
  2758. p->idx += 9;
  2759. break;
  2760. /* Copy L2L, byte aligned */
  2761. case 0x40:
  2762. /* L2L, byte */
  2763. src_offset = radeon_get_ib_value(p, idx+2);
  2764. src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
  2765. dst_offset = radeon_get_ib_value(p, idx+1);
  2766. dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
  2767. if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
  2768. dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
  2769. src_offset + count, radeon_bo_size(src_reloc->robj));
  2770. return -EINVAL;
  2771. }
  2772. if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
  2773. dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
  2774. dst_offset + count, radeon_bo_size(dst_reloc->robj));
  2775. return -EINVAL;
  2776. }
  2777. ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
  2778. ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xffffffff);
  2779. ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
  2780. ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
  2781. p->idx += 5;
  2782. break;
  2783. /* Copy L2L, partial */
  2784. case 0x41:
  2785. /* L2L, partial */
  2786. if (p->family < CHIP_CAYMAN) {
  2787. DRM_ERROR("L2L Partial is cayman only !\n");
  2788. return -EINVAL;
  2789. }
  2790. ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff);
  2791. ib[idx+2] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
  2792. ib[idx+4] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
  2793. ib[idx+5] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
  2794. p->idx += 9;
  2795. break;
  2796. /* Copy L2L, DW aligned, broadcast */
  2797. case 0x44:
				/* L2L, dw, broadcast */
				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
				if (r) {
					DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
				dst2_offset = radeon_get_ib_value(p, idx+2);
				dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
				src_offset = radeon_get_ib_value(p, idx+3);
				src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
					dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
					return -EINVAL;
				}
				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
				ib[idx+2] += (u32)(dst2_reloc->gpu_offset & 0xfffffffc);
				ib[idx+3] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
				ib[idx+4] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
				ib[idx+5] += upper_32_bits(dst2_reloc->gpu_offset) & 0xff;
				ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
				p->idx += 7;
				break;
			/* Copy L2T Frame to Field */
			case 0x48:
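				/*
				 * Frame-to-field copy: a single linear source is split
				 * across two tiled destinations (presumably the even and
				 * odd field surfaces), so the detile bit must be clear
				 * and a second relocation is required.
				 */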
				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
				if (r) {
					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;
				dst2_offset = radeon_get_ib_value(p, idx+2);
				dst2_offset <<= 8;
				src_offset = radeon_get_ib_value(p, idx+8);
				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, frame to fields dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, frame to fields dst2 buffer too small (%llu %lu)\n",
						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
					return -EINVAL;
				}
				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
				ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
				ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
				ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
				p->idx += 10;
				break;
			/* Copy L2T/T2L, partial */
			case 0x49:
				/* L2T, T2L partial */
				if (p->family < CHIP_CAYMAN) {
					DRM_ERROR("L2T, T2L Partial is cayman only!\n");
					return -EINVAL;
				}
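				/*
				 * Partial copies carry their own pitch/size parameters,
				 * so only the address dwords are patched here; no buffer
				 * bounds check is performed for this sub command.
				 */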
				/* detile bit */
				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
					/* tiled src, linear dst */
					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);

					ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
					ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
					ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;

					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
				}
				p->idx += 12;
				break;
			/* Copy L2T broadcast */
			case 0x4b:
				/* L2T, broadcast */
				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
				if (r) {
					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;
				dst2_offset = radeon_get_ib_value(p, idx+2);
				dst2_offset <<= 8;
				src_offset = radeon_get_ib_value(p, idx+8);
				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
					return -EINVAL;
				}
				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
				ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
				ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
				ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
				p->idx += 10;
				break;
			/* Copy L2T/T2L (tile units) */
			case 0x4c:
				/* L2T, T2L */
				/* detile bit */
				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
					/* tiled src, linear dst */
					src_offset = radeon_get_ib_value(p, idx+1);
					src_offset <<= 8;
					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);

					dst_offset = radeon_get_ib_value(p, idx+7);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
					ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
					ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					src_offset = radeon_get_ib_value(p, idx+7);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
					ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
					ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;

					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset <<= 8;
					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
				}
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				p->idx += 9;
				break;
			/* Copy T2T, partial (tile units) */
			case 0x4d:
				/* T2T partial */
				if (p->family < CHIP_CAYMAN) {
					DRM_ERROR("T2T Partial is cayman only!\n");
					return -EINVAL;
				}
				ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
				ib[idx+4] += (u32)(dst_reloc->gpu_offset >> 8);
				p->idx += 13;
				break;
			/* Copy L2T broadcast (tile units) */
			case 0x4f:
				/* L2T, broadcast */
				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
				if (r) {
					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;
				dst2_offset = radeon_get_ib_value(p, idx+2);
				dst2_offset <<= 8;
				src_offset = radeon_get_ib_value(p, idx+8);
				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
					return -EINVAL;
				}
				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
				ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
				ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
				ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
				p->idx += 10;
				break;
			default:
				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
				return -EINVAL;
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
				return -EINVAL;
			}
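			/*
			 * The 40-bit destination address is split across the packet:
			 * dw1 holds bits 31:0 and dw3 bits 23:16 hold bits 39:32,
			 * which is why the high byte is masked with 0x00ff0000 and
			 * shifted up by 16 below.
			 */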
			dst_offset = radeon_get_ib_value(p, idx+1);
			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
			ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
			p->idx += 4;
			break;
		case DMA_PACKET_NOP:
			p->idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d!\n", cmd, idx);
			return -EINVAL;
		}
	} while (p->idx < p->chunk_ib->length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		pr_info("%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}

/* vm parser */
static bool evergreen_vm_reg_valid(u32 reg)
{
	/* context regs are fine */
	if (reg >= 0x28000)
		return true;

	/* check config regs */
	switch (reg) {
	case WAIT_UNTIL:
	case GRBM_GFX_INDEX:
	case CP_STRMOUT_CNTL:
	case CP_COHER_CNTL:
	case CP_COHER_SIZE:
	case VGT_VTX_VECT_EJECT_REG:
	case VGT_CACHE_INVALIDATION:
	case VGT_GS_VERTEX_REUSE:
	case VGT_PRIMITIVE_TYPE:
	case VGT_INDEX_TYPE:
	case VGT_NUM_INDICES:
	case VGT_NUM_INSTANCES:
	case VGT_COMPUTE_DIM_X:
	case VGT_COMPUTE_DIM_Y:
	case VGT_COMPUTE_DIM_Z:
	case VGT_COMPUTE_START_X:
	case VGT_COMPUTE_START_Y:
	case VGT_COMPUTE_START_Z:
	case VGT_COMPUTE_INDEX:
	case VGT_COMPUTE_THREAD_GROUP_SIZE:
	case VGT_HS_OFFCHIP_PARAM:
	case PA_CL_ENHANCE:
	case PA_SU_LINE_STIPPLE_VALUE:
	case PA_SC_LINE_STIPPLE_STATE:
	case PA_SC_ENHANCE:
	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
	case SQ_DYN_GPR_SIMD_LOCK_EN:
	case SQ_CONFIG:
	case SQ_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
	case SQ_CONST_MEM_BASE:
	case SQ_STATIC_THREAD_MGMT_1:
	case SQ_STATIC_THREAD_MGMT_2:
	case SQ_STATIC_THREAD_MGMT_3:
	case SPI_CONFIG_CNTL:
	case SPI_CONFIG_CNTL_1:
	case TA_CNTL_AUX:
	case DB_DEBUG:
	case DB_DEBUG2:
	case DB_DEBUG3:
	case DB_DEBUG4:
	case DB_WATERMARKS:
	case TD_PS_BORDER_COLOR_INDEX:
	case TD_PS_BORDER_COLOR_RED:
	case TD_PS_BORDER_COLOR_GREEN:
	case TD_PS_BORDER_COLOR_BLUE:
	case TD_PS_BORDER_COLOR_ALPHA:
	case TD_VS_BORDER_COLOR_INDEX:
	case TD_VS_BORDER_COLOR_RED:
	case TD_VS_BORDER_COLOR_GREEN:
	case TD_VS_BORDER_COLOR_BLUE:
	case TD_VS_BORDER_COLOR_ALPHA:
	case TD_GS_BORDER_COLOR_INDEX:
	case TD_GS_BORDER_COLOR_RED:
	case TD_GS_BORDER_COLOR_GREEN:
	case TD_GS_BORDER_COLOR_BLUE:
	case TD_GS_BORDER_COLOR_ALPHA:
	case TD_HS_BORDER_COLOR_INDEX:
	case TD_HS_BORDER_COLOR_RED:
	case TD_HS_BORDER_COLOR_GREEN:
	case TD_HS_BORDER_COLOR_BLUE:
	case TD_HS_BORDER_COLOR_ALPHA:
	case TD_LS_BORDER_COLOR_INDEX:
	case TD_LS_BORDER_COLOR_RED:
	case TD_LS_BORDER_COLOR_GREEN:
	case TD_LS_BORDER_COLOR_BLUE:
	case TD_LS_BORDER_COLOR_ALPHA:
	case TD_CS_BORDER_COLOR_INDEX:
	case TD_CS_BORDER_COLOR_RED:
	case TD_CS_BORDER_COLOR_GREEN:
	case TD_CS_BORDER_COLOR_BLUE:
	case TD_CS_BORDER_COLOR_ALPHA:
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
		return true;
	default:
		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
		return false;
	}
}

static int evergreen_vm_packet3_check(struct radeon_device *rdev,
				      u32 *ib, struct radeon_cs_packet *pkt)
{
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;
	u32 command, info;

	switch (pkt->opcode) {
	case PACKET3_NOP:
		break;
	case PACKET3_SET_BASE:
		if (idx_value != 1) {
			DRM_ERROR("bad SET_BASE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_MODE_CONTROL:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_DRAW_INDEX_OFFSET:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDEX:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_BOOL_CONST:
	case PACKET3_SET_LOOP_CONST:
	case PACKET3_SET_RESOURCE:
	case PACKET3_SET_SAMPLER:
	case PACKET3_SET_CTL_CONST:
	case PACKET3_SET_RESOURCE_OFFSET:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_RESOURCE_INDIRECT:
	case CAYMAN_PACKET3_DEALLOC_STATE:
		break;
	case PACKET3_COND_WRITE:
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
		command = ib[idx + 4];
		info = ib[idx + 1];
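		/*
		 * CP DMA field decode, matching the masks used below: info bits
		 * 30:29 select the source address space and bits 21:20 the
		 * destination address space (0 = memory in both cases), while
		 * the low 21 bits of the command dword give the byte count.
		 */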
		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
		    ((((info & 0x00300000) >> 20) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
		    ((((info & 0x60000000) >> 29) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
			/* non mem-to-mem copies require a dw-aligned count */
			if ((command & 0x1fffff) % 4) {
				DRM_ERROR("CP DMA command requires dw count alignment\n");
				return -EINVAL;
			}
		}
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			if (((info & 0x60000000) >> 29) == 0) {
				start_reg = idx_value << 2;
				if (command & PACKET3_CP_DMA_CMD_SAIC) {
					reg = start_reg;
					if (!evergreen_vm_reg_valid(reg)) {
						DRM_ERROR("CP DMA Bad SRC register\n");
						return -EINVAL;
					}
				} else {
					for (i = 0; i < (command & 0x1fffff); i++) {
						reg = start_reg + (4 * i);
						if (!evergreen_vm_reg_valid(reg)) {
							DRM_ERROR("CP DMA Bad SRC register\n");
							return -EINVAL;
						}
					}
				}
			}
		}
		if (command & PACKET3_CP_DMA_CMD_DAS) {
			/* dst address space is register */
			if (((info & 0x00300000) >> 20) == 0) {
				start_reg = ib[idx + 2];
				if (command & PACKET3_CP_DMA_CMD_DAIC) {
					reg = start_reg;
					if (!evergreen_vm_reg_valid(reg)) {
						DRM_ERROR("CP DMA Bad DST register\n");
						return -EINVAL;
					}
				} else {
					for (i = 0; i < (command & 0x1fffff); i++) {
						reg = start_reg + (4 * i);
						if (!evergreen_vm_reg_valid(reg)) {
							DRM_ERROR("CP DMA Bad DST register\n");
							return -EINVAL;
						}
					}
				}
			}
		}
		break;
	case PACKET3_SET_APPEND_CNT: {
		uint32_t areg;
		uint32_t allowed_reg_base;

		if (pkt->count != 2) {
			DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
			return -EINVAL;
		}
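
		/*
		 * The packet's register operand (high 16 bits of the first
		 * dword) is a dword offset relative to
		 * PACKET3_SET_CONTEXT_REG_START, so convert GDS_APPEND_COUNT_0
		 * to the same units before comparing; counters 0-11 are the
		 * only registers allowed here.
		 */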
		allowed_reg_base = GDS_APPEND_COUNT_0;
		allowed_reg_base -= PACKET3_SET_CONTEXT_REG_START;
		allowed_reg_base >>= 2;

		areg = idx_value >> 16;
		if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
			DRM_ERROR("forbidden register for append cnt 0x%08x at %d\n",
				  areg, idx);
			return -EINVAL;
		}
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
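
/**
 * evergreen_ib_parse() - parse the GFX IB for VM
 * @rdev: radeon_device pointer
 * @ib: radeon_ib pointer
 *
 * Walks the IB packet by packet and validates each PACKET3 with
 * evergreen_vm_packet3_check(); type-0 register writes are rejected
 * outright for VM clients.
 * Returns 0 for success and an error on failure.
 */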
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int ret = 0;
	u32 idx = 0;
	struct radeon_cs_packet pkt;

	do {
		pkt.idx = idx;
		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.one_reg_wr = 0;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case RADEON_PACKET_TYPE2:
			idx += 1;
			break;
		case RADEON_PACKET_TYPE3:
			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
			idx += pkt.count + 2;
			break;
		default:
			dev_err(rdev->dev, "Unknown packet type %d!\n", pkt.type);
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
	} while (idx < ib->length_dw);

	return ret;
}

/**
 * evergreen_dma_ib_parse() - parse the DMA IB for VM
 * @rdev: radeon_device pointer
 * @ib: radeon_ib pointer
 *
 * Parses the DMA IB from the VM CS ioctl and checks it
 * for errors. (Cayman-SI)
 * Returns 0 for success and an error on failure.
 */
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	u32 idx = 0;
	u32 header, cmd, count, sub_cmd;

	do {
		header = ib->ptr[idx];
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		sub_cmd = GET_DMA_SUB_CMD(header);
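		/*
		 * The packet sizes below must stay in sync with the ones used
		 * by the DMA CS parser (evergreen_dma_cs_parse()) above; this
		 * pass only walks the IB, it does not patch relocations.
		 */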
		switch (cmd) {
		case DMA_PACKET_WRITE:
			switch (sub_cmd) {
			/* tiled */
			case 8:
				idx += count + 7;
				break;
			/* linear */
			case 0:
				idx += count + 3;
				break;
			default:
				DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
				return -EINVAL;
			}
			break;
		case DMA_PACKET_COPY:
			switch (sub_cmd) {
			/* Copy L2L, DW aligned */
			case 0x00:
				idx += 5;
				break;
			/* Copy L2T/T2L */
			case 0x08:
				idx += 9;
				break;
			/* Copy L2L, byte aligned */
			case 0x40:
				idx += 5;
				break;
			/* Copy L2L, partial */
			case 0x41:
				idx += 9;
				break;
			/* Copy L2L, DW aligned, broadcast */
			case 0x44:
				idx += 7;
				break;
			/* Copy L2T Frame to Field */
			case 0x48:
				idx += 10;
				break;
			/* Copy L2T/T2L, partial */
			case 0x49:
				idx += 12;
				break;
			/* Copy L2T broadcast */
			case 0x4b:
				idx += 10;
				break;
			/* Copy L2T/T2L (tile units) */
			case 0x4c:
				idx += 9;
				break;
			/* Copy T2T, partial (tile units) */
			case 0x4d:
				idx += 13;
				break;
			/* Copy L2T broadcast (tile units) */
			case 0x4f:
				idx += 10;
				break;
			default:
				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
				return -EINVAL;
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			idx += 4;
			break;
		case DMA_PACKET_NOP:
			idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d!\n", cmd, idx);
			return -EINVAL;
		}
	} while (idx < ib->length_dw);

	return 0;
}