safexcel.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
{
        int i;

        /*
         * Map all interfaces/rings to register index 0 so they can share
         * contexts. Without this, the EIP197 will assume each interface/ring
         * to be in its own memory domain, i.e. to have its own subset of
         * UNIQUE memory addresses, which would cause records with the SAME
         * memory address to use DIFFERENT cache buffers, causing both poor
         * cache utilization AND serious coherence/invalidation issues.
         */
        for (i = 0; i < 4; i++)
                writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));

        /*
         * Initialize other virtualization regs for cache
         * These may not be in their reset state ...
         */
        for (i = 0; i < priv->config.rings; i++) {
                writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
                writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
                writel(EIP197_FLUE_CONFIG_MAGIC,
                       priv->base + EIP197_FLUE_CONFIG(i));
        }
        writel(0, priv->base + EIP197_FLUE_OFFSETS);
        writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
}
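
/*
 * Note on bank selection below: the classification RAM is accessed through
 * a 64 KB register window (EIP197_CLASSIFICATION_RAMS, with the offset
 * masked by 0xffff), while address bits above bit 15 select a RAM bank via
 * the bank select field of EIP197_CS_RAM_CTRL. The helper only rewrites
 * that register when the bank actually changes, avoiding redundant slow
 * MMIO accesses during the probe loops.
 */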
static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
                                     u32 addrmid, int *actbank)
{
        u32 val;
        int curbank;

        curbank = addrmid >> 16;
        if (curbank != *actbank) {
                val = readl(priv->base + EIP197_CS_RAM_CTRL);
                val = (val & ~EIP197_CS_BANKSEL_MASK) |
                      (curbank << EIP197_CS_BANKSEL_OFS);
                writel(val, priv->base + EIP197_CS_RAM_CTRL);
                *actbank = curbank;
        }
}

static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
                                  int maxbanks, u32 probemask, u32 stride)
{
        u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
        int actbank;

        /*
         * Probe the actual size of the physically attached cache data RAM
         * using a binary subdivision algorithm, down to 32-byte cache lines.
         */
        addrhi = 1 << (16 + maxbanks);
        addrlo = 0;
        actbank = min(maxbanks - 1, 0);
        while ((addrhi - addrlo) > stride) {
                /* write marker to lowest address in top half */
                addrmid = (addrhi + addrlo) >> 1;
                marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
                eip197_trc_cache_banksel(priv, addrmid, &actbank);
                writel(marker,
                       priv->base + EIP197_CLASSIFICATION_RAMS +
                       (addrmid & 0xffff));

                /* write invalid markers to possible aliases */
                delta = 1 << __fls(addrmid);
                while (delta >= stride) {
                        addralias = addrmid - delta;
                        eip197_trc_cache_banksel(priv, addralias, &actbank);
                        writel(~marker,
                               priv->base + EIP197_CLASSIFICATION_RAMS +
                               (addralias & 0xffff));
                        delta >>= 1;
                }

                /* read back marker from top half */
                eip197_trc_cache_banksel(priv, addrmid, &actbank);
                val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
                            (addrmid & 0xffff));

                if ((val & probemask) == marker)
                        /* read back correct, continue with top half */
                        addrlo = addrmid;
                else
                        /* not read back correct, continue with bottom half */
                        addrhi = addrmid;
        }
        return addrhi;
}
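
/*
 * Worked example of the probe above (illustrative numbers only): with
 * maxbanks = 4 the search window is [0, 1 << 20). The first iteration
 * writes a marker at addrmid = 0x80000, writes the complemented marker
 * to every power-of-two alias below it (0, 0x40000, 0x20000, ... down
 * to the probe stride), then reads the marker back. If the RAM is
 * smaller than addrmid, one of the alias writes wrapped onto the same
 * physical cell and the read-back mismatches, so the search continues
 * in the lower half; otherwise it continues in the upper half. The
 * loop ends when the window shrinks to one stride, and addrhi is then
 * the detected RAM size.
 */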
static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
                                   int cs_rc_max, int cs_ht_wc)
{
        int i;
        u32 htable_offset, val, offset;

        /* Clear all records in administration RAM */
        for (i = 0; i < cs_rc_max; i++) {
                offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

                writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
                       EIP197_CS_RC_PREV(EIP197_RC_NULL),
                       priv->base + offset);

                val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
                if (i == 0)
                        val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
                else if (i == cs_rc_max - 1)
                        val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
                writel(val, priv->base + offset + 4);
                /* must also initialize the address key due to ECC! */
                writel(0, priv->base + offset + 8);
                writel(0, priv->base + offset + 12);
        }

        /* Clear the hash table entries */
        htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
        for (i = 0; i < cs_ht_wc; i++)
                writel(GENMASK(29, 0),
                       priv->base + EIP197_CLASSIFICATION_RAMS +
                       htable_offset + i * sizeof(u32));
}

static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
        u32 val, dsize, asize;
        int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
        int cs_rc_abs_max, cs_ht_sz;
        int maxbanks;

        /* Setup (dummy) virtualization for cache */
        eip197_trc_cache_setupvirt(priv);

        /*
         * Enable the record cache memory access and
         * probe the bank select width
         */
        val = readl(priv->base + EIP197_CS_RAM_CTRL);
        val &= ~EIP197_TRC_ENABLE_MASK;
        val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
        writel(val, priv->base + EIP197_CS_RAM_CTRL);
        val = readl(priv->base + EIP197_CS_RAM_CTRL);
        maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >>
                    EIP197_CS_BANKSEL_OFS) + 1;

        /* Clear all ECC errors */
        writel(0, priv->base + EIP197_TRC_ECCCTRL);

        /*
         * Make sure the cache memory is accessible by taking record cache into
         * reset. Need data memory access here, not admin access.
         */
        val = readl(priv->base + EIP197_TRC_PARAMS);
        val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
        writel(val, priv->base + EIP197_TRC_PARAMS);

        /* Probed data RAM size in bytes */
        dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);

        /*
         * Now probe the administration RAM size in much the same way,
         * except that only the lower 30 bits are writable and we don't
         * need bank selects
         */
        val = readl(priv->base + EIP197_TRC_PARAMS);
        /* admin access now */
        val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
        writel(val, priv->base + EIP197_TRC_PARAMS);

        /* Probed admin RAM size in admin words */
        asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;

        /* Clear any ECC errors detected while probing! */
        writel(0, priv->base + EIP197_TRC_ECCCTRL);

        /* Sanity check probing results */
        if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
                dev_err(priv->dev, "Record cache probing failed (%d,%d).",
                        dsize, asize);
                return -ENODEV;
        }

        /*
         * Determine optimal configuration from RAM sizes
         * Note that we assume that the physical RAM configuration is sane
         * Therefore, we don't do any parameter error checking here ...
         */

        /* For now, just use a single record format covering everything */
        cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
        cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;

        /*
         * Step #1: How many records will physically fit?
         * Hard upper limit is 1023!
         */
        cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
        /* Step #2: Need at least 2 words in the admin RAM per record */
        cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
        /* Step #3: Determine log2 of hash table size */
        cs_ht_sz = __fls(asize - cs_rc_max) - 2;
        /* Step #4: determine current size of hash table in dwords */
        cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
        /* Step #5: add back excess words and see if we can fit more records */
        cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));

        /* Clear the cache RAMs */
        eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);

        /* Disable the record cache memory access */
        val = readl(priv->base + EIP197_CS_RAM_CTRL);
        val &= ~EIP197_TRC_ENABLE_MASK;
        writel(val, priv->base + EIP197_CS_RAM_CTRL);

        /* Write head and tail pointers of the record free chain */
        val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
              EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
        writel(val, priv->base + EIP197_TRC_FREECHAIN);

        /* Configure the record cache #1 */
        val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
              EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
        writel(val, priv->base + EIP197_TRC_PARAMS2);

        /* Configure the record cache #2 */
        val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
              EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
              EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
        writel(val, priv->base + EIP197_TRC_PARAMS);

        dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
                 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
        return 0;
}
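
/*
 * Illustrative sizing example for the steps above (assuming, purely for
 * the sake of the example, EIP197_CS_TRC_REC_WC = 64): with dsize = 32768
 * bytes and asize = 1024 admin words, step #1 gives cs_rc_abs_max =
 * min(8192 / 64, 1023) = 128 records; step #2 keeps cs_rc_max = 128
 * (asize / 2 = 512 is larger); step #3 gives cs_ht_sz = __fls(896) - 2
 * = 7; step #4 a hash table of 16 << 7 = 2048 dwords; and step #5
 * leaves cs_rc_max at 128, since 1024 - 512 = 512 still exceeds it.
 */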
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
        int pe, i;
        u32 val;

        for (pe = 0; pe < priv->config.pes; pe++) {
                /* Configure the token FIFOs */
                writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
                writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));

                /* Clear the ICE scratchpad memory */
                val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
                val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
                       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
                       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
                       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
                writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

                /* clear the scratchpad RAM using 32 bit writes only */
                for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
                        writel(0, EIP197_PE(priv) +
                                  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));

                /* Reset the IFPP engine to make its program mem accessible */
                writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
                       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
                       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
                       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

                /* Reset the IPUE engine to make its program mem accessible */
                writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
                       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
                       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
                       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));

                /* Enable access to all IFPP program memories */
                writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
                       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

                /* bypass the OCE, if present */
                if (priv->flags & EIP197_OCE)
                        writel(EIP197_DEBUG_OCE_BYPASS, EIP197_PE(priv) +
                               EIP197_PE_DEBUG(pe));
        }
}

static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
                                 const struct firmware *fw)
{
        u32 val;
        int i;

        /* Write the firmware */
        for (i = 0; i < fw->size / sizeof(u32); i++) {
                if (priv->data->fw_little_endian)
                        val = le32_to_cpu(((const __le32 *)fw->data)[i]);
                else
                        val = be32_to_cpu(((const __be32 *)fw->data)[i]);

                writel(val,
                       priv->base + EIP197_CLASSIFICATION_RAMS +
                       i * sizeof(val));
        }

        /* Exclude final 2 NOPs from size */
        return i - EIP197_FW_TERMINAL_NOPS;
}

/*
 * If FW is actual production firmware, then poll for its initialization
 * to complete and check if it is good for the HW, otherwise just return OK.
 */
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
{
        int pe, pollcnt;
        u32 base, pollofs;

        if (fpp)
                pollofs = EIP197_FW_FPP_READY;
        else
                pollofs = EIP197_FW_PUE_READY;

        for (pe = 0; pe < priv->config.pes; pe++) {
                base = EIP197_PE_ICE_SCRATCH_RAM(pe);
                pollcnt = EIP197_FW_START_POLLCNT;
                while (pollcnt &&
                       (readl_relaxed(EIP197_PE(priv) + base +
                                      pollofs) != 1)) {
                        pollcnt--;
                }
                if (!pollcnt) {
                        dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
                                fpp, pe);
                        return false;
                }
        }
        return true;
}

static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
                                  int ipuesz, int ifppsz, int minifw)
{
        int pe;
        u32 val;

        for (pe = 0; pe < priv->config.pes; pe++) {
                /* Disable access to all program memory */
                writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

                /* Start IFPP microengines */
                if (minifw)
                        val = 0;
                else
                        val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
                                EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
                              EIP197_PE_ICE_UENG_DEBUG_RESET;
                writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

                /* Start IPUE microengines */
                if (minifw)
                        val = 0;
                else
                        val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
                                EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
                              EIP197_PE_ICE_UENG_DEBUG_RESET;
                writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
        }

        /* For miniFW startup, there is no initialization, so always succeed */
        if (minifw)
                return true;

        /* Wait until all the firmwares have properly started up */
        if (!poll_fw_ready(priv, 1))
                return false;
        if (!poll_fw_ready(priv, 0))
                return false;

        return true;
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
        const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
        const struct firmware *fw[FW_NB];
        char fw_path[37], *dir = NULL;
        int i, j, ret = 0, pe;
        int ipuesz, ifppsz, minifw = 0;

        if (priv->data->version == EIP197D_MRVL)
                dir = "eip197d";
        else if (priv->data->version == EIP197B_MRVL ||
                 priv->data->version == EIP197_DEVBRD)
                dir = "eip197b";
        else if (priv->data->version == EIP197C_MXL)
                dir = "eip197c";
        else
                return -ENODEV;

retry_fw:
        for (i = 0; i < FW_NB; i++) {
                snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
                ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
                if (ret) {
                        if (minifw || priv->data->version != EIP197B_MRVL)
                                goto release_fw;

                        /* Fall back to the old firmware location for the
                         * EIP197b.
                         */
                        ret = firmware_request_nowarn(&fw[i], fw_name[i],
                                                      priv->dev);
                        if (ret)
                                goto release_fw;
                }
        }

        eip197_init_firmware(priv);

        ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);

        /* Enable access to IPUE program memories */
        for (pe = 0; pe < priv->config.pes; pe++)
                writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
                       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

        ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);

        if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
                dev_dbg(priv->dev, "Firmware loaded successfully\n");
                return 0;
        }

        ret = -ENODEV;

release_fw:
        for (j = 0; j < i; j++)
                release_firmware(fw[j]);

        if (!minifw) {
                /* Retry with minifw path */
                dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
                dir = "eip197_minifw";
                minifw = 1;
                goto retry_fw;
        }

        dev_err(priv->dev, "Firmware load failed.\n");

        return ret;
}
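
/*
 * Firmware lookup summary for the function above: the request paths are
 * of the form "inside-secure/<dir>/ifpp.bin" and "inside-secure/<dir>/ipue.bin"
 * with <dir> one of eip197b, eip197c, eip197d or, on retry, eip197_minifw;
 * for the EIP197B there is an additional fallback to the bare file names,
 * for compatibility with the old firmware location.
 */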
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
        u32 cd_size_rnd, val;
        int i, cd_fetch_cnt;

        cd_size_rnd = (priv->config.cd_size +
                       (BIT(priv->hwconfig.hwdataw) - 1)) >>
                      priv->hwconfig.hwdataw;
        /* determine number of CDs we can fetch into the CD FIFO as 1 block */
        if (priv->flags & SAFEXCEL_HW_EIP197) {
                /* EIP197: try to fetch enough in 1 go to keep all pipes busy */
                cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
                cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
                                     (priv->config.pes * EIP197_FETCH_DEPTH));
        } else {
                /* for the EIP97, just fetch all that fits minus 1 */
                cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
                                cd_size_rnd) - 1;
        }
        /*
         * Since we're using command descriptors way larger than formally
         * specified, we need to check whether we can fit even 1 for
         * low-end EIP196's!
         */
        if (!cd_fetch_cnt) {
                dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
                return -ENODEV;
        }

        for (i = 0; i < priv->config.rings; i++) {
                /* ring base address */
                writel(lower_32_bits(priv->ring[i].cdr.base_dma),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(upper_32_bits(priv->ring[i].cdr.base_dma),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
                       (priv->config.cd_offset << 14) | priv->config.cd_size,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
                writel(((cd_fetch_cnt *
                         (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
                       (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Configure DMA tx control */
                val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
                val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
                writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

                /* clear any pending interrupt */
                writel(GENMASK(5, 0),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
        }

        return 0;
}
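
/*
 * Example of the FIFO arithmetic above (illustrative values only):
 * hwdataw behaves as log2 of the bus width in 32-bit words here, so with
 * hwdataw = 2 (four words per beat) and cd_size = 10 words, cd_size_rnd =
 * (10 + 3) >> 2 = 4 bus beats per descriptor. With hwcfsize = 5 the CD
 * FIFO holds 1 << 5 = 32 beats, i.e. up to 8 descriptors fetched as one
 * block (possibly capped at pes * EIP197_FETCH_DEPTH on the EIP197).
 */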
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
        u32 rd_size_rnd, val;
        int i, rd_fetch_cnt;

        /* determine number of RDs we can fetch into the FIFO as one block */
        rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
                       (BIT(priv->hwconfig.hwdataw) - 1)) >>
                      priv->hwconfig.hwdataw;
        if (priv->flags & SAFEXCEL_HW_EIP197) {
                /* EIP197: try to fetch enough in 1 go to keep all pipes busy */
                rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
                rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
                                     (priv->config.pes * EIP197_FETCH_DEPTH));
        } else {
                /* for the EIP97, just fetch all that fits minus 1 */
                rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
                                rd_size_rnd) - 1;
        }

        for (i = 0; i < priv->config.rings; i++) {
                /* ring base address */
                writel(lower_32_bits(priv->ring[i].rdr.base_dma),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(upper_32_bits(priv->ring[i].rdr.base_dma),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
                       priv->config.rd_size,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
                writel(((rd_fetch_cnt *
                         (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
                       (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Configure DMA tx control */
                val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
                val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
                val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
                writel(val,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

                /* clear any pending interrupt */
                writel(GENMASK(7, 0),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

                /* enable ring interrupt */
                val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
                val |= EIP197_RDR_IRQ(i);
                writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
        }

        return 0;
}
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
        u32 val;
        int i, ret, pe, opbuflo, opbufhi;

        dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
                priv->config.pes, priv->config.rings);

        /*
         * For EIP197's only, set the maximum number of TX commands to
         * 2^5 = 32. Skip for the EIP97 as it does not have this field.
         */
        if (priv->flags & SAFEXCEL_HW_EIP197) {
                val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
                val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
                writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
        }

        /* Configure wr/rd cache values */
        writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
               EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
               EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

        /* Interrupts reset */

        /* Disable all global interrupts */
        writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

        /* Clear any pending interrupt */
        writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

        /* Processing Engine configuration */
        for (pe = 0; pe < priv->config.pes; pe++) {
                /* Data Fetch Engine configuration */

                /* Reset all DFE threads */
                writel(EIP197_DxE_THR_CTRL_RESET_PE,
                       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

                if (priv->flags & EIP197_PE_ARB)
                        /* Reset HIA input interface arbiter (if present) */
                        writel(EIP197_HIA_RA_PE_CTRL_RESET,
                               EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

                /* DMA transfer size to use */
                val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
                val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
                       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
                val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
                       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
                val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
                val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
                writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

                /* Take the DFE threads out of reset */
                writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

                /* Configure the processing engine thresholds */
                writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
                       EIP197_PE_IN_xBUF_THRES_MAX(9),
                       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
                writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
                       EIP197_PE_IN_xBUF_THRES_MAX(7),
                       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

                if (priv->flags & SAFEXCEL_HW_EIP197)
                        /* enable HIA input interface arbiter and rings */
                        writel(EIP197_HIA_RA_PE_CTRL_EN |
                               GENMASK(priv->config.rings - 1, 0),
                               EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

                /* Data Store Engine configuration */

                /* Reset all DSE threads */
                writel(EIP197_DxE_THR_CTRL_RESET_PE,
                       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

                /* Wait for all DSE threads to complete */
                while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
                        GENMASK(15, 12)) != GENMASK(15, 12))
                        ;

                /* DMA transfer size to use */
                if (priv->hwconfig.hwnumpes > 4) {
                        opbuflo = 9;
                        opbufhi = 10;
                } else {
                        opbuflo = 7;
                        opbufhi = 8;
                }
                val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
                val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
                       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
                val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
                val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
                /* FIXME: instability issues can occur for EIP97 but disabling
                 * it impacts performance.
                 */
                if (priv->flags & SAFEXCEL_HW_EIP197)
                        val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
                writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

                /* Take the DSE threads out of reset */
                writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

                /* Configure the processing engine thresholds */
                writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
                       EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
                       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

                /* Processing Engine configuration */

                /* Token & context configuration */
                val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
                      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
                      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
                writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

                /* H/W capabilities selection: just enable everything */
                writel(EIP197_FUNCTION_ALL,
                       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
                writel(EIP197_FUNCTION_ALL,
                       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
        }

        /* Command Descriptor Rings prepare */
        for (i = 0; i < priv->config.rings; i++) {
                /* Clear interrupts for this ring */
                writel(GENMASK(31, 0),
                       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

                /* Disable external triggering */
                writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Clear the pending prepared counter */
                writel(EIP197_xDR_PREP_CLR_COUNT,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

                /* Clear the pending processed counter */
                writel(EIP197_xDR_PROC_CLR_COUNT,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

                writel(0,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
                writel(0,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

                writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
        }

        /* Result Descriptor Ring prepare */
        for (i = 0; i < priv->config.rings; i++) {
                /* Disable external triggering */
                writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Clear the pending prepared counter */
                writel(EIP197_xDR_PREP_CLR_COUNT,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

                /* Clear the pending processed counter */
                writel(EIP197_xDR_PROC_CLR_COUNT,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

                writel(0,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
                writel(0,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

                /* Ring size */
                writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
        }

        for (pe = 0; pe < priv->config.pes; pe++) {
                /* Enable command descriptor rings */
                writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
                       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

                /* Enable result descriptor rings */
                writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
                       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
        }

        /* Clear any HIA interrupt */
        writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

        if (priv->flags & EIP197_SIMPLE_TRC) {
                writel(EIP197_STRC_CONFIG_INIT |
                       EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
                       EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
                       priv->base + EIP197_STRC_CONFIG);
                writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
                       EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
        } else if (priv->flags & SAFEXCEL_HW_EIP197) {
                ret = eip197_trc_cache_init(priv);
                if (ret)
                        return ret;
        }

        if (priv->flags & EIP197_ICE) {
                ret = eip197_load_firmwares(priv);
                if (ret)
                        return ret;
        }

        return safexcel_hw_setup_cdesc_rings(priv) ?:
               safexcel_hw_setup_rdesc_rings(priv) ?:
               0;
}
/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
                                       int ring)
{
        int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

        if (!coal)
                return;

        /* Configure when we want an interrupt */
        writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
               EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
               EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
        struct crypto_async_request *req, *backlog;
        struct safexcel_context *ctx;
        int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

        /* If a request wasn't properly dequeued because of a lack of
         * resources, process it first.
         */
        req = priv->ring[ring].req;
        backlog = priv->ring[ring].backlog;
        if (req)
                goto handle_req;

        while (true) {
                spin_lock_bh(&priv->ring[ring].queue_lock);
                backlog = crypto_get_backlog(&priv->ring[ring].queue);
                req = crypto_dequeue_request(&priv->ring[ring].queue);
                spin_unlock_bh(&priv->ring[ring].queue_lock);

                if (!req) {
                        priv->ring[ring].req = NULL;
                        priv->ring[ring].backlog = NULL;
                        goto finalize;
                }

handle_req:
                ctx = crypto_tfm_ctx(req->tfm);
                ret = ctx->send(req, ring, &commands, &results);
                if (ret)
                        goto request_failed;

                if (backlog)
                        crypto_request_complete(backlog, -EINPROGRESS);

                /* In case the send() helper did not issue any command to push
                 * to the engine because the input data was cached, continue to
                 * dequeue other requests as this is valid and not an error.
                 */
                if (!commands && !results)
                        continue;

                cdesc += commands;
                rdesc += results;
                nreq++;
        }

request_failed:
        /* Not enough resources to handle all the requests. Bail out and save
         * the request and the backlog for the next dequeue call (per-ring).
         */
        priv->ring[ring].req = req;
        priv->ring[ring].backlog = backlog;

finalize:
        if (!nreq)
                return;

        spin_lock_bh(&priv->ring[ring].lock);

        priv->ring[ring].requests += nreq;

        if (!priv->ring[ring].busy) {
                safexcel_try_push_requests(priv, ring);
                priv->ring[ring].busy = true;
        }

        spin_unlock_bh(&priv->ring[ring].lock);

        /* let the RDR know we have pending descriptors */
        writel((rdesc * priv->config.rd_offset),
               EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

        /* let the CDR know we have pending descriptors */
        writel((cdesc * priv->config.cd_offset),
               EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
                                       void *rdp)
{
        struct safexcel_result_desc *rdesc = rdp;
        struct result_data_desc *result_data = rdp + priv->config.res_offset;

        if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
                   ((!rdesc->descriptor_overflow) &&
                    (!rdesc->buffer_overflow) &&
                    (!result_data->error_code))))
                return 0;

        if (rdesc->descriptor_overflow)
                dev_err(priv->dev, "Descriptor overflow detected");

        if (rdesc->buffer_overflow)
                dev_err(priv->dev, "Buffer overflow detected");

        if (result_data->error_code & 0x4066) {
                /* Fatal error (bits 1,2,5,6 & 14) */
                dev_err(priv->dev,
                        "result descriptor error (%x)",
                        result_data->error_code);
                return -EIO;
        } else if (result_data->error_code &
                   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
                /*
                 * Give priority over authentication fails:
                 * blocksize, length & overflow errors,
                 * something wrong with the input!
                 */
                return -EINVAL;
        } else if (result_data->error_code & BIT(9)) {
                /* Authentication failed */
                return -EBADMSG;
        }

        /* All other non-fatal errors */
        return -EINVAL;
}

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
                                 int ring,
                                 struct safexcel_result_desc *rdesc,
                                 struct crypto_async_request *req)
{
        int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

        priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
        int i = safexcel_ring_first_rdr_index(priv, ring);

        return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
        struct safexcel_command_desc *cdesc;

        /* Acknowledge the command descriptors */
        do {
                cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
                if (IS_ERR(cdesc)) {
                        dev_err(priv->dev,
                                "Could not retrieve the command descriptor\n");
                        return;
                }
        } while (!cdesc->last_seg);
}

int safexcel_invalidate_cache(struct crypto_async_request *async,
                              struct safexcel_crypto_priv *priv,
                              dma_addr_t ctxr_dma, int ring)
{
        struct safexcel_command_desc *cdesc;
        struct safexcel_result_desc *rdesc;
        struct safexcel_token *dmmy;
        int ret = 0;

        /* Prepare command descriptor */
        cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
                                   &dmmy);
        if (IS_ERR(cdesc))
                return PTR_ERR(cdesc);

        cdesc->control_data.type = EIP197_TYPE_EXTENDED;
        cdesc->control_data.options = 0;
        cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
        cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

        /* Prepare result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto cdesc_rollback;
        }

        safexcel_rdr_req_set(priv, ring, rdesc, async);

        return ret;

cdesc_rollback:
        safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

        return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
                                                     int ring)
{
        struct crypto_async_request *req;
        struct safexcel_context *ctx;
        int ret, i, nreq, ndesc, tot_descs, handled = 0;
        bool should_complete;

handle_results:
        tot_descs = 0;

        nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
        nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
        nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
        if (!nreq)
                goto requests_left;

        for (i = 0; i < nreq; i++) {
                req = safexcel_rdr_req_get(priv, ring);

                ctx = crypto_tfm_ctx(req->tfm);
                ndesc = ctx->handle_result(priv, ring, req,
                                           &should_complete, &ret);
                if (ndesc < 0) {
                        dev_err(priv->dev, "failed to handle result (%d)\n",
                                ndesc);
                        goto acknowledge;
                }

                if (should_complete) {
                        local_bh_disable();
                        crypto_request_complete(req, ret);
                        local_bh_enable();
                }

                tot_descs += ndesc;
                handled++;
        }

acknowledge:
        if (i)
                writel(EIP197_xDR_PROC_xD_PKT(i) |
                       (tot_descs * priv->config.rd_offset),
                       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

        /* If the number of requests overflowed the counter, try to process
         * more requests.
         */
        if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
                goto handle_results;

requests_left:
        spin_lock_bh(&priv->ring[ring].lock);

        priv->ring[ring].requests -= handled;
        safexcel_try_push_requests(priv, ring);

        if (!priv->ring[ring].requests)
                priv->ring[ring].busy = false;

        spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
        struct safexcel_work_data *data =
                        container_of(work, struct safexcel_work_data, work);

        safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
        struct safexcel_crypto_priv *priv;
        int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
        struct safexcel_ring_irq_data *irq_data = data;
        struct safexcel_crypto_priv *priv = irq_data->priv;
        int ring = irq_data->ring, rc = IRQ_NONE;
        u32 status, stat;

        status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
        if (!status)
                return rc;

        /* RDR interrupts */
        if (status & EIP197_RDR_IRQ(ring)) {
                stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

                if (unlikely(stat & EIP197_xDR_ERR)) {
                        /*
                         * Fatal error, the RDR is unusable and must be
                         * reinitialized. This should not happen under
                         * normal circumstances.
                         */
                        dev_err(priv->dev, "RDR: fatal error.\n");
                } else if (likely(stat & EIP197_xDR_THRESH)) {
                        rc = IRQ_WAKE_THREAD;
                }

                /* ACK the interrupts */
                writel(stat & 0xff,
                       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
        }

        /* ACK the interrupts */
        writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

        return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
        struct safexcel_ring_irq_data *irq_data = data;
        struct safexcel_crypto_priv *priv = irq_data->priv;
        int ring = irq_data->ring;

        safexcel_handle_result_descriptor(priv, ring);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(void *pdev, int irqid,
                                     int is_pci_dev,
                                     int ring_id,
                                     irq_handler_t handler,
                                     irq_handler_t threaded_handler,
                                     struct safexcel_ring_irq_data *ring_irq_priv)
{
        int ret, irq, cpu;
        struct device *dev;

        if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
                struct pci_dev *pci_pdev = pdev;

                dev = &pci_pdev->dev;
                irq = pci_irq_vector(pci_pdev, irqid);
                if (irq < 0) {
                        dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
                                irqid, irq);
                        return irq;
                }
        } else if (IS_ENABLED(CONFIG_OF)) {
                struct platform_device *plf_pdev = pdev;
                char irq_name[6] = {0}; /* "ringX\0" */

                snprintf(irq_name, 6, "ring%d", irqid);
                dev = &plf_pdev->dev;
                irq = platform_get_irq_byname(plf_pdev, irq_name);
                if (irq < 0)
                        return irq;
        } else {
                return -ENXIO;
        }

        ret = devm_request_threaded_irq(dev, irq, handler,
                                        threaded_handler, IRQF_ONESHOT,
                                        dev_name(dev), ring_irq_priv);
        if (ret) {
                dev_err(dev, "unable to request IRQ %d\n", irq);
                return ret;
        }

        /* Set affinity */
        cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
        irq_set_affinity_hint(irq, get_cpu_mask(cpu));

        return irq;
}
static struct safexcel_alg_template *safexcel_algs[] = {
        &safexcel_alg_ecb_des,
        &safexcel_alg_cbc_des,
        &safexcel_alg_ecb_des3_ede,
        &safexcel_alg_cbc_des3_ede,
        &safexcel_alg_ecb_aes,
        &safexcel_alg_cbc_aes,
        &safexcel_alg_ctr_aes,
        &safexcel_alg_md5,
        &safexcel_alg_sha1,
        &safexcel_alg_sha224,
        &safexcel_alg_sha256,
        &safexcel_alg_sha384,
        &safexcel_alg_sha512,
        &safexcel_alg_hmac_md5,
        &safexcel_alg_hmac_sha1,
        &safexcel_alg_hmac_sha224,
        &safexcel_alg_hmac_sha256,
        &safexcel_alg_hmac_sha384,
        &safexcel_alg_hmac_sha512,
        &safexcel_alg_authenc_hmac_sha1_cbc_aes,
        &safexcel_alg_authenc_hmac_sha224_cbc_aes,
        &safexcel_alg_authenc_hmac_sha256_cbc_aes,
        &safexcel_alg_authenc_hmac_sha384_cbc_aes,
        &safexcel_alg_authenc_hmac_sha512_cbc_aes,
        &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
        &safexcel_alg_authenc_hmac_sha1_ctr_aes,
        &safexcel_alg_authenc_hmac_sha224_ctr_aes,
        &safexcel_alg_authenc_hmac_sha256_ctr_aes,
        &safexcel_alg_authenc_hmac_sha384_ctr_aes,
        &safexcel_alg_authenc_hmac_sha512_ctr_aes,
        &safexcel_alg_xts_aes,
        &safexcel_alg_gcm,
        &safexcel_alg_ccm,
        &safexcel_alg_crc32,
        &safexcel_alg_cbcmac,
        &safexcel_alg_xcbcmac,
        &safexcel_alg_cmac,
        &safexcel_alg_chacha20,
        &safexcel_alg_chachapoly,
        &safexcel_alg_chachapoly_esp,
        &safexcel_alg_sm3,
        &safexcel_alg_hmac_sm3,
        &safexcel_alg_ecb_sm4,
        &safexcel_alg_cbc_sm4,
        &safexcel_alg_ctr_sm4,
        &safexcel_alg_authenc_hmac_sha1_cbc_sm4,
        &safexcel_alg_authenc_hmac_sm3_cbc_sm4,
        &safexcel_alg_authenc_hmac_sha1_ctr_sm4,
        &safexcel_alg_authenc_hmac_sm3_ctr_sm4,
        &safexcel_alg_sha3_224,
        &safexcel_alg_sha3_256,
        &safexcel_alg_sha3_384,
        &safexcel_alg_sha3_512,
        &safexcel_alg_hmac_sha3_224,
        &safexcel_alg_hmac_sha3_256,
        &safexcel_alg_hmac_sha3_384,
        &safexcel_alg_hmac_sha3_512,
        &safexcel_alg_authenc_hmac_sha1_cbc_des,
        &safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
        &safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
        &safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
        &safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
        &safexcel_alg_authenc_hmac_sha256_cbc_des,
        &safexcel_alg_authenc_hmac_sha224_cbc_des,
        &safexcel_alg_authenc_hmac_sha512_cbc_des,
        &safexcel_alg_authenc_hmac_sha384_cbc_des,
        &safexcel_alg_rfc4106_gcm,
        &safexcel_alg_rfc4543_gcm,
        &safexcel_alg_rfc4309_ccm,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
        int i, j, ret = 0;

        for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
                safexcel_algs[i]->priv = priv;

                /* Do we have all required base algorithms available? */
                if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
                    safexcel_algs[i]->algo_mask)
                        /* No, so don't register this ciphersuite */
                        continue;

                if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
                else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
                        ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
                else
                        ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

                if (ret)
                        goto fail;
        }

        return 0;

fail:
        for (j = 0; j < i; j++) {
                /* Do we have all required base algorithms available? */
                if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
                    safexcel_algs[j]->algo_mask)
                        /* No, so don't unregister this ciphersuite */
                        continue;

                if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
                else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
                        crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
                else
                        crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
        }

        return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
                /* Do we have all required base algorithms available? */
                if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
                    safexcel_algs[i]->algo_mask)
                        /* No, so don't unregister this ciphersuite */
                        continue;

                if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
                else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
                        crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
                else
                        crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
        }
}
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
        u32 mask = BIT(priv->hwconfig.hwdataw) - 1;

        priv->config.pes = priv->hwconfig.hwnumpes;
        priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
        /* Cannot currently support more rings than we have ring AICs! */
        priv->config.rings = min_t(u32, priv->config.rings,
                                   priv->hwconfig.hwnumraic);

        priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
        priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
        priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;

        /* result token sits behind the descriptor, but its offset must be
         * rounded to the bus width
         */
        priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
        /* the descriptor size is this first part plus the result struct */
        priv->config.rd_size = priv->config.res_offset +
                               EIP197_RD64_RESULT_SIZE;
        priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;

        /* convert dwords to bytes */
        priv->config.cd_offset *= sizeof(u32);
        priv->config.cdsh_offset *= sizeof(u32);
        priv->config.rd_offset *= sizeof(u32);
        priv->config.res_offset *= sizeof(u32);
}
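
/*
 * Offset example for the rounding above (illustrative values only): with
 * hwdataw = 2 the mask is 3, so a 10-word cd_size yields cd_offset =
 * (10 + 3) & ~3 = 12 words, i.e. 48 bytes after the dword-to-byte
 * conversion, keeping every descriptor aligned to the native bus width.
 */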
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
        struct safexcel_register_offsets *offsets = &priv->offsets;

        if (priv->flags & SAFEXCEL_HW_EIP197) {
                offsets->hia_aic = EIP197_HIA_AIC_BASE;
                offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
                offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
                offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
                offsets->hia_dfe = EIP197_HIA_DFE_BASE;
                offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
                offsets->hia_dse = EIP197_HIA_DSE_BASE;
                offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
                offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
                offsets->pe = EIP197_PE_BASE;
                offsets->global = EIP197_GLOBAL_BASE;
        } else {
                offsets->hia_aic = EIP97_HIA_AIC_BASE;
                offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
                offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
                offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
                offsets->hia_dfe = EIP97_HIA_DFE_BASE;
                offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
                offsets->hia_dse = EIP97_HIA_DSE_BASE;
                offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
                offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
                offsets->pe = EIP97_PE_BASE;
                offsets->global = EIP97_GLOBAL_BASE;
        }
}
  1155. /*
  1156. * Generic part of probe routine, shared by platform and PCI driver
  1157. *
  1158. * Assumes IO resources have been mapped, private data mem has been allocated,
  1159. * clocks have been enabled, device pointer has been assigned etc.
  1160. *
  1161. */
  1162. static int safexcel_probe_generic(void *pdev,
  1163. struct safexcel_crypto_priv *priv,
  1164. int is_pci_dev)
  1165. {
  1166. struct device *dev = priv->dev;
  1167. u32 peid, version, mask, val, hiaopt, hwopt, peopt;
  1168. int i, ret, hwctg;
  1169. priv->context_pool = dmam_pool_create("safexcel-context", dev,
  1170. sizeof(struct safexcel_context_record),
  1171. 1, 0);
  1172. if (!priv->context_pool)
  1173. return -ENOMEM;
  1174. /*
  1175. * First try the EIP97 HIA version regs
  1176. * For the EIP197, this is guaranteed to NOT return any of the test
  1177. * values
  1178. */
  1179. version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);
  1180. mask = 0; /* do not swap */
  1181. if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
  1182. priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
  1183. } else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
  1184. /* read back byte-swapped, so complement byte swap bits */
  1185. mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
  1186. priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
  1187. } else {
  1188. /* So it wasn't an EIP97 ... maybe it's an EIP197? */
  1189. version = readl(priv->base + EIP197_HIA_AIC_BASE +
  1190. EIP197_HIA_VERSION);
  1191. if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
  1192. priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
  1193. priv->flags |= SAFEXCEL_HW_EIP197;
  1194. } else if (EIP197_REG_HI16(version) ==
  1195. EIP197_HIA_VERSION_BE) {
  1196. /* read back byte-swapped, so complement swap bits */
  1197. mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
  1198. priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
  1199. priv->flags |= SAFEXCEL_HW_EIP197;
  1200. } else {
  1201. return -ENODEV;
  1202. }
  1203. }
  1204. /* Now initialize the reg offsets based on the probing info so far */
  1205. safexcel_init_register_offsets(priv);
  1206. /*
  1207. * If the version was read byte-swapped, we need to flip the device
  1208. * swapping Keep in mind here, though, that what we write will also be
  1209. * byte-swapped ...
  1210. */
  1211. if (mask) {
  1212. val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
  1213. val = val ^ (mask >> 24); /* toggle byte swap bits */
  1214. writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
  1215. }

	/*
	 * We're not done probing yet! We may fall through to here if no HIA
	 * was found at all. So, with the endianness presumably correct now
	 * and the offsets set up, *really* probe for the EIP97/EIP197.
	 */
	version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
	if (((priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
	     (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
	    ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
	      (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
		/*
		 * We did not find the device that matched our initial probing
		 * (or our initial probing failed); report an appropriate
		 * error.
		 */
		dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
			version);
		return -ENODEV;
	}

	priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
	hwctg = version >> 28;
	peid = version & 255;

	/* Detect EIP206 processing pipe */
	version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
		dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.ppver = EIP197_VERSION_MASK(version);

	/* Detect EIP96 packet engine and version */
	version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
		dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.pever = EIP197_VERSION_MASK(version);

	hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
	hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);

	priv->hwconfig.icever = 0;
	priv->hwconfig.ocever = 0;
	priv->hwconfig.psever = 0;

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197 */
		peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));

		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP197_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
					   EIP197_CFSIZE_MASK) +
					  EIP197_CFSIZE_ADJUST;
		priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
					   EIP197_RFSIZE_MASK) +
					  EIP197_RFSIZE_ADJUST;
		priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
					  EIP197_N_PES_MASK;
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
		if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
			priv->flags |= EIP197_PE_ARB;
		if (EIP206_OPT_ICE_TYPE(peopt) == 1) {
			priv->flags |= EIP197_ICE;
			/* Detect ICE EIP207 classification engine and version */
			version = readl(EIP197_PE(priv) +
					EIP197_PE_ICE_VERSION(0));
			if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
				dev_err(dev, "EIP%d: ICE EIP207 not detected.\n",
					peid);
				return -ENODEV;
			}
			priv->hwconfig.icever = EIP197_VERSION_MASK(version);
		}
		if (EIP206_OPT_OCE_TYPE(peopt) == 1) {
			priv->flags |= EIP197_OCE;
			/* Detect EIP96PP packet stream editor and version */
			version = readl(EIP197_PE(priv) +
					EIP197_PE_PSE_VERSION(0));
			if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
				dev_err(dev, "EIP%d: EIP96PP not detected.\n",
					peid);
				return -ENODEV;
			}
			priv->hwconfig.psever = EIP197_VERSION_MASK(version);
			/* Detect OCE EIP207 classification engine and version */
			version = readl(EIP197_PE(priv) +
					EIP197_PE_ICE_VERSION(0));
			if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
				dev_err(dev, "EIP%d: OCE EIP207 not detected.\n",
					peid);
				return -ENODEV;
			}
			priv->hwconfig.ocever = EIP197_VERSION_MASK(version);
		}
		/* If not a full TRC, then assume simple TRC */
		if (!(hwopt & EIP197_OPT_HAS_TRC))
			priv->flags |= EIP197_SIMPLE_TRC;
		/* EIP197 always has SOME form of TRC */
		priv->flags |= EIP197_TRC_CACHE;
	} else {
		/* EIP97 */
		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP97_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
					  EIP97_CFSIZE_MASK;
		priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
					  EIP97_RFSIZE_MASK;
		priv->hwconfig.hwnumpes = 1; /* by definition */
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
	}

	/* Scan for ring AICs */
	for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
		version = readl(EIP197_HIA_AIC_R(priv) +
				EIP197_HIA_AIC_R_VERSION(i));
		if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
			break;
	}
	priv->hwconfig.hwnumraic = i;

	/* Low-end EIP196 may not have any ring AICs ... */
	if (!priv->hwconfig.hwnumraic) {
		dev_err(priv->dev, "No ring interrupt controller present!\n");
		return -ENODEV;
	}

	/* Get supported algorithms from EIP96 transform engine */
	priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
					  EIP197_PE_EIP96_OPTIONS(0));

	/* Print single info line describing what we just detected */
	dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n",
		 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
		 priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
		 priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
		 priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
		 priv->hwconfig.ppver, priv->hwconfig.pever,
		 priv->hwconfig.algo_flags, priv->hwconfig.icever,
		 priv->hwconfig.ocever, priv->hwconfig.psever);

	safexcel_configure(priv);

	if (IS_ENABLED(CONFIG_PCI) && priv->data->version == EIP197_DEVBRD) {
		/*
		 * Request MSI vectors for global + 1 per ring -
		 * or just 1 for older dev images
		 */
		struct pci_dev *pci_pdev = pdev;

		ret = pci_alloc_irq_vectors(pci_pdev,
					    priv->config.rings + 1,
					    priv->config.rings + 1,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0) {
			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
			return ret;
		}
	}

	/* Register the ring IRQ handlers and configure the rings */
	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < priv->config.rings; i++) {
		char wq_name[9] = {0};
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret) {
			dev_err(dev, "Failed to initialize rings\n");
			goto err_cleanup_rings;
		}

		priv->ring[i].rdr_req = devm_kcalloc(dev,
						     EIP197_DEFAULT_RING_SIZE,
						     sizeof(*priv->ring[i].rdr_req),
						     GFP_KERNEL);
		if (!priv->ring[i].rdr_req) {
			ret = -ENOMEM;
			goto err_cleanup_rings;
		}

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_cleanup_rings;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		irq = safexcel_request_ring_irq(pdev,
						EIP197_IRQ_NUMBER(i, is_pci_dev),
						is_pci_dev,
						i,
						safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
			ret = irq;
			goto err_cleanup_rings;
		}

		priv->ring[i].irq = irq;
		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work,
			  safexcel_dequeue_work);

		snprintf(wq_name, sizeof(wq_name), "wq_ring%d", i);
		priv->ring[i].workqueue =
			create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_cleanup_rings;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "HW init failed (%d)\n", ret);
		goto err_cleanup_rings;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_cleanup_rings;
	}

	return 0;

err_cleanup_rings:
	for (i = 0; i < priv->config.rings; i++) {
		if (priv->ring[i].irq)
			irq_set_affinity_hint(priv->ring[i].irq, NULL);
		if (priv->ring[i].workqueue)
			destroy_workqueue(priv->ring[i].workqueue);
	}

	return ret;
}
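
/*
 * Editor's note - illustrative sketch, not part of the upstream driver:
 * the HIA/global version probing in safexcel_probe_generic() relies on a
 * 16-bit signature that a byte-swapped bus moves from the low halfword to
 * the high halfword (with its two bytes swapped). A standalone form of
 * that test, with a hypothetical name, might look like this:
 */
static __maybe_unused bool
safexcel_version_sig_matches(u32 readback, u16 sig_le, bool *swapped)
{
	if ((readback & GENMASK(15, 0)) == sig_le) {
		*swapped = false; /* bus already in the expected byte order */
		return true;
	}
	if ((readback >> 16) == swab16(sig_le)) {
		/* byte-swapped readback: caller must fix up MST_CTRL */
		*swapped = true;
		return true;
	}
	return false;
}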

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}
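
/*
 * Editor's note - illustrative sketch, not part of the upstream driver:
 * each ring is a CDR/RDR pair and the reset sequence above is identical
 * for both halves; factored out (hypothetical helper name), it is just:
 */
static __maybe_unused void
safexcel_hw_reset_xdr(void __iomem *xdr, u32 stat_mask)
{
	/* Acknowledge any pending interrupts, then clear the ring base */
	writel(stat_mask, xdr + EIP197_HIA_xDR_STAT);
	writel(0, xdr + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
	writel(0, xdr + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
}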

/* for Device Tree platform driver */
static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->data = (struct safexcel_priv_data *)of_device_get_match_data(dev);
	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}
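
/*
 * Editor's note - illustrative sketch, not part of the upstream driver:
 * the "clock isn't mandatory" pattern above can also be expressed with
 * devm_clk_get_optional(), which folds the -ENOENT case into a NULL clock
 * handle (clk_prepare_enable(NULL) is a no-op). Hypothetical helper:
 */
static __maybe_unused int
safexcel_enable_optional_clk(struct device *dev, const char *id,
			     struct clk **clk)
{
	*clk = devm_clk_get_optional(dev, id);
	if (IS_ERR(*clk))
		return PTR_ERR(*clk);

	return clk_prepare_enable(*clk);
}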

static void safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->reg_clk);
	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++) {
		irq_set_affinity_hint(priv->ring[i].irq, NULL);
		destroy_workqueue(priv->ring[i].workqueue);
	}
}

static const struct safexcel_priv_data eip97ies_mrvl_data = {
	.version = EIP97IES_MRVL,
};

static const struct safexcel_priv_data eip197b_mrvl_data = {
	.version = EIP197B_MRVL,
};

static const struct safexcel_priv_data eip197d_mrvl_data = {
	.version = EIP197D_MRVL,
};

static const struct safexcel_priv_data eip197_devbrd_data = {
	.version = EIP197_DEVBRD,
};

static const struct safexcel_priv_data eip197c_mxl_data = {
	.version = EIP197C_MXL,
	.fw_little_endian = true,
};

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = &eip97ies_mrvl_data,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = &eip197b_mrvl_data,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = &eip197d_mrvl_data,
	},
	{
		.compatible = "inside-secure,safexcel-eip197c-mxl",
		.data = &eip197c_mxl_data,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = &eip97ies_mrvl_data,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = &eip197b_mrvl_data,
	},
	{},
};

MODULE_DEVICE_TABLE(of, safexcel_of_match_table);

static struct platform_driver crypto_safexcel = {
	.probe = safexcel_probe,
	.remove_new = safexcel_remove,
	.driver = {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};

/* PCIe devices - i.e. Inside Secure development boards */
static int safexcel_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->data = (struct safexcel_priv_data *)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->data->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		/* mask bit 2 selects BAR2, which holds the PCIe glue logic */
		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR2\n");
			return rc;
		}
		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Set up MSI identity mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}
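
/*
 * Editor's note - illustrative sketch, not part of the upstream driver:
 * the Xilinx IRQ block detection above keys off the top 16 bits of the
 * block ID register, with the low byte carrying the block version. As a
 * standalone predicate (hypothetical name):
 */
static __maybe_unused bool
safexcel_xlx_irq_block_present(void __iomem *pciebase)
{
	u32 val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);

	return (val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE;
}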

static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}

static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		.driver_data = (kernel_ulong_t)&eip197_devbrd_data,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);

static struct pci_driver safexcel_pci_driver = {
	.name = "crypto-safexcel",
	.id_table = safexcel_pci_ids,
	.probe = safexcel_pci_probe,
	.remove = safexcel_pci_remove,
};

static int __init safexcel_init(void)
{
	int ret;

	/* Register PCI driver */
	ret = pci_register_driver(&safexcel_pci_driver);

	/* Register platform driver */
	if (IS_ENABLED(CONFIG_OF) && !ret) {
		ret = platform_driver_register(&crypto_safexcel);
		if (ret)
			pci_unregister_driver(&safexcel_pci_driver);
	}

	return ret;
}

static void __exit safexcel_exit(void)
{
	/* Unregister platform driver */
	if (IS_ENABLED(CONFIG_OF))
		platform_driver_unregister(&crypto_safexcel);

	/* Unregister PCI driver */
	pci_unregister_driver(&safexcel_pci_driver);
}

module_init(safexcel_init);
module_exit(safexcel_exit);
  1704. MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
  1705. MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
  1706. MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
  1707. MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
  1708. MODULE_LICENSE("GPL v2");
  1709. MODULE_IMPORT_NS(CRYPTO_INTERNAL);
  1710. MODULE_FIRMWARE("ifpp.bin");
  1711. MODULE_FIRMWARE("ipue.bin");
  1712. MODULE_FIRMWARE("inside-secure/eip197b/ifpp.bin");
  1713. MODULE_FIRMWARE("inside-secure/eip197b/ipue.bin");
  1714. MODULE_FIRMWARE("inside-secure/eip197d/ifpp.bin");
  1715. MODULE_FIRMWARE("inside-secure/eip197d/ipue.bin");
  1716. MODULE_FIRMWARE("inside-secure/eip197_minifw/ifpp.bin");
  1717. MODULE_FIRMWARE("inside-secure/eip197_minifw/ipue.bin");