// SPDX-License-Identifier: GPL-2.0
/*
 * Silvaco dual-role I3C master driver
 *
 * Copyright (C) 2020 Silvaco
 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Master Mode Registers */
#define SVC_I3C_MCONFIG 0x000
#define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
#define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
#define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
#define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
#define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
#define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
#define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
#define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
#define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
#define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
#define SVC_I3C_MCTRL 0x084
#define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
#define SVC_I3C_MCTRL_REQUEST_NONE 0
#define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
#define SVC_I3C_MCTRL_REQUEST_STOP 2
#define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
#define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
#define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
#define SVC_I3C_MCTRL_TYPE_I3C 0
#define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
#define SVC_I3C_MCTRL_IBIRESP_AUTO 0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
#define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
#define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
#define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
#define SVC_I3C_MCTRL_DIR_WRITE 0
#define SVC_I3C_MCTRL_DIR_READ 1
#define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
#define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
#define SVC_I3C_MSTATUS 0x088
#define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
#define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
#define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
#define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
#define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
#define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
#define SVC_I3C_MSTATUS_IBITYPE_IBI 1
#define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
#define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
#define SVC_I3C_MINT_SLVSTART BIT(8)
#define SVC_I3C_MINT_MCTRLDONE BIT(9)
#define SVC_I3C_MINT_COMPLETE BIT(10)
#define SVC_I3C_MINT_RXPEND BIT(11)
#define SVC_I3C_MINT_TXNOTFULL BIT(12)
#define SVC_I3C_MINT_IBIWON BIT(13)
#define SVC_I3C_MINT_ERRWARN BIT(15)
#define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
#define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
#define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
#define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
#define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
#define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
#define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
#define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
#define SVC_I3C_IBIRULES 0x08C
#define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
						     ((addr) & 0x3F) << ((slot) * 6))
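/*
 * Example (illustrative values only): SVC_I3C_IBIRULES_ADDR(2, 0x2a) packs the
 * low six bits of address 0x2a into bits 17:12 of IBIRULES, i.e. slot n
 * occupies bits (6 * n + 5):(6 * n).
 */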
#define SVC_I3C_IBIRULES_ADDRS 5
#define SVC_I3C_IBIRULES_MSB0 BIT(30)
#define SVC_I3C_IBIRULES_NOBYTE BIT(31)
#define SVC_I3C_IBIRULES_MANDBYTE 0
#define SVC_I3C_MINTSET 0x090
#define SVC_I3C_MINTCLR 0x094
#define SVC_I3C_MINTMASKED 0x098
#define SVC_I3C_MERRWARN 0x09C
#define SVC_I3C_MERRWARN_NACK BIT(2)
#define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
#define SVC_I3C_MDMACTRL 0x0A0
#define SVC_I3C_MDATACTRL 0x0AC
#define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
#define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
#define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
#define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
#define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
#define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
#define SVC_I3C_MDATACTRL_TXFULL BIT(30)
#define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
#define SVC_I3C_MWDATAB 0x0B0
#define SVC_I3C_MWDATAB_END BIT(8)
#define SVC_I3C_MWDATABE 0x0B4
#define SVC_I3C_MWDATAH 0x0B8
#define SVC_I3C_MWDATAHE 0x0BC
#define SVC_I3C_MRDATAB 0x0C0
#define SVC_I3C_MRDATAH 0x0C8
#define SVC_I3C_MWMSG_SDR 0x0D0
#define SVC_I3C_MRMSG_SDR 0x0D4
#define SVC_I3C_MWMSG_DDR 0x0D8
#define SVC_I3C_MRMSG_DDR 0x0DC
#define SVC_I3C_MDYNADDR 0x0E4
#define SVC_MDYNADDR_VALID BIT(0)
#define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))

#define SVC_I3C_MAX_DEVS 32
#define SVC_I3C_PM_TIMEOUT_MS 1000

/* This parameter depends on the implementation and may be tuned */
#define SVC_I3C_FIFO_SIZE 16

#define SVC_I3C_PPBAUD_MAX 15
#define SVC_I3C_QUICK_I2C_CLK 4170000

#define SVC_I3C_EVENT_IBI GENMASK(7, 0)
#define SVC_I3C_EVENT_HOTJOIN BIT(31)

struct svc_i3c_cmd {
	u8 addr;
	bool rnw;
	u8 *in;
	const void *out;
	unsigned int len;
	unsigned int actual_len;
	struct i3c_priv_xfer *xfer;
	bool continued;
};

struct svc_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int type;
	unsigned int ncmds;
	struct svc_i3c_cmd cmds[] __counted_by(ncmds);
};

struct svc_i3c_regs_save {
	u32 mconfig;
	u32 mdynaddr;
};

/**
 * struct svc_i3c_master - Silvaco I3C Master structure
 * @base: I3C master controller
 * @dev: Corresponding device
 * @regs: Memory mapping
 * @saved_regs: Volatile values for PM operations
 * @free_slots: Bit array of available slots
 * @addrs: Array containing the dynamic addresses of each attached device
 * @descs: Array of descriptors, one per attached device
 * @hj_work: Hot-join work
 * @ibi_work: IBI work
 * @irq: Main interrupt
 * @pclk: System clock
 * @fclk: Fast clock (bus)
 * @sclk: Slow clock (other events)
 * @xferqueue: Transfer queue structure
 * @xferqueue.list: List member
 * @xferqueue.cur: Current ongoing transfer
 * @xferqueue.lock: Queue lock
 * @ibi: IBI structure
 * @ibi.num_slots: Number of slots available in @ibi.slots
 * @ibi.slots: Available IBI slots
 * @ibi.tbq_slot: To be queued IBI slot
 * @ibi.lock: IBI lock
 * @lock: Transfer lock, protects against races between the IBI work thread
 *	  and callbacks from the master
 * @enabled_events: Bit mask of enabled events (IBI, HotJoin).
 * @mctrl_config: Configuration value in SVC_I3C_MCTRL for setting speed back.
 */
struct svc_i3c_master {
	struct i3c_master_controller base;
	struct device *dev;
	void __iomem *regs;
	struct svc_i3c_regs_save saved_regs;
	u32 free_slots;
	u8 addrs[SVC_I3C_MAX_DEVS];
	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
	struct work_struct hj_work;
	struct work_struct ibi_work;
	int irq;
	struct clk *pclk;
	struct clk *fclk;
	struct clk *sclk;
	struct {
		struct list_head list;
		struct svc_i3c_xfer *cur;
		/* Prevent races between transfers */
		spinlock_t lock;
	} xferqueue;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		struct i3c_ibi_slot *tbq_slot;
		/* Prevent races within IBI handlers */
		spinlock_t lock;
	} ibi;
	struct mutex lock;
	u32 enabled_events;
	u32 mctrl_config;
};

/**
 * struct svc_i3c_i2c_dev_data - Device specific data
 * @index: Index in the master tables corresponding to this device
 * @ibi: IBI slot index in the master structure
 * @ibi_pool: IBI pool associated to this device
 */
struct svc_i3c_i2c_dev_data {
	u8 index;
	int ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};

static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
{
	return !!(master->enabled_events & mask);
}

static bool svc_i3c_master_error(struct svc_i3c_master *master)
{
	u32 mstatus, merrwarn;

	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);

		/* Ignore timeout error */
		if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
			dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
				mstatus, merrwarn);
			return false;
		}

		dev_err(master->dev,
			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
			mstatus, merrwarn);
		return true;
	}

	return false;
}

static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
{
	writel(mask, master->regs + SVC_I3C_MINTSET);
}

static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
{
	u32 mask = readl(master->regs + SVC_I3C_MINTSET);

	writel(mask, master->regs + SVC_I3C_MINTCLR);
}

static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
{
	/* Clear pending warnings */
	writel(readl(master->regs + SVC_I3C_MERRWARN),
	       master->regs + SVC_I3C_MERRWARN);
}

static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
{
	/* Flush FIFOs */
	writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
	       master->regs + SVC_I3C_MDATACTRL);
}

static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
{
	u32 reg;

	/* Set RX and TX trigger levels, flush FIFOs */
	reg = SVC_I3C_MDATACTRL_FLUSHTB |
	      SVC_I3C_MDATACTRL_FLUSHRB |
	      SVC_I3C_MDATACTRL_UNLOCK_TRIG |
	      SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
	      SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
	writel(reg, master->regs + SVC_I3C_MDATACTRL);
}

static void svc_i3c_master_reset(struct svc_i3c_master *master)
{
	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_reset_fifo_trigger(master);
	svc_i3c_master_disable_interrupts(master);
}

static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct svc_i3c_master, base);
}

static void svc_i3c_master_hj_work(struct work_struct *work)
{
	struct svc_i3c_master *master;

	master = container_of(work, struct svc_i3c_master, hj_work);
	i3c_master_do_daa(&master->base);
}

static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
			     unsigned int ibiaddr)
{
	int i;

	for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
		if (master->addrs[i] == ibiaddr)
			break;

	if (i == SVC_I3C_MAX_DEVS)
		return NULL;

	return master->descs[i];
}

static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);

	/*
	 * This delay is necessary after the emission of a stop, otherwise e.g.
	 * repeating IBIs do not get detected. There is a note in the manual
	 * about it, stating that the stop condition might not be settled
	 * correctly if a start condition follows too rapidly.
	 */
	udelay(1);
}

static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
				     struct i3c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_ibi_slot *slot;
	unsigned int count;
	u32 mdatactrl;
	int ret, val;
	u8 *buf;

	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot)
		return -ENOSPC;

	slot->len = 0;
	buf = slot->data;

	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
					 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
		return ret;
	}

	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
	       slot->len < SVC_I3C_FIFO_SIZE) {
		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
		readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
		slot->len += count;
		buf += count;
	}

	master->ibi.tbq_slot = slot;

	return 0;
}

static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
				   bool mandatory_byte)
{
	unsigned int ibi_ack_nack;

	ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
	if (mandatory_byte)
		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
	else
		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;

	writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
}

static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
	       SVC_I3C_MCTRL_IBIRESP_NACK,
	       master->regs + SVC_I3C_MCTRL);
}

static void svc_i3c_master_ibi_work(struct work_struct *work)
{
	struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
	struct svc_i3c_i2c_dev_data *data;
	unsigned int ibitype, ibiaddr;
	struct i3c_dev_desc *dev;
	u32 status, val;
	int ret;

	mutex_lock(&master->lock);
	/*
	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
	 * ibitype will be 0 since it was last updated only after the 8th SCL
	 * cycle, leading to missed client IBI handlers.
	 *
	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
	 * at svc_i3c_master_priv_xfers().
	 *
	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
	 */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
	       SVC_I3C_MCTRL_IBIRESP_AUTO,
	       master->regs + SVC_I3C_MCTRL);

	/* Wait for IBIWON, should take approximately 100us */
	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for IBIWON\n");
		svc_i3c_master_emit_stop(master);
		goto reenable_ibis;
	}

	status = readl(master->regs + SVC_I3C_MSTATUS);
	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);

	/* Handle the critical responses to IBIs */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
			svc_i3c_master_nack_ibi(master);
		else
			svc_i3c_master_handle_ibi(master, dev);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			svc_i3c_master_ack_ibi(master, false);
		else
			svc_i3c_master_nack_ibi(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		svc_i3c_master_nack_ibi(master);
		break;
	default:
		break;
	}

	/*
	 * If an error happened, we probably got interrupted and the exchange
	 * timed out. In this case we just drop everything, emit a stop and wait
	 * for the slave to interrupt again.
	 */
	if (svc_i3c_master_error(master)) {
		if (master->ibi.tbq_slot) {
			data = i3c_dev_get_master_data(dev);
			i3c_generic_ibi_recycle_slot(data->ibi_pool,
						     master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}

		svc_i3c_master_emit_stop(master);

		goto reenable_ibis;
	}

	/* Handle the non critical tasks */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		if (dev) {
			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}
		svc_i3c_master_emit_stop(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		svc_i3c_master_emit_stop(master);
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			queue_work(master->base.wq, &master->hj_work);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
	default:
		break;
	}

reenable_ibis:
	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
	mutex_unlock(&master->lock);
}

static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
{
	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
	u32 active = readl(master->regs + SVC_I3C_MSTATUS);

	if (!SVC_I3C_MSTATUS_SLVSTART(active))
		return IRQ_NONE;

	/* Clear the interrupt status */
	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);

	svc_i3c_master_disable_interrupts(master);

	/* Handle the interrupt in a non atomic context */
	queue_work(master->base.wq, &master->ibi_work);

	return IRQ_HANDLED;
}

static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
				    enum i3c_open_drain_speed speed)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(&master->base);
	u32 ppbaud, odbaud, odhpp, mconfig;
	unsigned long fclk_rate;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	switch (speed) {
	case I3C_OPEN_DRAIN_SLOW_SPEED:
		fclk_rate = clk_get_rate(master->fclk);
		if (!fclk_rate) {
			ret = -EINVAL;
			goto rpm_out;
		}
		/*
		 * Set a 50% duty-cycle I2C speed for I3C OPEN-DRAIN mode, so the first
		 * broadcast address is visible to all I2C/I3C devices on the I3C bus.
		 * An I3C device working as an I2C device will turn off its 50ns Spike
		 * Filter to change to I3C mode.
		 */
		mconfig = master->mctrl_config;
		ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
		odhpp = 0;
		odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
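		/*
		 * Worked example (hypothetical numbers, not from the datasheet):
		 * with fclk = 100 MHz, ppbaud = 3 and an I2C rate of 100 kHz,
		 * odbaud = DIV_ROUND_UP(100000000, 100000 * 8) - 1 = 124. With
		 * odhpp = 0 each open-drain half-period then lasts
		 * (odbaud + 1) * (ppbaud + 1) fclk cycles = 5 us, i.e. a 100 kHz
		 * SCL with the 50% duty-cycle intended here.
		 */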
		mconfig &= ~GENMASK(24, 16);
		mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
		writel(mconfig, master->regs + SVC_I3C_MCONFIG);
		break;
	case I3C_OPEN_DRAIN_NORMAL_SPEED:
		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
		break;
	}

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}

static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(m);
	struct i3c_device_info info = {};
	unsigned long fclk_rate, fclk_period_ns;
	unsigned long i2c_period_ns, i2c_scl_rate, i3c_scl_rate;
	unsigned int high_period_ns, od_low_period_ns;
	u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	/* Timings derivation */
	fclk_rate = clk_get_rate(master->fclk);
	if (!fclk_rate) {
		ret = -EINVAL;
		goto rpm_out;
	}

	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
	i2c_period_ns = DIV_ROUND_UP(1000000000, bus->scl_rate.i2c);
	i2c_scl_rate = bus->scl_rate.i2c;
	i3c_scl_rate = bus->scl_rate.i3c;

	/*
	 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
	 * Simplest configuration is using a 50% duty-cycle of 40ns.
	 */
	ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1;
	pplow = 0;

	/*
	 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
	 * duty-cycle tuned so that high levels are filtered out by
	 * the 50ns filter (target being 40ns).
	 */
	odhpp = 1;
	high_period_ns = (ppbaud + 1) * fclk_period_ns;
	odbaud = DIV_ROUND_UP(fclk_rate, SVC_I3C_QUICK_I2C_CLK * (1 + ppbaud)) - 2;
	od_low_period_ns = (odbaud + 1) * high_period_ns;
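	/*
	 * Worked example (hypothetical numbers, not from the datasheet): with
	 * fclk = 100 MHz (10 ns period) and an I3C rate of 12.5 MHz:
	 *   ppbaud = DIV_ROUND_UP(50000000, 12500000) - 1 = 3 -> 40 ns PP high/low
	 *   odbaud = DIV_ROUND_UP(100000000, 4170000 * 4) - 2 = 4 -> 200 ns OD low
	 * With odhpp = 1 the open-drain high time stays at the 40 ns push-pull
	 * high time, giving the ~240 ns / 4.17 MHz open-drain period targeted
	 * above.
	 */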
	switch (bus->mode) {
	case I3C_BUS_MODE_PURE:
		i2cbaud = 0;
		odstop = 0;
		break;
	case I3C_BUS_MODE_MIXED_FAST:
		/*
		 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
		 * between the high and low period does not really matter.
		 */
		i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
		odstop = 1;
		break;
	case I3C_BUS_MODE_MIXED_LIMITED:
	case I3C_BUS_MODE_MIXED_SLOW:
		/* I3C PP, I3C OD and I2C OD all use the I2C clk rate */
		if (ppbaud > SVC_I3C_PPBAUD_MAX) {
			ppbaud = SVC_I3C_PPBAUD_MAX;
			pplow = DIV_ROUND_UP(fclk_rate, i3c_scl_rate) - (2 + 2 * ppbaud);
		}

		high_period_ns = (ppbaud + 1) * fclk_period_ns;
		odhpp = 0;
		odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1;

		od_low_period_ns = (odbaud + 1) * high_period_ns;
		i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
		odstop = 1;
		break;
	default:
		goto rpm_out;
	}

	reg = SVC_I3C_MCONFIG_MASTER_EN |
	      SVC_I3C_MCONFIG_DISTO(0) |
	      SVC_I3C_MCONFIG_HKEEP(0) |
	      SVC_I3C_MCONFIG_ODSTOP(odstop) |
	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
	      SVC_I3C_MCONFIG_PPLOW(pplow) |
	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
	      SVC_I3C_MCONFIG_SKEW(0) |
	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
	writel(reg, master->regs + SVC_I3C_MCONFIG);

	master->mctrl_config = reg;

	/* Master core's registration */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		goto rpm_out;

	info.dyn_addr = ret;

	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
	       master->regs + SVC_I3C_MDYNADDR);

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		goto rpm_out;

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}

static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return;
	}

	svc_i3c_master_disable_interrupts(master);

	/* Disable master */
	writel(0, master->regs + SVC_I3C_MCONFIG);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
}

static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
{
	unsigned int slot;

	if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
		return -ENOSPC;

	slot = ffs(master->free_slots) - 1;

	master->free_slots &= ~BIT(slot);

	return slot;
}

static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
					unsigned int slot)
{
	master->free_slots |= BIT(slot);
}

static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data;
	int slot;

	slot = svc_i3c_master_reserve_slot(master);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		svc_i3c_master_release_slot(master, slot);
		return -ENOMEM;
	}

	data->ibi = -1;
	data->index = slot;
	master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
						   dev->info.static_addr;
	master->descs[slot] = dev;

	i3c_dev_set_master_data(dev, data);

	return 0;
}

static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					   u8 old_dyn_addr)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
							  dev->info.static_addr;

	return 0;
}

static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	master->addrs[data->index] = 0;
	svc_i3c_master_release_slot(master, data->index);

	kfree(data);
}

static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data;
	int slot;

	slot = svc_i3c_master_reserve_slot(master);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		svc_i3c_master_release_slot(master, slot);
		return -ENOMEM;
	}

	data->index = slot;
	master->addrs[slot] = dev->addr;

	i2c_dev_set_master_data(dev, data);

	return 0;
}

static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	svc_i3c_master_release_slot(master, data->index);

	kfree(data);
}

static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
				unsigned int len)
{
	int ret, i;
	u32 reg;

	for (i = 0; i < len; i++) {
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg),
						0, 1000);
		if (ret)
			return ret;

		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
	}

	return 0;
}

static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
					u8 *addrs, unsigned int *count)
{
	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
	unsigned int dev_nb = 0, last_addr = 0;
	u32 reg;
	int ret, i;

	while (true) {
		/* SVC_I3C_MCTRL_REQUEST_PROC_DAA has two modes, ENTER DAA and PROCESS DAA.
		 *
		 * ENTER DAA:
		 *   1. Issues START, 7E, ENTDAA, and then emits 7E/R to process the first target.
		 *   2. Stops just before the new Dynamic Address (DA) is to be emitted.
		 *
		 * PROCESS DAA:
		 *   1. The DA is written using MWDATAB or ADDR bits 6:0.
		 *   2. ProcessDAA is requested again to write the new address, and then starts the
		 *      next (START, 7E, ENTDAA) unless marked to STOP; an MSTATUS indicating NACK
		 *      means the DA was not accepted (e.g. parity error). If PROCESSDAA is NACKed
		 *      on the 7E/R, which means no more Slaves need a DA, then a COMPLETE will be
		 *      signaled (along with DONE), and a STOP issued automatically.
		 */
		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
		       SVC_I3C_MCTRL_TYPE_I3C |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * Either one slave will send its ID, or the assignment process
		 * is done.
		 */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg) |
						SVC_I3C_MSTATUS_MCTRLDONE(reg),
						1, 1000);
		if (ret)
			break;

		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
			u8 data[6];

			/*
			 * We only care about the 48-bit provisioned ID here, so
			 * as to be sure a device does not NACK an address twice.
			 * Otherwise, we would just need to flush the RX FIFO.
			 */
			ret = svc_i3c_master_readb(master, data, 6);
			if (ret)
				break;

			for (i = 0; i < 6; i++)
				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));

			/* We do not care about the BCR and DCR yet */
			ret = svc_i3c_master_readb(master, data, 2);
			if (ret)
				break;
		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
				/*
				 * All devices received and acked their dynamic
				 * address, this is the natural end of the DAA
				 * procedure.
				 *
				 * Hardware will auto emit STOP in this case.
				 */
				*count = dev_nb;
				return 0;
			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
				/* No I3C devices attached */
				if (dev_nb == 0) {
					/*
					 * The hardware can't treat the first NACK
					 * for ENTDAA as a normal COMPLETE, so a
					 * STOP must be emitted manually.
					 */
					ret = 0;
					*count = 0;
					break;
				}

				/*
				 * A slave device nacked the address, this is
				 * allowed only once, DAA will be stopped and
				 * then resumed. The same device is supposed to
				 * answer again immediately and shall ack the
				 * address this time.
				 */
				if (prov_id[dev_nb] == nacking_prov_id) {
					ret = -EIO;
					break;
				}

				dev_nb--;
				nacking_prov_id = prov_id[dev_nb];
				svc_i3c_master_emit_stop(master);

				continue;
			} else {
				break;
			}
		}

		/* Wait for the slave to be ready to receive its address */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
						SVC_I3C_MSTATUS_BETWEEN(reg),
						0, 1000);
		if (ret)
			break;

		/* Give the slave device a suitable dynamic address */
		ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
		if (ret < 0)
			break;

		addrs[dev_nb] = ret;
		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
			dev_nb, addrs[dev_nb]);

		writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
		last_addr = addrs[dev_nb++];
	}

	/* A STOP must be issued manually except for the COMPLETE condition */
	svc_i3c_master_emit_stop(master);

	return ret;
}

static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
{
	struct i3c_dev_desc *dev;
	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
		nobyte_addr_ko = 0;
	bool list_mbyte = false, list_nobyte = false;

	/* Create the IBIRULES register for both cases */
	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
			continue;

		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
							   dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				mbyte_addr_ko++;
			else
				mbyte_addr_ok++;
		} else {
			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
							    dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				nobyte_addr_ko++;
			else
				nobyte_addr_ok++;
		}
	}

	/* Check whether the device lists can be handled by the hardware */
	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_mbyte = true;

	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_nobyte = true;

	/* No list can be properly handled, return an error */
	if (!list_mbyte && !list_nobyte)
		return -ERANGE;

	/* Pick the first list that can be handled by hardware, randomly */
	if (list_mbyte)
		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
	else
		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);

	return 0;
}

static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	u8 addrs[SVC_I3C_MAX_DEVS];
	unsigned long flags;
	unsigned int dev_nb;
	int ret, i;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);

	svc_i3c_master_clear_merrwarn(master);
	if (ret)
		goto rpm_out;

	/*
	 * Register all devices that took part in DAA with the core.
	 *
	 * If two devices (A and B) are detected in DAA and address 0xa is assigned to
	 * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked()
	 * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being
	 * registered on the bus. The I3C stack might still consider 0xb a free
	 * address. If a subsequent Hotjoin occurs, 0xb might be assigned to Device A,
	 * causing both devices A and B to use the same address 0xb, violating the I3C
	 * specification.
	 *
	 * The return value of i3c_master_add_i3c_dev_locked() should not be checked
	 * because subsequent steps will scan the entire I3C bus, independent of
	 * whether i3c_master_add_i3c_dev_locked() returns success.
	 *
	 * If device A registration fails, there is still a chance to register device
	 * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while
	 * retrieving device information.
	 */
	for (i = 0; i < dev_nb; i++)
		i3c_master_add_i3c_dev_locked(m, addrs[i]);

	/* Configure IBI auto-rules */
	ret = svc_i3c_update_ibirules(master);
	if (ret)
		dev_err(master->dev, "Cannot handle such a list of devices");

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}

static int svc_i3c_master_read(struct svc_i3c_master *master,
			       u8 *in, unsigned int len)
{
	int offset = 0, i;
	u32 mdctrl, mstatus;
	bool completed = false;
	unsigned int count;
	unsigned long start = jiffies;

	while (!completed) {
		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
			completed = true;

		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
			dev_dbg(master->dev, "I3C read timeout\n");
			return -ETIMEDOUT;
		}

		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
		if (offset + count > len) {
			dev_err(master->dev, "I3C receive length too long!\n");
			return -EINVAL;
		}
		for (i = 0; i < count; i++)
			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);

		offset += count;
	}

	return offset;
}

static int svc_i3c_master_write(struct svc_i3c_master *master,
				const u8 *out, unsigned int len)
{
	int offset = 0, ret;
	u32 mdctrl;

	while (offset < len) {
		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
					 mdctrl,
					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
					 0, 1000);
		if (ret)
			return ret;

		/*
		 * The last byte to be sent over the bus must either have the
		 * "end" bit set or be written in MWDATABE.
		 */
		if (likely(offset < (len - 1)))
			writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
		else
			writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
	}

	return 0;
}

static int svc_i3c_master_xfer(struct svc_i3c_master *master,
			       bool rnw, unsigned int xfer_type, u8 addr,
			       u8 *in, const u8 *out, unsigned int xfer_len,
			       unsigned int *actual_len, bool continued)
{
	int retry = 2;
	u32 reg;
	int ret;

	/* clean SVC_I3C_MINT_IBIWON w1c bits */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	while (retry--) {
		writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
		       xfer_type |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(rnw) |
		       SVC_I3C_MCTRL_ADDR(addr) |
		       SVC_I3C_MCTRL_RDTERM(*actual_len),
		       master->regs + SVC_I3C_MCTRL);

		ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
					 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
		if (ret)
			goto emit_stop;

		if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
			/*
			 * According to I3C Spec 1.1.1, 11-Jun-2021, section 5.1.2.2.3:
			 * If the Controller chooses to start an I3C Message with an I3C Dynamic
			 * Address, then special provisions shall be made because that same I3C
			 * Target may be initiating an IBI or a Controller Role Request. So, one of
			 * three things may happen: (skip 1, 2)
			 *
			 * 3. The Addresses match and the RnW bits also match, and so neither
			 * Controller nor Target will ACK since both are expecting the other side to
			 * provide ACK. As a result, each side might think it had "won" arbitration,
			 * but neither side would continue, as each would subsequently see that the
			 * other did not provide ACK.
			 * ...
			 * For either value of RnW: Due to the NACK, the Controller shall defer the
			 * Private Write or Private Read, and should typically transmit the Target
			 * Address again after a Repeated START (i.e., the next one or any one prior
			 * to a STOP in the Frame). Since the Address Header following a Repeated
			 * START is not arbitrated, the Controller will always win (see Section
			 * 5.1.2.2.4).
			 */
			if (retry && addr != 0x7e) {
				writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
			} else {
				ret = -ENXIO;
				*actual_len = 0;
				goto emit_stop;
			}
		} else {
			break;
		}
	}

	/*
	 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
	 * with I3C Target Address:
	 *
	 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
	 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
	 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
	 * a Hot-Join Request has been made.
	 *
	 * If the IBIWON check is missed, wrong data will be returned. When IBIWON happens,
	 * return a failure and yield to the event handlers above.
	 */
	if (SVC_I3C_MSTATUS_IBIWON(reg)) {
		ret = -EAGAIN;
		*actual_len = 0;
		goto emit_stop;
	}

	if (rnw)
		ret = svc_i3c_master_read(master, in, xfer_len);
	else
		ret = svc_i3c_master_write(master, out, xfer_len);
	if (ret < 0)
		goto emit_stop;

	if (rnw)
		*actual_len = ret;

	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
	if (ret)
		goto emit_stop;

	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);

	if (!continued) {
		svc_i3c_master_emit_stop(master);

		/* Wait idle if stop is sent. */
		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
	}

	return 0;

emit_stop:
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_clear_merrwarn(master);

	return ret;
}

static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
{
	struct svc_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
{
	kfree(xfer);
}

static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
					       struct svc_i3c_xfer *xfer)
{
	if (master->xferqueue.cur == xfer)
		master->xferqueue.cur = NULL;
	else
		list_del_init(&xfer->node);
}

static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
					struct svc_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	svc_i3c_master_dequeue_xfer_locked(master, xfer);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
{
	struct svc_i3c_xfer *xfer = master->xferqueue.cur;
	int ret, i;

	if (!xfer)
		return;

	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_flush_fifo(master);

	for (i = 0; i < xfer->ncmds; i++) {
		struct svc_i3c_cmd *cmd = &xfer->cmds[i];

		ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
					  cmd->addr, cmd->in, cmd->out,
					  cmd->len, &cmd->actual_len,
					  cmd->continued);
		/* cmd->xfer is NULL if I2C or CCC transfer */
		if (cmd->xfer)
			cmd->xfer->actual_len = cmd->actual_len;

		if (ret)
			break;
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	if (ret < 0)
		svc_i3c_master_dequeue_xfer_locked(master, xfer);

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct svc_i3c_xfer,
					node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	svc_i3c_master_start_xfer_locked(master);
}

static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
					struct svc_i3c_xfer *xfer)
{
	unsigned long flags;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return;
	}

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		svc_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
}

static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
				const struct i3c_ccc_cmd *cmd)
{
	/* No software support for CCC commands targeting more than one slave */
	return (cmd->ndests == 1);
}

static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
					      struct i3c_ccc_cmd *ccc)
{
	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
	struct svc_i3c_xfer *xfer;
	struct svc_i3c_cmd *cmd;
	u8 *buf;
	int ret;

	xfer = svc_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	buf = kmalloc(xfer_len, GFP_KERNEL);
	if (!buf) {
		svc_i3c_master_free_xfer(xfer);
		return -ENOMEM;
	}

	buf[0] = ccc->id;
	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	cmd = &xfer->cmds[0];
	cmd->addr = ccc->dests[0].addr;
	cmd->rnw = ccc->rnw;
	cmd->in = NULL;
	cmd->out = buf;
	cmd->len = xfer_len;
	cmd->actual_len = 0;
	cmd->continued = false;

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	ret = xfer->ret;
	kfree(buf);
	svc_i3c_master_free_xfer(xfer);

	return ret;
}

static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
					      struct i3c_ccc_cmd *ccc)
{
	unsigned int xfer_len = ccc->dests[0].payload.len;
	unsigned int actual_len = ccc->rnw ? xfer_len : 0;
	struct svc_i3c_xfer *xfer;
	struct svc_i3c_cmd *cmd;
	int ret;

	xfer = svc_i3c_master_alloc_xfer(master, 2);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	/* Broadcasted message */
	cmd = &xfer->cmds[0];
	cmd->addr = I3C_BROADCAST_ADDR;
	cmd->rnw = 0;
	cmd->in = NULL;
	cmd->out = &ccc->id;
	cmd->len = 1;
	cmd->actual_len = 0;
	cmd->continued = true;

	/* Directed message */
	cmd = &xfer->cmds[1];
	cmd->addr = ccc->dests[0].addr;
	cmd->rnw = ccc->rnw;
	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
	cmd->len = xfer_len;
	cmd->actual_len = actual_len;
	cmd->continued = false;

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	if (cmd->actual_len != xfer_len)
		ccc->dests[0].payload.len = cmd->actual_len;

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}

static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
				       struct i3c_ccc_cmd *cmd)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	bool broadcast = cmd->id < 0x80;
	int ret;

	if (broadcast)
		ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
	else
		ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);

	if (ret)
		cmd->err = I3C_ERROR_M2;

	return ret;
}

  1265. static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
  1266. struct i3c_priv_xfer *xfers,
  1267. int nxfers)
  1268. {
  1269. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  1270. struct svc_i3c_master *master = to_svc_i3c_master(m);
  1271. struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  1272. struct svc_i3c_xfer *xfer;
  1273. int ret, i;
  1274. xfer = svc_i3c_master_alloc_xfer(master, nxfers);
  1275. if (!xfer)
  1276. return -ENOMEM;
  1277. xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
  1278. for (i = 0; i < nxfers; i++) {
  1279. struct svc_i3c_cmd *cmd = &xfer->cmds[i];
  1280. cmd->xfer = &xfers[i];
  1281. cmd->addr = master->addrs[data->index];
  1282. cmd->rnw = xfers[i].rnw;
  1283. cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
  1284. cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
  1285. cmd->len = xfers[i].len;
  1286. cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
  1287. cmd->continued = (i + 1) < nxfers;
  1288. }
  1289. mutex_lock(&master->lock);
  1290. svc_i3c_master_enqueue_xfer(master, xfer);
  1291. if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
  1292. svc_i3c_master_dequeue_xfer(master, xfer);
  1293. mutex_unlock(&master->lock);
  1294. ret = xfer->ret;
  1295. svc_i3c_master_free_xfer(xfer);
  1296. return ret;
  1297. }
static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				    const struct i2c_msg *xfers,
				    int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct svc_i3c_xfer *xfer;
	int ret, i;

	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I2C;

	for (i = 0; i < nxfers; i++) {
		struct svc_i3c_cmd *cmd = &xfer->cmds[i];

		cmd->addr = master->addrs[data->index];
		cmd->rnw = xfers[i].flags & I2C_M_RD;
		cmd->in = cmd->rnw ? xfers[i].buf : NULL;
		cmd->out = cmd->rnw ? NULL : xfers[i].buf;
		cmd->len = xfers[i].len;
		cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
		cmd->continued = (i + 1 < nxfers);
	}

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}

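/*
 * Reserve an IBI slot for @dev: the maximum payload must fit in the FIFO,
 * and a generic IBI pool is allocated before claiming a free slot under
 * the ibi lock.
 */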
static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				      const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	unsigned int i;

	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
		dev_err(master->dev, "IBI max payload %d should be < %d\n",
			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
		return -ERANGE;
	}

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	spin_lock_irqsave(&master->ibi.lock, flags);
	for (i = 0; i < master->ibi.num_slots; i++) {
		if (!master->ibi.slots[i]) {
			data->ibi = i;
			master->ibi.slots[i] = dev;
			break;
		}
	}
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	if (i < master->ibi.num_slots)
		return 0;

	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;

	return -ENOSPC;
}

static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	spin_lock_irqsave(&master->ibi.lock, flags);
	master->ibi.slots[data->ibi] = NULL;
	data->ibi = -1;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
}

static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	master->enabled_events++;
	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);

	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	master->enabled_events--;
	if (!master->enabled_events)
		svc_i3c_master_disable_interrupts(master);

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}

static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;

	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);

	return 0;
}

static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;

	if (!master->enabled_events)
		svc_i3c_master_disable_interrupts(master);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return 0;
}

static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					    struct i3c_ibi_slot *slot)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}

static const struct i3c_master_controller_ops svc_i3c_master_ops = {
	.bus_init = svc_i3c_master_bus_init,
	.bus_cleanup = svc_i3c_master_bus_cleanup,
	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
	.do_daa = svc_i3c_master_do_daa,
	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
	.priv_xfers = svc_i3c_master_priv_xfers,
	.i2c_xfers = svc_i3c_master_i2c_xfers,
	.request_ibi = svc_i3c_master_request_ibi,
	.free_ibi = svc_i3c_master_free_ibi,
	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
	.enable_ibi = svc_i3c_master_enable_ibi,
	.disable_ibi = svc_i3c_master_disable_ibi,
	.enable_hotjoin = svc_i3c_master_enable_hotjoin,
	.disable_hotjoin = svc_i3c_master_disable_hotjoin,
	.set_speed = svc_i3c_master_set_speed,
};

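/* Enable pclk, fclk and sclk in order, rolling back on any failure. */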
static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
{
	int ret = 0;

	ret = clk_prepare_enable(master->pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(master->fclk);
	if (ret) {
		clk_disable_unprepare(master->pclk);
		return ret;
	}

	ret = clk_prepare_enable(master->sclk);
	if (ret) {
		clk_disable_unprepare(master->pclk);
		clk_disable_unprepare(master->fclk);
		return ret;
	}

	return 0;
}

static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
{
	clk_disable_unprepare(master->pclk);
	clk_disable_unprepare(master->fclk);
	clk_disable_unprepare(master->sclk);
}

static int svc_i3c_master_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct svc_i3c_master *master;
	int ret;

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(master->pclk))
		return PTR_ERR(master->pclk);

	master->fclk = devm_clk_get(dev, "fast_clk");
	if (IS_ERR(master->fclk))
		return PTR_ERR(master->fclk);

	master->sclk = devm_clk_get(dev, "slow_clk");
	if (IS_ERR(master->sclk))
		return PTR_ERR(master->sclk);

	master->irq = platform_get_irq(pdev, 0);
	if (master->irq < 0)
		return master->irq;

	master->dev = dev;

	ret = svc_i3c_master_prepare_clks(master);
	if (ret)
		return ret;

	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
	mutex_init(&master->lock);

	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
	if (ret)
		goto err_disable_clks;

	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	spin_lock_init(&master->ibi.lock);
	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
					 sizeof(*master->ibi.slots),
					 GFP_KERNEL);
	if (!master->ibi.slots) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	platform_set_drvdata(pdev, master);

	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	svc_i3c_master_reset(master);

	/* Register the master */
	ret = i3c_master_register(&master->base, &pdev->dev,
				  &svc_i3c_master_ops, false);
	if (ret)
		goto rpm_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

rpm_disable:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

err_disable_clks:
	svc_i3c_master_unprepare_clks(master);

	return ret;
}

static void svc_i3c_master_remove(struct platform_device *pdev)
{
	struct svc_i3c_master *master = platform_get_drvdata(pdev);

	cancel_work_sync(&master->hj_work);
	i3c_master_unregister(&master->base);

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static void svc_i3c_save_regs(struct svc_i3c_master *master)
{
	master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
	master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
}

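/*
 * Only rewrite MCONFIG/MDYNADDR if MDYNADDR no longer matches the saved
 * value, i.e. the controller lost its configuration across the suspend.
 */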
static void svc_i3c_restore_regs(struct svc_i3c_master *master)
{
	if (readl(master->regs + SVC_I3C_MDYNADDR) !=
	    master->saved_regs.mdynaddr) {
		writel(master->saved_regs.mconfig,
		       master->regs + SVC_I3C_MCONFIG);
		writel(master->saved_regs.mdynaddr,
		       master->regs + SVC_I3C_MDYNADDR);
	}
}

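/*
 * Runtime PM: save the registers, gate the clocks and move the pins to
 * their sleep state on suspend; undo the sequence in reverse on resume.
 */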
static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
{
	struct svc_i3c_master *master = dev_get_drvdata(dev);

	svc_i3c_save_regs(master);
	svc_i3c_master_unprepare_clks(master);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
{
	struct svc_i3c_master *master = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);
	svc_i3c_master_prepare_clks(master);

	svc_i3c_restore_regs(master);

	return 0;
}

static const struct dev_pm_ops svc_i3c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
			   svc_i3c_runtime_resume, NULL)
};

static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
	{ .compatible = "silvaco,i3c-master-v1"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);

static struct platform_driver svc_i3c_master = {
	.probe = svc_i3c_master_probe,
	.remove_new = svc_i3c_master_remove,
	.driver = {
		.name = "silvaco-i3c-master",
		.of_match_table = svc_i3c_master_of_match_tbl,
		.pm = &svc_i3c_pm_ops,
	},
};
module_platform_driver(svc_i3c_master);

MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
MODULE_LICENSE("GPL v2");