dw-i3c-master.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
  4. *
  5. * Author: Vitor Soares <vitor.soares@synopsys.com>
  6. */
  7. #include <linux/bitops.h>
  8. #include <linux/clk.h>
  9. #include <linux/completion.h>
  10. #include <linux/err.h>
  11. #include <linux/errno.h>
  12. #include <linux/i3c/master.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/ioport.h>
  15. #include <linux/iopoll.h>
  16. #include <linux/list.h>
  17. #include <linux/module.h>
  18. #include <linux/of.h>
  19. #include <linux/pinctrl/consumer.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/pm_runtime.h>
  22. #include <linux/reset.h>
  23. #include <linux/slab.h>
  24. #include "dw-i3c-master.h"
/*
 * Register map and bitfield helpers for the Synopsys DesignWare I3C master.
 * Offsets are byte offsets from the controller's MMIO base.
 */

/* DEVICE_CTRL: global controller enable/resume and bus-mode knobs. */
#define DEVICE_CTRL 0x0
#define DEV_CTRL_ENABLE BIT(31)
#define DEV_CTRL_RESUME BIT(30)
#define DEV_CTRL_HOT_JOIN_NACK BIT(8)
#define DEV_CTRL_I2C_SLAVE_PRESENT BIT(7)

/* DEVICE_ADDR: the master's own dynamic address on the bus. */
#define DEVICE_ADDR 0x4
#define DEV_ADDR_DYNAMIC_ADDR_VALID BIT(31)
#define DEV_ADDR_DYNAMIC(x) (((x) << 16) & GENMASK(22, 16))

#define HW_CAPABILITY 0x8

/*
 * COMMAND_QUEUE_PORT: write-only FIFO port; each command is two 32-bit
 * words (argument word, then command word).
 */
#define COMMAND_QUEUE_PORT 0xc
#define COMMAND_PORT_TOC BIT(30)
#define COMMAND_PORT_READ_TRANSFER BIT(28)
#define COMMAND_PORT_SDAP BIT(27)
#define COMMAND_PORT_ROC BIT(26)
#define COMMAND_PORT_SPEED(x) (((x) << 21) & GENMASK(23, 21))
#define COMMAND_PORT_DEV_INDEX(x) (((x) << 16) & GENMASK(20, 16))
#define COMMAND_PORT_CP BIT(15)
#define COMMAND_PORT_CMD(x) (((x) << 7) & GENMASK(14, 7))
#define COMMAND_PORT_TID(x) (((x) << 3) & GENMASK(6, 3))

/* Argument-word layouts selected by the low attribute bits. */
#define COMMAND_PORT_ARG_DATA_LEN(x) (((x) << 16) & GENMASK(31, 16))
#define COMMAND_PORT_ARG_DATA_LEN_MAX 65536
#define COMMAND_PORT_TRANSFER_ARG 0x01
#define COMMAND_PORT_SDA_DATA_BYTE_3(x) (((x) << 24) & GENMASK(31, 24))
#define COMMAND_PORT_SDA_DATA_BYTE_2(x) (((x) << 16) & GENMASK(23, 16))
#define COMMAND_PORT_SDA_DATA_BYTE_1(x) (((x) << 8) & GENMASK(15, 8))
#define COMMAND_PORT_SDA_BYTE_STRB_3 BIT(5)
#define COMMAND_PORT_SDA_BYTE_STRB_2 BIT(4)
#define COMMAND_PORT_SDA_BYTE_STRB_1 BIT(3)
#define COMMAND_PORT_SHORT_DATA_ARG 0x02
#define COMMAND_PORT_DEV_COUNT(x) (((x) << 21) & GENMASK(25, 21))
#define COMMAND_PORT_ADDR_ASSGN_CMD 0x03

/* RESPONSE_QUEUE_PORT: read-only FIFO of per-command completion words. */
#define RESPONSE_QUEUE_PORT 0x10
#define RESPONSE_PORT_ERR_STATUS(x) (((x) & GENMASK(31, 28)) >> 28)
#define RESPONSE_NO_ERROR 0
#define RESPONSE_ERROR_CRC 1
#define RESPONSE_ERROR_PARITY 2
#define RESPONSE_ERROR_FRAME 3
#define RESPONSE_ERROR_IBA_NACK 4
#define RESPONSE_ERROR_ADDRESS_NACK 5
#define RESPONSE_ERROR_OVER_UNDER_FLOW 6
#define RESPONSE_ERROR_TRANSF_ABORT 8
#define RESPONSE_ERROR_I2C_W_NACK_ERR 9
#define RESPONSE_PORT_TID(x) (((x) & GENMASK(27, 24)) >> 24)
#define RESPONSE_PORT_DATA_LEN(x) ((x) & GENMASK(15, 0))

/* Shared TX/RX data FIFO port. */
#define RX_TX_DATA_PORT 0x14

/* IBI status queue: ID field encodes address (bits 7:1) and RnW (bit 0). */
#define IBI_QUEUE_STATUS 0x18
#define IBI_QUEUE_STATUS_IBI_ID(x) (((x) & GENMASK(15, 8)) >> 8)
#define IBI_QUEUE_STATUS_DATA_LEN(x) ((x) & GENMASK(7, 0))
#define IBI_QUEUE_IBI_ADDR(x) (IBI_QUEUE_STATUS_IBI_ID(x) >> 1)
#define IBI_QUEUE_IBI_RNW(x) (IBI_QUEUE_STATUS_IBI_ID(x) & BIT(0))
/* Classify an IBI status word: mastership request, hot-join, or slave IRQ. */
#define IBI_TYPE_MR(x) \
	((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x))
#define IBI_TYPE_HJ(x) \
	((IBI_QUEUE_IBI_ADDR(x) == I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x))
#define IBI_TYPE_SIRQ(x) \
	((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && IBI_QUEUE_IBI_RNW(x))

/* Queue interrupt thresholds (values are "N entries", stored as N-1). */
#define QUEUE_THLD_CTRL 0x1c
#define QUEUE_THLD_CTRL_IBI_STAT_MASK GENMASK(31, 24)
#define QUEUE_THLD_CTRL_IBI_STAT(x) (((x) - 1) << 24)
#define QUEUE_THLD_CTRL_IBI_DATA_MASK GENMASK(20, 16)
#define QUEUE_THLD_CTRL_IBI_DATA(x) ((x) << 16)
#define QUEUE_THLD_CTRL_RESP_BUF_MASK GENMASK(15, 8)
#define QUEUE_THLD_CTRL_RESP_BUF(x) (((x) - 1) << 8)

#define DATA_BUFFER_THLD_CTRL 0x20
#define DATA_BUFFER_THLD_CTRL_RX_BUF GENMASK(11, 8)

/* IBI accept/reject control (one bit per address slot). */
#define IBI_QUEUE_CTRL 0x24
#define IBI_MR_REQ_REJECT 0x2C
#define IBI_SIR_REQ_REJECT 0x30
#define IBI_REQ_REJECT_ALL GENMASK(31, 0)

/* RESET_CTRL: self-clearing soft-reset bits for queues/FIFOs. */
#define RESET_CTRL 0x34
#define RESET_CTRL_IBI_QUEUE BIT(5)
#define RESET_CTRL_RX_FIFO BIT(4)
#define RESET_CTRL_TX_FIFO BIT(3)
#define RESET_CTRL_RESP_QUEUE BIT(2)
#define RESET_CTRL_CMD_QUEUE BIT(1)
#define RESET_CTRL_SOFT BIT(0)

#define SLV_EVENT_CTRL 0x38

/* Interrupt status/enable/signal/force registers and their bits. */
#define INTR_STATUS 0x3c
#define INTR_STATUS_EN 0x40
#define INTR_SIGNAL_EN 0x44
#define INTR_FORCE 0x48
#define INTR_BUSOWNER_UPDATE_STAT BIT(13)
#define INTR_IBI_UPDATED_STAT BIT(12)
#define INTR_READ_REQ_RECV_STAT BIT(11)
#define INTR_DEFSLV_STAT BIT(10)
#define INTR_TRANSFER_ERR_STAT BIT(9)
#define INTR_DYN_ADDR_ASSGN_STAT BIT(8)
#define INTR_CCC_UPDATED_STAT BIT(6)
#define INTR_TRANSFER_ABORT_STAT BIT(5)
#define INTR_RESP_READY_STAT BIT(4)
#define INTR_CMD_QUEUE_READY_STAT BIT(3)
#define INTR_IBI_THLD_STAT BIT(2)
#define INTR_RX_THLD_STAT BIT(1)
#define INTR_TX_THLD_STAT BIT(0)
#define INTR_ALL (INTR_BUSOWNER_UPDATE_STAT | \
	INTR_IBI_UPDATED_STAT | \
	INTR_READ_REQ_RECV_STAT | \
	INTR_DEFSLV_STAT | \
	INTR_TRANSFER_ERR_STAT | \
	INTR_DYN_ADDR_ASSGN_STAT | \
	INTR_CCC_UPDATED_STAT | \
	INTR_TRANSFER_ABORT_STAT | \
	INTR_RESP_READY_STAT | \
	INTR_CMD_QUEUE_READY_STAT | \
	INTR_IBI_THLD_STAT | \
	INTR_TX_THLD_STAT | \
	INTR_RX_THLD_STAT)
/* The subset actually enabled while operating as bus master. */
#define INTR_MASTER_MASK (INTR_TRANSFER_ERR_STAT | \
	INTR_RESP_READY_STAT)

/* Live fill levels of the command/response/IBI queues. */
#define QUEUE_STATUS_LEVEL 0x4c
#define QUEUE_STATUS_IBI_STATUS_CNT(x) (((x) & GENMASK(28, 24)) >> 24)
#define QUEUE_STATUS_IBI_BUF_BLR(x) (((x) & GENMASK(23, 16)) >> 16)
#define QUEUE_STATUS_LEVEL_RESP(x) (((x) & GENMASK(15, 8)) >> 8)
#define QUEUE_STATUS_LEVEL_CMD(x) ((x) & GENMASK(7, 0))

#define DATA_BUFFER_STATUS_LEVEL 0x50
#define DATA_BUFFER_STATUS_LEVEL_TX(x) ((x) & GENMASK(7, 0))

#define PRESENT_STATE 0x54
#define CCC_DEVICE_STATUS 0x58

/* Device Address Table (DAT) location/size inside the register space. */
#define DEVICE_ADDR_TABLE_POINTER 0x5c
#define DEVICE_ADDR_TABLE_DEPTH(x) (((x) & GENMASK(31, 16)) >> 16)
#define DEVICE_ADDR_TABLE_ADDR(x) ((x) & GENMASK(7, 0))

#define DEV_CHAR_TABLE_POINTER 0x60
#define VENDOR_SPECIFIC_REG_POINTER 0x6c
#define SLV_PID_VALUE 0x74
#define SLV_CHAR_CTRL 0x78
#define SLV_MAX_LEN 0x7c
#define MAX_READ_TURNAROUND 0x80
#define MAX_DATA_SPEED 0x84
#define SLV_DEBUG_STATUS 0x88
#define SLV_INTR_REQ 0x8c
#define DEVICE_CTRL_EXTENDED 0xb0

/* SCL timing: I3C open-drain / push-pull high and low counts. */
#define SCL_I3C_OD_TIMING 0xb4
#define SCL_I3C_PP_TIMING 0xb8
#define SCL_I3C_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
#define SCL_I3C_TIMING_LCNT(x) ((x) & GENMASK(7, 0))
#define SCL_I3C_TIMING_CNT_MIN 5

/* SCL timing: legacy I2C Fast Mode and Fast Mode Plus. */
#define SCL_I2C_FM_TIMING 0xbc
#define SCL_I2C_FM_TIMING_HCNT(x) (((x) << 16) & GENMASK(31, 16))
#define SCL_I2C_FM_TIMING_LCNT(x) ((x) & GENMASK(15, 0))
#define SCL_I2C_FMP_TIMING 0xc0
#define SCL_I2C_FMP_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
#define SCL_I2C_FMP_TIMING_LCNT(x) ((x) & GENMASK(15, 0))

/* Extended low counts for the four SDR speed grades. */
#define SCL_EXT_LCNT_TIMING 0xc8
#define SCL_EXT_LCNT_4(x) (((x) << 24) & GENMASK(31, 24))
#define SCL_EXT_LCNT_3(x) (((x) << 16) & GENMASK(23, 16))
#define SCL_EXT_LCNT_2(x) (((x) << 8) & GENMASK(15, 8))
#define SCL_EXT_LCNT_1(x) ((x) & GENMASK(7, 0))
#define SCL_EXT_TERMN_LCNT_TIMING 0xcc

#define BUS_FREE_TIMING 0xd4
#define BUS_I3C_MST_FREE(x) ((x) & GENMASK(15, 0))
#define BUS_IDLE_TIMING 0xd8
#define I3C_VER_ID 0xe0
#define I3C_VER_TYPE 0xe4
#define EXTENDED_CAPABILITY 0xe8
#define SLAVE_CONFIG 0xec

/* Per-entry flags/fields of a Device Address Table slot. */
#define DEV_ADDR_TABLE_IBI_MDB BIT(12)
#define DEV_ADDR_TABLE_SIR_REJECT BIT(13)
#define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31)
#define DEV_ADDR_TABLE_DYNAMIC_ADDR(x) (((x) << 16) & GENMASK(23, 16))
#define DEV_ADDR_TABLE_STATIC_ADDR(x) ((x) & GENMASK(6, 0))
/* DAT slots are 4 bytes apart starting at the DAT base offset. */
#define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2))

/* SDR data-rate grades (Hz) used to derive SCL_EXT_LCNT_* counts. */
#define I3C_BUS_SDR1_SCL_RATE 8000000
#define I3C_BUS_SDR2_SCL_RATE 6000000
#define I3C_BUS_SDR3_SCL_RATE 4000000
#define I3C_BUS_SDR4_SCL_RATE 2000000
/* Minimum tLOW for I2C FM/FM+ and maximum I3C tHIGH, in nanoseconds. */
#define I3C_BUS_I2C_FM_TLOW_MIN_NS 1300
#define I3C_BUS_I2C_FMP_TLOW_MIN_NS 500
#define I3C_BUS_THIGH_MAX_NS 41

/* Software timeouts: per-transfer completion wait and runtime-PM idle. */
#define XFER_TIMEOUT (msecs_to_jiffies(1000))
#define RPM_AUTOSUSPEND_TIMEOUT 1000 /* ms */
/*
 * struct dw_i3c_cmd - software image of one queued command and its response.
 * @cmd_lo: COMMAND_PORT command word
 * @cmd_hi: COMMAND_PORT argument word (written to the queue before @cmd_lo)
 * @tx_len: number of bytes staged from @tx_buf into the TX FIFO
 * @tx_buf: payload pushed to the TX FIFO before the command is queued
 * @rx_len: expected length, overwritten with the actual length from the
 *          response word
 * @rx_buf: destination for data drained from the RX FIFO
 * @error: RESPONSE_PORT_ERR_STATUS() field extracted from the response
 */
struct dw_i3c_cmd {
	u32 cmd_lo;
	u32 cmd_hi;
	u16 tx_len;
	const void *tx_buf;
	u16 rx_len;
	void *rx_buf;
	u8 error;
};

/*
 * struct dw_i3c_xfer - one transfer of @ncmds hardware commands.
 * @node: link in the master's pending-transfer list
 * @comp: completed when all responses have been collected
 * @ret: overall result; preallocated as -ETIMEDOUT so an abandoned
 *       transfer reports a timeout
 * @ncmds: number of entries in @cmds
 * @cmds: trailing flexible array of commands
 */
struct dw_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int ncmds;
	struct dw_i3c_cmd cmds[] __counted_by(ncmds);
};

/*
 * struct dw_i3c_i2c_dev_data - per-device bookkeeping attached to each
 * I3C/I2C device on the bus.
 * @index: this device's slot in the Device Address Table
 * @ibi_pool: slot pool for incoming IBI payloads (NULL until IBIs are used)
 */
struct dw_i3c_i2c_dev_data {
	u8 index;
	struct i3c_generic_ibi_pool *ibi_pool;
};
  215. static u8 even_parity(u8 p)
  216. {
  217. p ^= p >> 4;
  218. p &= 0xf;
  219. return (0x9669 >> p) & 1;
  220. }
/*
 * Report which CCC (Common Command Code) frames this controller supports.
 * Only single-destination commands can be expressed in one command-queue
 * entry, so multi-destination requests are refused up front.
 */
static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
					   const struct i3c_ccc_cmd *cmd)
{
	/* One command word addresses exactly one DAT slot. */
	if (cmd->ndests > 1)
		return false;

	switch (cmd->id) {
	case I3C_CCC_ENEC(true):
	case I3C_CCC_ENEC(false):
	case I3C_CCC_DISEC(true):
	case I3C_CCC_DISEC(false):
	case I3C_CCC_ENTAS(0, true):
	case I3C_CCC_ENTAS(0, false):
	case I3C_CCC_RSTDAA(true):
	case I3C_CCC_RSTDAA(false):
	case I3C_CCC_ENTDAA:
	case I3C_CCC_SETMWL(true):
	case I3C_CCC_SETMWL(false):
	case I3C_CCC_SETMRL(true):
	case I3C_CCC_SETMRL(false):
	case I3C_CCC_ENTHDR(0):
	case I3C_CCC_SETDASA:
	case I3C_CCC_SETNEWDA:
	case I3C_CCC_GETMWL:
	case I3C_CCC_GETMRL:
	case I3C_CCC_GETPID:
	case I3C_CCC_GETBCR:
	case I3C_CCC_GETDCR:
	case I3C_CCC_GETSTATUS:
	case I3C_CCC_GETMXDS:
	case I3C_CCC_GETHDRCAP:
		return true;
	default:
		return false;
	}
}
/* Convert the generic i3c controller handle back to the driver structure. */
static inline struct dw_i3c_master *
to_dw_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct dw_i3c_master, base);
}
  261. static void dw_i3c_master_disable(struct dw_i3c_master *master)
  262. {
  263. writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
  264. master->regs + DEVICE_CTRL);
  265. }
/*
 * Enable the controller. Hot-Join requests are always NACKed (the driver
 * does not support Hot-Join yet), and the I2C-slave-present bit is kept in
 * sync with the flag cached when dw_i2c_clk_cfg() ran.
 */
static void dw_i3c_master_enable(struct dw_i3c_master *master)
{
	u32 dev_ctrl;

	dev_ctrl = readl(master->regs + DEVICE_CTRL);
	/* For now don't support Hot-Join */
	dev_ctrl |= DEV_CTRL_HOT_JOIN_NACK;
	if (master->i2c_slv_prsnt)
		dev_ctrl |= DEV_CTRL_I2C_SLAVE_PRESENT;
	writel(dev_ctrl | DEV_CTRL_ENABLE,
	       master->regs + DEVICE_CTRL);
}
  277. static int dw_i3c_master_get_addr_pos(struct dw_i3c_master *master, u8 addr)
  278. {
  279. int pos;
  280. for (pos = 0; pos < master->maxdevs; pos++) {
  281. if (addr == master->devs[pos].addr)
  282. return pos;
  283. }
  284. return -EINVAL;
  285. }
/*
 * Return the lowest free Device Address Table slot, or -ENOSPC when all
 * @maxdevs slots are in use. @free_pos holds one set bit per free slot.
 */
static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
{
	if (!(master->free_pos & GENMASK(master->maxdevs - 1, 0)))
		return -ENOSPC;

	return ffs(master->free_pos) - 1;
}
/*
 * Push @nbytes of @bytes into the TX data FIFO. The FIFO port is 32 bits
 * wide, so whole words are streamed first and any 1-3 byte tail is
 * zero-padded into a final word.
 */
static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
				     const u8 *bytes, int nbytes)
{
	writesl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = 0;

		/* Pack the leftover bytes into one padded word. */
		memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
		writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
	}
}
/*
 * Drain @nbytes from the 32-bit FIFO port at @reg into @bytes. A partial
 * trailing word is read in full and only the needed bytes are copied out.
 */
static void dw_i3c_master_read_fifo(struct dw_i3c_master *master,
				    int reg, u8 *bytes, int nbytes)
{
	readsl(master->regs + reg, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp;

		readsl(master->regs + reg, &tmp, 1);
		memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
	}
}
  312. static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
  313. u8 *bytes, int nbytes)
  314. {
  315. return dw_i3c_master_read_fifo(master, RX_TX_DATA_PORT, bytes, nbytes);
  316. }
  317. static void dw_i3c_master_read_ibi_fifo(struct dw_i3c_master *master,
  318. u8 *bytes, int nbytes)
  319. {
  320. return dw_i3c_master_read_fifo(master, IBI_QUEUE_STATUS, bytes, nbytes);
  321. }
/*
 * Allocate a transfer with room for @ncmds commands.
 *
 * @ret is preset to -ETIMEDOUT so that a transfer which is dequeued before
 * any response arrives reports a timeout by default. Returns NULL on
 * allocation failure.
 */
static struct dw_i3c_xfer *
dw_i3c_master_alloc_xfer(struct dw_i3c_master *master, unsigned int ncmds)
{
	struct dw_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}
/* Release a transfer previously obtained from dw_i3c_master_alloc_xfer(). */
static void dw_i3c_master_free_xfer(struct dw_i3c_xfer *xfer)
{
	kfree(xfer);
}
/*
 * Kick off the current transfer. Caller must hold xferqueue.lock.
 *
 * TX data is staged into the FIFO first, the response-queue interrupt
 * threshold is set to fire once all ncmds responses are in, and only then
 * are the command words queued (argument word cmd_hi before command word
 * cmd_lo, matching the order the hardware consumes them).
 */
static void dw_i3c_master_start_xfer_locked(struct dw_i3c_master *master)
{
	struct dw_i3c_xfer *xfer = master->xferqueue.cur;
	unsigned int i;
	u32 thld_ctrl;

	if (!xfer)
		return;

	/* Stage all TX payloads before any command becomes visible. */
	for (i = 0; i < xfer->ncmds; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
	}

	/* Interrupt only after every command in this batch has responded. */
	thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
	thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
	thld_ctrl |= QUEUE_THLD_CTRL_RESP_BUF(xfer->ncmds);
	writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);

	for (i = 0; i < xfer->ncmds; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		writel(cmd->cmd_hi, master->regs + COMMAND_QUEUE_PORT);
		writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
	}
}
/*
 * Queue @xfer for execution. If no transfer is in flight it becomes the
 * current one and starts immediately; otherwise it waits on the pending
 * list and is picked up by dw_i3c_master_end_xfer_locked().
 */
static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
				       struct dw_i3c_xfer *xfer)
{
	unsigned long flags;

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		dw_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
/*
 * Remove @xfer from the queue. Caller must hold xferqueue.lock.
 *
 * If @xfer is the transfer currently in flight, the command/response
 * queues and data FIFOs are reset to abort it, polling until the
 * self-clearing reset bits read back as zero. Otherwise the transfer is
 * simply unlinked from the pending list.
 */
static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
					      struct dw_i3c_xfer *xfer)
{
	if (master->xferqueue.cur == xfer) {
		u32 status;

		master->xferqueue.cur = NULL;

		writel(RESET_CTRL_RX_FIFO | RESET_CTRL_TX_FIFO |
		       RESET_CTRL_RESP_QUEUE | RESET_CTRL_CMD_QUEUE,
		       master->regs + RESET_CTRL);

		/* Atomic variant: we may be called from IRQ context. */
		readl_poll_timeout_atomic(master->regs + RESET_CTRL, status,
					  !status, 10, 1000000);
	} else {
		list_del_init(&xfer->node);
	}
}
/* Locked wrapper around dw_i3c_master_dequeue_xfer_locked(). */
static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
				       struct dw_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	dw_i3c_master_dequeue_xfer_locked(master, xfer);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
/*
 * Complete the current transfer from IRQ context (xferqueue.lock held).
 *
 * Drains every pending response word, matching each to its command via the
 * TID field, pulls any RX data, then maps the per-command hardware error
 * codes onto a single errno for the whole transfer. On failure the
 * controller is told to RESUME so it leaves its halted state. Finally the
 * next pending transfer (if any) is promoted and started.
 *
 * @isr is the interrupt status that triggered us; it is currently unused
 * here.
 */
static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
{
	struct dw_i3c_xfer *xfer = master->xferqueue.cur;
	int i, ret = 0;
	u32 nresp;

	if (!xfer)
		return;

	nresp = readl(master->regs + QUEUE_STATUS_LEVEL);
	nresp = QUEUE_STATUS_LEVEL_RESP(nresp);

	for (i = 0; i < nresp; i++) {
		struct dw_i3c_cmd *cmd;
		u32 resp;

		resp = readl(master->regs + RESPONSE_QUEUE_PORT);

		/* Responses may complete out of order; TID indexes cmds[]. */
		cmd = &xfer->cmds[RESPONSE_PORT_TID(resp)];
		cmd->rx_len = RESPONSE_PORT_DATA_LEN(resp);
		cmd->error = RESPONSE_PORT_ERR_STATUS(resp);
		if (cmd->rx_len && !cmd->error)
			dw_i3c_master_read_rx_fifo(master, cmd->rx_buf,
						   cmd->rx_len);
	}

	/* Collapse the per-command errors into one errno for the transfer. */
	for (i = 0; i < nresp; i++) {
		switch (xfer->cmds[i].error) {
		case RESPONSE_NO_ERROR:
			break;
		case RESPONSE_ERROR_PARITY:
		case RESPONSE_ERROR_IBA_NACK:
		case RESPONSE_ERROR_TRANSF_ABORT:
		case RESPONSE_ERROR_CRC:
		case RESPONSE_ERROR_FRAME:
			ret = -EIO;
			break;
		case RESPONSE_ERROR_OVER_UNDER_FLOW:
			ret = -ENOSPC;
			break;
		case RESPONSE_ERROR_I2C_W_NACK_ERR:
		case RESPONSE_ERROR_ADDRESS_NACK:
		default:
			ret = -EINVAL;
			break;
		}
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	if (ret < 0) {
		/* Abort leftovers and un-halt the controller. */
		dw_i3c_master_dequeue_xfer_locked(master, xfer);
		writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
		       master->regs + DEVICE_CTRL);
	}

	/* Promote and start the next queued transfer, if any. */
	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct dw_i3c_xfer,
					node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	dw_i3c_master_start_xfer_locked(master);
}
/*
 * Program threshold and interrupt registers for master operation:
 * interrupt on the first IBI status entry, clear all stale interrupt
 * status, enable only the master-mode interrupts (transfer error and
 * response ready), and reject all SIR and MR in-band interrupts until a
 * device explicitly enables them.
 */
static void dw_i3c_master_set_intr_regs(struct dw_i3c_master *master)
{
	u32 thld_ctrl;

	thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
	thld_ctrl &= ~(QUEUE_THLD_CTRL_RESP_BUF_MASK |
		       QUEUE_THLD_CTRL_IBI_STAT_MASK |
		       QUEUE_THLD_CTRL_IBI_DATA_MASK);
	thld_ctrl |= QUEUE_THLD_CTRL_IBI_STAT(1) |
		QUEUE_THLD_CTRL_IBI_DATA(31);
	writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);

	/* RX threshold of 0: interrupt as soon as any data arrives. */
	thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
	thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
	writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);

	writel(INTR_ALL, master->regs + INTR_STATUS);
	writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
	writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);

	/* Cache the SIR reject mask so IBI enable/disable can update it. */
	master->sir_rej_mask = IBI_REQ_REJECT_ALL;
	writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT);
	writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
}
/*
 * Derive the I3C SCL timings (push-pull, open-drain and the four SDR
 * extended low counts) from the core clock rate and write them to the
 * controller, caching each value for restore after suspend.
 *
 * Returns 0 on success, -EINVAL when the core clock rate is unknown.
 */
static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
{
	unsigned long core_rate, core_period;
	u32 scl_timing;
	u8 hcnt, lcnt;

	core_rate = clk_get_rate(master->core_clk);
	if (!core_rate)
		return -EINVAL;

	/* Core clock period in ns, rounded up. */
	core_period = DIV_ROUND_UP(1000000000, core_rate);

	/* High phase bounded by tHIGH max; both counts have a hw minimum. */
	hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
	if (hcnt < SCL_I3C_TIMING_CNT_MIN)
		hcnt = SCL_I3C_TIMING_CNT_MIN;

	/* Low phase makes up the rest of the requested i3c SCL period. */
	lcnt = DIV_ROUND_UP(core_rate, master->base.bus.scl_rate.i3c) - hcnt;
	if (lcnt < SCL_I3C_TIMING_CNT_MIN)
		lcnt = SCL_I3C_TIMING_CNT_MIN;

	scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
	master->i3c_pp_timing = scl_timing;

	/*
	 * In pure i3c mode, MST_FREE represents tCAS. In shared mode, this
	 * will be set up by dw_i2c_clk_cfg as tLOW.
	 */
	if (master->base.bus.mode == I3C_BUS_MODE_PURE) {
		writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
		master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);
	}

	/* Open-drain low must also satisfy the spec's tLOW_OD minimum. */
	lcnt = max_t(u8,
		     DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period), lcnt);
	scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
	master->i3c_od_timing = scl_timing;

	/* Extended low counts stretch SCL low for SDR1..SDR4 data rates. */
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
	scl_timing = SCL_EXT_LCNT_1(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR2_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_2(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR3_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_3(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR4_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_4(lcnt);
	writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);
	master->ext_lcnt_timing = scl_timing;

	return 0;
}
/*
 * Derive the legacy-I2C SCL timings (Fast Mode and Fast Mode Plus) from
 * the core clock, program the bus-free time as the FM tLOW, and flag that
 * I2C devices are present on the bus. Values are cached for restore after
 * suspend.
 *
 * Returns 0 on success, -EINVAL when the core clock rate is unknown.
 */
static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
{
	unsigned long core_rate, core_period;
	u16 hcnt, lcnt;
	u32 scl_timing;

	core_rate = clk_get_rate(master->core_clk);
	if (!core_rate)
		return -EINVAL;

	/* Core clock period in ns, rounded up. */
	core_period = DIV_ROUND_UP(1000000000, core_rate);

	/* FM+: low phase fixed at its spec minimum, high takes the rest. */
	lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
	hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
	scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
		     SCL_I2C_FMP_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
	master->i2c_fmp_timing = scl_timing;

	/* FM: same scheme with the Fast Mode minimum tLOW and rate. */
	lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
	hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
	scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
		     SCL_I2C_FM_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
	master->i2c_fm_timing = scl_timing;

	/* In shared mode the bus-free time is the (larger) FM tLOW. */
	writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
	master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);

	writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
	       master->regs + DEVICE_CTRL);
	master->i2c_slv_prsnt = true;

	return 0;
}
/*
 * Bring the bus up: resume the device via runtime PM, run the platform
 * init hook, configure clocks according to the bus mode (mixed modes get
 * both I2C and I3C timings), reserve a dynamic address for the master
 * itself, publish it to the core, then set up interrupts and enable the
 * controller.
 *
 * Returns 0 on success or a negative errno; the runtime-PM reference is
 * always dropped before returning.
 */
static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(m);
	struct i3c_device_info info = { };
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	ret = master->platform_ops->init(master);
	if (ret)
		goto rpm_out;

	switch (bus->mode) {
	case I3C_BUS_MODE_MIXED_FAST:
	case I3C_BUS_MODE_MIXED_LIMITED:
		/* Mixed buses need the I2C timings as well. */
		ret = dw_i2c_clk_cfg(master);
		if (ret)
			goto rpm_out;
		fallthrough;
	case I3C_BUS_MODE_PURE:
		ret = dw_i3c_clk_cfg(master);
		if (ret)
			goto rpm_out;
		break;
	default:
		ret = -EINVAL;
		goto rpm_out;
	}

	/* Claim a dynamic address for the master itself. */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		goto rpm_out;

	writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
	       master->regs + DEVICE_ADDR);
	master->dev_addr = ret;
	memset(&info, 0, sizeof(info));
	info.dyn_addr = ret;

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		goto rpm_out;

	dw_i3c_master_set_intr_regs(master);
	dw_i3c_master_enable(master);

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return ret;
}
/* Bus teardown hook: simply take the controller off the bus. */
static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	dw_i3c_master_disable(master);
}
/*
 * Send a write-direction CCC frame. Directed commands look up the target's
 * DAT slot first; broadcast commands use slot 0. The transfer waits up to
 * XFER_TIMEOUT and is force-dequeued on timeout, in which case the
 * preallocated -ETIMEDOUT is returned. An IBA NACK is translated to the
 * core's I3C_ERROR_M2.
 */
static int dw_i3c_ccc_set(struct dw_i3c_master *master,
			  struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_xfer *xfer;
	struct dw_i3c_cmd *cmd;
	int ret, pos = 0;

	if (ccc->id & I3C_CCC_DIRECT) {
		pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
		if (pos < 0)
			return pos;
	}

	xfer = dw_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	cmd = xfer->cmds;
	cmd->tx_buf = ccc->dests[0].payload.data;
	cmd->tx_len = ccc->dests[0].payload.len;

	cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
		      COMMAND_PORT_TRANSFER_ARG;

	cmd->cmd_lo = COMMAND_PORT_CP |
		      COMMAND_PORT_DEV_INDEX(pos) |
		      COMMAND_PORT_CMD(ccc->id) |
		      COMMAND_PORT_TOC |
		      COMMAND_PORT_ROC;

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
		ccc->err = I3C_ERROR_M2;

	dw_i3c_master_free_xfer(xfer);

	return ret;
}
/*
 * Send a read-direction (GET-type) CCC frame to one target and collect the
 * reply into the caller's payload buffer. Same timeout/dequeue and IBA
 * NACK handling as dw_i3c_ccc_set(); GET commands are always directed, so
 * the DAT lookup is unconditional.
 */
static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_xfer *xfer;
	struct dw_i3c_cmd *cmd;
	int ret, pos;

	pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
	if (pos < 0)
		return pos;

	xfer = dw_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	cmd = xfer->cmds;
	cmd->rx_buf = ccc->dests[0].payload.data;
	cmd->rx_len = ccc->dests[0].payload.len;

	cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
		      COMMAND_PORT_TRANSFER_ARG;

	cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
		      COMMAND_PORT_CP |
		      COMMAND_PORT_DEV_INDEX(pos) |
		      COMMAND_PORT_CMD(ccc->id) |
		      COMMAND_PORT_TOC |
		      COMMAND_PORT_ROC;

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
		ccc->err = I3C_ERROR_M2;

	dw_i3c_master_free_xfer(xfer);

	return ret;
}
/*
 * Core callback for sending a CCC frame. ENTDAA is rejected here because
 * dynamic address assignment goes through the dedicated DAA path. The
 * device is held active via runtime PM for the duration, then the command
 * is dispatched to the GET or SET helper based on its read/write flag.
 */
static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
				      struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	int ret = 0;

	if (ccc->id == I3C_CCC_ENTDAA)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	if (ccc->rnw)
		ret = dw_i3c_ccc_get(master, ccc);
	else
		ret = dw_i3c_ccc_set(master, ccc);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
	return ret;
}
/*
 * dw_i3c_master_daa() - perform Dynamic Address Assignment (ENTDAA).
 *
 * Pre-loads every free Device Address Table (DAT) slot with a free dynamic
 * address (plus parity bit), then issues a single ENTDAA address-assignment
 * command covering all free slots. Devices that took an address are derived
 * from how many slots the command left unused (cmd->rx_len) and registered
 * with the core.
 *
 * Returns 0 on success or a negative errno.
 */
static int dw_i3c_master_daa(struct i3c_master_controller *m)
{
        struct dw_i3c_master *master = to_dw_i3c_master(m);
        struct dw_i3c_xfer *xfer;
        struct dw_i3c_cmd *cmd;
        u32 olddevs, newdevs;
        u8 p, last_addr = 0;
        int ret, pos;

        ret = pm_runtime_resume_and_get(master->dev);
        if (ret < 0) {
                dev_err(master->dev,
                        "<%s> cannot resume i3c bus master, err: %d\n",
                        __func__, ret);
                return ret;
        }

        /* Bit set in olddevs == DAT slot already occupied before DAA. */
        olddevs = ~(master->free_pos);

        /* Prepare DAT before launching DAA. */
        for (pos = 0; pos < master->maxdevs; pos++) {
                if (olddevs & BIT(pos))
                        continue;

                ret = i3c_master_get_free_addr(m, last_addr + 1);
                if (ret < 0) {
                        ret = -ENOSPC;
                        goto rpm_out;
                }
                master->devs[pos].addr = ret;
                /* Hardware expects the address with an even-parity bit in bit 7. */
                p = even_parity(ret);
                last_addr = ret;
                ret |= (p << 7);

                writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
                       master->regs +
                       DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
                ret = 0;
        }

        xfer = dw_i3c_master_alloc_xfer(master, 1);
        if (!xfer) {
                ret = -ENOMEM;
                goto rpm_out;
        }

        /* First free slot is where ENTDAA starts assigning from. */
        pos = dw_i3c_master_get_free_pos(master);
        if (pos < 0) {
                dw_i3c_master_free_xfer(xfer);
                ret = pos;
                goto rpm_out;
        }
        cmd = &xfer->cmds[0];
        cmd->cmd_hi = 0x1;
        cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) |
                      COMMAND_PORT_DEV_INDEX(pos) |
                      COMMAND_PORT_CMD(I3C_CCC_ENTDAA) |
                      COMMAND_PORT_ADDR_ASSGN_CMD |
                      COMMAND_PORT_TOC |
                      COMMAND_PORT_ROC;

        dw_i3c_master_enqueue_xfer(master, xfer);
        if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
                dw_i3c_master_dequeue_xfer(master, xfer);

        /*
         * rx_len holds the number of slots ENTDAA did NOT consume; the
         * remaining low bits therefore cover every slot that was available,
         * and masking out olddevs leaves only the newly-assigned devices.
         */
        newdevs = GENMASK(master->maxdevs - cmd->rx_len - 1, 0);
        newdevs &= ~olddevs;

        for (pos = 0; pos < master->maxdevs; pos++) {
                if (newdevs & BIT(pos))
                        i3c_master_add_i3c_dev_locked(m, master->devs[pos].addr);
        }

        dw_i3c_master_free_xfer(xfer);

rpm_out:
        pm_runtime_mark_last_busy(master->dev);
        pm_runtime_put_autosuspend(master->dev);
        return ret;
}
  752. static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
  753. struct i3c_priv_xfer *i3c_xfers,
  754. int i3c_nxfers)
  755. {
  756. struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  757. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  758. struct dw_i3c_master *master = to_dw_i3c_master(m);
  759. unsigned int nrxwords = 0, ntxwords = 0;
  760. struct dw_i3c_xfer *xfer;
  761. int i, ret = 0;
  762. if (!i3c_nxfers)
  763. return 0;
  764. if (i3c_nxfers > master->caps.cmdfifodepth)
  765. return -ENOTSUPP;
  766. for (i = 0; i < i3c_nxfers; i++) {
  767. if (i3c_xfers[i].rnw)
  768. nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
  769. else
  770. ntxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
  771. }
  772. if (ntxwords > master->caps.datafifodepth ||
  773. nrxwords > master->caps.datafifodepth)
  774. return -ENOTSUPP;
  775. xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
  776. if (!xfer)
  777. return -ENOMEM;
  778. ret = pm_runtime_resume_and_get(master->dev);
  779. if (ret < 0) {
  780. dev_err(master->dev,
  781. "<%s> cannot resume i3c bus master, err: %d\n",
  782. __func__, ret);
  783. return ret;
  784. }
  785. for (i = 0; i < i3c_nxfers; i++) {
  786. struct dw_i3c_cmd *cmd = &xfer->cmds[i];
  787. cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i3c_xfers[i].len) |
  788. COMMAND_PORT_TRANSFER_ARG;
  789. if (i3c_xfers[i].rnw) {
  790. cmd->rx_buf = i3c_xfers[i].data.in;
  791. cmd->rx_len = i3c_xfers[i].len;
  792. cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
  793. COMMAND_PORT_SPEED(dev->info.max_read_ds);
  794. } else {
  795. cmd->tx_buf = i3c_xfers[i].data.out;
  796. cmd->tx_len = i3c_xfers[i].len;
  797. cmd->cmd_lo =
  798. COMMAND_PORT_SPEED(dev->info.max_write_ds);
  799. }
  800. cmd->cmd_lo |= COMMAND_PORT_TID(i) |
  801. COMMAND_PORT_DEV_INDEX(data->index) |
  802. COMMAND_PORT_ROC;
  803. if (i == (i3c_nxfers - 1))
  804. cmd->cmd_lo |= COMMAND_PORT_TOC;
  805. }
  806. dw_i3c_master_enqueue_xfer(master, xfer);
  807. if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
  808. dw_i3c_master_dequeue_xfer(master, xfer);
  809. for (i = 0; i < i3c_nxfers; i++) {
  810. struct dw_i3c_cmd *cmd = &xfer->cmds[i];
  811. if (i3c_xfers[i].rnw)
  812. i3c_xfers[i].len = cmd->rx_len;
  813. }
  814. ret = xfer->ret;
  815. dw_i3c_master_free_xfer(xfer);
  816. pm_runtime_mark_last_busy(master->dev);
  817. pm_runtime_put_autosuspend(master->dev);
  818. return ret;
  819. }
/*
 * dw_i3c_master_reattach_i3c_dev() - update the DAT after a device's dynamic
 * address changed.
 *
 * If a lower free Device Address Table slot is available, the device is
 * migrated into it (old slot cleared and returned to the free mask); in any
 * case the (possibly new) slot is programmed with the new dynamic address
 * and the shadow devs[] entry is kept in sync.
 *
 * Always returns 0.
 */
static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
                                          u8 old_dyn_addr)
{
        struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct dw_i3c_master *master = to_dw_i3c_master(m);
        int pos;

        pos = dw_i3c_master_get_free_pos(master);

        /* Compact toward lower slots when a better position is free. */
        if (data->index > pos && pos > 0) {
                writel(0,
                       master->regs +
                       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

                master->devs[data->index].addr = 0;
                master->free_pos |= BIT(data->index);

                data->index = pos;
                master->devs[pos].addr = dev->info.dyn_addr;
                master->free_pos &= ~BIT(pos);
        }

        writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
               master->regs +
               DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

        master->devs[data->index].addr = dev->info.dyn_addr;

        return 0;
}
/*
 * dw_i3c_master_attach_i3c_dev() - claim a DAT slot for a new I3C device.
 *
 * Allocates the per-device bookkeeping data, reserves a free Device Address
 * Table slot, and programs it with the device's dynamic address (falling
 * back to its static address when no dynamic address is set yet).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the negative
 * value from dw_i3c_master_get_free_pos() when the table is full.
 */
static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct dw_i3c_master *master = to_dw_i3c_master(m);
        struct dw_i3c_i2c_dev_data *data;
        int pos;

        pos = dw_i3c_master_get_free_pos(master);
        if (pos < 0)
                return pos;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->index = pos;
        /* Prefer the dynamic address; fall back to the static one. */
        master->devs[pos].addr = dev->info.dyn_addr ? : dev->info.static_addr;
        master->free_pos &= ~BIT(pos);
        i3c_dev_set_master_data(dev, data);

        writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr),
               master->regs +
               DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

        return 0;
}
/*
 * dw_i3c_master_detach_i3c_dev() - release an I3C device's DAT slot.
 *
 * Clears the device's Device Address Table entry, returns the slot to the
 * free mask and frees the per-device bookkeeping data.
 */
static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
        struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct dw_i3c_master *master = to_dw_i3c_master(m);

        writel(0,
               master->regs +
               DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

        i3c_dev_set_master_data(dev, NULL);
        master->devs[data->index].addr = 0;
        master->free_pos |= BIT(data->index);
        kfree(data);
}
  878. static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
  879. const struct i2c_msg *i2c_xfers,
  880. int i2c_nxfers)
  881. {
  882. struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
  883. struct i3c_master_controller *m = i2c_dev_get_master(dev);
  884. struct dw_i3c_master *master = to_dw_i3c_master(m);
  885. unsigned int nrxwords = 0, ntxwords = 0;
  886. struct dw_i3c_xfer *xfer;
  887. int i, ret = 0;
  888. if (!i2c_nxfers)
  889. return 0;
  890. if (i2c_nxfers > master->caps.cmdfifodepth)
  891. return -ENOTSUPP;
  892. for (i = 0; i < i2c_nxfers; i++) {
  893. if (i2c_xfers[i].flags & I2C_M_RD)
  894. nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
  895. else
  896. ntxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
  897. }
  898. if (ntxwords > master->caps.datafifodepth ||
  899. nrxwords > master->caps.datafifodepth)
  900. return -ENOTSUPP;
  901. xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers);
  902. if (!xfer)
  903. return -ENOMEM;
  904. ret = pm_runtime_resume_and_get(master->dev);
  905. if (ret < 0) {
  906. dev_err(master->dev,
  907. "<%s> cannot resume i3c bus master, err: %d\n",
  908. __func__, ret);
  909. return ret;
  910. }
  911. for (i = 0; i < i2c_nxfers; i++) {
  912. struct dw_i3c_cmd *cmd = &xfer->cmds[i];
  913. cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i2c_xfers[i].len) |
  914. COMMAND_PORT_TRANSFER_ARG;
  915. cmd->cmd_lo = COMMAND_PORT_TID(i) |
  916. COMMAND_PORT_DEV_INDEX(data->index) |
  917. COMMAND_PORT_ROC;
  918. if (i2c_xfers[i].flags & I2C_M_RD) {
  919. cmd->cmd_lo |= COMMAND_PORT_READ_TRANSFER;
  920. cmd->rx_buf = i2c_xfers[i].buf;
  921. cmd->rx_len = i2c_xfers[i].len;
  922. } else {
  923. cmd->tx_buf = i2c_xfers[i].buf;
  924. cmd->tx_len = i2c_xfers[i].len;
  925. }
  926. if (i == (i2c_nxfers - 1))
  927. cmd->cmd_lo |= COMMAND_PORT_TOC;
  928. }
  929. dw_i3c_master_enqueue_xfer(master, xfer);
  930. if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
  931. dw_i3c_master_dequeue_xfer(master, xfer);
  932. ret = xfer->ret;
  933. dw_i3c_master_free_xfer(xfer);
  934. pm_runtime_mark_last_busy(master->dev);
  935. pm_runtime_put_autosuspend(master->dev);
  936. return ret;
  937. }
/*
 * dw_i3c_master_attach_i2c_dev() - claim a DAT slot for a legacy I2C device.
 *
 * Reserves a free Device Address Table slot and programs it with the
 * device's static address plus the legacy-I2C flag so the controller uses
 * I2C signalling for it.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the negative
 * value from dw_i3c_master_get_free_pos() when the table is full.
 */
static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
        struct i3c_master_controller *m = i2c_dev_get_master(dev);
        struct dw_i3c_master *master = to_dw_i3c_master(m);
        struct dw_i3c_i2c_dev_data *data;
        int pos;

        pos = dw_i3c_master_get_free_pos(master);
        if (pos < 0)
                return pos;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->index = pos;
        master->devs[pos].addr = dev->addr;
        /* Remembered so suspend/resume can restore the right DAT encoding. */
        master->devs[pos].is_i2c_addr = true;
        master->free_pos &= ~BIT(pos);
        i2c_dev_set_master_data(dev, data);

        writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV |
               DEV_ADDR_TABLE_STATIC_ADDR(dev->addr),
               master->regs +
               DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

        return 0;
}
/*
 * dw_i3c_master_detach_i2c_dev() - release a legacy I2C device's DAT slot.
 *
 * Clears the device's Device Address Table entry, returns the slot to the
 * free mask and frees the per-device bookkeeping data.
 */
static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
        struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
        struct i3c_master_controller *m = i2c_dev_get_master(dev);
        struct dw_i3c_master *master = to_dw_i3c_master(m);

        writel(0,
               master->regs +
               DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

        i2c_dev_set_master_data(dev, NULL);
        master->devs[data->index].addr = 0;
        master->free_pos |= BIT(data->index);
        kfree(data);
}
  974. static int dw_i3c_master_request_ibi(struct i3c_dev_desc *dev,
  975. const struct i3c_ibi_setup *req)
  976. {
  977. struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  978. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  979. struct dw_i3c_master *master = to_dw_i3c_master(m);
  980. unsigned long flags;
  981. data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
  982. if (IS_ERR(data->ibi_pool))
  983. return PTR_ERR(data->ibi_pool);
  984. spin_lock_irqsave(&master->devs_lock, flags);
  985. master->devs[data->index].ibi_dev = dev;
  986. spin_unlock_irqrestore(&master->devs_lock, flags);
  987. return 0;
  988. }
  989. static void dw_i3c_master_free_ibi(struct i3c_dev_desc *dev)
  990. {
  991. struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  992. struct i3c_master_controller *m = i3c_dev_get_master(dev);
  993. struct dw_i3c_master *master = to_dw_i3c_master(m);
  994. unsigned long flags;
  995. spin_lock_irqsave(&master->devs_lock, flags);
  996. master->devs[data->index].ibi_dev = NULL;
  997. spin_unlock_irqrestore(&master->devs_lock, flags);
  998. i3c_generic_ibi_free_pool(data->ibi_pool);
  999. data->ibi_pool = NULL;
  1000. }
  1001. static void dw_i3c_master_enable_sir_signal(struct dw_i3c_master *master, bool enable)
  1002. {
  1003. u32 reg;
  1004. reg = readl(master->regs + INTR_STATUS_EN);
  1005. reg &= ~INTR_IBI_THLD_STAT;
  1006. if (enable)
  1007. reg |= INTR_IBI_THLD_STAT;
  1008. writel(reg, master->regs + INTR_STATUS_EN);
  1009. reg = readl(master->regs + INTR_SIGNAL_EN);
  1010. reg &= ~INTR_IBI_THLD_STAT;
  1011. if (enable)
  1012. reg |= INTR_IBI_THLD_STAT;
  1013. writel(reg, master->regs + INTR_SIGNAL_EN);
  1014. }
/*
 * dw_i3c_master_set_sir_enabled() - accept or reject SIR IBIs from one slot.
 *
 * Updates the device's DAT entry (SIR reject flag, and the mandatory-byte
 * flag when the device's BCR advertises an IBI payload), lets the platform
 * hook adjust the entry, refreshes the global SIR-reject mask register,
 * and finally enables/disables the IBI interrupt when this transition
 * changes whether any IBI source (SIR or Hot-Join) is accepted at all.
 *
 * Runs entirely under devs_lock to serialize against the IBI IRQ path.
 */
static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
                                          struct i3c_dev_desc *dev,
                                          u8 idx, bool enable)
{
        unsigned long flags;
        u32 dat_entry, reg;
        bool global;

        dat_entry = DEV_ADDR_TABLE_LOC(master->datstartaddr, idx);

        spin_lock_irqsave(&master->devs_lock, flags);
        reg = readl(master->regs + dat_entry);
        if (enable) {
                reg &= ~DEV_ADDR_TABLE_SIR_REJECT;
                /* Device sends a mandatory data byte with its IBIs. */
                if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
                        reg |= DEV_ADDR_TABLE_IBI_MDB;
        } else {
                reg |= DEV_ADDR_TABLE_SIR_REJECT;
        }
        /* Give platform code a chance to tweak the DAT entry (errata etc.). */
        master->platform_ops->set_dat_ibi(master, dev, enable, &reg);
        writel(reg, master->regs + dat_entry);

        if (enable) {
                /* First accepted SIR source -> interrupt must be turned on. */
                global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL);
                master->sir_rej_mask &= ~BIT(idx);
        } else {
                bool hj_rejected = !!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_HOT_JOIN_NACK);

                master->sir_rej_mask |= BIT(idx);
                /*
                 * Only disable the interrupt when no SIR source remains AND
                 * Hot-Join is NACKed too.
                 */
                global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL) && hj_rejected;
        }
        writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT);

        if (global)
                dw_i3c_master_enable_sir_signal(master, enable);

        spin_unlock_irqrestore(&master->devs_lock, flags);
}
/*
 * dw_i3c_master_enable_hotjoin() - start ACKing Hot-Join requests.
 *
 * Enables the IBI interrupt and clears the Hot-Join NACK bit in
 * DEVICE_CTRL.
 *
 * NOTE(review): the runtime-PM reference taken here is deliberately NOT
 * released on success — the controller must stay powered to observe
 * Hot-Join requests. The matching put is in
 * dw_i3c_master_disable_hotjoin().
 */
static int dw_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
{
        struct dw_i3c_master *master = to_dw_i3c_master(m);
        int ret;

        ret = pm_runtime_resume_and_get(master->dev);
        if (ret < 0) {
                dev_err(master->dev,
                        "<%s> cannot resume i3c bus master, err: %d\n",
                        __func__, ret);
                return ret;
        }

        dw_i3c_master_enable_sir_signal(master, true);
        writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_HOT_JOIN_NACK,
               master->regs + DEVICE_CTRL);

        return 0;
}
  1063. static int dw_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
  1064. {
  1065. struct dw_i3c_master *master = to_dw_i3c_master(m);
  1066. writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
  1067. master->regs + DEVICE_CTRL);
  1068. pm_runtime_mark_last_busy(master->dev);
  1069. pm_runtime_put_autosuspend(master->dev);
  1070. return 0;
  1071. }
/*
 * dw_i3c_master_enable_ibi() - enable SIR IBIs for one device.
 *
 * Programs the controller to accept the device's SIRs, then sends ENEC to
 * the device itself. On ENEC failure the controller-side enable is rolled
 * back and the runtime-PM reference dropped.
 *
 * NOTE(review): on success the PM reference is intentionally kept so the
 * controller stays powered to receive IBIs; dw_i3c_master_disable_ibi()
 * releases it.
 */
static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
        struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct dw_i3c_master *master = to_dw_i3c_master(m);
        int rc;

        rc = pm_runtime_resume_and_get(master->dev);
        if (rc < 0) {
                dev_err(master->dev,
                        "<%s> cannot resume i3c bus master, err: %d\n",
                        __func__, rc);
                return rc;
        }

        dw_i3c_master_set_sir_enabled(master, dev, data->index, true);

        rc = i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);

        if (rc) {
                /* Roll back the controller-side enable on ENEC failure. */
                dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
                pm_runtime_mark_last_busy(master->dev);
                pm_runtime_put_autosuspend(master->dev);
        }

        return rc;
}
/*
 * dw_i3c_master_disable_ibi() - disable SIR IBIs for one device.
 *
 * Sends DISEC to the device first; only if the device acknowledged is the
 * controller-side SIR acceptance turned off and the runtime-PM reference
 * held since dw_i3c_master_enable_ibi() released.
 */
static int dw_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
        struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct dw_i3c_master *master = to_dw_i3c_master(m);
        int rc;

        rc = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
        if (rc)
                return rc;

        dw_i3c_master_set_sir_enabled(master, dev, data->index, false);

        pm_runtime_mark_last_busy(master->dev);
        pm_runtime_put_autosuspend(master->dev);

        return 0;
}
  1108. static void dw_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
  1109. struct i3c_ibi_slot *slot)
  1110. {
  1111. struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
  1112. i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
  1113. }
  1114. static void dw_i3c_master_drain_ibi_queue(struct dw_i3c_master *master,
  1115. int len)
  1116. {
  1117. int i;
  1118. for (i = 0; i < DIV_ROUND_UP(len, 4); i++)
  1119. readl(master->regs + IBI_QUEUE_STATUS);
  1120. }
/*
 * dw_i3c_master_handle_ibi_sir() - deliver one SIR IBI to the i3c core.
 *
 * @status is the IBI queue status word already popped by the caller; it
 * carries the requester's address and payload length. The device is looked
 * up by address under devs_lock; if anything is wrong (unknown address, IBI
 * not requested, no free slot, payload too large) the payload is drained
 * from the FIFO and the event dropped, otherwise the payload is copied into
 * a slot and queued to the core.
 */
static void dw_i3c_master_handle_ibi_sir(struct dw_i3c_master *master,
                                         u32 status)
{
        struct dw_i3c_i2c_dev_data *data;
        struct i3c_ibi_slot *slot;
        struct i3c_dev_desc *dev;
        unsigned long flags;
        u8 addr, len;
        int idx;

        addr = IBI_QUEUE_IBI_ADDR(status);
        len = IBI_QUEUE_STATUS_DATA_LEN(status);

        /*
         * We be tempted to check the error status in bit 30; however, due
         * to the PEC errata workaround on some platform implementations (see
         * ast2600_i3c_set_dat_ibi()), those will almost always have a PEC
         * error on IBI payload data, as well as losing the last byte of
         * payload.
         *
         * If we implement error status checking on that bit, we may need
         * a new platform op to validate it.
         */

        spin_lock_irqsave(&master->devs_lock, flags);
        idx = dw_i3c_master_get_addr_pos(master, addr);
        if (idx < 0) {
                dev_dbg_ratelimited(&master->base.dev,
                         "IBI from unknown addr 0x%x\n", addr);
                goto err_drain;
        }

        dev = master->devs[idx].ibi_dev;
        if (!dev || !dev->ibi) {
                dev_dbg_ratelimited(&master->base.dev,
                         "IBI from non-requested dev idx %d\n", idx);
                goto err_drain;
        }

        data = i3c_dev_get_master_data(dev);
        slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
        if (!slot) {
                dev_dbg_ratelimited(&master->base.dev,
                                    "No IBI slots available\n");
                goto err_drain;
        }

        if (dev->ibi->max_payload_len < len) {
                dev_dbg_ratelimited(&master->base.dev,
                                    "IBI payload len %d greater than max %d\n",
                                    len, dev->ibi->max_payload_len);
                goto err_drain;
        }

        if (len) {
                dw_i3c_master_read_ibi_fifo(master, slot->data, len);
                slot->len = len;
        }
        i3c_master_queue_ibi(dev, slot);

        spin_unlock_irqrestore(&master->devs_lock, flags);

        return;

err_drain:
        /* Payload must still be consumed to keep the FIFO in sync. */
        dw_i3c_master_drain_ibi_queue(master, len);

        spin_unlock_irqrestore(&master->devs_lock, flags);
}
/* "ibis": referring to In-Band Interrupts, and not
 * https://en.wikipedia.org/wiki/Australian_white_ibis. The latter should
 * not be handled.
 */
/*
 * Pop every pending entry from the IBI status queue and dispatch by type:
 * SIRs are delivered to the core, Hot-Joins kick off a DAA via the
 * workqueue, and anything else is logged and its payload drained.
 */
static void dw_i3c_master_irq_handle_ibis(struct dw_i3c_master *master)
{
        unsigned int i, len, n_ibis;
        u32 reg;

        reg = readl(master->regs + QUEUE_STATUS_LEVEL);
        n_ibis = QUEUE_STATUS_IBI_STATUS_CNT(reg);
        if (!n_ibis)
                return;

        for (i = 0; i < n_ibis; i++) {
                /* Each read pops one status word off the IBI queue. */
                reg = readl(master->regs + IBI_QUEUE_STATUS);

                if (IBI_TYPE_SIRQ(reg)) {
                        dw_i3c_master_handle_ibi_sir(master, reg);
                } else if (IBI_TYPE_HJ(reg)) {
                        /* DAA can't run in IRQ context; defer to hj_work. */
                        queue_work(master->base.wq, &master->hj_work);
                } else {
                        len = IBI_QUEUE_STATUS_DATA_LEN(reg);
                        dev_info(&master->base.dev,
                                 "unsupported IBI type 0x%lx len %d\n",
                                 IBI_QUEUE_STATUS_IBI_ID(reg), len);
                        dw_i3c_master_drain_ibi_queue(master, len);
                }
        }
}
/*
 * dw_i3c_master_irq_handler() - top-level interrupt handler.
 *
 * Spurious interrupts (no enabled status bit set) are acked wholesale and
 * reported as IRQ_NONE. Otherwise transfer completion is processed under
 * the xfer-queue lock, transfer errors are acked, and pending IBIs are
 * handled outside the lock.
 */
static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
{
        struct dw_i3c_master *master = dev_id;
        u32 status;

        status = readl(master->regs + INTR_STATUS);

        if (!(status & readl(master->regs + INTR_STATUS_EN))) {
                /* Not ours / not enabled: clear everything and bail. */
                writel(INTR_ALL, master->regs + INTR_STATUS);
                return IRQ_NONE;
        }

        spin_lock(&master->xferqueue.lock);
        dw_i3c_master_end_xfer_locked(master, status);
        if (status & INTR_TRANSFER_ERR_STAT)
                writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
        spin_unlock(&master->xferqueue.lock);

        if (status & INTR_IBI_THLD_STAT)
                dw_i3c_master_irq_handle_ibis(master);

        return IRQ_HANDLED;
}
/* Controller operations handed to the i3c core at registration time. */
static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
        .bus_init = dw_i3c_master_bus_init,
        .bus_cleanup = dw_i3c_master_bus_cleanup,
        .attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
        .reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
        .detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
        .do_daa = dw_i3c_master_daa,
        .supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd,
        .send_ccc_cmd = dw_i3c_master_send_ccc_cmd,
        .priv_xfers = dw_i3c_master_priv_xfers,
        .attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
        .detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
        .i2c_xfers = dw_i3c_master_i2c_xfers,
        .request_ibi = dw_i3c_master_request_ibi,
        .free_ibi = dw_i3c_master_free_ibi,
        .enable_ibi = dw_i3c_master_enable_ibi,
        .disable_ibi = dw_i3c_master_disable_ibi,
        .recycle_ibi_slot = dw_i3c_master_recycle_ibi_slot,
        .enable_hotjoin = dw_i3c_master_enable_hotjoin,
        .disable_hotjoin = dw_i3c_master_disable_hotjoin,
};
/* default platform ops implementations */

/* No-op init used when a platform glue driver provides no hook. */
static int dw_i3c_platform_init_nop(struct dw_i3c_master *i3c)
{
        return 0;
}
/* No-op DAT/IBI tweak: the generic driver needs no platform adjustment. */
static void dw_i3c_platform_set_dat_ibi_nop(struct dw_i3c_master *i3c,
                                            struct i3c_dev_desc *dev,
                                            bool enable, u32 *dat)
{
}
/* Fallback ops installed when the probing driver sets none of its own. */
static const struct dw_i3c_platform_ops dw_i3c_platform_ops_default = {
        .init = dw_i3c_platform_init_nop,
        .set_dat_ibi = dw_i3c_platform_set_dat_ibi_nop,
};
  1259. static void dw_i3c_hj_work(struct work_struct *work)
  1260. {
  1261. struct dw_i3c_master *master =
  1262. container_of(work, typeof(*master), hj_work);
  1263. i3c_master_do_daa(&master->base);
  1264. }
  1265. int dw_i3c_common_probe(struct dw_i3c_master *master,
  1266. struct platform_device *pdev)
  1267. {
  1268. int ret, irq;
  1269. if (!master->platform_ops)
  1270. master->platform_ops = &dw_i3c_platform_ops_default;
  1271. master->dev = &pdev->dev;
  1272. master->regs = devm_platform_ioremap_resource(pdev, 0);
  1273. if (IS_ERR(master->regs))
  1274. return PTR_ERR(master->regs);
  1275. master->core_clk = devm_clk_get_enabled(&pdev->dev, NULL);
  1276. if (IS_ERR(master->core_clk))
  1277. return PTR_ERR(master->core_clk);
  1278. master->pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk");
  1279. if (IS_ERR(master->pclk))
  1280. return PTR_ERR(master->pclk);
  1281. master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
  1282. "core_rst");
  1283. if (IS_ERR(master->core_rst))
  1284. return PTR_ERR(master->core_rst);
  1285. reset_control_deassert(master->core_rst);
  1286. spin_lock_init(&master->xferqueue.lock);
  1287. INIT_LIST_HEAD(&master->xferqueue.list);
  1288. writel(INTR_ALL, master->regs + INTR_STATUS);
  1289. irq = platform_get_irq(pdev, 0);
  1290. ret = devm_request_irq(&pdev->dev, irq,
  1291. dw_i3c_master_irq_handler, 0,
  1292. dev_name(&pdev->dev), master);
  1293. if (ret)
  1294. goto err_assert_rst;
  1295. platform_set_drvdata(pdev, master);
  1296. pm_runtime_set_autosuspend_delay(&pdev->dev, RPM_AUTOSUSPEND_TIMEOUT);
  1297. pm_runtime_use_autosuspend(&pdev->dev);
  1298. pm_runtime_set_active(&pdev->dev);
  1299. pm_runtime_enable(&pdev->dev);
  1300. /* Information regarding the FIFOs/QUEUEs depth */
  1301. ret = readl(master->regs + QUEUE_STATUS_LEVEL);
  1302. master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret);
  1303. ret = readl(master->regs + DATA_BUFFER_STATUS_LEVEL);
  1304. master->caps.datafifodepth = DATA_BUFFER_STATUS_LEVEL_TX(ret);
  1305. ret = readl(master->regs + DEVICE_ADDR_TABLE_POINTER);
  1306. master->datstartaddr = ret;
  1307. master->maxdevs = ret >> 16;
  1308. master->free_pos = GENMASK(master->maxdevs - 1, 0);
  1309. INIT_WORK(&master->hj_work, dw_i3c_hj_work);
  1310. ret = i3c_master_register(&master->base, &pdev->dev,
  1311. &dw_mipi_i3c_ops, false);
  1312. if (ret)
  1313. goto err_disable_pm;
  1314. return 0;
  1315. err_disable_pm:
  1316. pm_runtime_disable(&pdev->dev);
  1317. pm_runtime_set_suspended(&pdev->dev);
  1318. pm_runtime_dont_use_autosuspend(&pdev->dev);
  1319. err_assert_rst:
  1320. reset_control_assert(master->core_rst);
  1321. return ret;
  1322. }
  1323. EXPORT_SYMBOL_GPL(dw_i3c_common_probe);
/*
 * dw_i3c_common_remove() - shared teardown for all dw-i3c based drivers.
 *
 * Stops any pending Hot-Join work before unregistering from the core, then
 * unwinds the runtime-PM setup done in dw_i3c_common_probe().
 */
void dw_i3c_common_remove(struct dw_i3c_master *master)
{
        cancel_work_sync(&master->hj_work);
        i3c_master_unregister(&master->base);

        pm_runtime_disable(master->dev);
        pm_runtime_set_suspended(master->dev);
        pm_runtime_dont_use_autosuspend(master->dev);
}
EXPORT_SYMBOL_GPL(dw_i3c_common_remove);
  1333. /* base platform implementation */
  1334. static int dw_i3c_probe(struct platform_device *pdev)
  1335. {
  1336. struct dw_i3c_master *master;
  1337. master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
  1338. if (!master)
  1339. return -ENOMEM;
  1340. return dw_i3c_common_probe(master, pdev);
  1341. }
  1342. static void dw_i3c_remove(struct platform_device *pdev)
  1343. {
  1344. struct dw_i3c_master *master = platform_get_drvdata(pdev);
  1345. dw_i3c_common_remove(master);
  1346. }
  1347. static void dw_i3c_master_restore_addrs(struct dw_i3c_master *master)
  1348. {
  1349. u32 pos, reg_val;
  1350. writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(master->dev_addr),
  1351. master->regs + DEVICE_ADDR);
  1352. for (pos = 0; pos < master->maxdevs; pos++) {
  1353. if (master->free_pos & BIT(pos))
  1354. continue;
  1355. if (master->devs[pos].is_i2c_addr)
  1356. reg_val = DEV_ADDR_TABLE_LEGACY_I2C_DEV |
  1357. DEV_ADDR_TABLE_STATIC_ADDR(master->devs[pos].addr);
  1358. else
  1359. reg_val = DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr);
  1360. writel(reg_val, master->regs + DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
  1361. }
  1362. }
/*
 * dw_i3c_master_restore_timing_regs() - rewrite cached SCL timing registers
 * after the controller lost state (runtime resume). The I2C timings are only
 * restored when I2C devices are present on the bus.
 */
static void dw_i3c_master_restore_timing_regs(struct dw_i3c_master *master)
{
        writel(master->i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING);
        writel(master->bus_free_timing, master->regs + BUS_FREE_TIMING);
        writel(master->i3c_od_timing, master->regs + SCL_I3C_OD_TIMING);
        writel(master->ext_lcnt_timing, master->regs + SCL_EXT_LCNT_TIMING);

        if (master->i2c_slv_prsnt) {
                writel(master->i2c_fmp_timing, master->regs + SCL_I2C_FMP_TIMING);
                writel(master->i2c_fm_timing, master->regs + SCL_I2C_FM_TIMING);
        }
}
  1374. static int dw_i3c_master_enable_clks(struct dw_i3c_master *master)
  1375. {
  1376. int ret = 0;
  1377. ret = clk_prepare_enable(master->core_clk);
  1378. if (ret)
  1379. return ret;
  1380. ret = clk_prepare_enable(master->pclk);
  1381. if (ret) {
  1382. clk_disable_unprepare(master->core_clk);
  1383. return ret;
  1384. }
  1385. return 0;
  1386. }
/* Disable clocks in reverse order of dw_i3c_master_enable_clks(). */
static inline void dw_i3c_master_disable_clks(struct dw_i3c_master *master)
{
        clk_disable_unprepare(master->pclk);
        clk_disable_unprepare(master->core_clk);
}
/*
 * Runtime suspend: quiesce the controller, assert reset, stop clocks and
 * switch pins to their sleep state — strictly in that order so the hardware
 * is idle before power is pulled.
 */
static int __maybe_unused dw_i3c_master_runtime_suspend(struct device *dev)
{
        struct dw_i3c_master *master = dev_get_drvdata(dev);

        dw_i3c_master_disable(master);

        reset_control_assert(master->core_rst);
        dw_i3c_master_disable_clks(master);
        pinctrl_pm_select_sleep_state(dev);
        return 0;
}
/*
 * Runtime resume: mirror of runtime_suspend — restore pins, clocks and
 * reset, then reprogram the register state (interrupts, timings, address
 * table) the controller lost while powered down, and finally re-enable it.
 */
static int __maybe_unused dw_i3c_master_runtime_resume(struct device *dev)
{
        struct dw_i3c_master *master = dev_get_drvdata(dev);

        pinctrl_pm_select_default_state(dev);
        dw_i3c_master_enable_clks(master);
        reset_control_deassert(master->core_rst);

        dw_i3c_master_set_intr_regs(master);
        dw_i3c_master_restore_timing_regs(master);
        dw_i3c_master_restore_addrs(master);

        dw_i3c_master_enable(master);
        return 0;
}
/* System sleep is routed through runtime PM via force_suspend/resume. */
static const struct dev_pm_ops dw_i3c_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(dw_i3c_master_runtime_suspend, dw_i3c_master_runtime_resume, NULL)
};
/* Devicetree match table for the generic DesignWare I3C IP. */
static const struct of_device_id dw_i3c_master_of_match[] = {
        { .compatible = "snps,dw-i3c-master-1.00a", },
        {},
};
MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);
/* Platform driver glue for the quirk-free DesignWare I3C controller. */
static struct platform_driver dw_i3c_driver = {
        .probe = dw_i3c_probe,
        .remove_new = dw_i3c_remove,
        .driver = {
                .name = "dw-i3c-master",
                .of_match_table = dw_i3c_master_of_match,
                .pm = &dw_i3c_pm_ops,
        },
};
module_platform_driver(dw_i3c_driver);

MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
MODULE_DESCRIPTION("DesignWare MIPI I3C driver");
MODULE_LICENSE("GPL v2");